Merge "Fix comparison operator signature"
diff --git a/WORKSPACE b/WORKSPACE
index d532887..0ab90fb 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -476,13 +476,6 @@
     url = "https://www.frc971.org/Build-Dependencies/gcc-arm-none-eabi-7-2018-q2-update-linux.tar.bz2",
 )
 
-http_archive(
-    name = "cgal_repo",
-    build_file = "@//debian:cgal.BUILD",
-    sha256 = "d564dda558570344b4caa66c5bae2cdae9ef68e07829d64f5651b25f2c6a0e9e",
-    url = "https://www.frc971.org/Build-Dependencies/cgal-dev-4.5-2.tar.gz",
-)
-
 # Java9 JDK.
 http_archive(
     name = "openjdk_linux_archive",
diff --git a/debian/cgal.BUILD b/debian/cgal.BUILD
deleted file mode 100644
index d41bccf..0000000
--- a/debian/cgal.BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-cc_library(
-    name = "cgal",
-    srcs = [
-        "usr/lib/libCGAL.so.10",
-        "usr/lib/x86_64-linux-gnu/libgmp.so.10.2.0",
-    ],
-    hdrs = glob([
-        "usr/include/**/*.h",
-        "usr/include/**/*.hpp",
-    ]),
-    includes = [
-        "usr/include",
-        "usr/include/x86_64-linux-gnu",
-    ],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/abseil/absl/functional/function_ref.h b/third_party/abseil/absl/functional/function_ref.h
index 6e03ac2..5790a65 100644
--- a/third_party/abseil/absl/functional/function_ref.h
+++ b/third_party/abseil/absl/functional/function_ref.h
@@ -122,6 +122,7 @@
   // To help prevent subtle lifetime bugs, FunctionRef is not assignable.
   // Typically, it should only be used as an argument type.
   FunctionRef& operator=(const FunctionRef& rhs) = delete;
+  FunctionRef(const FunctionRef& rhs) = default;
 
   // Call the underlying object.
   R operator()(Args... args) const {
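
Context for the abseil hunk above: absl::FunctionRef stays copy-constructible (the defaulted copy constructor added here) while copy assignment remains deleted, so it can be passed and forwarded by value as a non-owning parameter but never reseated after construction. Below is a minimal usage sketch of that behavior; the Apply helper and the surrounding program are illustrative examples, not part of abseil.

    #include <iostream>
    #include "absl/functional/function_ref.h"

    // Takes any callable matching int(int) without allocating. FunctionRef only
    // borrows the callable, so it must not outlive the argument it refers to.
    int Apply(absl::FunctionRef<int(int)> f, int x) {
      absl::FunctionRef<int(int)> copy = f;  // OK: copy construction is defaulted.
      // copy = f;                           // Would not compile: copy assignment is deleted.
      return copy(x);
    }

    int main() {
      int offset = 3;
      std::cout << Apply([offset](int v) { return v + offset; }, 4) << "\n";  // prints 7
      return 0;
    }
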
diff --git a/third_party/blasfeo/.gitignore b/third_party/blasfeo/.gitignore
deleted file mode 100644
index 1c36c52..0000000
--- a/third_party/blasfeo/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-*.swp
-*.s
-*.o
-*.out
-libblasfeo.a
-libblasfeo.so
-octave-workspace
-build/
diff --git a/third_party/blasfeo/BUILD b/third_party/blasfeo/BUILD
deleted file mode 100644
index 1b6f8c1..0000000
--- a/third_party/blasfeo/BUILD
+++ /dev/null
@@ -1,87 +0,0 @@
-licenses(["notice"])  # lgpl
-
-cc_library(
-    name = "blasfeo",
-    srcs = [
-        # ext
-        "auxiliary/d_aux_lib4.c",
-        "auxiliary/avx/kernel_dgecp_lib4.c",
-        "auxiliary/avx/kernel_dgetr_lib4.c",
-        "auxiliary/s_aux_lib8.c",
-        "auxiliary/m_aux_lib48.c",
-        # kernels
-        "kernel/avx/kernel_dgemm_8x4_lib4.S",
-        "kernel/avx/kernel_dgemm_4x4_lib4.S",
-        "kernel/avx/kernel_dgemm_diag_lib4.c",
-        "kernel/avx/kernel_dgemv_12_lib4.S",
-        "kernel/avx/kernel_dgemv_8_lib4.S",
-        "kernel/avx/kernel_dgemv_4_lib4.S",
-        "kernel/avx/kernel_dsymv_6_lib4.S",
-        "kernel/avx/kernel_dgetrf_pivot_4_lib4.c",
-        "kernel/avx/kernel_dgeqrf_4_lib4.c",
-        "kernel/avx/kernel_dgebp_lib4.S",
-        "kernel/avx/kernel_sgemm_16x4_lib8.S",
-        "kernel/avx/kernel_sgemm_8x8_lib8.S",
-        "kernel/avx/kernel_sgemm_8x4_lib8.S",
-        "kernel/avx/kernel_sgecp_lib8.S",
-        "kernel/avx/kernel_sgemm_diag_lib8.c",
-        "kernel/avx/kernel_sgetr_lib8.S",
-        "kernel/avx/kernel_sgead_lib8.S",
-        "kernel/avx/kernel_sgesc_lib8.S",
-        "kernel/avx/kernel_sgemv_8_lib8.S",
-        "kernel/avx/kernel_sgemv_4_lib8.S",
-        # blas
-        "blas/d_blas1_lib4.c",
-        "blas/d_blas2_lib4.c",
-        "blas/d_blas2_diag_lib.c",
-        "blas/d_blas3_lib4.c",
-        "blas/d_blas3_diag_lib4.c",
-        "blas/d_lapack_lib4.c",
-        "blas/s_blas1_lib8.c",
-        "blas/s_blas2_lib8.c",
-        "blas/s_blas2_diag_lib.c",
-        "blas/s_blas3_lib8.c",
-        "blas/s_blas3_diag_lib8.c",
-        "blas/s_lapack_lib8.c",
-        # ext_dep
-        "auxiliary/d_aux_ext_dep_lib.c",
-        "auxiliary/s_aux_ext_dep_lib.c",
-        "auxiliary/v_aux_ext_dep_lib.c",
-        "auxiliary/i_aux_ext_dep_lib.c",
-    ],
-    hdrs = [
-        "include/blasfeo_block_size.h",
-        "include/blasfeo_common.h",
-        "include/blasfeo_d_aux.h",
-        "include/blasfeo_d_aux_ext_dep.h",
-        "include/blasfeo_d_blas.h",
-        "include/blasfeo_d_kernel.h",
-        "include/blasfeo_i_aux_ext_dep.h",
-        "include/blasfeo_m_aux.h",
-        "include/blasfeo_s_aux.h",
-        "include/blasfeo_s_blas.h",
-        "include/blasfeo_s_kernel.h",
-        "include/blasfeo_target.h",
-        "include/blasfeo_v_aux_ext_dep.h",
-    ],
-    copts = [
-        "-mavx",
-        "-DTARGET_X64_INTEL_SANDY_BRIDGE",
-        "-DLA_HIGH_PERFORMANCE",
-        "-DOS_LINUX",
-        "-DEXT_DEP",
-        "-Wno-unused-variable",
-        "-Wno-uninitialized",
-        "-Wno-unused-parameter",
-        "-Wno-unused-label",
-        "-Wno-cast-align",
-    ],
-    includes = [
-        "include",
-    ],
-    target_compatible_with = ["@platforms//cpu:x86_64"],
-    textual_hdrs = [
-        "blas/x_blas2_diag_lib.c",
-    ],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/blasfeo/CMakeLists.txt b/third_party/blasfeo/CMakeLists.txt
deleted file mode 100644
index b7cfbf5..0000000
--- a/third_party/blasfeo/CMakeLists.txt
+++ /dev/null
@@ -1,611 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of HPIPM.                                                                     #
-#                                                                                                 #
-# HPIPM -- High Performance Interior Point Method.                                                #
-# Copyright (C) 2017 by Gianluca Frison.                                                          #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-cmake_minimum_required(VERSION 2.8.11)
-
-project(blasfeo)
-
-enable_language(C ASM)
-
-# Target architecture
-#set(TARGET X64_INTEL_HASWELL)
-set(TARGET X64_INTEL_SANDY_BRIDGE CACHE STRING "Target architecture")
-#set(TARGET X64_INTEL_CORE)
-#set(TARGET X64_AMD_BULLDOZER)
-#set(TARGET ARMV8A_ARM_CORTEX_A57)
-#set(TARGET ARMV7A_ARM_CORTEX_A15)
-#set(TARGET GENERIC)
-
-# Linear Algebra library
-set(LA HIGH_PERFORMANCE CACHE STRING "Linear algebra optimization level")
-#set(LA REFERENCE)
-#set(LA BLAS)
-
-# BLAS and LAPACK version (for LA=BLAS in BLASFEO)
-set(REF_BLAS 0 CACHE STRING "Reference blas to use")
-#set(REF_BLAS OPENBLAS)
-#set(REF_BLAS NETLIB)
-#set(REF_BLAS MKL)
-#set(REF_BLAS BLIS)
-#set(REF_BLAS ATLAS)
-
-# Compile auxiliary functions with external dependencies (for memory allocation and printing)
-set(EXT_DEP ON CACHE BOOL "Compile external dependencies in BLASFEO")
-
-configure_file(${PROJECT_SOURCE_DIR}/blasfeo_target.h.in
-	${CMAKE_CURRENT_SOURCE_DIR}/include/blasfeo_target.h @ONLY)
-
-# C Compiler
-# set(CC_COMPILER gcc CACHE STRING "compiler")
-#set(CC_COMPILER clang)
-#set(CC_COMPILER x86_64-w64-mingw32-gcc)
-
-# build shared library
-#set(BUILD_SHARED_LIBS ON CACHE STRING "Build shared libraries")
-
-# installation directory
-if(CMAKE_INSTALL_PREFIX MATCHES "/usr/local")
-	set(CMAKE_INSTALL_PREFIX "/opt/blasfeo")
-endif()
-
-# headers installation directory
-set(BLASFEO_HEADERS_INSTALLATION_DIRECTORY "include" CACHE STRING "Headers local installation directory")
-
-# Macro level (code size vs performance in assembly kernels): 0 (no macro), 1 (all macro but gemm kernel), 2 (all macro)
-set(MACRO_LEVEL 0)
-
-# enable runtine checks
-set(RUNTIME_CHECKS 0)
-#set(RUNTIME_CHECKS 0)
-
-# compiler flags
-if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-	set(CMAKE_C_FLAGS "")
-	set(CMAKE_ASM_FLAGS "")
-	set(CMAKE_C_FLAGS_RELEASE "")
-	set(CMAKE_ASM_FLAGS_RELEASE "")
-	# optimization flags
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
-	# debugging flags
-	#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g")
-	#set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -g")
-endif()
-
-# search directories
-#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I${BLASFEO_PATH}/include") XXX
-
-#
-if(${LA} MATCHES HIGH_PERFORMANCE)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DLA_HIGH_PERFORMANCE")
-endif()
-if(${LA} MATCHES REFERENCE)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DLA_REFERENCE")
-endif()
-if(${LA} MATCHES BLAS)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DLA_BLAS")
-endif()
-
-#
-if(${RUNTIME_CHECKS} MATCHES 1)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDIM_CHECK")
-endif()
-
-#
-if(${EXT_DEP})
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DEXT_DEP")
-endif()
-
-#
-if(${MACRO_LEVEL} MATCHES 1)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMACRO_LEVEL=1")
-endif()
-if(${MACRO_LEVEL} MATCHES 2)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMACRO_LEVEL=2")
-endif()
-
-#
-if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOS_LINUX")
-	set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -DOS_LINUX")
-endif()
-if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOS_MAC")
-	set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -DOS_MAC")
-endif()
-if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOS_WINDOWS")
-	set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -DOS_WINDOWS")
-endif()
-
-#
-if(${REF_BLAS} MATCHES 0)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ")
-endif(${REF_BLAS} MATCHES 0)
-if(${REF_BLAS} MATCHES OPENBLAS)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DREF_BLAS_OPENBLAS -I/opt/openblas/include")
-endif(${REF_BLAS} MATCHES OPENBLAS)
-if(${REF_BLAS} MATCHES BLIS)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DREF_BLAS_BLIS -std=c99")
-endif(${REF_BLAS} MATCHES BLIS)
-if(${REF_BLAS} MATCHES NETLIB)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DREF_BLAS_NETLIB")
-endif(${REF_BLAS} MATCHES NETLIB)
-if(${REF_BLAS} MATCHES MKL)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DREF_BLAS_MKL -m64 -I/opt/intel/mkl/include")
-endif(${REF_BLAS} MATCHES MKL)
-if(${REF_BLAS} MATCHES ATLAS)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DREF_BLAS_ATLAS")
-endif(${REF_BLAS} MATCHES ATLAS)
-
-# architecture-specific flags
-if(${TARGET} MATCHES X64_INTEL_HASWELL)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_X64_INTEL_HASWELL")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set()
-	endif()
-endif()
-
-if(${TARGET} MATCHES X64_INTEL_SANDY_BRIDGE)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_X64_INTEL_SANDY_BRIDGE")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m64 -mavx")
-	endif()
-endif()
-
-if(${TARGET} MATCHES X64_INTEL_CORE)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_X64_INTEL_CORE")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m64 -msse3")
-	endif()
-endif()
-
-if(${TARGET} MATCHES X64_AMD_BULLDOZER)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_X64_AMD_BULLDOZER")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m64 -mavx -mfma")
-	endif()
-endif()
-
-if(${TARGET} MATCHES ARMV8A_ARM_CORTEX_A57)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_ARMV8A_ARM_CORTEX_A57")
-	set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -DTARGET_ARMV8A_ARM_CORTEX_A57")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto+fp+simd")
-	endif()
-endif()
-
-if(${TARGET} MATCHES ARMV7A_ARM_CORTEX_A15)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_ARMV7A_ARM_CORTEX_A15")
-	set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -DTARGET_ARMV7A_ARM_CORTEX_A15")
-	if(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_C_COMPILER_ID MATCHES "Clang")
-		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -marm -mfloat-abi=hard -mfpu=neon-vfpv4 -mcpu=cortex-a15")
-		set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon-vfpv4")
-	endif()
-endif()
-
-if(${TARGET} MATCHES GENERIC)
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTARGET_GENERIC")
-endif()
-
-
-
-# source files
-
-if(${LA} MATCHES HIGH_PERFORMANCE)
-
-	if(${TARGET} MATCHES X64_INTEL_HASWELL)
-
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/avx/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/avx2/kernel_dgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib8.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib48.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgemm_12x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgemm_8x8_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgemm_8x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgemv_8_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemv_4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dsymv_6_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgebp_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_dgelqf_4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_sgemm_24x4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_sgemm_16x4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_sgemm_8x8_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx2/kernel_sgemm_8x4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemm_diag_lib8.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgead_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgecp_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgetr_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgesc_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemv_8_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemv_4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib8.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib8.c
-			)
-
-	endif(${TARGET} MATCHES X64_INTEL_HASWELL)
-
-	if(${TARGET} MATCHES X64_INTEL_SANDY_BRIDGE)
-
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/avx/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/avx/kernel_dgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib8.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib48.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemm_8x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemv_12_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemv_8_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgemv_4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dsymv_6_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgebp_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_dgelqf_4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemm_16x4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemm_8x8_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemm_8x4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemm_diag_lib8.c
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgead_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgecp_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgetr_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgesc_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemv_8_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/avx/kernel_sgemv_4_lib8.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib8.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib8.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib8.c
-			)
-
-	endif(${TARGET} MATCHES X64_INTEL_SANDY_BRIDGE)
-
-	if(${TARGET} MATCHES X64_INTEL_CORE)
-	
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgetr_lib4.c 
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_sgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib44.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/sse3/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgecp_lib4.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib4.c
-			)
-
-	endif(${TARGET} MATCHES X64_INTEL_CORE)
-
-	if(${TARGET} MATCHES X64_AMD_BULLDOZER)
-	
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgetr_lib4.c 
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_sgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib44.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/fma/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgecp_lib4.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib4.c
-			)
-
-	endif(${TARGET} MATCHES X64_AMD_BULLDOZER)
-
-	if(${TARGET} MATCHES ARMV8A_ARM_CORTEX_A57)
-	
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgetr_lib4.c 
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_sgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib44.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_dgemm_8x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_sgemm_16x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_sgemm_12x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_sgemm_8x8_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_sgemm_8x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv8a/kernel_sgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgecp_lib4.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib4.c
-			)
-
-	endif(${TARGET} MATCHES ARMV8A_ARM_CORTEX_A57)
-
-	if(${TARGET} MATCHES ARMV7A_ARM_CORTEX_A15)
-	
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgetr_lib4.c 
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_sgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib44.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/armv7a/kernel_dgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/armv7a/kernel_sgemm_12x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv7a/kernel_sgemm_8x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/armv7a/kernel_sgemm_4x4_lib4.S
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgecp_lib4.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib4.c
-			)
-
-	endif(${TARGET} MATCHES ARMV7A_ARM_CORTEX_A15)
-
-	if(${TARGET} MATCHES GENERIC)
-	
-		file(GLOB AUX_SRC
-			${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgecp_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_dgetr_lib4.c 
-			${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/c99/kernel_sgetr_lib4.c
-			${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib44.c
-			)
-
-		file(GLOB KERNEL_SRC
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dsymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_dgeqrf_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_4x4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemm_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgemv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_ssymv_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
-			${PROJECT_SOURCE_DIR}/kernel/c99/kernel_sgecp_lib4.c
-			)
-
-		file(GLOB BLAS_SRC
-			${PROJECT_SOURCE_DIR}/blas/d_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/d_lapack_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas1_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib4.c
-			${PROJECT_SOURCE_DIR}/blas/s_lapack_lib4.c
-			)
-
-	endif(${TARGET} MATCHES GENERIC)
-
-else(${LA} MATCHES HIGH_PERFORMANCE) # REFERENCE BLAS
-
-	file(GLOB AUX_SRC
-		${PROJECT_SOURCE_DIR}/auxiliary/d_aux_lib.c
-		${PROJECT_SOURCE_DIR}/auxiliary/s_aux_lib.c
-		${PROJECT_SOURCE_DIR}/auxiliary/m_aux_lib.c
-		)
-
-	file(GLOB BLAS_SRC
-		${PROJECT_SOURCE_DIR}/blas/d_blas1_lib.c
-		${PROJECT_SOURCE_DIR}/blas/d_blas2_lib.c
-		${PROJECT_SOURCE_DIR}/blas/d_blas2_diag_lib.c
-		${PROJECT_SOURCE_DIR}/blas/d_blas3_lib.c
-		${PROJECT_SOURCE_DIR}/blas/d_blas3_diag_lib.c
-		${PROJECT_SOURCE_DIR}/blas/d_lapack_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_blas1_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_blas2_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_blas2_diag_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_blas3_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_blas3_diag_lib.c
-		${PROJECT_SOURCE_DIR}/blas/s_lapack_lib.c
-		)
-
-endif(${LA} MATCHES HIGH_PERFORMANCE)
-
-if(${EXT_DEP})
-
-	file(GLOB EXT_SRC
-		${PROJECT_SOURCE_DIR}/auxiliary/d_aux_ext_dep_lib.c
-		${PROJECT_SOURCE_DIR}/auxiliary/s_aux_ext_dep_lib.c
-		${PROJECT_SOURCE_DIR}/auxiliary/v_aux_ext_dep_lib.c
-		${PROJECT_SOURCE_DIR}/auxiliary/i_aux_ext_dep_lib.c
-		)
-
-endif()
-
-set(BLASFEO_SRC ${AUX_SRC} ${KERNEL_SRC} ${BLAS_SRC} ${EXT_SRC})
-
-# add library
-add_library(blasfeo ${BLASFEO_SRC})
-target_include_directories(blasfeo
-	PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
-
-install(TARGETS blasfeo EXPORT blasfeoConfig
-	LIBRARY DESTINATION lib
-	ARCHIVE DESTINATION lib
-	RUNTIME DESTINATION bin)
-
-install(EXPORT blasfeoConfig DESTINATION cmake)
-
-file(GLOB_RECURSE BLASFEO_HEADERS "include/*.h")
-install(FILES ${BLASFEO_HEADERS} DESTINATION ${BLASFEO_HEADERS_INSTALLATION_DIRECTORY})
-
-# test problems
-# add_subdirectory(test_problems)
diff --git a/third_party/blasfeo/LICENSE.txt b/third_party/blasfeo/LICENSE.txt
deleted file mode 100644
index 5ab7695..0000000
--- a/third_party/blasfeo/LICENSE.txt
+++ /dev/null
@@ -1,504 +0,0 @@
-		  GNU LESSER GENERAL PUBLIC LICENSE
-		       Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL.  It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
-  This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it.  You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
-  When we speak of free software, we are referring to freedom of use,
-not price.  Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
-  To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights.  These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
-  For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you.  You must make sure that they, too, receive or can get the source
-code.  If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it.  And you must show them these terms so they know their rights.
-
-  We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
-  To protect each distributor, we want to make it very clear that
-there is no warranty for the free library.  Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-
-  Finally, software patents pose a constant threat to the existence of
-any free program.  We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder.  Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
-  Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License.  This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License.  We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
-  When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library.  The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom.  The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
-  We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License.  It also provides other free software developers Less
-of an advantage over competing non-free programs.  These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries.  However, the Lesser license provides advantages in certain
-special circumstances.
-
-  For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard.  To achieve this, non-free programs must be
-allowed to use the library.  A more frequent case is that a free
-library does the same job as widely used non-free libraries.  In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
-  In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software.  For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
-  Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.  Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library".  The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-
-		  GNU LESSER GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
-  A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
-  The "Library", below, refers to any such software library or work
-which has been distributed under these terms.  A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language.  (Hereinafter, translation is
-included without limitation in the term "modification".)
-
-  "Source code" for a work means the preferred form of the work for
-making modifications to it.  For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
-  Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it).  Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-  
-  1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
-  You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-
-  2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) The modified work must itself be a software library.
-
-    b) You must cause the files modified to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    c) You must cause the whole of the work to be licensed at no
-    charge to all third parties under the terms of this License.
-
-    d) If a facility in the modified Library refers to a function or a
-    table of data to be supplied by an application program that uses
-    the facility, other than as an argument passed when the facility
-    is invoked, then you must make a good faith effort to ensure that,
-    in the event an application does not supply such function or
-    table, the facility still operates, and performs whatever part of
-    its purpose remains meaningful.
-
-    (For example, a function in a library to compute square roots has
-    a purpose that is entirely well-defined independent of the
-    application.  Therefore, Subsection 2d requires that any
-    application-supplied function or table used by this function must
-    be optional: if the application does not supply it, the square
-    root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library.  To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License.  (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.)  Do not make any other change in
-these notices.
-
-  Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
-  This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
-  4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
-  If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library".  Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
-  However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library".  The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
-  When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library.  The
-threshold for this to be true is not precisely defined by law.
-
-  If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work.  (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
-  Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-
-  6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
-  You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License.  You must supply a copy of this License.  If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License.  Also, you must do one
-of these things:
-
-    a) Accompany the work with the complete corresponding
-    machine-readable source code for the Library including whatever
-    changes were used in the work (which must be distributed under
-    Sections 1 and 2 above); and, if the work is an executable linked
-    with the Library, with the complete machine-readable "work that
-    uses the Library", as object code and/or source code, so that the
-    user can modify the Library and then relink to produce a modified
-    executable containing the modified Library.  (It is understood
-    that the user who changes the contents of definitions files in the
-    Library will not necessarily be able to recompile the application
-    to use the modified definitions.)
-
-    b) Use a suitable shared library mechanism for linking with the
-    Library.  A suitable mechanism is one that (1) uses at run time a
-    copy of the library already present on the user's computer system,
-    rather than copying library functions into the executable, and (2)
-    will operate properly with a modified version of the library, if
-    the user installs one, as long as the modified version is
-    interface-compatible with the version that the work was made with.
-
-    c) Accompany the work with a written offer, valid for at
-    least three years, to give the same user the materials
-    specified in Subsection 6a, above, for a charge no more
-    than the cost of performing this distribution.
-
-    d) If distribution of the work is made by offering access to copy
-    from a designated place, offer equivalent access to copy the above
-    specified materials from the same place.
-
-    e) Verify that the user has already received a copy of these
-    materials or that you have already sent this user a copy.
-
-  For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it.  However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
-  It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system.  Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-
-  7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
-    a) Accompany the combined library with a copy of the same work
-    based on the Library, uncombined with any other library
-    facilities.  This must be distributed under the terms of the
-    Sections above.
-
-    b) Give prominent notice with the combined library of the fact
-    that part of it is a work based on the Library, and explaining
-    where to find the accompanying uncombined form of the same work.
-
-  8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License.  Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License.  However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
-  9. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Library or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
-  10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-
-  11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded.  In such case, this License incorporates the limitation as if
-written in the body of this License.
-
-  13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation.  If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-
-  14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission.  For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this.  Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
-			    NO WARRANTY
-
-  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
-		     END OF TERMS AND CONDITIONS
-
-           How to Apply These Terms to Your New Libraries
-
-  If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change.  You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
-  To apply these terms, attach the following notices to the library.  It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the library's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the
-  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
-  <signature of Ty Coon>, 1 April 1990
-  Ty Coon, President of Vice
-
-That's all there is to it!
-
-
diff --git a/third_party/blasfeo/Makefile b/third_party/blasfeo/Makefile
deleted file mode 100644
index b7a438f..0000000
--- a/third_party/blasfeo/Makefile
+++ /dev/null
@@ -1,257 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ./Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/avx/kernel_dgecp_lib4.o ./auxiliary/avx2/kernel_dgetr_lib4.o
-OBJS += ./auxiliary/s_aux_lib8.o
-OBJS += ./auxiliary/m_aux_lib48.o
-# kernels
-OBJS += ./kernel/avx2/kernel_dgemm_12x4_lib4.o ./kernel/avx2/kernel_dgemm_8x8_lib4.o ./kernel/avx2/kernel_dgemm_8x4_lib4.o ./kernel/avx2/kernel_dgemm_4x4_lib4.o ./kernel/avx/kernel_dgemm_diag_lib4.o ./kernel/avx2/kernel_dgemv_8_lib4.o ./kernel/avx/kernel_dgemv_4_lib4.o ./kernel/avx2/kernel_dsymv_6_lib4.o ./kernel/avx2/kernel_dgetrf_pivot_4_lib4.o ./kernel/avx/kernel_dgeqrf_4_lib4.o kernel/avx2/kernel_dgebp_lib4.o kernel/avx2/kernel_dgelqf_4_lib4.o
-OBJS += ./kernel/avx2/kernel_sgemm_24x4_lib8.o ./kernel/avx2/kernel_sgemm_16x4_lib8.o ./kernel/avx2/kernel_sgemm_8x8_lib8.o ./kernel/avx2/kernel_sgemm_8x4_lib8.o ./kernel/avx/kernel_sgemm_diag_lib8.o ./kernel/avx/kernel_sgecp_lib8.o ./kernel/avx/kernel_sgetr_lib8.o ./kernel/avx/kernel_sgead_lib8.o ./kernel/avx/kernel_sgesc_lib8.o ./kernel/avx/kernel_sgemv_8_lib8.o ./kernel/avx/kernel_sgemv_4_lib8.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib8.o ./blas/s_blas2_lib8.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib8.o ./blas/s_blas3_diag_lib8.o ./blas/s_lapack_lib8.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/avx/kernel_dgecp_lib4.o ./auxiliary/avx/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib8.o
-OBJS += ./auxiliary/m_aux_lib48.o
-# kernels
-OBJS += ./kernel/avx/kernel_dgemm_8x4_lib4.o ./kernel/avx/kernel_dgemm_4x4_lib4.o ./kernel/avx/kernel_dgemm_diag_lib4.o ./kernel/avx/kernel_dgemv_12_lib4.o ./kernel/avx/kernel_dgemv_8_lib4.o ./kernel/avx/kernel_dgemv_4_lib4.o ./kernel/avx/kernel_dsymv_6_lib4.o ./kernel/avx/kernel_dgetrf_pivot_4_lib4.o ./kernel/avx/kernel_dgeqrf_4_lib4.o kernel/avx/kernel_dgebp_lib4.o
-OBJS += ./kernel/avx/kernel_sgemm_16x4_lib8.o ./kernel/avx/kernel_sgemm_8x8_lib8.o ./kernel/avx/kernel_sgemm_8x4_lib8.o ./kernel/avx/kernel_sgecp_lib8.o ./kernel/avx/kernel_sgemm_diag_lib8.o ./kernel/avx/kernel_sgetr_lib8.o ./kernel/avx/kernel_sgead_lib8.o ./kernel/avx/kernel_sgesc_lib8.o ./kernel/avx/kernel_sgemv_8_lib8.o ./kernel/avx/kernel_sgemv_4_lib8.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib8.o ./blas/s_blas2_lib8.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib8.o ./blas/s_blas3_diag_lib8.o ./blas/s_lapack_lib8.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/c99/kernel_dgecp_lib4.o ./auxiliary/c99/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib4.o ./auxiliary/c99/kernel_sgetr_lib4.o 
-OBJS += ./auxiliary/m_aux_lib44.o
-# kernels
-OBJS += ./kernel/sse3/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_diag_lib4.o ./kernel/c99/kernel_dgemv_4_lib4.o ./kernel/c99/kernel_dsymv_4_lib4.o ./kernel/c99/kernel_dgetrf_pivot_4_lib4.o ./kernel/c99/kernel_dgeqrf_4_lib4.o
-OBJS += ./kernel/c99/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_diag_lib4.o ./kernel/c99/kernel_sgemv_4_lib4.o ./kernel/c99/kernel_ssymv_4_lib4.o ./kernel/c99/kernel_sgetrf_pivot_4_lib4.o ./kernel/c99/kernel_sgecp_lib4.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib4.o ./blas/s_blas2_lib4.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib4.o ./blas/s_blas3_diag_lib4.o ./blas/s_lapack_lib4.o
-endif
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/c99/kernel_dgecp_lib4.o ./auxiliary/c99/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib4.o ./auxiliary/c99/kernel_sgetr_lib4.o 
-OBJS += ./auxiliary/m_aux_lib44.o
-# kernels
-OBJS += ./kernel/fma/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_diag_lib4.o ./kernel/c99/kernel_dgemv_4_lib4.o ./kernel/c99/kernel_dsymv_4_lib4.o ./kernel/c99/kernel_dgetrf_pivot_4_lib4.o ./kernel/c99/kernel_dgeqrf_4_lib4.o
-OBJS += ./kernel/c99/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_diag_lib4.o ./kernel/c99/kernel_sgemv_4_lib4.o ./kernel/c99/kernel_ssymv_4_lib4.o ./kernel/c99/kernel_sgetrf_pivot_4_lib4.o ./kernel/c99/kernel_sgecp_lib4.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib4.o ./blas/s_blas2_lib4.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib4.o ./blas/s_blas3_diag_lib4.o ./blas/s_lapack_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/c99/kernel_dgecp_lib4.o ./auxiliary/c99/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib4.o ./auxiliary/c99/kernel_sgetr_lib4.o 
-OBJS += ./auxiliary/m_aux_lib44.o
-# kernels
-OBJS += ./kernel/armv8a/kernel_dgemm_8x4_lib4.o ./kernel/armv8a/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_diag_lib4.o ./kernel/c99/kernel_dgemv_4_lib4.o ./kernel/c99/kernel_dsymv_4_lib4.o ./kernel/c99/kernel_dgetrf_pivot_4_lib4.o ./kernel/c99/kernel_dgeqrf_4_lib4.o
-OBJS += ./kernel/armv8a/kernel_sgemm_16x4_lib4.o ./kernel/armv8a/kernel_sgemm_12x4_lib4.o ./kernel/armv8a/kernel_sgemm_8x8_lib4.o ./kernel/armv8a/kernel_sgemm_8x4_lib4.o ./kernel/armv8a/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_diag_lib4.o ./kernel/c99/kernel_sgemv_4_lib4.o ./kernel/c99/kernel_ssymv_4_lib4.o ./kernel/c99/kernel_sgetrf_pivot_4_lib4.o ./kernel/c99/kernel_sgecp_lib4.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib4.o ./blas/s_blas2_lib4.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib4.o ./blas/s_blas3_diag_lib4.o ./blas/s_lapack_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/c99/kernel_dgecp_lib4.o ./auxiliary/c99/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib4.o ./auxiliary/c99/kernel_sgetr_lib4.o 
-OBJS += ./auxiliary/m_aux_lib44.o
-# kernels
-OBJS += ./kernel/armv7a/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_diag_lib4.o ./kernel/c99/kernel_dgemv_4_lib4.o ./kernel/c99/kernel_dsymv_4_lib4.o ./kernel/c99/kernel_dgetrf_pivot_4_lib4.o ./kernel/c99/kernel_dgeqrf_4_lib4.o
-OBJS += ./kernel/armv7a/kernel_sgemm_12x4_lib4.o ./kernel/armv7a/kernel_sgemm_8x4_lib4.o ./kernel/armv7a/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_diag_lib4.o ./kernel/c99/kernel_sgemv_4_lib4.o ./kernel/c99/kernel_ssymv_4_lib4.o ./kernel/c99/kernel_sgetrf_pivot_4_lib4.o ./kernel/c99/kernel_sgecp_lib4.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib4.o ./blas/s_blas2_lib4.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib4.o ./blas/s_blas3_diag_lib4.o ./blas/s_lapack_lib4.o
-endif
-
-ifeq ($(TARGET), GENERIC)
-# aux
-OBJS += ./auxiliary/d_aux_lib4.o ./auxiliary/c99/kernel_dgecp_lib4.o ./auxiliary/c99/kernel_dgetr_lib4.o 
-OBJS += ./auxiliary/s_aux_lib4.o ./auxiliary/c99/kernel_sgetr_lib4.o 
-OBJS += ./auxiliary/m_aux_lib44.o
-# kernels
-OBJS += ./kernel/c99/kernel_dgemm_4x4_lib4.o ./kernel/c99/kernel_dgemm_diag_lib4.o ./kernel/c99/kernel_dgemv_4_lib4.o ./kernel/c99/kernel_dsymv_4_lib4.o ./kernel/c99/kernel_dgetrf_pivot_4_lib4.o ./kernel/c99/kernel_dgeqrf_4_lib4.o
-OBJS += ./kernel/c99/kernel_sgemm_4x4_lib4.o ./kernel/c99/kernel_sgemm_diag_lib4.o ./kernel/c99/kernel_sgemv_4_lib4.o ./kernel/c99/kernel_ssymv_4_lib4.o ./kernel/c99/kernel_sgetrf_pivot_4_lib4.o ./kernel/c99/kernel_sgecp_lib4.o
-# blas
-OBJS += ./blas/d_blas1_lib4.o ./blas/d_blas2_lib4.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib4.o ./blas/d_blas3_diag_lib4.o ./blas/d_lapack_lib4.o
-OBJS += ./blas/s_blas1_lib4.o ./blas/s_blas2_lib4.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib4.o ./blas/s_blas3_diag_lib4.o ./blas/s_lapack_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-# aux
-OBJS += ./auxiliary/d_aux_lib.o
-OBJS += ./auxiliary/s_aux_lib.o
-OBJS += ./auxiliary/m_aux_lib.o
-# blas
-OBJS += ./blas/d_blas1_lib.o ./blas/d_blas2_lib.o ./blas/d_blas2_diag_lib.o ./blas/d_blas3_lib.o ./blas/d_blas3_diag_lib.o ./blas/d_lapack_lib.o
-OBJS += ./blas/s_blas1_lib.o ./blas/s_blas2_lib.o ./blas/s_blas2_diag_lib.o ./blas/s_blas3_lib.o ./blas/s_blas3_diag_lib.o ./blas/s_lapack_lib.o
-
-endif # LA choice
-
-ifeq ($(EXT_DEP), 1)
-# ext dep
-OBJS += ./auxiliary/d_aux_ext_dep_lib.o
-OBJS += ./auxiliary/s_aux_ext_dep_lib.o
-OBJS += ./auxiliary/v_aux_ext_dep_lib.o
-OBJS += ./auxiliary/i_aux_ext_dep_lib.o
-endif
-
-
-
-all: clean static_library
-
-static_library: target
-	( cd auxiliary; $(MAKE) obj)
-	( cd kernel; $(MAKE) obj)
-	( cd blas; $(MAKE) obj)
-	ar rcs libblasfeo.a $(OBJS) 
-	cp libblasfeo.a ./lib/
-	@echo
-	@echo " libblasfeo.a static library build complete."
-	@echo
-
-shared_library: target
-	( cd auxiliary; $(MAKE) obj)
-	( cd kernel; $(MAKE) obj)
-	( cd blas; $(MAKE) obj)
-	gcc -shared -o libblasfeo.so $(OBJS)
-	cp libblasfeo.so ./lib/
-	@echo
-	@echo " libblasfeo.so shared library build complete."
-	@echo
-
-target:
-	touch ./include/blasfeo_target.h
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-	echo "#ifndef TARGET_X64_INTEL_HASWELL" > ./include/blasfeo_target.h
-	echo "#define TARGET_X64_INTEL_HASWELL" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-	echo "#ifndef TARGET_X64_INTEL_SANDY_BRIDGE" > ./include/blasfeo_target.h
-	echo "#define TARGET_X64_INTEL_SANDY_BRIDGE" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(TARGET), X64_INTEL_CORE)
-	echo "#ifndef TARGET_X64_INTEL_CORE" > ./include/blasfeo_target.h
-	echo "#define TARGET_X64_INTEL_CORE" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-	echo "#ifndef TARGET_X64_AMD_BULLDOZER" > ./include/blasfeo_target.h
-	echo "#define TARGET_X64_AMD_BULLDOZER" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(TARGET), GENERIC)
-	echo "#ifndef TARGET_GENERIC" > ./include/blasfeo_target.h
-	echo "#define TARGET_GENERIC" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-	echo "#ifndef TARGET_ARMV7A_ARM_CORTEX_A15" > ./include/blasfeo_target.h
-	echo "#define TARGET_ARMV7A_ARM_CORTEX_A15" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(LA), HIGH_PERFORMANCE)
-	echo "#ifndef LA_HIGH_PERFORMANCE" >> ./include/blasfeo_target.h
-	echo "#define LA_HIGH_PERFORMANCE" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(LA), BLAS)
-	echo "#ifndef LA_BLAS" >> ./include/blasfeo_target.h
-	echo "#define LA_BLAS" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(LA), REFERENCE)
-	echo "#ifndef LA_REFERENCE" >> ./include/blasfeo_target.h
-	echo "#define LA_REFERENCE" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-ifeq ($(EXT_DEP), 1)
-	echo "#ifndef EXT_DEP" >> ./include/blasfeo_target.h
-	echo "#define EXT_DEP" >> ./include/blasfeo_target.h
-	echo "#endif" >> ./include/blasfeo_target.h
-endif
-
-install_static:
-	mkdir -p $(PREFIX)/blasfeo
-	mkdir -p $(PREFIX)/blasfeo/lib
-	cp -f libblasfeo.a $(PREFIX)/blasfeo/lib/
-	mkdir -p $(PREFIX)/blasfeo/include
-	cp -f ./include/*.h $(PREFIX)/blasfeo/include/
-
-install_shared:
-	mkdir -p $(PREFIX)/blasfeo
-	mkdir -p $(PREFIX)/blasfeo/lib
-	cp -f libblasfeo.so $(PREFIX)/blasfeo/lib/
-	mkdir -p $(PREFIX)/blasfeo/include
-	cp -f ./include/*.h $(PREFIX)/blasfeo/include/
-
-test_problem:
-	cp libblasfeo.a ./test_problems/libblasfeo.a
-	make -C test_problems obj
-	@echo
-	@echo " Test problem build complete."
-	@echo
-
-run:
-	./test_problems/test.out
-
-clean:
-	rm -f libblasfeo.a
-	rm -f libblasfeo.so
-	rm -f ./lib/libblasfeo.a
-	rm -f ./lib/libblasfeo.so
-	make -C auxiliary clean
-	make -C kernel clean
-	make -C blas clean
-	make -C test_problems clean
-	make -C examples clean
-
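
For concreteness, the `target' rule in the Makefile above only stitches together a small generated header. With the defaults chosen in Makefile.rule below (TARGET=X64_INTEL_HASWELL, LA=HIGH_PERFORMANCE, EXT_DEP=1), its echo recipes would produce an include/blasfeo_target.h along these lines (a sketch derived from the recipes, not a captured file):

/* Sketch of include/blasfeo_target.h for TARGET=X64_INTEL_HASWELL,
   LA=HIGH_PERFORMANCE, EXT_DEP=1; each option is emitted by the
   `target' rule as its own #ifndef-guarded define. */
#ifndef TARGET_X64_INTEL_HASWELL
#define TARGET_X64_INTEL_HASWELL
#endif
#ifndef LA_HIGH_PERFORMANCE
#define LA_HIGH_PERFORMANCE
#endif
#ifndef EXT_DEP
#define EXT_DEP
#endif

These macros are what the CFLAGS/ASFLAGS definitions in Makefile.rule also pass on the command line, so the headers and assembly kernels see a consistent configuration.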
diff --git a/third_party/blasfeo/Makefile.rule b/third_party/blasfeo/Makefile.rule
deleted file mode 100644
index 200721e..0000000
--- a/third_party/blasfeo/Makefile.rule
+++ /dev/null
@@ -1,183 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-# Target architecture
-# X64_INTEL_HASWELL : x86_64 architecture with AVX2 and FMA ISA (64 bit OS) code optimized for Intel Haswell and Intel Skylake architectures.
-# X64_INTEL_SANDY_BRIDGE : x86_64 architecture with AVX ISA (64 bit OS) code optimized for Intel Sandy-Bridge architecture.
-# X64_INTEL_CORE : x86_64 architecture with SSE3 (64 bit OS) code optimized for Intel Core architecture.
-# X64_AMD_BULLDOZER : x86_64 architecture with AVX and FMA ISA (64 bit OS) code optimized for AMD Bulldozer.
-# ARMV7A_ARM_CORTEX_A15 : ARMv7A architecture with NEON-VFPv4 ISA (32 bit OS) code optimized for ARM Cortex A15.
-# GENERIC : generic c99 code
-TARGET = X64_INTEL_HASWELL
-#TARGET = X64_INTEL_SANDY_BRIDGE
-#TARGET = X64_INTEL_CORE
-#TARGET = X64_AMD_BULLDOZER
-#TARGET = ARMV8A_ARM_CORTEX_A57
-#TARGET = ARMV7A_ARM_CORTEX_A15
-#TARGET = GENERIC
-
-# Linear Algebra library
-LA = HIGH_PERFORMANCE
-#LA = REFERENCE
-#LA = BLAS
-
-# BLAS and LAPACK version (for LA=BLAS)
-REF_BLAS = 0
-#REF_BLAS = OPENBLAS
-#REF_BLAS = NETLIB
-#REF_BLAS = MKL
-#REF_BLAS = BLIS
-#REF_BLAS = ATLAS
-
-# Compile auxiliary functions with external dependencies (for memory allocation and printing)
-#EXT_DEP = 0
-EXT_DEP = 1
-
-# Enable on-line checks for matrix and vector dimensions
-RUNTIME_CHECKS = 0
-#RUNTIME_CHECKS = 1
-
-# Operating system
-UNAME_S := $(shell uname -s)
-ifeq ($(UNAME_S),Linux)
-    OS = LINUX
-endif
-ifeq ($(UNAME_S),Darwin)
-    OS = MAC
-endif
-#OS = LINUX
-#OS = MAC
-#OS = WINDOWS
-
-# C Compiler
-CC = gcc
-#CC = clang
-#CC = x86_64-w64-mingw32-gcc
-
-# Installation directory
-PREFIX = /opt
-
-# Macro level (code size vs performance in assembly kernels): 0 (no macro), 1 (all macro but gemm kernel), 2 (all macro)
-MACRO_LEVEL = 0
-
-# compiler / assembler / linker flags
-CFLAGS  = 
-ASFLAGS = 
-LDFLAGS =
-
-# Optimization flags
-CFLAGS += -O2 -fPIC
-
-# Debugging flags
-#CFLAGS  += -g #-Wall -pedantic -Wfloat-equal #-pg
-#ASFLAGS += -g
-
-# Definitions
-ifeq ($(LA), HIGH_PERFORMANCE)
-CFLAGS  += -DLA_HIGH_PERFORMANCE
-endif
-ifeq ($(LA), REFERENCE)
-CFLAGS  += -DLA_REFERENCE
-endif
-ifeq ($(LA), BLAS)
-CFLAGS  += -DLA_BLAS
-endif
-
-ifeq ($(RUNTIME_CHECKS), 1)
-CFLAGS += -DDIM_CHECK
-endif
-
-ifeq ($(EXT_DEP), 1)
-CFLAGS += -DEXT_DEP
-endif
-
-ifeq ($(MACRO_LEVEL), 1)
-ASFLAGS += -DMACRO_LEVEL=1
-endif
-ifeq ($(MACRO_LEVEL), 2)
-ASFLAGS += -DMACRO_LEVEL=2
-endif
-
-ifeq ($(OS), LINUX)
-CFLAGS  += -DOS_LINUX
-ASFLAGS += -DOS_LINUX
-endif
-ifeq ($(OS), MAC)
-CFLAGS  += -DOS_MAC
-ASFLAGS += -DOS_MAC
-endif
-ifeq ($(OS), WINDOWS)
-CFLAGS  += -DOS_WINDOWS
-ASFLAGS += -DOS_WINDOWS
-endif
-
-ifeq ($(REF_BLAS), 0)
-CFLAGS  += 
-endif
-ifeq ($(REF_BLAS), OPENBLAS)
-CFLAGS  += -DREF_BLAS_OPENBLAS -I/opt/openblas/include
-endif
-ifeq ($(REF_BLAS), BLIS)
-CFLAGS  += -DREF_BLAS_BLIS -std=c99
-endif
-ifeq ($(REF_BLAS), NETLIB)
-CFLAGS  += -DREF_BLAS_NETLIB
-endif
-ifeq ($(REF_BLAS), MKL)
-CFLAGS  += -DREF_BLAS_MKL -m64 -I/opt/intel/mkl/include
-endif
-ifeq ($(REF_BLAS), ATLAS)
-CFLAGS  += -DREF_BLAS_ATLAS
-endif
-
-# Architecture-specific flags
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-CFLAGS  += -m64 -mavx2 -mfma -DTARGET_X64_INTEL_HASWELL
-endif
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-CFLAGS  += -m64 -mavx -DTARGET_X64_INTEL_SANDY_BRIDGE
-endif
-ifeq ($(TARGET), X64_INTEL_CORE)
-CFLAGS  += -m64 -msse3 -DTARGET_X64_INTEL_CORE
-endif
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-CFLAGS  += -m64 -mavx -mfma -DTARGET_X64_AMD_BULLDOZER
-endif
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-CFLAGS  += -march=armv8-a+crc+crypto+fp+simd -DTARGET_ARMV8A_ARM_CORTEX_A57
-ASFLAGS += -DTARGET_ARMV8A_ARM_CORTEX_A57
-endif
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-CFLAGS  += -marm -mfloat-abi=hard -mfpu=neon-vfpv4 -mcpu=cortex-a15 -DTARGET_ARMV7A_ARM_CORTEX_A15
-ASFLAGS += -mfpu=neon-vfpv4 -DTARGET_ARMV7A_ARM_CORTEX_A15
-endif
-ifeq ($(TARGET), GENERIC)
-CFLAGS  += -DTARGET_GENERIC
-endif
-
-
diff --git a/third_party/blasfeo/README.txt b/third_party/blasfeo/README.txt
deleted file mode 100644
index 685a2c8..0000000
--- a/third_party/blasfeo/README.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-BLASFEO - BLAS For Embedded Optimization
-
-BLASFEO provides a set of linear algebra routines optimized for use in embedded optimization.
-It is for example employed in the Model Predictive Control software package HPMPC.
-
-BLASFEO provides three implementations of each linear algebra routine (LA):
-- HIGH_PERFORMANCE: a high-performance implementation hand-optimized for different computer architectures.
-- REFERENCE: a lightly-optimized version, coded entirely in C without assumptions about the computer architecture.
-- BLAS: a wrapper to BLAS and LAPACK routines.
-
-The currently supported computer architectures (TARGET) are:
-- X64_INTEL_HASWELL: Intel Haswell architecture or newer, AVX2 and FMA ISA, 64-bit OS.
-- X64_INTEL_SANDY_BRIDGE: Intel Sandy-Bridge architecture or newer, AVX ISA, 64-bit OS.
-- X64_INTEL_CORE: Intel Core architecture or newer, SSE3 ISA, 64-bit OS.
-- X64_AMD_BULLDOZER: AMD Bulldozer architecture, AVX and FMA ISAs, 64-bit OS.
-- ARMV8A_ARM_CORTEX_A57: ARMv8A architecture, VFPv4 and NEONv2 ISAs, 64-bit OS.
-- ARMV7A_ARM_CORTEX_A15: ARMv7A architecture, VFPv3 and NEON ISAs, 32-bit OS.
-- GENERIC: generic target, coded in C, giving better performance if the architecture provides more than 16 scalar FP registers (e.g. many RISC architectures such as ARM).
-
-The optimized linear algebra kernels are currently provided for OS_LINUX (x86_64 64-bit, ARMv8A 64-bit, ARMv7A 32-bit), OS_WINDOWS (x86_64 64-bit) and OS_MAC (x86_64 64-bit).
-
-BLASFEO employs structures to describe matrices (d_strmat) and vectors (d_strvec), defined in include/blasfeo_common.h.
-The actual implementation of d_strmat and d_strvec depends on the LA and TARGET choice.
-
-More information about BLASFEO can be found in the arXiv paper at https://arxiv.org/abs/1704.02457
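
To make the d_strmat/d_strvec description concrete, here is a minimal usage sketch in C. It assumes the EXT_DEP auxiliary helpers (built from d_aux_ext_dep_lib.c in the Makefiles above) expose allocate/convert/free functions with the names shown; those names and the header names are assumptions based on this snapshot, so treat the sketch as illustrative rather than as the library's documented API.

/* Illustrative sketch only -- function and header names are assumed, see note above. */
#include "blasfeo_common.h"        /* struct d_strmat / d_strvec (per the README) */
#include "blasfeo_d_aux_ext_dep.h" /* assumed header for the EXT_DEP helpers */

void example(int m, int n, double *A /* column-major, leading dimension m */)
	{
	struct d_strmat sA;
	d_allocate_strmat(m, n, &sA);            /* assumed: allocates aligned panel-major storage */
	d_cvt_mat2strmat(m, n, A, m, &sA, 0, 0); /* assumed: copies A into the panel-major layout */
	/* ... call the chosen LA routines on &sA here ... */
	d_free_strmat(&sA);                      /* assumed: releases the storage */
	}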
diff --git a/third_party/blasfeo/TODOlist.txt b/third_party/blasfeo/TODOlist.txt
deleted file mode 100644
index bba5ee0..0000000
--- a/third_party/blasfeo/TODOlist.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-- syrk_potrf_ln_mn
-- alpha for trsm
-- kernels and _mn_ version of trmv
-- kernel dsymv dgemv_nt 4 avx
-- remove n from trmv
-- store_gen in single precision
-- clean target.h and create it also from cmake (see "file")
diff --git a/third_party/blasfeo/auxiliary/Makefile b/third_party/blasfeo/auxiliary/Makefile
deleted file mode 100644
index d1242bd..0000000
--- a/third_party/blasfeo/auxiliary/Makefile
+++ /dev/null
@@ -1,124 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib8.o
-OBJS += m_aux_lib48.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib8.o
-OBJS += m_aux_lib48.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib4.o
-OBJS += m_aux_lib44.o
-endif
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib4.o
-OBJS += m_aux_lib44.o
-endif
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib4.o
-OBJS += m_aux_lib44.o
-endif
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib4.o
-OBJS += m_aux_lib44.o
-endif
-
-ifeq ($(TARGET), GENERIC)
-OBJS += d_aux_lib4.o
-OBJS += s_aux_lib4.o
-OBJS += m_aux_lib44.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-OBJS += d_aux_lib.o
-OBJS += s_aux_lib.o
-OBJS += m_aux_lib.o
-
-endif # LA choice
-
-ifeq ($(EXT_DEP), 1)
-#ext dep
-OBJS += d_aux_ext_dep_lib.o
-OBJS += s_aux_ext_dep_lib.o
-OBJS += v_aux_ext_dep_lib.o
-OBJS += i_aux_ext_dep_lib.o 
-endif
-
-obj: $(OBJS)
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-	( cd avx2; $(MAKE) obj)
-	( cd avx; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-	( cd avx; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), X64_INTEL_CORE)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-	( cd c99; $(MAKE) obj)
-endif
-ifeq ($(TARGET), GENERIC)
-	( cd c99; $(MAKE) obj)
-endif
-
-
-clean:
-	rm -f *.o
-	make -C avx2 clean
-	make -C avx clean
-	make -C c99 clean
diff --git a/third_party/blasfeo/auxiliary/avx/Makefile b/third_party/blasfeo/auxiliary/avx/Makefile
deleted file mode 100644
index 84e0154..0000000
--- a/third_party/blasfeo/auxiliary/avx/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += kernel_dgecp_lib4.o 
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
diff --git a/third_party/blasfeo/auxiliary/avx/kernel_dgecp_lib4.c b/third_party/blasfeo/auxiliary/avx/kernel_dgecp_lib4.c
deleted file mode 100644
index 4bc8c9a..0000000
--- a/third_party/blasfeo/auxiliary/avx/kernel_dgecp_lib4.c
+++ /dev/null
@@ -1,3024 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgecp_8_0_lib4(int tri, int kmax, double alpha, double *A0, int sda,  double *B0, int sdb)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 8-wide + end 7x7 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		alpha_0,
-		a_0;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B0[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B0[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B0[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B0[0+bs*3], a_0 );
-
-		A0 += 16;
-		B0 += 16;
-
-		a_0 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+bs*3], a_0 );
-
-		A1 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B0[0+bs*0], a_0 );
-
-		A0 += 4;
-		B0 += 4;
-
-		a_0 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+bs*0], a_0 );
-
-		A1 += 4;
-		B1 += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 7x7 triangle 
-
-		c_0 = _mm_load_sd( &A0[1+0*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[1+0*bs], c_0 );
-		c_0 = _mm_load_pd( &A0[2+0*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B0[2+0*bs], c_0 );
-		a_0 = _mm256_load_pd( &A1[0+0*bs] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+0*bs], a_0 );
-
-		c_0 = _mm_load_pd( &A0[2+1*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B0[2+1*bs], c_0 );
-		a_0 = _mm256_load_pd( &A1[0+1*bs] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+1*bs], a_0 );
-
-		c_0 = _mm_load_sd( &A0[3+2*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+2*bs], c_0 );
-		a_0 = _mm256_load_pd( &A1[0+2*bs] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+2*bs], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+3*bs] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B1[0+3*bs], a_0 );
-
-		c_0 = _mm_load_sd( &A1[1+4*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[1+4*bs], c_0 );
-		c_0 = _mm_load_pd( &A1[2+4*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+4*bs], c_0 );
-
-		c_0 = _mm_load_pd( &A1[2+5*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+5*bs], c_0 );
-
-		c_0 = _mm_load_sd( &A1[3+6*bs] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+6*bs], c_0 );
-
-		}
-
-	}
-
-
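
For readability, the tri==0 path of kernel_dgecp_8_0_lib4 above amounts to the scalar loop below: a scaled copy B = alpha*A of an 8 x kmax block stored as two 4-row panels (bs = 4; element (i,j) of a panel sits at offset i + bs*j, and the second panel starts bs*sda, respectively bs*sdb, doubles after the first). The function name is invented for illustration and the tri==1 triangle tail is omitted.

/* Scalar sketch of the tri==0 path of kernel_dgecp_8_0_lib4 (illustrative name). */
void ref_dgecp_8_0_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
	{
	const int bs = 4;
	double *A1 = A0 + bs*sda; /* rows 4..7 of A */
	double *B1 = B0 + bs*sdb; /* rows 4..7 of B */
	int i, j;
	for(j=0; j<kmax; j++)
		{
		for(i=0; i<bs; i++)
			{
			B0[i+bs*j] = alpha * A0[i+bs*j]; /* rows 0..3 */
			B1[i+bs*j] = alpha * A1[i+bs*j]; /* rows 4..7 */
			}
		}
	}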
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgecp_8_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 8-wide + end 7x7 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		alpha_0,
-		a_0, a_1, a_2,
-		b_0, b_1;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	if(tri==1)
-		{
-		// 7x7 triangle
-
-		c_0 = _mm_load_pd( &A0[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B0[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[1+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*0], c_0 );
-
-		c_0 = _mm_load_sd( &A0[3+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[2+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[1+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*1], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[0+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*2], c_0 );
-		c_0 = _mm_load_sd( &A1[1+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*2], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*2], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*2], c_0 );
-
-		c_0 = _mm_load_sd( &A1[1+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*3], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*3], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*3], c_0 );
-
-		c_0 = _mm_load_pd( &A1[2+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*4], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*4], c_0 );
-
-		c_0 = _mm_load_sd( &A1[3+bs*5] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[2+bs*5], c_0 );
-		c_0 = _mm_load_sd( &A2[0+bs*5] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*5], c_0 );
-
-		c_0 = _mm_load_sd( &A2[0+bs*6] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*6], c_0 );
-
-		}
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgecp_8_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 8-wide + end 7x7 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		alpha_0,
-		a_0, a_1, a_2,
-		b_0, b_1;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	if(tri==1)
-		{
-		// 7x7 triangle 
-
-		c_0 = _mm_load_sd( &A0[3+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[1+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B0[2+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[0+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*0], c_0 );
-
-		c_0 = _mm_load_pd( &A1[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B0[2+bs*1], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[0+bs*1], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[1+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*2], c_0 );
-		c_0 = _mm_load_pd( &A1[2+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[0+bs*2], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*2], c_0 );
-
-		c_0 = _mm_load_pd( &A1[2+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[0+bs*3], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*3], c_0 );
-
-		c_0 = _mm_load_sd( &A1[3+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[1+bs*4], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*4], c_0 );
-
-		c_0 = _mm_load_pd( &A2[0+bs*5] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B1[2+bs*5], c_0 );
-
-		c_0 = _mm_load_sd( &A2[1+bs*6] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*6], c_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_8_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 8-wide + end 7x7 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		alpha_0,
-		a_0, a_1, a_2,
-		b_0, b_1;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	if(tri==1)
-		{
-		// 7x7 triangle 
-
-		c_0 = _mm_load_pd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B0[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[3+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*0], c_0 );
-
-		c_0 = _mm_load_sd( &A1[1+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[2+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[3+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*1], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[2+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B0[3+bs*2], c_0 );
-		c_0 = _mm_load_sd( &A1[3+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*2], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*2], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*2], c_0 );
-
-		c_0 = _mm_load_sd( &A1[3+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[0+bs*3], c_0 );
-		c_0 = _mm_load_pd( &A2[0+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*3], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*3] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*3], c_0 );
-
-		c_0 = _mm_load_pd( &A2[0+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B1[1+bs*4], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*4] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*4], c_0 );
-
-		c_0 = _mm_load_sd( &A2[1+bs*5] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[2+bs*5], c_0 );
-		c_0 = _mm_load_sd( &A2[2+bs*5] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*5], c_0 );
-
-		c_0 = _mm_load_sd( &A2[2+bs*6] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B1[3+bs*6], c_0 );
-
-		}
-
-	}
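-
-// Note on the data layout, inferred from the indexing in these kernels: matrices are
-// stored in panels of bs=4 rows, column-major within each panel, so element (i,j) of a
-// panel lives at p[i+bs*j]; sda/sdb are the panel strides, i.e. A0 + bs*sda is the start
-// of the panel directly below A0.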
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgecp_4_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m256d
-		alpha_0,
-		a_0;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		_mm256_store_pd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		c_0 = _mm_load_sd( &A[1+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[1+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B[2+bs*0], c_0 );
-
-		c_0 = _mm_load_pd( &A[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B[2+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A[3+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*2], c_0 );
-
-		}
-
-	}
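-
-// For reference, a minimal scalar sketch of what kernel_dgecp_4_0_lib4 above computes in
-// the aligned case (names and loop shape are illustrative only):
-//
-//   for(k=0; k<kmax; k++)
-//     for(i=0; i<4; i++)
-//       B[i+bs*k] = alpha * A[i+bs*k];
-//
-// With tri==1 it processes kmax+1 full columns plus the trailing 3x3 lower triangle
-// handled after the main loop.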
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgecp_4_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		alpha_0,
-		a_0, a_1,
-		b_0;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
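-		// permute2f128 forms {A0 high half, A1 low half}; shuffle_pd then picks the
-		// odd/even lanes so b_0 = {A0[1], A0[2], A0[3], A1[0]}: the source column
-		// shifted down by one element across the panel boundary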
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		c_0 = _mm_load_pd( &A0[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*0], c_0 );
-
-		c_0 = _mm_load_sd( &A0[3+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[2+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[0+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*2], c_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgecp_4_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		alpha_0,
-		a_0, a_1,
-		b_0;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
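-		// with a 2-element offset a single permute2f128 already yields
-		// b_0 = {A0[2], A0[3], A1[0], A1[1]}, so no shuffle_pd is needed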
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		c_0 = _mm_load_sd( &A0[3+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[1+bs*0], c_0 );
-		c_0 = _mm_load_pd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B[2+bs*0], c_0 );
-
-		c_0 = _mm_load_pd( &A1[0+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_pd( &B[2+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[1+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*2], c_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_4_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		alpha_0,
-		a_0, a_1,
-		b_0;
-	
-	__m128d
-		c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
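-		// permute2f128 + shuffle_pd assemble b_0 = {A0[3], A1[0], A1[1], A1[2]}:
-		// the source column read starting three rows into panel A0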
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		c_0 = _mm_load_pd( &A1[0+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_storeu_pd( &B[1+bs*0], c_0 );
-		c_0 = _mm_load_sd( &A1[2+bs*0] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*0], c_0 );
-
-		c_0 = _mm_load_sd( &A1[1+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[2+bs*1], c_0 );
-		c_0 = _mm_load_sd( &A1[2+bs*1] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*1], c_0 );
-
-		c_0 = _mm_load_sd( &A1[2+bs*2] );
-		c_0 = _mm_mul_pd( _mm256_castpd256_pd128( alpha_0 ), c_0 );
-		_mm_store_sd( &B[3+bs*2], c_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_3_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		alpha_0,
-		a_0, a_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_1 = _mm_load_sd( &A[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*1] );
-		a_1 = _mm_load_sd( &A[2+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-		_mm_store_sd( &B[2+bs*1], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*2] );
-		a_1 = _mm_load_sd( &A[2+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-		_mm_store_sd( &B[2+bs*2], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*3] );
-		a_1 = _mm_load_sd( &A[2+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-		_mm_store_sd( &B[2+bs*3], a_1 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_1 = _mm_load_sd( &A[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		a_0 = _mm_loadu_pd( &A[1+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[1+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A[2+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[2+bs*1], a_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgecp_3_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		alpha_0,
-		a_0, a_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		a_1 = _mm_load_sd( &A1[0+bs*0] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-		a_1 = _mm_load_sd( &A1[0+bs*1] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_store_sd( &B[2+bs*1], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-		a_1 = _mm_load_sd( &A1[0+bs*2] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_store_sd( &B[2+bs*2], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-		a_1 = _mm_load_sd( &A1[0+bs*3] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_store_sd( &B[2+bs*3], a_1 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		a_1 = _mm_load_sd( &A1[0+bs*0] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[1+bs*0], a_0 );
-		a_0 = _mm_load_sd( &A1[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[2+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A1[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[2+bs*1], a_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_3_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		alpha_0,
-		a_0, a_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-		a_1 = _mm_loadu_pd( &A1[0+bs*0] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[1+bs*0], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*1], a_0 );
-		a_1 = _mm_loadu_pd( &A1[0+bs*1] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[1+bs*1], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*2], a_0 );
-		a_1 = _mm_loadu_pd( &A1[0+bs*2] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[1+bs*2], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*3], a_0 );
-		a_1 = _mm_loadu_pd( &A1[0+bs*3] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[1+bs*3], a_1 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-		a_1 = _mm_loadu_pd( &A1[0+bs*0] );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		_mm_storeu_pd( &B[1+bs*0], a_1 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		a_0 = _mm_loadu_pd( &A1[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[1+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A1[1+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[2+bs*1], a_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_2_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		alpha_0,
-		a_0;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 1x1 triangle
-
-		a_0 = _mm_load_sd( &A[1+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[1+bs*0], a_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_2_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		alpha_0,
-		a_0;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*1] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*2] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*3] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 1x1 triangle
-
-		a_0 = _mm_load_sd( &A1[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[1+bs*0], a_0 );
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_1_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		alpha_0,
-		a_0;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgead_8_0_lib4(int kmax, double alpha, double *A0, int sda,  double *B0, int sdb)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		a_0, c_0, alpha_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B0[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		c_0 = _mm256_load_pd( &B0[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B0[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		c_0 = _mm256_load_pd( &B0[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B0[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		c_0 = _mm256_load_pd( &B0[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B0[0+bs*3], a_0 );
-
-		A0 += 16;
-		B0 += 16;
-
-		a_0 = _mm256_load_pd( &A1[0+bs*0] );
-		c_0 = _mm256_load_pd( &B1[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B1[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*1] );
-		c_0 = _mm256_load_pd( &B1[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B1[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*2] );
-		c_0 = _mm256_load_pd( &B1[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B1[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A1[0+bs*3] );
-		c_0 = _mm256_load_pd( &B1[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B1[0+bs*3], a_0 );
-
-		A1 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B0[0+bs*0], a_0 );
-
-		A0 += 4;
-		B0 += 4;
-
-		a_0 = _mm256_load_pd( &A1[0+bs*0] );
-		c_0 = _mm256_load_pd( &B1[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( a_0, c_0 );
-		_mm256_store_pd( &B1[0+bs*0], a_0 );
-
-		A1 += 4;
-		B1 += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgead_8_1_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		a_0, a_1, a_2,
-		b_0, b_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
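-		// the permute/shuffle pair rebuilds the two destination columns
-		// b_0 = {A0[1], A0[2], A0[3], A1[0]} and b_1 = {A1[1], A1[2], A1[3], A2[0]}
-		// (a one-element shift across panels) before the scale-and-add into B0/B1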
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		c_1 = _mm256_load_pd( &B1[0+bs*1] );
-		c_0 = _mm256_load_pd( &B0[0+bs*1] );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		c_1 = _mm256_load_pd( &B1[0+bs*2] );
-		c_0 = _mm256_load_pd( &B0[0+bs*2] );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		c_1 = _mm256_load_pd( &B1[0+bs*3] );
-		c_0 = _mm256_load_pd( &B0[0+bs*3] );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_2 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_shuffle_pd( a_1, a_2, 0x5 );
-		b_0 = _mm256_shuffle_pd( a_0, b_0, 0x5 );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgead_8_2_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		a_0, a_1, a_2,
-		b_0, b_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		c_0 = _mm256_load_pd( &B0[0+bs*1] );
-		c_1 = _mm256_load_pd( &B1[0+bs*1] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		c_0 = _mm256_load_pd( &B0[0+bs*2] );
-		c_1 = _mm256_load_pd( &B1[0+bs*2] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		c_0 = _mm256_load_pd( &B0[0+bs*3] );
-		c_1 = _mm256_load_pd( &B1[0+bs*3] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_8_3_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-	double *A2 = A1 + bs*sda;
-	double *B1 = B0 + bs*sdb;
-
-	__m256d
-		a_0, a_1, a_2,
-		b_0, b_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_2 = _mm256_load_pd( &A2[0+bs*1] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		c_0 = _mm256_load_pd( &B0[0+bs*1] );
-		c_1 = _mm256_load_pd( &B1[0+bs*1] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*1], b_0 );
-		_mm256_store_pd( &B1[0+bs*1], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_2 = _mm256_load_pd( &A2[0+bs*2] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		c_0 = _mm256_load_pd( &B0[0+bs*2] );
-		c_1 = _mm256_load_pd( &B1[0+bs*2] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*2], b_0 );
-		_mm256_store_pd( &B1[0+bs*2], b_1 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_2 = _mm256_load_pd( &A2[0+bs*3] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		c_0 = _mm256_load_pd( &B0[0+bs*3] );
-		c_1 = _mm256_load_pd( &B1[0+bs*3] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*3], b_0 );
-		_mm256_store_pd( &B1[0+bs*3], b_1 );
-
-		A0 += 16;
-		A1 += 16;
-		A2 += 16;
-		B0 += 16;
-		B1 += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_2 = _mm256_load_pd( &A2[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_1 = _mm256_permute2f128_pd( a_1, a_2, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		b_1 = _mm256_shuffle_pd( b_1, a_2, 0x5 );
-		c_0 = _mm256_load_pd( &B0[0+bs*0] );
-		c_1 = _mm256_load_pd( &B1[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_1 = _mm256_mul_pd( alpha_0, b_1 );
-		b_0 = _mm256_add_pd ( c_0, b_0 );
-		b_1 = _mm256_add_pd ( c_1, b_1 );
-		_mm256_store_pd( &B0[0+bs*0], b_0 );
-		_mm256_store_pd( &B1[0+bs*0], b_1 );
-
-		A0 += 4;
-		A1 += 4;
-		A2 += 4;
-		B0 += 4;
-		B1 += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgead_4_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m256d
-		a_0, c_0, alpha_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A[0+bs*0] );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( c_0, a_0 );
-		_mm256_store_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*1] );
-		c_0 = _mm256_load_pd( &B[0+bs*1] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( c_0, a_0 );
-		_mm256_store_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*2] );
-		c_0 = _mm256_load_pd( &B[0+bs*2] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( c_0, a_0 );
-		_mm256_store_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm256_load_pd( &A[0+bs*3] );
-		c_0 = _mm256_load_pd( &B[0+bs*3] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( c_0, a_0 );
-		_mm256_store_pd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A[0+bs*0] );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		a_0 = _mm256_mul_pd( alpha_0, a_0 );
-		a_0 = _mm256_add_pd( c_0, a_0 );
-		_mm256_store_pd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgead_4_1_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		a_0, a_1,
-		b_0,
-		alpha_0, c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*1] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*2] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*3] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_1 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgead_4_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		a_0, a_1,
-		b_0,
-		alpha_0, c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		c_0 = _mm256_load_pd( &B[0+bs*1] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		c_0 = _mm256_load_pd( &B[0+bs*2] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		c_0 = _mm256_load_pd( &B[0+bs*3] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		b_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_4_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m256d
-		a_0, a_1,
-		b_0,
-		alpha_0, c_0;
-	
-	int k;
-
-	alpha_0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*1] );
-		a_1 = _mm256_load_pd( &A1[0+bs*1] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*1] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*1], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*2] );
-		a_1 = _mm256_load_pd( &A1[0+bs*2] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*2] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*2], b_0 );
-
-		a_0 = _mm256_load_pd( &A0[0+bs*3] );
-		a_1 = _mm256_load_pd( &A1[0+bs*3] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*3] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*3], b_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm256_load_pd( &A0[0+bs*0] );
-		a_1 = _mm256_load_pd( &A1[0+bs*0] );
-		a_0 = _mm256_permute2f128_pd( a_0, a_1, 0x21 );
-		b_0 = _mm256_shuffle_pd( a_0, a_1, 0x5 );
-		c_0 = _mm256_load_pd( &B[0+bs*0] );
-		b_0 = _mm256_mul_pd( alpha_0, b_0 );
-		b_0 = _mm256_add_pd( c_0, b_0 );
-		_mm256_store_pd( &B[0+bs*0], b_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_3_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		a_0, a_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_1 = _mm_load_sd( &A[2+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		c_1 = _mm_load_sd( &B[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*1] );
-		a_1 = _mm_load_sd( &A[2+bs*1] );
-		c_0 = _mm_loadu_pd( &B[0+bs*1] );
-		c_1 = _mm_load_sd( &B[2+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-		_mm_store_sd( &B[2+bs*1], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*2] );
-		a_1 = _mm_load_sd( &A[2+bs*2] );
-		c_0 = _mm_loadu_pd( &B[0+bs*2] );
-		c_1 = _mm_load_sd( &B[2+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-		_mm_store_sd( &B[2+bs*2], a_1 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*3] );
-		a_1 = _mm_load_sd( &A[2+bs*3] );
-		c_0 = _mm_loadu_pd( &B[0+bs*3] );
-		c_1 = _mm_load_sd( &B[2+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-		_mm_store_sd( &B[2+bs*3], a_1 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		a_1 = _mm_load_sd( &A[2+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		c_1 = _mm_load_sd( &B[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgead_3_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		a_0, a_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*0] );
-		a_1 = _mm_load_sd( &A1[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		c_1 = _mm_load_sd( &B[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*1] );
-		a_1 = _mm_load_sd( &A1[0+bs*1] );
-		c_0 = _mm_loadu_pd( &B[0+bs*1] );
-		c_1 = _mm_load_sd( &B[2+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-		_mm_store_sd( &B[2+bs*1], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*2] );
-		a_1 = _mm_load_sd( &A1[0+bs*2] );
-		c_0 = _mm_loadu_pd( &B[0+bs*2] );
-		c_1 = _mm_load_sd( &B[2+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-		_mm_store_sd( &B[2+bs*2], a_1 );
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*3] );
-		a_1 = _mm_load_sd( &A1[0+bs*3] );
-		c_0 = _mm_loadu_pd( &B[0+bs*3] );
-		c_1 = _mm_load_sd( &B[2+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-		_mm_store_sd( &B[2+bs*3], a_1 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A0[2+bs*0] );
-		a_1 = _mm_load_sd( &A1[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		c_1 = _mm_load_sd( &B[2+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_1 = _mm_mul_sd( alpha_0, a_1 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		a_1 = _mm_add_sd( c_1, a_1 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-		_mm_store_sd( &B[2+bs*0], a_1 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_3_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		a_0, a_1,
-		alpha_0, c_0, c_1;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_1 = _mm_loadu_pd( &A1[0+bs*0] );
-		c_0 = _mm_load_sd( &B[0+bs*0] );
-		c_1 = _mm_loadu_pd( &B[1+bs*0] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		a_1 = _mm_add_pd( c_1, a_1 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-		_mm_storeu_pd( &B[1+bs*0], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*1] );
-		a_1 = _mm_loadu_pd( &A1[0+bs*1] );
-		c_0 = _mm_load_sd( &B[0+bs*1] );
-		c_1 = _mm_loadu_pd( &B[1+bs*1] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		a_1 = _mm_add_pd( c_1, a_1 );
-		_mm_store_sd( &B[0+bs*1], a_0 );
-		_mm_storeu_pd( &B[1+bs*1], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*2] );
-		a_1 = _mm_loadu_pd( &A1[0+bs*2] );
-		c_0 = _mm_load_sd( &B[0+bs*2] );
-		c_1 = _mm_loadu_pd( &B[1+bs*2] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		a_1 = _mm_add_pd( c_1, a_1 );
-		_mm_store_sd( &B[0+bs*2], a_0 );
-		_mm_storeu_pd( &B[1+bs*2], a_1 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*3] );
-		a_1 = _mm_loadu_pd( &A1[0+bs*3] );
-		c_0 = _mm_load_sd( &B[0+bs*3] );
-		c_1 = _mm_loadu_pd( &B[1+bs*3] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		a_1 = _mm_add_pd( c_1, a_1 );
-		_mm_store_sd( &B[0+bs*3], a_0 );
-		_mm_storeu_pd( &B[1+bs*3], a_1 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_1 = _mm_loadu_pd( &A1[0+bs*0] );
-		c_0 = _mm_load_sd( &B[0+bs*0] );
-		c_1 = _mm_loadu_pd( &B[1+bs*0] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_1 = _mm_mul_pd( alpha_0, a_1 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		a_1 = _mm_add_pd( c_1, a_1 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-		_mm_storeu_pd( &B[1+bs*0], a_1 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_2_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		a_0, c_0, alpha_0;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*1] );
-		c_0 = _mm_loadu_pd( &B[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*2] );
-		c_0 = _mm_loadu_pd( &B[0+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_loadu_pd( &A[0+bs*3] );
-		c_0 = _mm_loadu_pd( &B[0+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_loadu_pd( &A[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_2_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	__m128d
-		a_0, c_0, alpha_0;
-	
-	int k;
-
-	alpha_0 = _mm_loaddup_pd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*1] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*1] );
-		c_0 = _mm_loadu_pd( &B[0+bs*1] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*2] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*2] );
-		c_0 = _mm_loadu_pd( &B[0+bs*2] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_load_sd( &A0[3+bs*3] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*3] );
-		c_0 = _mm_loadu_pd( &B[0+bs*3] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*3], a_0 );
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A0[3+bs*0] );
-		a_0 = _mm_loadh_pd( a_0, &A1[0+bs*0] );
-		c_0 = _mm_loadu_pd( &B[0+bs*0] );
-		a_0 = _mm_mul_pd( alpha_0, a_0 );
-		a_0 = _mm_add_pd( c_0, a_0 );
-		_mm_storeu_pd( &B[0+bs*0], a_0 );
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_1_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	__m128d
-		a_0, c_0, alpha_0;
-	
-	int k;
-
-	alpha_0 = _mm_load_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_0 = _mm_load_sd( &A[0+bs*0] );
-		c_0 = _mm_load_sd( &B[0+bs*0] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*1] );
-		c_0 = _mm_load_sd( &B[0+bs*1] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		_mm_store_sd( &B[0+bs*1], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*2] );
-		c_0 = _mm_load_sd( &B[0+bs*2] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		_mm_store_sd( &B[0+bs*2], a_0 );
-
-		a_0 = _mm_load_sd( &A[0+bs*3] );
-		c_0 = _mm_load_sd( &B[0+bs*3] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		_mm_store_sd( &B[0+bs*3], a_0 );
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		a_0 = _mm_load_sd( &A[0+bs*0] );
-		c_0 = _mm_load_sd( &B[0+bs*0] );
-		a_0 = _mm_mul_sd( alpha_0, a_0 );
-		a_0 = _mm_add_sd( c_0, a_0 );
-		_mm_store_sd( &B[0+bs*0], a_0 );
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-void kernel_dgeset_4_lib4(int kmax, double alpha, double *A)
-	{
-
-	int k;
-
-	__m256d 
-		a0;
-
-	a0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		_mm256_store_pd( &A[0], a0 );
-		_mm256_store_pd( &A[4], a0 );
-		_mm256_store_pd( &A[8], a0 );
-		_mm256_store_pd( &A[12], a0 );
-
-		A += 16;
-
-		}	
-	for(; k<kmax; k++)
-		{
-
-		_mm256_store_pd( &A[0], a0 );
-
-		A += 4;
-
-		}
-	
-	}
-
-
-// A lower triangular
-void kernel_dtrset_4_lib4(int kmax, double alpha, double *A)
-	{
-
-	int k;
-
-	__m256d 
-		a0;
-
-	a0 = _mm256_broadcast_sd( &alpha );
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		_mm256_store_pd( &A[0], a0 );
-		_mm256_store_pd( &A[4], a0 );
-		_mm256_store_pd( &A[8], a0 );
-		_mm256_store_pd( &A[12], a0 );
-
-		A += 16;
-
-		}	
-	for(; k<kmax; k++)
-		{
-
-		_mm256_store_pd( &A[0], a0 );
-
-		A += 4;
-
-		}
-	
-	// final 4x4 triangle
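-	// only the lower-triangular entries are written: all of column 0, rows 1-3 of
-	// column 1, rows 2-3 of column 2, and row 3 of column 3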
-	_mm256_store_pd( &A[0], a0 );
-
-	_mm_store_sd( &A[5], _mm256_castpd256_pd128( a0 ) );
-	_mm_store_pd( &A[6], _mm256_castpd256_pd128( a0 ) );
-	
-	_mm_store_pd( &A[10], _mm256_castpd256_pd128( a0 ) );
-
-	_mm_store_sd( &A[15], _mm256_castpd256_pd128( a0 ) );
-
-	}
-
-
-
diff --git a/third_party/blasfeo/auxiliary/avx/kernel_dgetr_lib4.c b/third_party/blasfeo/auxiliary/avx/kernel_dgetr_lib4.c
deleted file mode 100644
index 29d095b..0000000
--- a/third_party/blasfeo/auxiliary/avx/kernel_dgetr_lib4.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_4_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	__m256d
-		alph,
-		v0, v1, v2, v3,
-		v4, v5, v6, v7;
-	
-	alph = _mm256_broadcast_sd( &alpha );
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-
-	for( ; k<kmax-7; k+=8)
-		{
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		v0 = _mm256_mul_pd( v0, alph );
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		v2 = _mm256_mul_pd( v2, alph );
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		v1 = _mm256_mul_pd( v1, alph );
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		v3 = _mm256_mul_pd( v3, alph );
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		v0 = _mm256_mul_pd( v0, alph );
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		v2 = _mm256_mul_pd( v2, alph );
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		v1 = _mm256_mul_pd( v1, alph );
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		v3 = _mm256_mul_pd( v3, alph );
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-		}
-
-	for( ; k<kmax-3; k+=4)
-		{
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		v0 = _mm256_mul_pd( v0, alph );
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		v2 = _mm256_mul_pd( v2, alph );
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		v1 = _mm256_mul_pd( v1, alph );
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		v3 = _mm256_mul_pd( v3, alph );
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-		}
-
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 3x3 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else if(kna==2)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*3] = alpha * A[3+bs*2];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_3_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 2x2 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_2_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-	
-	if(tri==1)
-		{
-		// end 1x1 triangle
-		C[0+bs*1] = alpha * A[1+bs*0];
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_1_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	}
-
-
-
-// transpose of general matrices, read across panels, write along panels
-void kernel_dgetr_4_0_lib4(int kmax, double *A, int sda, double *B)
-	{
-	const int ps = 4;
-	__m256d
-		v0, v1, v2, v3, v4, v5, v6, v7;
-	int k;
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		v0 = _mm256_load_pd( &A[0+ps*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+ps*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+ps*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+ps*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		_mm256_store_pd( &B[0+ps*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		_mm256_store_pd( &B[0+ps*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		_mm256_store_pd( &B[0+ps*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		_mm256_store_pd( &B[0+ps*3], v3 );
-
-		A += ps*sda;
-		B += ps*ps;
-		}
-	for( ; k<kmax; k++)
-		{
-		//
-		B[0+ps*0] = A[0+ps*0];
-		B[1+ps*0] = A[0+ps*1];
-		B[2+ps*0] = A[0+ps*2];
-		B[3+ps*0] = A[0+ps*3];
-
-		A += 1;
-		B += ps;
-		}
-	return;
-	}
-
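
The scalar cleanup loop in kernel_dgetr_4_0_lib4 above fully specifies the transpose this kernel vectorizes: a 4-column block of A, stored panel-major in 4-row panels that sit sda*4 doubles apart, is written as a 4-row, kmax-column block into a single panel of B with contiguous columns. Below is a minimal scalar sketch of that mapping under those layout assumptions; the helper name dgetr_4_0_ref and the driver are illustrative, not BLASFEO symbols.

#include <stdio.h>

// Scalar sketch of the panel transpose done by kernel_dgetr_4_0_lib4:
// element (k, i) of the 4-column source block becomes element (i, k) of B.
// Assumes the 4-wide panel-major layout used above; not part of the library.
static void dgetr_4_0_ref(int kmax, const double *A, int sda, double *B)
	{
	const int ps = 4;
	int k, i;
	for(k=0; k<kmax; k++)
		for(i=0; i<ps; i++)
			B[i + ps*k] = A[(k%ps) + ps*i + (k/ps)*ps*sda];
	}

int main(void)
	{
	// 8x4 source block stored in two 4-row panels (sda = 4 here, since the
	// block has 4 columns), transposed into a 4x8 block held in one panel of B.
	double A[32], B[32];
	int i, j;
	for(j=0; j<4; j++)
		for(i=0; i<8; i++)
			A[(i%4) + 4*j + (i/4)*4*4] = 10*i + j; // element (i, j) = 10*i + j
	dgetr_4_0_ref(8, A, 4, B);
	printf("B(3,5) = %g (expect A(5,3) = 53)\n", B[3 + 4*5]);
	return 0;
	}

The AVX body above computes the same mapping four columns at a time, replacing the scalar gather with in-register 4x4 transposes.
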
diff --git a/third_party/blasfeo/auxiliary/avx2/Makefile b/third_party/blasfeo/auxiliary/avx2/Makefile
deleted file mode 100644
index 463ebf5..0000000
--- a/third_party/blasfeo/auxiliary/avx2/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += kernel_dgetr_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
diff --git a/third_party/blasfeo/auxiliary/avx2/kernel_dgetr_lib4.c b/third_party/blasfeo/auxiliary/avx2/kernel_dgetr_lib4.c
deleted file mode 100644
index 14d00ef..0000000
--- a/third_party/blasfeo/auxiliary/avx2/kernel_dgetr_lib4.c
+++ /dev/null
@@ -1,756 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-
-
-
-// TODO: the triangular (tri) case is not handled yet in this 8-wide kernel
-void kernel_dgetr_8_lib4(int tri, int kmax, int kna, double alpha, double *A0, int sda, double *C, int sdc)
-	{
-
-	const int bs = 4;
-	
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	__m256d
-		alph, 
-		v0, v1, v2, v3, v4, v5, v6, v7,
-		v8, v9, va, vb, vc, vd, ve, vf;
-	
-	alph = _mm256_broadcast_sd( &alpha );
-	
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A0[0+bs*0];
-			C[0+bs*1] = alpha * A0[1+bs*0];
-			C[0+bs*2] = alpha * A0[2+bs*0];
-			C[0+bs*3] = alpha * A0[3+bs*0];
-
-			C[0+bs*4] = alpha * A1[0+bs*0];
-			C[0+bs*5] = alpha * A1[1+bs*0];
-			C[0+bs*6] = alpha * A1[2+bs*0];
-			C[0+bs*7] = alpha * A1[3+bs*0];
-
-			C  += 1;
-			A0 += bs;
-			A1 += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for(; k<kmax-7; k+=8)
-		{
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*0] ) ), _mm_load_pd( &A0[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*1] ) ), _mm_load_pd( &A0[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*0] ) ), _mm_load_pd( &A0[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*1] ) ), _mm_load_pd( &A0[2+bs*3]) , 0x1 ); // 21 31 23 33
-		
-		A0 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*0] ) ), _mm_load_pd( &A1[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*1] ) ), _mm_load_pd( &A1[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*0] ) ), _mm_load_pd( &A1[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*1] ) ), _mm_load_pd( &A1[2+bs*3]) , 0x1 ); // 21 31 23 33
-
-		A1 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*4], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*5], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*6], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*7], v7 );
-
-		C += sdc*bs;
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*0] ) ), _mm_load_pd( &A0[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*1] ) ), _mm_load_pd( &A0[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*0] ) ), _mm_load_pd( &A0[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*1] ) ), _mm_load_pd( &A0[2+bs*3]) , 0x1 ); // 21 31 23 33
-		
-		A0 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*0] ) ), _mm_load_pd( &A1[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*1] ) ), _mm_load_pd( &A1[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*0] ) ), _mm_load_pd( &A1[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*1] ) ), _mm_load_pd( &A1[2+bs*3]) , 0x1 ); // 21 31 23 33
-
-		A1 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*4], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*5], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*6], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*7], v7 );
-
-		C += sdc*bs;
-
-		}
-
-	for(; k<kmax-3; k+=4)
-		{
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*0] ) ), _mm_load_pd( &A0[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[0+bs*1] ) ), _mm_load_pd( &A0[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*0] ) ), _mm_load_pd( &A0[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A0[2+bs*1] ) ), _mm_load_pd( &A0[2+bs*3]) , 0x1 ); // 21 31 23 33
-		
-		A0 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*0] ) ), _mm_load_pd( &A1[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[0+bs*1] ) ), _mm_load_pd( &A1[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*0] ) ), _mm_load_pd( &A1[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A1[2+bs*1] ) ), _mm_load_pd( &A1[2+bs*3]) , 0x1 ); // 21 31 23 33
-
-		A1 += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*4], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*5], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*6], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*7], v7 );
-
-		C += sdc*bs;
-
-		}
-
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A0[0+bs*0];
-		C[0+bs*1] = alpha * A0[1+bs*0];
-		C[0+bs*2] = alpha * A0[2+bs*0];
-		C[0+bs*3] = alpha * A0[3+bs*0];
-
-		C[0+bs*4] = alpha * A1[0+bs*0];
-		C[0+bs*5] = alpha * A1[1+bs*0];
-		C[0+bs*6] = alpha * A1[2+bs*0];
-		C[0+bs*7] = alpha * A1[3+bs*0];
-
-		C  += 1;
-		A0 += bs;
-		A1 += bs;
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_4_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	__m256d
-		alph,
-		v0, v1, v2, v3,
-		v4, v5, v6, v7;
-	
-	alph = _mm256_broadcast_sd( &alpha );
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-
-	for( ; k<kmax-7; k+=8)
-		{
-
-#if 1
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*0] ) ), _mm_load_pd( &A[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*1] ) ), _mm_load_pd( &A[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*0] ) ), _mm_load_pd( &A[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*1] ) ), _mm_load_pd( &A[2+bs*3]) , 0x1 ); // 21 31 23 33
-		
-		A += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		C += sdc*bs;
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*0] ) ), _mm_load_pd( &A[0+bs*2]) , 0x1 );
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*1] ) ), _mm_load_pd( &A[0+bs*3]) , 0x1 );
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*0] ) ), _mm_load_pd( &A[2+bs*2]) , 0x1 );
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*1] ) ), _mm_load_pd( &A[2+bs*3]) , 0x1 );
-		
-		A += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		C += sdc*bs;
-
-#else // TODO: this fallback path does not apply the alpha scaling
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-#endif
-
-		}
-
-	for( ; k<kmax-3; k+=4)
-		{
-
-#if 1
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*0] ) ), _mm_load_pd( &A[0+bs*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*1] ) ), _mm_load_pd( &A[0+bs*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*0] ) ), _mm_load_pd( &A[2+bs*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*1] ) ), _mm_load_pd( &A[2+bs*3]) , 0x1 ); // 21 31 23 33
-		
-		A += 4*bs;
-
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		v4 = _mm256_mul_pd( v4, alph );
-		_mm256_store_pd( &C[0+bs*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		v5 = _mm256_mul_pd( v5, alph );
-		_mm256_store_pd( &C[0+bs*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		v6 = _mm256_mul_pd( v6, alph );
-		_mm256_store_pd( &C[0+bs*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		v7 = _mm256_mul_pd( v7, alph );
-		_mm256_store_pd( &C[0+bs*3], v7 );
-
-		C += sdc*bs;
-
-#else
-
-		v0 = _mm256_load_pd( &A[0+bs*0] ); // 00 10 20 30
-		v1 = _mm256_load_pd( &A[0+bs*1] ); // 01 11 21 31
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-		v2 = _mm256_load_pd( &A[0+bs*2] ); // 02 12 22 32
-		v3 = _mm256_load_pd( &A[0+bs*3] ); // 03 13 23 33
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-		
-		A += bs*bs;
-
-		v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-		_mm256_store_pd( &C[0+bs*0], v0 );
-		v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-		_mm256_store_pd( &C[0+bs*2], v2 );
-		v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-		_mm256_store_pd( &C[0+bs*1], v1 );
-		v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-		_mm256_store_pd( &C[0+bs*3], v3 );
-
-		C += bs*sdc;
-
-#endif
-
-		}
-
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 3x3 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else if(kna==2)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*3] = alpha * A[3+bs*2];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_3_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 2x2 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_2_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-	
-	if(tri==1)
-		{
-		// end 1x1 triangle
-		C[0+bs*1] = alpha * A[1+bs*0];
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_1_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	}
-
-
-
-// transpose of general matrices, read across panels, write along panels
-void kernel_dgetr_4_0_lib4(int kmax, double *A, int sda, double *B)
-	{
-	const int ps = 4;
-	__m256d
-		v0, v1, v2, v3, v4, v5, v6, v7;
-	int k;
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+ps*0] ) ), _mm_load_pd( &A[0+ps*2]) , 0x1 ); // 00 10 02 12
-		v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+ps*1] ) ), _mm_load_pd( &A[0+ps*3]) , 0x1 ); // 01 11 03 13
-		v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+ps*0] ) ), _mm_load_pd( &A[2+ps*2]) , 0x1 ); // 20 30 22 32
-		v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+ps*1] ) ), _mm_load_pd( &A[2+ps*3]) , 0x1 ); // 21 31 23 33
-		
-		v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 02 03
-		_mm256_store_pd( &B[0+ps*0], v4 );
-		v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 12 13
-		_mm256_store_pd( &B[0+ps*1], v5 );
-		v6 = _mm256_unpacklo_pd( v2, v3 ); // 20 21 22 23
-		_mm256_store_pd( &B[0+ps*2], v6 );
-		v7 = _mm256_unpackhi_pd( v2, v3 ); // 30 31 32 33
-		_mm256_store_pd( &B[0+ps*3], v7 );
-
-		A += ps*sda;
-		B += ps*ps;
-		}
-	for( ; k<kmax; k++)
-		{
-		//
-		B[0+ps*0] = A[0+ps*0];
-		B[1+ps*0] = A[0+ps*1];
-		B[2+ps*0] = A[0+ps*2];
-		B[3+ps*0] = A[0+ps*3];
-
-		A += 1;
-		B += ps;
-		}
-	return;
-	}
-
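
Compared with the plain-AVX file earlier in this change, which transposes with _mm256_load_pd, unpacklo/unpackhi and _mm256_permute2f128_pd, the kernels in this file build each 256-bit register from two 128-bit column halves with _mm256_insertf128_pd, so a single unpack step already yields the transposed rows. Below is a standalone sketch of that 4x4 idiom, assuming 32-byte-aligned, column-major 4x4 blocks; dtrans_4x4 and the GCC-style alignment attribute are illustrative, not BLASFEO symbols (compile with -mavx).

#include <immintrin.h>
#include <stdio.h>

// Illustrative 4x4 double transpose using the idiom from the kernels above:
// fuse two 128-bit column halves into one 256-bit register, then unpack.
// A and B are 4x4, column-major, 32-byte aligned.
static void dtrans_4x4(const double *A, double *B)
	{
	const int bs = 4;
	__m256d v0, v1, v2, v3;
	v0 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*0] ) ), _mm_load_pd( &A[0+bs*2] ), 0x1 ); // 00 10 02 12
	v1 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[0+bs*1] ) ), _mm_load_pd( &A[0+bs*3] ), 0x1 ); // 01 11 03 13
	v2 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*0] ) ), _mm_load_pd( &A[2+bs*2] ), 0x1 ); // 20 30 22 32
	v3 = _mm256_insertf128_pd( _mm256_castpd128_pd256( _mm_load_pd( &A[2+bs*1] ) ), _mm_load_pd( &A[2+bs*3] ), 0x1 ); // 21 31 23 33
	_mm256_store_pd( &B[0+bs*0], _mm256_unpacklo_pd( v0, v1 ) ); // 00 01 02 03
	_mm256_store_pd( &B[0+bs*1], _mm256_unpackhi_pd( v0, v1 ) ); // 10 11 12 13
	_mm256_store_pd( &B[0+bs*2], _mm256_unpacklo_pd( v2, v3 ) ); // 20 21 22 23
	_mm256_store_pd( &B[0+bs*3], _mm256_unpackhi_pd( v2, v3 ) ); // 30 31 32 33
	}

int main(void)
	{
	double A[16] __attribute__((aligned(32)));
	double B[16] __attribute__((aligned(32)));
	int i;
	for(i=0; i<16; i++)
		A[i] = i; // column-major: A[i+4*j] holds element (i, j)
	dtrans_4x4(A, B);
	printf("B(1,2) = %g (expect A(2,1) = %g)\n", B[1+4*2], A[2+4*1]);
	return 0;
	}

In the kernels above that take an alpha argument, the same sequence is followed by an _mm256_mul_pd with the broadcast alpha before each store.
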
diff --git a/third_party/blasfeo/auxiliary/c99/Makefile b/third_party/blasfeo/auxiliary/c99/Makefile
deleted file mode 100644
index 6e9ea7b..0000000
--- a/third_party/blasfeo/auxiliary/c99/Makefile
+++ /dev/null
@@ -1,77 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += 
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += 
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-OBJS += kernel_sgetr_lib4.o
-endif
-
-ifeq ($(TARGET), GENERIC)
-OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
-OBJS += kernel_sgetr_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
diff --git a/third_party/blasfeo/auxiliary/c99/kernel_dgecp_lib4.c b/third_party/blasfeo/auxiliary/c99/kernel_dgecp_lib4.c
deleted file mode 100644
index e883072..0000000
--- a/third_party/blasfeo/auxiliary/c99/kernel_dgecp_lib4.c
+++ /dev/null
@@ -1,1261 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgecp_4_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-		B[3+bs*0] = alpha*A[3+bs*0];
-
-		B[0+bs*1] = alpha*A[0+bs*1];
-		B[1+bs*1] = alpha*A[1+bs*1];
-		B[2+bs*1] = alpha*A[2+bs*1];
-		B[3+bs*1] = alpha*A[3+bs*1];
-
-		B[0+bs*2] = alpha*A[0+bs*2];
-		B[1+bs*2] = alpha*A[1+bs*2];
-		B[2+bs*2] = alpha*A[2+bs*2];
-		B[3+bs*2] = alpha*A[3+bs*2];
-
-		B[0+bs*3] = alpha*A[0+bs*3];
-		B[1+bs*3] = alpha*A[1+bs*3];
-		B[2+bs*3] = alpha*A[2+bs*3];
-		B[3+bs*3] = alpha*A[3+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-		B[3+bs*0] = alpha*A[3+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-		B[3+bs*0] = alpha*A[3+bs*0];
-
-		B[2+bs*1] = alpha*A[2+bs*1];
-		B[3+bs*1] = alpha*A[3+bs*1];
-
-		B[3+bs*2] = alpha*A[3+bs*2];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgecp_4_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[1+bs*0];
-		B[1+bs*0] = alpha*A0[2+bs*0];
-		B[2+bs*0] = alpha*A0[3+bs*0];
-		B[3+bs*0] = alpha*A1[0+bs*0];
-
-		B[0+bs*1] = alpha*A0[1+bs*1];
-		B[1+bs*1] = alpha*A0[2+bs*1];
-		B[2+bs*1] = alpha*A0[3+bs*1];
-		B[3+bs*1] = alpha*A1[0+bs*1];
-
-		B[0+bs*2] = alpha*A0[1+bs*2];
-		B[1+bs*2] = alpha*A0[2+bs*2];
-		B[2+bs*2] = alpha*A0[3+bs*2];
-		B[3+bs*2] = alpha*A1[0+bs*2];
-
-		B[0+bs*3] = alpha*A0[1+bs*3];
-		B[1+bs*3] = alpha*A0[2+bs*3];
-		B[2+bs*3] = alpha*A0[3+bs*3];
-		B[3+bs*3] = alpha*A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[1+bs*0];
-		B[1+bs*0] = alpha*A0[2+bs*0];
-		B[2+bs*0] = alpha*A0[3+bs*0];
-		B[3+bs*0] = alpha*A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		B[1+0*bs] = alpha*A0[2+0*bs];
-		B[2+0*bs] = alpha*A0[3+0*bs];
-		B[3+0*bs] = alpha*A1[0+0*bs];
-
-		B[2+1*bs] = alpha*A0[3+1*bs];
-		B[3+1*bs] = alpha*A1[0+1*bs];
-
-		B[3+2*bs] = alpha*A1[0+2*bs];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgecp_4_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[2+bs*0];
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-		B[3+bs*0] = alpha*A1[1+bs*0];
-
-		B[0+bs*1] = alpha*A0[2+bs*1];
-		B[1+bs*1] = alpha*A0[3+bs*1];
-		B[2+bs*1] = alpha*A1[0+bs*1];
-		B[3+bs*1] = alpha*A1[1+bs*1];
-
-		B[0+bs*2] = alpha*A0[2+bs*2];
-		B[1+bs*2] = alpha*A0[3+bs*2];
-		B[2+bs*2] = alpha*A1[0+bs*2];
-		B[3+bs*2] = alpha*A1[1+bs*2];
-
-		B[0+bs*3] = alpha*A0[2+bs*3];
-		B[1+bs*3] = alpha*A0[3+bs*3];
-		B[2+bs*3] = alpha*A1[0+bs*3];
-		B[3+bs*3] = alpha*A1[1+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[2+bs*0];
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-		B[3+bs*0] = alpha*A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-		B[3+bs*0] = alpha*A1[1+bs*0];
-
-		B[2+bs*1] = alpha*A1[0+bs*1];
-		B[3+bs*1] = alpha*A1[1+bs*1];
-
-		B[3+bs*2] = alpha*A1[1+bs*2];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_4_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-		B[3+bs*0] = alpha*A1[2+bs*0];
-
-		B[0+bs*1] = alpha*A0[3+bs*1];
-		B[1+bs*1] = alpha*A1[0+bs*1];
-		B[2+bs*1] = alpha*A1[1+bs*1];
-		B[3+bs*1] = alpha*A1[2+bs*1];
-
-		B[0+bs*2] = alpha*A0[3+bs*2];
-		B[1+bs*2] = alpha*A1[0+bs*2];
-		B[2+bs*2] = alpha*A1[1+bs*2];
-		B[3+bs*2] = alpha*A1[2+bs*2];
-
-		B[0+bs*3] = alpha*A0[3+bs*3];
-		B[1+bs*3] = alpha*A1[0+bs*3];
-		B[2+bs*3] = alpha*A1[1+bs*3];
-		B[3+bs*3] = alpha*A1[2+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-		B[3+bs*0] = alpha*A1[2+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 3x3 triangle
-
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-		B[3+bs*0] = alpha*A1[2+bs*0];
-
-		B[2+bs*1] = alpha*A1[1+bs*1];
-		B[3+bs*1] = alpha*A1[2+bs*1];
-
-		B[3+bs*2] = alpha*A1[2+bs*2];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_3_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-
-		B[0+bs*1] = alpha*A[0+bs*1];
-		B[1+bs*1] = alpha*A[1+bs*1];
-		B[2+bs*1] = alpha*A[2+bs*1];
-
-		B[0+bs*2] = alpha*A[0+bs*2];
-		B[1+bs*2] = alpha*A[1+bs*2];
-		B[2+bs*2] = alpha*A[2+bs*2];
-
-		B[0+bs*3] = alpha*A[0+bs*3];
-		B[1+bs*3] = alpha*A[1+bs*3];
-		B[2+bs*3] = alpha*A[2+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		B[1+bs*0] = alpha*A[1+bs*0];
-		B[2+bs*0] = alpha*A[2+bs*0];
-
-		B[2+bs*1] = alpha*A[2+bs*1];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgecp_3_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[2+bs*0];
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-
-		B[0+bs*1] = alpha*A0[2+bs*1];
-		B[1+bs*1] = alpha*A0[3+bs*1];
-		B[2+bs*1] = alpha*A1[0+bs*1];
-
-		B[0+bs*2] = alpha*A0[2+bs*2];
-		B[1+bs*2] = alpha*A0[3+bs*2];
-		B[2+bs*2] = alpha*A1[0+bs*2];
-
-		B[0+bs*3] = alpha*A0[2+bs*3];
-		B[1+bs*3] = alpha*A0[3+bs*3];
-		B[2+bs*3] = alpha*A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[2+bs*0];
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		B[1+bs*0] = alpha*A0[3+bs*0];
-		B[2+bs*0] = alpha*A1[0+bs*0];
-
-		B[2+bs*1] = alpha*A1[0+bs*1];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_3_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-
-		B[0+bs*1] = alpha*A0[3+bs*1];
-		B[1+bs*1] = alpha*A1[0+bs*1];
-		B[2+bs*1] = alpha*A1[1+bs*1];
-
-		B[0+bs*2] = alpha*A0[3+bs*2];
-		B[1+bs*2] = alpha*A1[0+bs*2];
-		B[2+bs*2] = alpha*A1[1+bs*2];
-
-		B[0+bs*3] = alpha*A0[3+bs*3];
-		B[1+bs*3] = alpha*A1[0+bs*3];
-		B[2+bs*3] = alpha*A1[1+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 2x2 triangle
-
-		B[1+bs*0] = alpha*A1[0+bs*0];
-		B[2+bs*0] = alpha*A1[1+bs*0];
-
-		B[2+bs*1] = alpha*A1[1+bs*1];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_2_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-
-		B[0+bs*1] = alpha*A[0+bs*1];
-		B[1+bs*1] = alpha*A[1+bs*1];
-
-		B[0+bs*2] = alpha*A[0+bs*2];
-		B[1+bs*2] = alpha*A[1+bs*2];
-
-		B[0+bs*3] = alpha*A[0+bs*3];
-		B[1+bs*3] = alpha*A[1+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A[0+bs*0];
-		B[1+bs*0] = alpha*A[1+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 1x1 triangle
-
-		B[1+bs*0] = alpha*A[1+bs*0];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_dgecp_2_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-
-		B[0+bs*1] = alpha*A0[3+bs*1];
-		B[1+bs*1] = alpha*A1[0+bs*1];
-
-		B[0+bs*2] = alpha*A0[3+bs*2];
-		B[1+bs*2] = alpha*A1[0+bs*2];
-
-		B[0+bs*3] = alpha*A0[3+bs*3];
-		B[1+bs*3] = alpha*A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A0[3+bs*0];
-		B[1+bs*0] = alpha*A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	if(tri==1)
-		{
-		// 1x1 triangle
-
-		B[1+bs*0] = alpha*A1[0+bs*0];
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgecp_1_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
-	{
-
-	if(tri==1)
-		{
-		// A and C are lower triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] = alpha*A[0+bs*0];
-
-		B[0+bs*1] = alpha*A[0+bs*1];
-
-		B[0+bs*2] = alpha*A[0+bs*2];
-
-		B[0+bs*3] = alpha*A[0+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] = alpha*A[0+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_dgead_4_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-		B[3+bs*0] += alpha * A[3+bs*0];
-
-		B[0+bs*1] += alpha * A[0+bs*1];
-		B[1+bs*1] += alpha * A[1+bs*1];
-		B[2+bs*1] += alpha * A[2+bs*1];
-		B[3+bs*1] += alpha * A[3+bs*1];
-
-		B[0+bs*2] += alpha * A[0+bs*2];
-		B[1+bs*2] += alpha * A[1+bs*2];
-		B[2+bs*2] += alpha * A[2+bs*2];
-		B[3+bs*2] += alpha * A[3+bs*2];
-
-		B[0+bs*3] += alpha * A[0+bs*3];
-		B[1+bs*3] += alpha * A[1+bs*3];
-		B[2+bs*3] += alpha * A[2+bs*3];
-		B[3+bs*3] += alpha * A[3+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-		B[3+bs*0] += alpha * A[3+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_dgead_4_1_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[1+bs*0];
-		B[1+bs*0] += alpha * A0[2+bs*0];
-		B[2+bs*0] += alpha * A0[3+bs*0];
-		B[3+bs*0] += alpha * A1[0+bs*0];
-
-		B[0+bs*1] += alpha * A0[1+bs*1];
-		B[1+bs*1] += alpha * A0[2+bs*1];
-		B[2+bs*1] += alpha * A0[3+bs*1];
-		B[3+bs*1] += alpha * A1[0+bs*1];
-
-		B[0+bs*2] += alpha * A0[1+bs*2];
-		B[1+bs*2] += alpha * A0[2+bs*2];
-		B[2+bs*2] += alpha * A0[3+bs*2];
-		B[3+bs*2] += alpha * A1[0+bs*2];
-
-		B[0+bs*3] += alpha * A0[1+bs*3];
-		B[1+bs*3] += alpha * A0[2+bs*3];
-		B[2+bs*3] += alpha * A0[3+bs*3];
-		B[3+bs*3] += alpha * A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[1+bs*0];
-		B[1+bs*0] += alpha * A0[2+bs*0];
-		B[2+bs*0] += alpha * A0[3+bs*0];
-		B[3+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgead_4_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-		B[3+bs*0] += alpha * A1[1+bs*0];
-
-		B[0+bs*1] += alpha * A0[2+bs*1];
-		B[1+bs*1] += alpha * A0[3+bs*1];
-		B[2+bs*1] += alpha * A1[0+bs*1];
-		B[3+bs*1] += alpha * A1[1+bs*1];
-
-		B[0+bs*2] += alpha * A0[2+bs*2];
-		B[1+bs*2] += alpha * A0[3+bs*2];
-		B[2+bs*2] += alpha * A1[0+bs*2];
-		B[3+bs*2] += alpha * A1[1+bs*2];
-
-		B[0+bs*3] += alpha * A0[2+bs*3];
-		B[1+bs*3] += alpha * A0[3+bs*3];
-		B[2+bs*3] += alpha * A1[0+bs*3];
-		B[3+bs*3] += alpha * A1[1+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-		B[3+bs*0] += alpha * A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_4_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-		B[3+bs*0] += alpha * A1[2+bs*0];
-
-		B[0+bs*1] += alpha * A0[3+bs*1];
-		B[1+bs*1] += alpha * A1[0+bs*1];
-		B[2+bs*1] += alpha * A1[1+bs*1];
-		B[3+bs*1] += alpha * A1[2+bs*1];
-
-		B[0+bs*2] += alpha * A0[3+bs*2];
-		B[1+bs*2] += alpha * A1[0+bs*2];
-		B[2+bs*2] += alpha * A1[1+bs*2];
-		B[3+bs*2] += alpha * A1[2+bs*2];
-
-		B[0+bs*3] += alpha * A0[3+bs*3];
-		B[1+bs*3] += alpha * A1[0+bs*3];
-		B[2+bs*3] += alpha * A1[1+bs*3];
-		B[3+bs*3] += alpha * A1[2+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-		B[3+bs*0] += alpha * A1[2+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_3_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-
-		B[0+bs*1] += alpha * A[0+bs*1];
-		B[1+bs*1] += alpha * A[1+bs*1];
-		B[2+bs*1] += alpha * A[2+bs*1];
-
-		B[0+bs*2] += alpha * A[0+bs*2];
-		B[1+bs*2] += alpha * A[1+bs*2];
-		B[2+bs*2] += alpha * A[2+bs*2];
-
-		B[0+bs*3] += alpha * A[0+bs*3];
-		B[1+bs*3] += alpha * A[1+bs*3];
-		B[2+bs*3] += alpha * A[2+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_dgead_3_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-
-		B[0+bs*1] += alpha * A0[2+bs*1];
-		B[1+bs*1] += alpha * A0[3+bs*1];
-		B[2+bs*1] += alpha * A1[0+bs*1];
-
-		B[0+bs*2] += alpha * A0[2+bs*2];
-		B[1+bs*2] += alpha * A0[3+bs*2];
-		B[2+bs*2] += alpha * A1[0+bs*2];
-
-		B[0+bs*3] += alpha * A0[2+bs*3];
-		B[1+bs*3] += alpha * A0[3+bs*3];
-		B[2+bs*3] += alpha * A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_3_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-
-		B[0+bs*1] += alpha * A0[3+bs*1];
-		B[1+bs*1] += alpha * A1[0+bs*1];
-		B[2+bs*1] += alpha * A1[1+bs*1];
-
-		B[0+bs*2] += alpha * A0[3+bs*2];
-		B[1+bs*2] += alpha * A1[0+bs*2];
-		B[2+bs*2] += alpha * A1[1+bs*2];
-
-		B[0+bs*3] += alpha * A0[3+bs*3];
-		B[1+bs*3] += alpha * A1[0+bs*3];
-		B[2+bs*3] += alpha * A1[1+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_2_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-
-		B[0+bs*1] += alpha * A[0+bs*1];
-		B[1+bs*1] += alpha * A[1+bs*1];
-
-		B[0+bs*2] += alpha * A[0+bs*2];
-		B[1+bs*2] += alpha * A[1+bs*2];
-
-		B[0+bs*3] += alpha * A[0+bs*3];
-		B[1+bs*3] += alpha * A[1+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_dgead_2_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	double *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-
-		B[0+bs*1] += alpha * A0[3+bs*1];
-		B[1+bs*1] += alpha * A1[0+bs*1];
-
-		B[0+bs*2] += alpha * A0[3+bs*2];
-		B[1+bs*2] += alpha * A1[0+bs*2];
-
-		B[0+bs*3] += alpha * A0[3+bs*3];
-		B[1+bs*3] += alpha * A1[0+bs*3];
-
-		A0 += 16;
-		A1 += 16;
-		B  += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_dgead_1_0_lib4(int kmax, double alpha, double *A, double *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		B[0+bs*0] += alpha * A[0+bs*0];
-
-		B[0+bs*1] += alpha * A[0+bs*1];
-
-		B[0+bs*2] += alpha * A[0+bs*2];
-
-		B[0+bs*3] += alpha * A[0+bs*3];
-
-		A += 16;
-		B += 16;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-
diff --git a/third_party/blasfeo/auxiliary/c99/kernel_dgetr_lib4.c b/third_party/blasfeo/auxiliary/c99/kernel_dgetr_lib4.c
deleted file mode 100644
index 7d62277..0000000
--- a/third_party/blasfeo/auxiliary/c99/kernel_dgetr_lib4.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_4_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-		C[1+bs*3] = alpha * A[3+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-		C[2+bs*3] = alpha * A[3+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-		C[3+bs*3] = alpha * A[3+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 3x3 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else if(kna==2)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*3] = alpha * A[3+bs*2];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_3_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 2x2 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_2_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-	
-	if(tri==1)
-		{
-		// end 1x1 triangle
-		C[0+bs*1] = alpha * A[1+bs*0];
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_dgetr_1_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	}
-
-
-
-// transpose of general matrices, read across panels, write along panels
-void kernel_dgetr_4_0_lib4(int kmax, double *A, int sda, double *B)
-	{
-	const int ps = 4;
-	int k;
-	for(k=0; k<kmax-3; k+=4)
-		{
-		//
-		B[0+ps*0] = A[0+ps*0];
-		B[0+ps*1] = A[1+ps*0];
-		B[0+ps*2] = A[2+ps*0];
-		B[0+ps*3] = A[3+ps*0];
-		//
-		B[1+ps*0] = A[0+ps*1];
-		B[1+ps*1] = A[1+ps*1];
-		B[1+ps*2] = A[2+ps*1];
-		B[1+ps*3] = A[3+ps*1];
-		//
-		B[2+ps*0] = A[0+ps*2];
-		B[2+ps*1] = A[1+ps*2];
-		B[2+ps*2] = A[2+ps*2];
-		B[2+ps*3] = A[3+ps*2];
-		//
-		B[3+ps*0] = A[0+ps*3];
-		B[3+ps*1] = A[1+ps*3];
-		B[3+ps*2] = A[2+ps*3];
-		B[3+ps*3] = A[3+ps*3];
-
-		A += ps*sda;
-		B += ps*ps;
-		}
-	for( ; k<kmax; k++)
-		{
-		//
-		B[0+ps*0] = A[0+ps*0];
-		B[1+ps*0] = A[0+ps*1];
-		B[2+ps*0] = A[0+ps*2];
-		B[3+ps*0] = A[0+ps*3];
-
-		A += 1;
-		B += ps;
-		}
-	return;
-	}
-
diff --git a/third_party/blasfeo/auxiliary/c99/kernel_sgetr_lib4.c b/third_party/blasfeo/auxiliary/c99/kernel_sgetr_lib4.c
deleted file mode 100644
index 4cf6fa2..0000000
--- a/third_party/blasfeo/auxiliary/c99/kernel_sgetr_lib4.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_sgetr_4_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 4-wide + end 3x3 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-		C[1+bs*3] = alpha * A[3+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-		C[2+bs*3] = alpha * A[3+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-		C[3+bs*3] = alpha * A[3+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-		C[0+bs*3] = alpha * A[3+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 3x3 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else if(kna==2)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[0+bs*3] = alpha * A[3+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			C[1+bs*3] = alpha * A[3+bs*1];
-			C[2+bs*3] = alpha * A[3+bs*2];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_sgetr_3_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 3-wide + end 2x2 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-		C[1+bs*2] = alpha * A[2+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-		C[2+bs*2] = alpha * A[2+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-		C[3+bs*2] = alpha * A[2+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-		C[0+bs*2] = alpha * A[2+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	if(tri==1)
-		{
-		// end 2x2 triangle
-		kna = (bs-(bs-kna+kmax)%bs)%bs;
-
-		if(kna==1)
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
-			}
-		else
-			{
-			C[0+bs*1] = alpha * A[1+bs*0];
-			C[0+bs*2] = alpha * A[2+bs*0];
-			C[1+bs*2] = alpha * A[2+bs*1];
-			}
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_sgetr_2_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 2-wide + end 1x1 triangle
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-			C[0+bs*1] = alpha * A[1+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-		C[1+bs*1] = alpha * A[1+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-		C[2+bs*1] = alpha * A[1+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-		C[3+bs*1] = alpha * A[1+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-		C[0+bs*1] = alpha * A[1+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-	
-	if(tri==1)
-		{
-		// end 1x1 triangle
-		C[0+bs*1] = alpha * A[1+bs*0];
-		}
-
-	}
-
-
-
-// transpose of general matrices, read along panels, write across panels
-void kernel_sgetr_1_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
-	{
-
-	if(tri==1)
-		{
-		// A is lower triangular, C is upper triangular
-		// kmax+1 1-wide
-
-		kmax += 1;
-		}
-
-	const int bs = 4;
-	
-	int k;
-
-	k = 0;
-
-	if(kmax<kna)
-		goto cleanup_loop;
-
-	if(kna>0)
-		{
-		for( ; k<kna; k++)
-			{
-			C[0+bs*0] = alpha * A[0+bs*0];
-
-			C += 1;
-			A += bs;
-			}
-		C += bs*(sdc-1);
-		}
-	
-	for( ; k<kmax-3; k+=4)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C[1+bs*0] = alpha * A[0+bs*1];
-
-		C[2+bs*0] = alpha * A[0+bs*2];
-
-		C[3+bs*0] = alpha * A[0+bs*3];
-
-		C += bs*sdc;
-		A += bs*bs;
-		}
-	
-	cleanup_loop:
-
-	for( ; k<kmax; k++)
-		{
-		C[0+bs*0] = alpha * A[0+bs*0];
-
-		C += 1;
-		A += bs;
-		}
-
-	}
-
-
-
-
diff --git a/third_party/blasfeo/auxiliary/d_aux_ext_dep_lib.c b/third_party/blasfeo/auxiliary/d_aux_ext_dep_lib.c
deleted file mode 100644
index c12da10..0000000
--- a/third_party/blasfeo/auxiliary/d_aux_ext_dep_lib.c
+++ /dev/null
@@ -1,632 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#if 0
-#include <malloc.h>
-#endif
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if ! defined(OS_WINDOWS)
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-#endif
-
-
-
-/* creates a zero matrix */
-void d_zeros(double **pA, int row, int col)
-	{
-	*pA = malloc((row*col)*sizeof(double));
-	double *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0.0;
-	}
-
-
-
-/* creates a zero matrix aligned to a cache line */
-void d_zeros_align(double **pA, int row, int col)
-	{
-#if defined(OS_WINDOWS)
-	*pA = (double *) _aligned_malloc( (row*col)*sizeof(double), 64 );
-#else
-	void *temp;
-	int err = posix_memalign(&temp, 64, (row*col)*sizeof(double));
-	if(err!=0)
-		{
-		printf("Memory allocation error");
-		exit(1);
-		}
-	*pA = temp;
-#endif
-	double *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0.0;
-	}
-
-
-
-/* frees matrix */
-void d_free(double *pA)
-	{
-	free( pA );
-	}
-
-
-
-/* frees aligned matrix */
-void d_free_align(double *pA)
-	{
-#if defined(OS_WINDOWS)
-	_aligned_free( pA );
-#else
-	free( pA );
-#endif
-	}
-
-
-
-/* prints a matrix in column-major format */
-void d_print_mat(int m, int n, double *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<m; i++)
-		{
-		for(j=0; j<n; j++)
-			{
-			printf("%9.5f ", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints the transpose of a matrix in column-major format */
-void d_print_tran_mat(int row, int col, double *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			printf("%9.5f ", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints a matrix in column-major format */
-void d_print_to_file_mat(FILE *file, int row, int col, double *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<row; i++)
-		{
-		for(j=0; j<col; j++)
-			{
-			fprintf(file, "%9.5f ", A[i+lda*j]);
-			}
-		fprintf(file, "\n");
-		}
-	fprintf(file, "\n");
-	}	
-
-
-
-/* prints the transpose of a matrix in column-major format */
-void d_print_tran_to_file_mat(FILE *file, int row, int col, double *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			fprintf(file, "%9.5f ", A[i+lda*j]);
-			}
-		fprintf(file, "\n");
-		}
-	fprintf(file, "\n");
-	}	
-
-
-
-/* prints a matrix in column-major format (exponential notation) */
-void d_print_e_mat(int m, int n, double *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<m; i++)
-		{
-		for(j=0; j<n; j++)
-			{
-			printf("%1.15e\t", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints the transpose of a matrix in column-major format (exponential notation) */
-void d_print_e_tran_mat(int row, int col, double *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			printf("%e\t", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/****************************
-* new interface
-****************************/
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-#include "../include/blasfeo_block_size.h"
-
-
-
-// create a matrix structure for a matrix of size m*n by dynamically allocating the memory
-void d_allocate_strmat(int m, int n, struct d_strmat *sA)
-	{
-	const int bs = D_PS;
-	int nc = D_NC;
-	int al = bs*nc;
-	sA->m = m;
-	sA->n = n;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	sA->pm = pm;
-	sA->cn = cn;
-	d_zeros_align(&(sA->pA), sA->pm, sA->cn);
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	d_zeros_align(&(sA->dA), tmp, 1);
-	sA->use_dA = 0;
-	sA->memory_size = (pm*cn+tmp)*sizeof(double);
-	return;
-	}
-
-
-
-// free memory of a matrix structure
-void d_free_strmat(struct d_strmat *sA)
-	{
-	d_free_align(sA->pA);
-	d_free_align(sA->dA);
-	return;
-	}
-
-
-
-// create a vector structure for a vector of size m by dynamically allocating the memory
-void d_allocate_strvec(int m, struct d_strvec *sa)
-	{
-	const int bs = D_PS;
-//	int nc = D_NC;
-//	int al = bs*nc;
-	sa->m = m;
-	int pm = (m+bs-1)/bs*bs;
-	sa->pm = pm;
-	d_zeros_align(&(sa->pa), sa->pm, 1);
-	sa->memory_size = pm*sizeof(double);
-	return;
-	}
-
-
-
-// free memory of a vector structure
-void d_free_strvec(struct d_strvec *sa)
-	{
-	d_free_align(sa->pa);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_strmat(int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = D_PS;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j]);
-				}
-			printf("\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	printf("\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_tran_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_to_file_strmat(FILE * file, int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = D_PS;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j]);
-				}
-			fprintf(file, "\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			fprintf(file, "\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			fprintf(file, "\n");
-			}
-		}
-	fprintf(file, "\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_to_file_strvec(FILE * file, int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_to_file_mat(file, m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_tran_to_file_strvec(FILE * file, int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_to_file_mat(file, 1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_e_strmat(int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = D_PS;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j]);
-				}
-			printf("\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	printf("\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_e_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_e_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_e_tran_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_e_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-#elif defined(LA_BLAS) | defined(LA_REFERENCE)
-
-
-
-// create a matrix structure for a matrix of size m*n
-void d_allocate_strmat(int m, int n, struct d_strmat *sA)
-	{
-	sA->m = m;
-	sA->n = n;
-	d_zeros(&(sA->pA), sA->m, sA->n);
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	d_zeros(&(sA->dA), tmp, 1);
-	sA->memory_size = (m*n+tmp)*sizeof(double);
-	return;
-	}
-
-
-
-// free memory of a matrix structure
-void d_free_strmat(struct d_strmat *sA)
-	{
-	free(sA->pA);
-	free(sA->dA);
-	return;
-	}
-
-
-
-// create a vector structure for a vector of size m
-void d_allocate_strvec(int m, struct d_strvec *sa)
-	{
-	sa->m = m;
-	d_zeros(&(sa->pa), sa->m, 1);
-	sa->memory_size = m*sizeof(double);
-	return;
-	}
-
-
-
-// free memory of a vector structure
-void d_free_strvec(struct d_strvec *sa)
-	{
-	free(sa->pa);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_strmat(int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	d_print_mat(m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_tran_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_to_file_strmat(FILE *file, int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	d_print_to_file_mat(file, m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_to_file_strvec(FILE *file, int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_to_file_mat(file, m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_to_file_tran_strvec(FILE *file, int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_to_file_mat(file, 1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void d_print_e_strmat(int m, int n, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	d_print_e_mat(m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void d_print_e_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_e_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transpose of a vector structure
-void d_print_e_tran_strvec(int m, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	d_print_e_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
diff --git a/third_party/blasfeo/auxiliary/d_aux_lib.c b/third_party/blasfeo/auxiliary/d_aux_lib.c
deleted file mode 100644
index 6f1f5d1..0000000
--- a/third_party/blasfeo/auxiliary/d_aux_lib.c
+++ /dev/null
@@ -1,982 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if defined(LA_REFERENCE) | defined(LA_BLAS)
-
-
-
-// return memory size (in bytes) needed for a strmat
-int d_size_strmat(int m, int n)
-	{
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	int size = (m*n+tmp)*sizeof(double);
-	return size;
-	}
-
-
-
-// return memory size (in bytes) needed for the diagonal of a strmat
-int d_size_diag_strmat(int m, int n)
-	{
-	int size = 0;
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	size = tmp*sizeof(double);
-	return size;
-	}
-
-
-
-// create a matrix structure for a matrix of size m*n by using memory passed by a pointer
-void d_create_strmat(int m, int n, struct d_strmat *sA, void *memory)
-	{
-	sA->m = m;
-	sA->n = n;
-	double *ptr = (double *) memory;
-	sA->pA = ptr;
-	ptr += m*n;
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	sA->dA = ptr;
-	ptr += tmp;
-	sA->use_dA = 0;
-	sA->memory_size = (m*n+tmp)*sizeof(double);
-	return;
-	}
-
-
-
-// return memory size (in bytes) needed for a strvec
-int d_size_strvec(int m)
-	{
-	int size = m*sizeof(double);
-	return size;
-	}
-
-
-
-// create a vector structure for a vector of size m by using memory passed by a pointer
-void d_create_strvec(int m, struct d_strvec *sa, void *memory)
-	{
-	sa->m = m;
-	double *ptr = (double *) memory;
-	sa->pa = ptr;
-//	ptr += m * n;
-	sa->memory_size = m*sizeof(double);
-	return;
-	}
-
-
-
-// convert a matrix into a matrix structure
-void d_cvt_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	double *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[ii+0+jj*lda2] = A[ii+0+jj*lda];
-			pA[ii+1+jj*lda2] = A[ii+1+jj*lda];
-			pA[ii+2+jj*lda2] = A[ii+2+jj*lda];
-			pA[ii+3+jj*lda2] = A[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pA[ii+0+jj*lda2] = A[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix into a matrix structure
-void d_cvt_tran_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	double *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[jj+(ii+0)*lda2] = A[ii+0+jj*lda];
-			pA[jj+(ii+1)*lda2] = A[ii+1+jj*lda];
-			pA[jj+(ii+2)*lda2] = A[ii+2+jj*lda];
-			pA[jj+(ii+3)*lda2] = A[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pA[jj+(ii+0)*lda2] = A[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector into a vector structure
-void d_cvt_vec2strvec(int m, double *a, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		pa[ii] = a[ii];
-	return;
-	}
-
-
-
-// convert a matrix structure into a matrix
-void d_cvt_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	double *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			A[ii+0+jj*lda] = pA[ii+0+jj*lda2];
-			A[ii+1+jj*lda] = pA[ii+1+jj*lda2];
-			A[ii+2+jj*lda] = pA[ii+2+jj*lda2];
-			A[ii+3+jj*lda] = pA[ii+3+jj*lda2];
-			}
-		for(; ii<m; ii++)
-			{
-			A[ii+0+jj*lda] = pA[ii+0+jj*lda2];
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix structure into a matrix
-void d_cvt_tran_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	double *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			A[jj+(ii+0)*lda] = pA[ii+0+jj*lda2];
-			A[jj+(ii+1)*lda] = pA[ii+1+jj*lda2];
-			A[jj+(ii+2)*lda] = pA[ii+2+jj*lda2];
-			A[jj+(ii+3)*lda] = pA[ii+3+jj*lda2];
-			}
-		for(; ii<m; ii++)
-			{
-			A[jj+(ii+0)*lda] = pA[ii+0+jj*lda2];
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector structure into a vector
-void d_cvt_strvec2vec(int m, struct d_strvec *sa, int ai, double *a)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		a[ii] = pa[ii];
-	return;
-	}
-
-
-
-// cast a matrix into a matrix structure
-void d_cast_mat2strmat(double *A, struct d_strmat *sA)
-	{
-	sA->pA = A;
-	return;
-	}
-
-
-
-// cast a matrix into the diagonal of a matrix structure
-void d_cast_diag_mat2strmat(double *dA, struct d_strmat *sA)
-	{
-	sA->dA = dA;
-	return;
-	}
-
-
-
-// cast a vector into a vector structure
-void d_cast_vec2vecmat(double *a, struct d_strvec *sa)
-	{
-	sa->pa = a;
-	return;
-	}
-
-
-
-// insert element into strmat
-void dgein1_libstr(double a, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	pA[0] = a;
-	return;
-	}
-
-
-
-// extract element from strmat
-double dgeex1_libstr(struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	return pA[0];
-	}
-
-
-
-// insert element into strvec
-void dvecin1_libstr(double a, struct d_strvec *sx, int xi)
-	{
-	double *x = sx->pa + xi;
-	x[0] = a;
-	return;
-	}
-
-
-
-// extract element from strvec
-double dvecex1_libstr(struct d_strvec *sx, int xi)
-	{
-	double *x = sx->pa + xi;
-	return x[0];
-	}
-
-
-
-// set all elements of a strmat to a value
-void dgese_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=0; ii<m; ii++)
-			{
-			pA[ii+lda*jj] = alpha;
-			}
-		}
-	return;
-	}
-
-
-
-// set all elements of a strvec to a value
-void dvecse_libstr(int m, double alpha, struct d_strvec *sx, int xi)
-	{
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		x[ii] = alpha;
-	return;
-	}
-
-
-
-// insert a vector into diagonal
-void ddiain_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*(lda+1)] = alpha*x[ii];
-	return;
-	}
-
-
-
-// add scalar to diagonal
-void ddiare_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*(lda+1)] += alpha;
-	return;
-	}
-
-
-
-// extract a row into a vector
-void drowex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		x[ii] = alpha*pA[ii*lda];
-	return;
-	}
-
-
-
-// insert a vector into a row
-void drowin_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*lda] = alpha*x[ii];
-	return;
-	}
-
-
-
-// add a vector to a row
-void drowad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*lda] += alpha*x[ii];
-	return;
-	}
-
-
-
-// swap two rows of a matrix struct
-void drowsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii;
-	double tmp;
-	for(ii=0; ii<kmax; ii++)
-		{
-		tmp = pA[ii*lda];
-		pA[ii*lda] = pC[ii*ldc];
-		pC[ii*ldc] = tmp;
-		}
-	return;
-	}
-
-
-
-// permute the rows of a matrix struct
-void drowpe_libstr(int kmax, int *ipiv, struct d_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			drowsw_libstr(sA->n, sA, ii, 0, sA, ipiv[ii], 0);
-		}
-	return;
-	}
-
-
-
-// extract vector from column
-void dcolex_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		x[ii] = pA[ii];
-	return;
-	}
-
-
-
-// insert a vector into a column
-void dcolin_libstr(int kmax, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii] = x[ii];
-	return;
-	}
-
-
-
-// swap two cols of a matrix struct
-void dcolsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii;
-	double tmp;
-	for(ii=0; ii<kmax; ii++)
-		{
-		tmp = pA[ii];
-		pA[ii] = pC[ii];
-		pC[ii] = tmp;
-		}
-	return;
-	}
-
-
-
-// permute the cols of a matrix struct
-void dcolpe_libstr(int kmax, int *ipiv, struct d_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			dcolsw_libstr(sA->m, sA, 0, ii, sA, 0, ipiv[ii]);
-		}
-	return;
-	}
-
-
-
-// copy a generic strmat into a generic strmat
-void dgecp_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			pC[ii+1+jj*ldc] = pA[ii+1+jj*lda];
-			pC[ii+2+jj*ldc] = pA[ii+2+jj*lda];
-			pC[ii+3+jj*ldc] = pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scale a generic strmat
-void dgesc_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[ii+0+jj*lda] *= alpha;
-			pA[ii+1+jj*lda] *= alpha;
-			pA[ii+2+jj*lda] *= alpha;
-			pA[ii+3+jj*lda] *= alpha;
-			}
-		for(; ii<m; ii++)
-			{
-			pA[ii+0+jj*lda] *= alpha;
-			}
-		}
-	return;
-	}
-
-
-
-// copy a strvec into a strvec
-void dveccp_libstr(int m, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci)
-	{
-	double *pa = sa->pa + ai;
-	double *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] = pa[ii+0];
-		pc[ii+1] = pa[ii+1];
-		pc[ii+2] = pa[ii+2];
-		pc[ii+3] = pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] = pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// scale a strvec
-void dvecsc_libstr(int m, double alpha, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pa[ii+0] *= alpha;
-		pa[ii+1] *= alpha;
-		pa[ii+2] *= alpha;
-		pa[ii+3] *= alpha;
-		}
-	for(; ii<m; ii++)
-		{
-		pa[ii+0] *= alpha;
-		}
-	return;
-	}
-
-
-
-// copy a lower triangular strmat into a lower triangular strmat
-void dtrcp_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = jj;
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scale and add a generic strmat into a generic strmat
-void dgead_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[ii+0+jj*ldc] += alpha*pA[ii+0+jj*lda];
-			pC[ii+1+jj*ldc] += alpha*pA[ii+1+jj*lda];
-			pC[ii+2+jj*ldc] += alpha*pA[ii+2+jj*lda];
-			pC[ii+3+jj*ldc] += alpha*pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] += alpha*pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scale and add a strvec into a strvec
-void dvecad_libstr(int m, double alpha, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci)
-	{
-	double *pa = sa->pa + ai;
-	double *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		pc[ii+1] += alpha*pa[ii+1];
-		pc[ii+2] += alpha*pa[ii+2];
-		pc[ii+3] += alpha*pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// copy and transpose a generic strmat into a generic strmat
-void dgetr_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			pC[jj+(ii+1)*ldc] = pA[ii+1+jj*lda];
-			pC[jj+(ii+2)*ldc] = pA[ii+2+jj*lda];
-			pC[jj+(ii+3)*ldc] = pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// copy and transpose a lower triangular strmat into an upper triangular strmat
-void dtrtr_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = jj;
-		for(; ii<m; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// copy and transpose an upper triangular strmat into a lower triangular strmat
-void dtrtr_u_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	double *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = 0;
-		for(; ii<=jj; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// insert a strvec to the diagonal of a strmat, sparse formulation
-void ddiain_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	double *x = sx->pa + xi;
-	int ldd = sD->m;
-	double *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract a vector from diagonal
-void ddiaex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		x[ii] = alpha*pA[ii*(lda+1)];
-	return;
-	}
-
-
-
-// extract the diagonal of a strmat into a strvec, sparse formulation
-void ddiaex_sp_libstr(int kmax, double alpha, int *idx, struct d_strmat *sD, int di, int dj, struct d_strvec *sx, int xi)
-	{
-	double *x = sx->pa + xi;
-	int ldd = sD->m;
-	double *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[ii*(ldd+1)];
-		}
-	return;
-	}
-
-
-
-// add a scaled vector to diagonal
-void ddiaad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	double *pA = sA->pA + ai + aj*lda;
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*(lda+1)] += alpha*x[ii];
-	return;
-	}
-
-
-
-// add a scaled strvec to the diagonal of a strmat, sparse formulation
-void ddiaad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	double *x = sx->pa + xi;
-	int ldd = sD->m;
-	double *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to another strvec and insert to diagonal of strmat, sparse formulation
-void ddiaadin_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	int ldd = sD->m;
-	double *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to row of strmat, sparse formulation
-void drowad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	double *x = sx->pa + xi;
-	int ldd = sD->m;
-	double *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*ldd] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-
-void dvecad_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] += alpha * x[ii];
-	return;
-	}
-
-
-
-void dvecin_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] = alpha * x[ii];
-	return;
-	}
-
-
-
-void dvecex_sp_libstr(int m, double alpha, int *idx, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[ii] = alpha * x[idx[ii]];
-	return;
-	}
-
-
-// clip without mask return
-void dveccl_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi)
-	{
-	double *xm = sxm->pa + xim;
-	double *x  = sx->pa + xi;
-	double *xp = sxp->pa + xip;
-	double *z  = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			}
-		else
-			{
-			z[ii] = x[ii];
-			}
-		}
-	return;
-	}
-
-
-
-// clip with mask return
-void dveccl_mask_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi, struct d_strvec *sm, int mi)
-	{
-	double *xm = sxm->pa + xim;
-	double *x  = sx->pa + xi;
-	double *xp = sxp->pa + xip;
-	double *z  = sz->pa + zi;
-	double *mask  = sm->pa + mi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			mask[ii] = 1.0;
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			mask[ii] = -1.0;
-			}
-		else
-			{
-			z[ii] = x[ii];
-			mask[ii] = 0.0;
-			}
-		}
-	return;
-	}
-
-
-// zero out components using mask
-void dvecze_libstr(int m, struct d_strvec *sm, int mi, struct d_strvec *sv, int vi, struct d_strvec *se, int ei)
-	{
-	double *mask = sm->pa + mi;
-	double *v = sv->pa + vi;
-	double *e = se->pa + ei;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(mask[ii]==0)
-			{
-			e[ii] = v[ii];
-			}
-		else
-			{
-			e[ii] = 0;
-			}
-		}
-	return;
-	}
-
-
-
-void dvecnrm_inf_libstr(int m, struct d_strvec *sx, int xi, double *ptr_norm)
-	{
-	int ii;
-	double *x = sx->pa + xi;
-	double norm = 0.0;
-	for(ii=0; ii<m; ii++)
-		norm = fmax(norm, fabs(x[ii]));
-	*ptr_norm = norm;
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/auxiliary/d_aux_lib4.c b/third_party/blasfeo/auxiliary/d_aux_lib4.c
deleted file mode 100644
index 152aed1..0000000
--- a/third_party/blasfeo/auxiliary/d_aux_lib4.c
+++ /dev/null
@@ -1,3609 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_block_size.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-// copies a packed matrix into a packed matrix
-// TODO remove alpha !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-void dgecp_lib(int m, int n, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna, ii;
-
-	int offA = offsetA%bs;
-	int offB = offsetB%bs;
-
-	// A at the beginning of the block
-	A -= offA;
-
-	// B at the beginning of the block
-	B -= offB;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(0, n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_0_lib4(0, n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_0_lib4(0, n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_0_lib4(0, n, alpha, A, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(0, n, alpha, A, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(0, n, alpha, A, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_0_lib4(0, n, alpha, A, B);
-			}
-		}
-	// skip one element of A
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-				//A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_3_lib4(0, n, alpha, A, sda, B+2);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_2_lib4(0, n, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_1_lib4(0, n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_1_lib4(0, n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+1, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(0, n, alpha, A+1, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_0_lib4(0, n, alpha, A+1, B);
-			}
-		}
-	// skip 2 elements of A
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_3_lib4(0, n, alpha, A, sda, B+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+1, B+3);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(0, n, alpha, A, B+2);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_3_lib4(0, n, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_2_lib4(0, n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_2_lib4(0, n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+2, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(0, n, alpha, A+2, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_2_lib4(0, n, alpha, A, sda, B);
-			}
-		}
-	// skip 3 elements of A
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(0, n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(0, n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_0_lib4(0, n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_3_lib4(0, n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_3_lib4(0, n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(0, n, alpha, A+3, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_3_lib4(0, n, alpha, A, sda, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_3_lib4(0, n, alpha, A, sda, B);
-			}
-		}
-
-	}
-
-
-
-// copies a lower triangular packed matrix into a lower triangular packed matrix
-void dtrcp_l_lib(int m, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb)
-	{
-
-	if(m<=0)
-		return;
-
-	int n = m;
-
-	const int bs = 4;
-
-	int mna, ii;
-
-	int offA = offsetA%bs;
-	int offB = offsetB%bs;
-
-	// A at the beginning of the block
-	A -= offA;
-
-	// B at the beginning of the block
-	B -= offB;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_0_lib4(1, ii, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_0_lib4(1, ii, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_0_lib4(1, ii, alpha, A, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_0_lib4(1, ii, alpha, A, B);
-			}
-		}
-	// skip one element of A
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-				//A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_3_lib4(1, ii, alpha, A, sda, B+2);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_2_lib4(1, ii, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_1_lib4(1, ii, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_1_lib4(1, ii, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+1, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A+1, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_0_lib4(1, ii, alpha, A+1, B);
-			}
-		}
-	// skip 2 elements of A
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_3_lib4(1, ii, alpha, A, sda, B+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+1, B+3);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A, B+2);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_3_lib4(1, ii, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_2_lib4(1, ii, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_2_lib4(1, ii, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+2, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A+2, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_2_lib4(1, ii, alpha, A, sda, B);
-			}
-		}
-	// skip 3 elements of A
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgecp_2_0_lib4(1, ii, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgecp_2_0_lib4(1, ii, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgecp_3_0_lib4(1, ii, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgecp_8_3_lib4(1, ii, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgecp_4_3_lib4(1, ii, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgecp_1_0_lib4(1, ii, alpha, A+3, B);
-			else if(m-ii==2)
-				kernel_dgecp_2_3_lib4(1, ii, alpha, A, sda, B);
-			else // if(m-ii==3)
-				kernel_dgecp_3_3_lib4(1, ii, alpha, A, sda, B);
-			}
-		}
-
-	}
-
-
-
-// scales and adds a packed matrix into a packed matrix: B = B + alpha*A
-void dgead_lib(int m, int n, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna, ii;
-
-	int offA = offsetA%bs;
-	int offB = offsetB%bs;
-
-	// A at the beginning of the block
-	A -= offA;
-
-	// B at the beginning of the block
-	B -= offB;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgead_2_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgead_2_0_lib4(n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgead_3_0_lib4(n, alpha, A+offA, B+offB);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgead_8_0_lib4(n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgead_4_0_lib4(n, alpha, A, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgead_1_0_lib4(n, alpha, A, B);
-			else if(m-ii==2)
-				kernel_dgead_2_0_lib4(n, alpha, A, B);
-			else // if(m-ii==3)
-				kernel_dgead_3_0_lib4(n, alpha, A, B);
-			}
-		}
-	// skip one element of A
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_dgead_2_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-				//A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgead_2_3_lib4(n, alpha, A, sda, B+2);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgead_3_2_lib4(n, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgead_8_1_lib4(n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgead_4_1_lib4(n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgead_1_0_lib4(n, alpha, A+1, B);
-			else if(m-ii==2)
-				kernel_dgead_2_0_lib4(n, alpha, A+1, B);
-			else // if(m-ii==3)
-				kernel_dgead_3_0_lib4(n, alpha, A+1, B);
-			}
-		}
-	// skip 2 elements of A
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgead_2_3_lib4(n, alpha, A, sda, B+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgead_1_0_lib4(n, alpha, A+1, B+3);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgead_2_0_lib4(n, alpha, A, B+2);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgead_3_3_lib4(n, alpha, A, sda, B+1);
-				A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgead_8_2_lib4(n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgead_4_2_lib4(n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgead_1_0_lib4(n, alpha, A+2, B);
-			else if(m-ii==2)
-				kernel_dgead_2_0_lib4(n, alpha, A+2, B);
-			else // if(m-ii==3)
-				kernel_dgead_3_2_lib4(n, alpha, A, sda, B);
-			}
-		}
-	// skip 3 elements of A
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_dgead_2_0_lib4(n, alpha, A+offA, B+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_dgead_1_0_lib4(n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_dgead_2_0_lib4(n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_dgead_3_0_lib4(n, alpha, A+offA, B+offB);
-				// A += 4*sda;
-				B += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-7; ii+=8)
-			{
-			kernel_dgead_8_3_lib4(n, alpha, A, sda, B, sdb);
-			A += 8*sda;
-			B += 8*sdb;
-			}
-#endif
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_dgead_4_3_lib4(n, alpha, A, sda, B);
-			A += 4*sda;
-			B += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_dgead_1_0_lib4(n, alpha, A+3, B);
-			else if(m-ii==2)
-				kernel_dgead_2_3_lib4(n, alpha, A, sda, B);
-			else // if(m-ii==3)
-				kernel_dgead_3_3_lib4(n, alpha, A, sda, B);
-			}
-		}
-
-	}
-
-
-
-// scales and adds a strvec into a strvec
-void dvecad_libstr(int m, double alpha, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci)
-	{
-	double *pa = sa->pa + ai;
-	double *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		pc[ii+1] += alpha*pa[ii+1];
-		pc[ii+2] += alpha*pa[ii+2];
-		pc[ii+3] += alpha*pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// transpose general matrix; m and n refer to the original matrix
-void dgetr_lib(int m, int n, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc)
-	{
-
-/*
-
-m = 5
-n = 3
-offsetA = 1
-offsetC = 2
-
-A =
- x x x
- -
- x x x
- x x x
- x x x
- x x x
-
-C =
- x x x x x
- x x x x x
- -
- x x x x x
-
-*/
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			kernel_dgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-		else if(mna==2)
-			kernel_dgetr_2_lib4(0, n, nna, alpha, pA, pC, sdc);
-		else //if(mna==3)
-			kernel_dgetr_3_lib4(0, n, nna, alpha, pA, pC, sdc);
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for( ; ii<m-7; ii+=8)
-		{
-		kernel_dgetr_8_lib4(0, n, nna, alpha, pA, sda, pC, sdc);
-		pA += 2*bs*sda;
-		pC += 2*bs*bs;
-		}
-#endif
-	for( ; ii<m-3; ii+=4)
-//	for( ; ii<m; ii+=4)
-		{
-		kernel_dgetr_4_lib4(0, n, nna, alpha, pA, pC, sdc);
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-
-	// clean-up at the end using smaller kernels
-	if(ii==m)
-		return;
-
-	if(m-ii==1)
-		kernel_dgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-	else if(m-ii==2)
-		kernel_dgetr_2_lib4(0, n, nna, alpha, pA, pC, sdc);
-	else if(m-ii==3)
-		kernel_dgetr_3_lib4(0, n, nna, alpha, pA, pC, sdc);
-
-	return;
-
-	}
-
-
-
-// transpose lower triangular matrix
-void dtrtr_l_lib(int m, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc)
-	{
-
-/*
-
-A =
- x
- x x
- x x x
- x x x x
-
- x x x x x
- x x x x x x
- x x x x x x x
- x x x x x x x x
-
-C =
- x x x x x x x x
-
-   x x x x x x x
-     x x x x x x
-	   x x x x x
-	     x x x x
-
-	       x x x
-	         x x
-	           x
-
-*/
-
-	int n = m;
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			{
-			pC[0] = alpha * pA[0];
-			}
-		else if(mna==2)
-			{
-			if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*(0+sdc)] = alpha * pA[1+bs*1];
-				}
-			else
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				}
-			}
-		else //if(mna==3)
-			{
-			if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*(0+sdc)] = alpha * pA[1+bs*1];
-				pC[1+bs*(1+sdc)] = alpha * pA[2+bs*1];
-				pC[2+bs*(1+sdc)] = alpha * pA[2+bs*2];
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pC[2+bs*(1+sdc)] = alpha * pA[2+bs*2];
-				}
-			else
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				}
-			}
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-#if 0 //defined(TARGET_X64_INTEL_HASWELL)
-	for( ; ii<m-7; ii+=8)
-		{
-		kernel_dgetr_8_lib4(1, n, nna, alpha, pA, sda, pC, sdc);
-		pA += 2*bs*sda;
-		pC += 2*bs*bs;
-		}
-#endif
-	for( ; ii<m-3; ii+=4)
-		{
-		kernel_dgetr_4_lib4(1, ii, nna, alpha, pA, pC, sdc);
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-
-	// clean-up at the end using smaller kernels
-	if(ii==m)
-		return;
-
-	if(m-ii==1)
-		kernel_dgetr_1_lib4(1, ii, nna, alpha, pA, pC, sdc);
-	else if(m-ii==2)
-		kernel_dgetr_2_lib4(1, ii, nna, alpha, pA, pC, sdc);
-	else if(m-ii==3)
-		kernel_dgetr_3_lib4(1, ii, nna, alpha, pA, pC, sdc);
-
-	return;
-
-	}
-
-
-
-// transpose an aligned upper triangular matrix into an aligned lower triangular matrix
-void dtrtr_u_lib(int m, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc)
-	{
-
-/*
-
-A =
- x x x x x x x x
-   x x x x x x x
-
-     x x x x x x
-       x x x x x
-         x x x x
-           x x x
-             x x
-               x
-
-C =
- x
-
- x x
- x x x
- x x x x
- x x x x x
- x x x x x x
- x x x x x x x
- x x x x x x x x
-
-*/
-
-	int n = m;
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-	int tna = nna;
-
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			{
-			kernel_dgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-			if(nna!=1)
-				{
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1;
-				tna = (bs-(offsetC+1)%bs)%bs;
-				}
-			else //if(nna==1)
-				{
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1 + (sdc-1)*bs;
-				tna = 0; //(bs-(offsetC+1)%bs)%bs;
-				}
-//			kernel_dgetr_1_lib4(0, n-1, tna, alpha, pA, pC, sdc);
-			}
-		else if(mna==2)
-			{
-			if(nna==0 || nna==3)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2;
-				tna = (bs-(offsetC+2)%bs)%bs;
-				kernel_dgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1 + (sdc-1)*bs;
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-//				pC[0+bs*1] = alpha * pA[1+bs*0];
-				kernel_dgetr_2_lib4(0, n-1, 0, alpha, pA, pC, sdc);
-				pA += 1*bs;
-				pC += 1;
-				tna = 3; //(bs-(offsetC+2)%bs)%bs;
-//				kernel_dgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2 + (sdc-1)*bs;
-				tna = 0; //(bs-(offsetC+2)%bs)%bs;
-				kernel_dgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			}
-		else //if(mna==3)
-			{
-			if(nna==0)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[2+bs*0] = alpha * pA[0+bs*2];
-				pC[2+bs*1] = alpha * pA[1+bs*2];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				pA += 3*bs;
-				pC += 3;
-				tna = 1;
-				kernel_dgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += bs;
-				pC += 1 + (sdc-1)*bs;
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pA += 2*bs;
-				pC += 2;
-				tna = 2;
-				kernel_dgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2 + (sdc-1)*bs;
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-//				pC[0+bs*1] = alpha * pA[1+bs*0];
-//				pC[0+bs*2] = alpha * pA[2+bs*0];
-				kernel_dgetr_3_lib4(0, n-2, 0, alpha, pA, pC, sdc);
-				pA += 1*bs;
-				pC += 1;
-				tna = 3;
-//				kernel_dgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else //if(nna==3)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[2+bs*0] = alpha * pA[0+bs*2];
-				pC[2+bs*1] = alpha * pA[1+bs*2];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				pA += 3*bs;
-				pC += 3 + (sdc-1)*bs;
-				tna = 0;
-				kernel_dgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			}
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-#if 0 //defined(TARGET_X64_AVX2)
-	for( ; ii<m-7; ii+=8)
-		{
-		kernel_dgetr_8_lib4(0, n, nna, alpha, pA, sda, pC, sdc);
-		pA += 2*bs*sda;
-		pC += 2*bs*bs;
-		}
-#endif
-	for( ; ii<m-3; ii+=4)
-		{
-		if(tna==0)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pC[3+bs*0] = alpha * pA[0+bs*3];
-			pC[3+bs*1] = alpha * pA[1+bs*3];
-			pC[3+bs*2] = alpha * pA[2+bs*3];
-			pC[3+bs*3] = alpha * pA[3+bs*3];
-			pA += 4*bs;
-			pC += sdc*bs;
-			kernel_dgetr_4_lib4(0, n-ii-4, 0, alpha, pA, pC, sdc);
-			}
-		else if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pC[2+bs*3] = alpha * pA[3+bs*2];
-			pA += 3*bs;
-			pC += 3;
-			kernel_dgetr_4_lib4(0, n-ii-4, 1, alpha, pA, pC, sdc);
-			}
-		else if(tna==2)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pA += 2*bs;
-			pC += 2 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[0+bs*2] = alpha * pA[2+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			pC[1+bs*3] = alpha * pA[3+bs*1];
-			pA += 2*bs;
-			pC += 2;
-			kernel_dgetr_4_lib4(0, n-ii-4, 2, alpha, pA, pC, sdc);
-			}
-		else //if(tna==3)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pA += 3*bs;
-			pC += 3 + (sdc-1)*bs;
-			kernel_dgetr_4_lib4(0, n-ii-3, 0, alpha, pA, pC, sdc);
-//			pC[0+bs*0] = alpha * pA[0+bs*0];
-//			pC[0+bs*1] = alpha * pA[1+bs*0];
-//			pC[0+bs*2] = alpha * pA[2+bs*0];
-//			pC[0+bs*3] = alpha * pA[3+bs*0];
-			pA += bs;
-			pC += 1;
-//			kernel_dgetr_4_lib4(0, n-ii-4, tna, alpha, pA, pC, sdc);
-			}
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-
-	// clean-up at the end
-	if(ii==m)
-		return;
-
-	if(m-ii==1)
-		{
-		pC[0+bs*0] = alpha * pA[0+bs*0];
-		}
-	else if(m-ii==2)
-		{
-		if(tna!=1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			}
-		else //if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			}
-		}
-	else if(m-ii==3)
-		{
-		if(tna==0 || tna==3)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			}
-		else if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			}
-		else //if(tna==2)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pA += 2*bs;
-			pC += 2 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[0+bs*2] = alpha * pA[2+bs*0];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// regularize diagonal
-void ddiareg_lib(int kmax, double reg, int offset, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += reg;
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] += reg;
-		pD[jj*sdd+(jj+1)*bs+1] += reg;
-		pD[jj*sdd+(jj+2)*bs+2] += reg;
-		pD[jj*sdd+(jj+3)*bs+3] += reg;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += reg;
-		}
-
-	}
-
-
-
-// insert sqrt of vector to diagonal
-void ddiain_sqrt_lib(int kmax, double *x, int offset, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] = sqrt(x[ll]);
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] = sqrt(x[jj+0]);
-		pD[jj*sdd+(jj+1)*bs+1] = sqrt(x[jj+1]);
-		pD[jj*sdd+(jj+2)*bs+2] = sqrt(x[jj+2]);
-		pD[jj*sdd+(jj+3)*bs+3] = sqrt(x[jj+3]);
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] = sqrt(x[jj+ll]);
-		}
-
-	}
-
-
-
-// extract diagonal to vector
-void ddiaex_lib(int kmax, double alpha, int offset, double *pD, int sdd, double *x)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			x[ll] = alpha * pD[ll+bs*ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = alpha * pD[jj*sdd+(jj+0)*bs+0];
-		x[jj+1] = alpha * pD[jj*sdd+(jj+1)*bs+1];
-		x[jj+2] = alpha * pD[jj*sdd+(jj+2)*bs+2];
-		x[jj+3] = alpha * pD[jj*sdd+(jj+3)*bs+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[jj+ll] = alpha * pD[jj*sdd+(jj+ll)*bs+ll];
-		}
-
-	}
-
-
-
-// add scaled vector to diagonal
-void ddiaad_lib(int kmax, double alpha, double *x, int offset, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] += alpha * x[jj+0];
-		pD[jj*sdd+(jj+1)*bs+1] += alpha * x[jj+1];
-		pD[jj*sdd+(jj+2)*bs+2] += alpha * x[jj+2];
-		pD[jj*sdd+(jj+3)*bs+3] += alpha * x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += alpha * x[jj+ll];
-		}
-
-	}
-
-
-
-// insert vector to diagonal, sparse formulation
-void ddiain_libsp(int kmax, int *idx, double alpha, double *x, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = alpha * x[jj];
-		}
-
-	}
-
-
-
-// extract diagonal to vector, sparse formulation
-void ddiaex_libsp(int kmax, int *idx, double alpha, double *pD, int sdd, double *x)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[ii/bs*bs*sdd+ii%bs+ii*bs];
-		}
-
-	}
-
-
-
-// add scaled vector to diagonal, sparse formulation
-void ddiaad_libsp(int kmax, int *idx, double alpha, double *x, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] += alpha * x[jj];
-		}
-
-	}
-
-
-
-// add scaled vector to another vector and insert to diagonal, sparse formulation
-void ddiaadin_libsp(int kmax, int *idx, double alpha, double *x, double *y, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = y[jj] + alpha * x[jj];
-		}
-
-	}
-
-
-
-// insert vector to row
-void drowin_lib(int kmax, double alpha, double *x, double *pD)
-	{
-
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[(jj+0)*bs] = alpha*x[jj+0];
-		pD[(jj+1)*bs] = alpha*x[jj+1];
-		pD[(jj+2)*bs] = alpha*x[jj+2];
-		pD[(jj+3)*bs] = alpha*x[jj+3];
-		}
-	for(; jj<kmax; jj++)
-		{
-		pD[(jj)*bs] = alpha*x[jj];
-		}
-
-	}
-
-
-
-// extract row to vector
-void drowex_lib(int kmax, double alpha, double *pD, double *x)
-	{
-
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = alpha*pD[(jj+0)*bs];
-		x[jj+1] = alpha*pD[(jj+1)*bs];
-		x[jj+2] = alpha*pD[(jj+2)*bs];
-		x[jj+3] = alpha*pD[(jj+3)*bs];
-		}
-	for(; jj<kmax; jj++)
-		{
-		x[jj] = alpha*pD[(jj)*bs];
-		}
-
-	}
-
-
-
-// add scaled vector to row
-void drowad_lib(int kmax, double alpha, double *x, double *pD)
-	{
-
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[(jj+0)*bs] += alpha * x[jj+0];
-		pD[(jj+1)*bs] += alpha * x[jj+1];
-		pD[(jj+2)*bs] += alpha * x[jj+2];
-		pD[(jj+3)*bs] += alpha * x[jj+3];
-		}
-	for(; jj<kmax; jj++)
-		{
-		pD[(jj)*bs] += alpha * x[jj];
-		}
-
-	}
-
-
-
-// insert vector to row, sparse formulation
-void drowin_libsp(int kmax, double alpha, int *idx, double *x, double *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = alpha*x[jj];
-		}
-
-	}
-
-
-
-// add scaled vector to row, sparse formulation
-void drowad_libsp(int kmax, int *idx, double alpha, double *x, double *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] += alpha * x[jj];
-		}
-
-	}
-
-
-
-// add scaled vector to another vector and insert to row, sparse formulation
-void drowadin_libsp(int kmax, int *idx, double alpha, double *x, double *y, double *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = y[jj] + alpha * x[jj];
-		}
-
-	}
-
-
-
-// swap two rows
-void drowsw_lib(int kmax, double *pA, double *pC)
-	{
-
-	const int bs = 4;
-
-	int ii;
-	double tmp;
-
-	for(ii=0; ii<kmax-3; ii+=4)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		tmp = pA[0+bs*1];
-		pA[0+bs*1] = pC[0+bs*1];
-		pC[0+bs*1] = tmp;
-		tmp = pA[0+bs*2];
-		pA[0+bs*2] = pC[0+bs*2];
-		pC[0+bs*2] = tmp;
-		tmp = pA[0+bs*3];
-		pA[0+bs*3] = pC[0+bs*3];
-		pC[0+bs*3] = tmp;
-		pA += 4*bs;
-		pC += 4*bs;
-		}
-	for( ; ii<kmax; ii++)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		pA += 1*bs;
-		pC += 1*bs;
-		}
-
-	}
-
-
-
-// extract vector from column
-void dcolex_lib(int kmax, int offset, double *pD, int sdd, double *x)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			x[ll] = pD[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = pD[jj*sdd+0];
-		x[jj+1] = pD[jj*sdd+1];
-		x[jj+2] = pD[jj*sdd+2];
-		x[jj+3] = pD[jj*sdd+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[jj+ll] = pD[jj*sdd+ll];
-		}
-
-	}
-
-
-
-// insert vector to column
-void dcolin_lib(int kmax, double *x, int offset, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] = x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+0] = x[jj+0];
-		pD[jj*sdd+1] = x[jj+1];
-		pD[jj*sdd+2] = x[jj+2];
-		pD[jj*sdd+3] = x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+ll] = x[jj+ll];
-		}
-
-	}
-
-
-
-// add scaled vector to column
-void dcolad_lib(int kmax, double alpha, double *x, int offset, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+0] += alpha * x[jj+0];
-		pD[jj*sdd+1] += alpha * x[jj+1];
-		pD[jj*sdd+2] += alpha * x[jj+2];
-		pD[jj*sdd+3] += alpha * x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+ll] += alpha * x[jj+ll];
-		}
-
-	}
-
-
-
-// insert vector to column, sparse formulation
-void dcolin_libsp(int kmax, int *idx, double *x, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] = x[jj];
-		}
-
-	}
-
-
-
-// add scaled vector to column, sparse formulation
-void dcolad_libsp(int kmax, double alpha, int *idx, double *x, double *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] += alpha * x[jj];
-		}
-
-	}
-
-
-
-// swaps two cols
-void dcolsw_lib(int kmax, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc)
-	{
-
-	const int bs = 4;
-
-	int ii;
-
-	double tmp;
-
-	if(offsetA==offsetC)
-		{
-		if(offsetA>0)
-			{
-			ii = 0;
-			for(; ii<bs-offsetA; ii++)
-				{
-				tmp = pA[0+bs*0];
-				pA[0+bs*0] = pC[0+bs*0];
-				pC[0+bs*0] = tmp;
-				pA += 1;
-				pC += 1;
-				}
-			pA += bs*(sda-1);
-			pC += bs*(sdc-1);
-			kmax -= bs-offsetA;
-			}
-		ii = 0;
-		for(; ii<kmax-3; ii+=4)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			tmp = pA[1+bs*0];
-			pA[1+bs*0] = pC[1+bs*0];
-			pC[1+bs*0] = tmp;
-			tmp = pA[2+bs*0];
-			pA[2+bs*0] = pC[2+bs*0];
-			pC[2+bs*0] = tmp;
-			tmp = pA[3+bs*0];
-			pA[3+bs*0] = pC[3+bs*0];
-			pC[3+bs*0] = tmp;
-			pA += bs*sda;
-			pC += bs*sdc;
-			}
-		for(; ii<kmax; ii++)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			pA += 1;
-			pC += 1;
-			}
-		}
-	else
-		{
-		printf("\ndcolsw: feature not implemented yet: offsetA!=offsetC\n\n");
-		exit(1);
-		}
-
-	return;
-
-	}
-
-
-
-// insert vector to vector, sparse formulation
-void dvecin_libsp(int kmax, int *idx, double *x, double *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] = x[jj];
-		}
-
-	}
-
-
-
-// adds vector to vector, sparse formulation
-void dvecad_libsp(int kmax, int *idx, double alpha, double *x, double *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] += alpha * x[jj];
-		}
-
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// return the memory size (in bytes) needed for a strmat
-int d_size_strmat(int m, int n)
-	{
-	const int bs = 4;
-	int nc = D_NC;
-	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = (pm*cn+tmp)*sizeof(double);
-	return memory_size;
-	}
-
-
-
-// return the memory size (in bytes) needed for the diagonal of a strmat
-int d_size_diag_strmat(int m, int n)
-	{
-	const int bs = 4;
-	int nc = D_NC;
-	int al = bs*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = tmp*sizeof(double);
-	return memory_size;
-	}
-
-
-
-// create a matrix structure for a matrix of size m*n by using memory passed by a pointer
-void d_create_strmat(int m, int n, struct d_strmat *sA, void *memory)
-	{
-	const int bs = 4;
-	int nc = D_NC;
-	int al = bs*nc;
-	sA->m = m;
-	sA->n = n;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	sA->pm = pm;
-	sA->cn = cn;
-	double *ptr = (double *) memory;
-	sA->pA = ptr;
-	ptr += pm*cn;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	sA->dA = ptr;
-	ptr += tmp;
-	sA->use_dA = 0;
-	sA->memory_size = (pm*cn+tmp)*sizeof(double);
-	return;
-	}
-
-
-
-// return memory size (in bytes) needed for a strvec
-int d_size_strvec(int m)
-	{
-	const int bs = 4;
-//	int nc = D_NC;
-//	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int memory_size = pm*sizeof(double);
-	return memory_size;
-	}
-
-
-
-// create a vector structure for a vector of size m by using memory passed by a pointer
-void d_create_strvec(int m, struct d_strvec *sa, void *memory)
-	{
-	const int bs = 4;
-//	int nc = D_NC;
-//	int al = bs*nc;
-	sa->m = m;
-	int pm = (m+bs-1)/bs*bs;
-	sa->pm = pm;
-	double *ptr = (double *) memory;
-	sa->pa = ptr;
-//	ptr += pm;
-	sa->memory_size = pm*sizeof(double);
-	return;
-	}
-
-
-
-// convert a matrix into a matrix structure
-void d_cvt_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, jj, m0, m1, m2;
-	double 	*B, *pB;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	__m256d
-		tmp;
-#endif
-	m0 = (bs-ai%bs)%bs;
-	if(m0>m)
-		m0 = m;
-	m1 = m - m0;
-	jj = 0;
-	for( ; jj<n-3; jj+=4)
-		{
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				pB[ii+bs*1] = B[ii+lda*1];
-				pB[ii+bs*2] = B[ii+lda*2];
-				pB[ii+bs*3] = B[ii+lda*3];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for( ; ii<m-3; ii+=4)
-			{
-			tmp = _mm256_loadu_pd( &B[0+lda*0] );
-			_mm256_store_pd( &pB[0+bs*0], tmp );
-			tmp = _mm256_loadu_pd( &B[0+lda*1] );
-			_mm256_store_pd( &pB[0+bs*1], tmp );
-			tmp = _mm256_loadu_pd( &B[0+lda*2] );
-			_mm256_store_pd( &pB[0+bs*2], tmp );
-			tmp = _mm256_loadu_pd( &B[0+lda*3] );
-			_mm256_store_pd( &pB[0+bs*3], tmp );
-			// update
-			B  += 4;
-			pB += bs*sda;
-			}
-#else
-		for( ; ii<m-3; ii+=4)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			// col 1
-			pB[0+bs*1] = B[0+lda*1];
-			pB[1+bs*1] = B[1+lda*1];
-			pB[2+bs*1] = B[2+lda*1];
-			pB[3+bs*1] = B[3+lda*1];
-			// col 2
-			pB[0+bs*2] = B[0+lda*2];
-			pB[1+bs*2] = B[1+lda*2];
-			pB[2+bs*2] = B[2+lda*2];
-			pB[3+bs*2] = B[3+lda*2];
-			// col 3
-			pB[0+bs*3] = B[0+lda*3];
-			pB[1+bs*3] = B[1+lda*3];
-			pB[2+bs*3] = B[2+lda*3];
-			pB[3+bs*3] = B[3+lda*3];
-			// update
-			B  += 4;
-			pB += bs*sda;
-			}
-#endif
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// col 1
-			pB[0+bs*1] = B[0+lda*1];
-			// col 2
-			pB[0+bs*2] = B[0+lda*2];
-			// col 3
-			pB[0+bs*3] = B[0+lda*3];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	for( ; jj<n; jj++)
-		{
-
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-		for( ; ii<m-3; ii+=4)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			// update
-			B  += 4;
-			pB += bs*sda;
-			}
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix into a matrix structure
-void d_cvt_tran_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, m0, m1, m2;
-	double 	*B, *pB;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	__m256d
-		v0, v1, v2, v3,
-		v4, v5, v6, v7;
-#endif
-	m0 = (bs-ai%bs)%bs;
-	if(m0>n)
-		m0 = n;
-	m1 = n - m0;
-	ii = 0;
-	if(m0>0)
-		{
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m0; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		A  += m0*lda;
-		pA += m0 + bs*(sda-1);
-		}
-	ii = 0;
-	for(; ii<m1-3; ii+=bs)
-		{
-		j=0;
-		B  = A + ii*lda;
-		pB = pA + ii*sda;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; j<m-3; j+=4)
-			{
-			v0 = _mm256_loadu_pd( &B[0+0*lda] ); // 00 10 20 30
-			v1 = _mm256_loadu_pd( &B[0+1*lda] ); // 01 11 21 31
-			v4 = _mm256_unpacklo_pd( v0, v1 ); // 00 01 20 21
-			v5 = _mm256_unpackhi_pd( v0, v1 ); // 10 11 30 31
-			v2 = _mm256_loadu_pd( &B[0+2*lda] ); // 02 12 22 32
-			v3 = _mm256_loadu_pd( &B[0+3*lda] ); // 03 13 23 33
-			v6 = _mm256_unpacklo_pd( v2, v3 ); // 02 03 22 23
-			v7 = _mm256_unpackhi_pd( v2, v3 ); // 12 13 32 33
-
-			B += 4;
-
-			v0 = _mm256_permute2f128_pd( v4, v6, 0x20 ); // 00 01 02 03
-			_mm256_store_pd( &pB[0+bs*0], v0 );
-			v2 = _mm256_permute2f128_pd( v4, v6, 0x31 ); // 20 21 22 23
-			_mm256_store_pd( &pB[0+bs*2], v2 );
-			v1 = _mm256_permute2f128_pd( v5, v7, 0x20 ); // 10 11 12 13
-			_mm256_store_pd( &pB[0+bs*1], v1 );
-			v3 = _mm256_permute2f128_pd( v5, v7, 0x31 ); // 30 31 32 33
-			_mm256_store_pd( &pB[0+bs*3], v3 );
-
-			pB += 4*bs;
-			}
-#else
-		for(; j<m-3; j+=4)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			// unroll 1
-			pB[0+1*bs] = B[1+0*lda];
-			pB[1+1*bs] = B[1+1*lda];
-			pB[2+1*bs] = B[1+2*lda];
-			pB[3+1*bs] = B[1+3*lda];
-			// unroll 2
-			pB[0+2*bs] = B[2+0*lda];
-			pB[1+2*bs] = B[2+1*lda];
-			pB[2+2*bs] = B[2+2*lda];
-			pB[3+2*bs] = B[2+3*lda];
-			// unroll 3
-			pB[0+3*bs] = B[3+0*lda];
-			pB[1+3*bs] = B[3+1*lda];
-			pB[2+3*bs] = B[3+2*lda];
-			pB[3+3*bs] = B[3+3*lda];
-			B  += 4;
-			pB += 4*bs;
-			}
-#endif
-		for(; j<m; j++)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			B  += 1;
-			pB += 1*bs;
-			}
-		}
-	if(ii<m1)
-		{
-		m2 = m1-ii;
-		if(bs<m2) m2 = bs;
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m2; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		}
-	return;
-	}
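The AVX branch of the function above uses the standard 4x4 double-precision transpose built from unpacklo/unpackhi steps followed by 128-bit lane permutes. For reference, a minimal self-contained sketch of the same shuffle sequence on a plain 4x4 block (the helper name and the contiguous column-major layout are illustrative, not part of BLASFEO):

#include <immintrin.h>

// Transpose a 4x4 block of doubles. in[] holds four source columns of 4
// contiguous doubles each; out[] receives the four columns of the transpose.
static void transpose_4x4_avx(const double *in, double *out)
	{
	__m256d c0 = _mm256_loadu_pd(in + 0);    // a0 a1 a2 a3
	__m256d c1 = _mm256_loadu_pd(in + 4);    // b0 b1 b2 b3
	__m256d c2 = _mm256_loadu_pd(in + 8);    // c0 c1 c2 c3
	__m256d c3 = _mm256_loadu_pd(in + 12);   // d0 d1 d2 d3
	__m256d t0 = _mm256_unpacklo_pd(c0, c1); // a0 b0 a2 b2
	__m256d t1 = _mm256_unpackhi_pd(c0, c1); // a1 b1 a3 b3
	__m256d t2 = _mm256_unpacklo_pd(c2, c3); // c0 d0 c2 d2
	__m256d t3 = _mm256_unpackhi_pd(c2, c3); // c1 d1 c3 d3
	_mm256_storeu_pd(out + 0,  _mm256_permute2f128_pd(t0, t2, 0x20)); // a0 b0 c0 d0
	_mm256_storeu_pd(out + 4,  _mm256_permute2f128_pd(t1, t3, 0x20)); // a1 b1 c1 d1
	_mm256_storeu_pd(out + 8,  _mm256_permute2f128_pd(t0, t2, 0x31)); // a2 b2 c2 d2
	_mm256_storeu_pd(out + 12, _mm256_permute2f128_pd(t1, t3, 0x31)); // a3 b3 c3 d3
	}

The only difference in d_cvt_tran_mat2strmat is that the four loads come from columns of a column-major source (stride lda) and the four stores go into one panel of the panel-major destination (stride bs).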
-
-
-
-// convert a vector into a vector structure
-void d_cvt_vec2strvec(int m, double *a, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		pa[ii] = a[ii];
-	return;
-	}
-
-
-
-// convert a matrix structure into a matrix
-void d_cvt_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	double *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[0+ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			A[1+ii+lda*(jj+0)] = ptr_pA[1+bs*0];
-			A[2+ii+lda*(jj+0)] = ptr_pA[2+bs*0];
-			A[3+ii+lda*(jj+0)] = ptr_pA[3+bs*0];
-			// unroll 1
-			A[0+ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			A[1+ii+lda*(jj+1)] = ptr_pA[1+bs*1];
-			A[2+ii+lda*(jj+1)] = ptr_pA[2+bs*1];
-			A[3+ii+lda*(jj+1)] = ptr_pA[3+bs*1];
-			// unroll 2
-			A[0+ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			A[1+ii+lda*(jj+2)] = ptr_pA[1+bs*2];
-			A[2+ii+lda*(jj+2)] = ptr_pA[2+bs*2];
-			A[3+ii+lda*(jj+2)] = ptr_pA[3+bs*2];
-			// unroll 3
-			A[0+ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			A[1+ii+lda*(jj+3)] = ptr_pA[1+bs*3];
-			A[2+ii+lda*(jj+3)] = ptr_pA[2+bs*3];
-			A[3+ii+lda*(jj+3)] = ptr_pA[3+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[ii+lda*jj] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			A[0+ii+lda*jj] = ptr_pA[0];
-			A[1+ii+lda*jj] = ptr_pA[1];
-			A[2+ii+lda*jj] = ptr_pA[2];
-			A[3+ii+lda*jj] = ptr_pA[3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[ii+lda*jj] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix structure into a matrix
-void d_cvt_tran_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	double *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[jj+0+lda*(ii+0)] = ptr_pA[0+bs*0];
-			A[jj+0+lda*(ii+1)] = ptr_pA[1+bs*0];
-			A[jj+0+lda*(ii+2)] = ptr_pA[2+bs*0];
-			A[jj+0+lda*(ii+3)] = ptr_pA[3+bs*0];
-			// unroll 1
-			A[jj+1+lda*(ii+0)] = ptr_pA[0+bs*1];
-			A[jj+1+lda*(ii+1)] = ptr_pA[1+bs*1];
-			A[jj+1+lda*(ii+2)] = ptr_pA[2+bs*1];
-			A[jj+1+lda*(ii+3)] = ptr_pA[3+bs*1];
-			// unroll 2
-			A[jj+2+lda*(ii+0)] = ptr_pA[0+bs*2];
-			A[jj+2+lda*(ii+1)] = ptr_pA[1+bs*2];
-			A[jj+2+lda*(ii+2)] = ptr_pA[2+bs*2];
-			A[jj+2+lda*(ii+3)] = ptr_pA[3+bs*2];
-			// unroll 3
-			A[jj+3+lda*(ii+0)] = ptr_pA[0+bs*3];
-			A[jj+3+lda*(ii+1)] = ptr_pA[1+bs*3];
-			A[jj+3+lda*(ii+2)] = ptr_pA[2+bs*3];
-			A[jj+3+lda*(ii+3)] = ptr_pA[3+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[jj+lda*ii] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			i=0;
-			for(; i<bs; i++)
-				{
-				A[jj+lda*(i+ii)] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[jj+lda*ii] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector structure into a vector
-void d_cvt_strvec2vec(int m, struct d_strvec *sa, int ai, double *a)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		a[ii] = pa[ii];
-	return;
-	}
-
-
-
-// cast a matrix into a matrix structure
-void d_cast_mat2strmat(double *A, struct d_strmat *sA)
-	{
-	sA->pA = A;
-	return;
-	}
-
-
-
-// cast a matrix into the diagonal of a matrix structure
-void d_cast_diag_mat2strmat(double *dA, struct d_strmat *sA)
-	{
-	sA->dA = dA;
-	return;
-	}
-
-
-
-// cast a vector into a vector structure
-void d_cast_vec2vecmat(double *a, struct d_strvec *sa)
-	{
-	sa->pa = a;
-	return;
-	}
-
-
-
-// insert element into strmat
-void dgein1_libstr(double a, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	pA[0] = a;
-	return;
-	}
-
-
-
-// extract element from strmat
-double dgeex1_libstr(struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	return pA[0];
-	}
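dgein1_libstr and dgeex1_libstr are the clearest statement of the panel-major addressing used by every routine in this file: rows are grouped into panels of bs, each panel stores its columns contiguously in groups of bs, and consecutive panels are bs*sda doubles apart. A minimal sketch of that offset computation (the helper name is hypothetical, introduced only for illustration):

// Offset of element (ai, aj) inside a panel-major array with panel height bs
// and padded column count sda (sA->cn in the code above).
static int panel_offset(int ai, int aj, int bs, int sda)
	{
	int panel        = ai / bs;  // which panel of bs rows
	int row_in_panel = ai % bs;  // row inside that panel
	return panel*bs*sda          // skip the preceding full panels
	     + aj*bs                 // skip aj columns within the panel
	     + row_in_panel;
	}

// With bs = 4, dgeex1_libstr(sA, ai, aj) reads sA->pA[panel_offset(ai, aj, 4, sA->cn)].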
-
-
-
-// insert element into strvec
-void dvecin1_libstr(double a, struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	x[0] = a;
-	return;
-	}
-
-
-
-// extract element from strvec
-double dvecex1_libstr(struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	return x[0];
-	}
-
-
-
-// set all elements of a strmat to a value
-void dgese_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai%bs + ai/bs*bs*sda + aj*bs;
-	int m0 = m<(bs-ai%bs)%bs ? m : (bs-ai%bs)%bs;
-	int ii, jj;
-	if(m0>0)
-		{
-		for(ii=0; ii<m0; ii++)
-			{
-			for(jj=0; jj<n; jj++)
-				{
-				pA[jj*bs] = alpha;
-				}
-			pA += 1;
-			}
-		pA += bs*(sda-1);
-		m -= m0;
-		}
-	for(ii=0; ii<m-3; ii+=4)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[0+jj*bs] = alpha;
-			pA[1+jj*bs] = alpha;
-			pA[2+jj*bs] = alpha;
-			pA[3+jj*bs] = alpha;
-			}
-		pA += bs*sda;
-		}
-	for( ; ii<m; ii++)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[jj*bs] = alpha;
-			}
-		pA += 1;
-		}
-	return;
-	}
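dgese_libstr also shows the loop shape that nearly every routine here repeats: a head of m0 rows that finishes the partially used first panel, a main loop over full bs-row panels, and a scalar tail for the leftover rows. A stripped-down sketch of the same traversal over a single column (illustrative only, assuming the panel-major layout described above):

// Set column `col` of an m-row panel-major submatrix starting at row ai to alpha.
static void set_col(int m, double alpha, double *pA, int ai, int col, int bs, int sda)
	{
	double *p = pA + ai/bs*bs*sda + ai%bs + col*bs;
	int m0 = (bs - ai%bs) % bs;   // rows left in the partially used first panel
	int ii, k;
	if(m0 > m) m0 = m;
	ii = 0;
	if(m0 > 0)
		{
		for(; ii < m0; ii++)      // head: finish the first panel
			p[ii] = alpha;
		p += m0 + bs*(sda-1);     // jump to the same column of the next panel
		}
	for(; ii < m-(bs-1); ii += bs)
		{
		for(k = 0; k < bs; k++)   // body: one full panel, rows are contiguous
			p[k] = alpha;
		p += bs*sda;              // next panel
		}
	for(k = 0; ii < m; ii++, k++)
		p[k] = alpha;             // tail: leftover rows of the last panel
	}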
-
-
-
-// set all elements of a strvec to a value
-void dvecse_libstr(int m, double alpha, struct d_strvec *sx, int xi)
-	{
-	double *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		x[ii] = alpha;
-	return;
-	}
-
-
-
-// insert a vector into diagonal
-void ddiain_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	int offsetA = ai%bs;
-
-	int kna = (bs-offsetA%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pA[ll+bs*ll] = alpha*x[ll];
-			}
-		pA += kna + bs*(sda-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pA[jj*sda+(jj+0)*bs+0] = alpha*x[jj+0];
-		pA[jj*sda+(jj+1)*bs+1] = alpha*x[jj+1];
-		pA[jj*sda+(jj+2)*bs+2] = alpha*x[jj+2];
-		pA[jj*sda+(jj+3)*bs+3] = alpha*x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pA[jj*sda+(jj+ll)*bs+ll] = alpha*x[jj+ll];
-		}
-	return;
-	}
-
-
-
-// add scalar to diagonal
-void ddiare_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int offsetA = ai%bs;
-
-	int kna = (bs-offsetA%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pA[ll+bs*ll] += alpha;
-			}
-		pA += kna + bs*(sda-1) + kna*bs;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pA[jj*sda+(jj+0)*bs+0] += alpha;
-		pA[jj*sda+(jj+1)*bs+1] += alpha;
-		pA[jj*sda+(jj+2)*bs+2] += alpha;
-		pA[jj*sda+(jj+3)*bs+3] += alpha;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pA[jj*sda+(jj+ll)*bs+ll] += alpha;
-		}
-	return;
-	}
-
-
-
-// swap two rows of a matrix struct
-void drowsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	drowsw_lib(kmax, pA, pC);
-	return;
-	}
-
-
-
-// permute the rows of a matrix struct
-void drowpe_libstr(int kmax, int *ipiv, struct d_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			drowsw_libstr(sA->n, sA, ii, 0, sA, ipiv[ii], 0);
-		}
-	return;
-	}
-
-
-// extract a row into a vector
-void drowex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	drowex_lib(kmax, alpha, pA, x);
-	return;
-	}
-
-
-
-// insert a vector into a row
-void drowin_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	drowin_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// add a vector to a row
-void drowad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	drowad_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// extract vector from column
-void dcolex_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	dcolex_lib(kmax, ai%bs, pA, sda, x);
-	return;
-	}
-
-
-
-
-// insert a vector as a column
-void dcolin_libstr(int kmax, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	dcolin_lib(kmax, x, ai%bs, pA, sda);
-	return;
-	}
-
-
-
-
-// swap two cols of a matrix struct
-void dcolsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dcolsw_lib(kmax, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// permute the cols of a matrix struct
-void dcolpe_libstr(int kmax, int *ipiv, struct d_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			dcolsw_libstr(sA->m, sA, 0, ii, sA, 0, ipiv[ii]);
-		}
-	return;
-	}
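drowpe_libstr and dcolpe_libstr above apply a LAPACK-style pivot vector: ipiv[ii] names the row (or column) that was swapped with index ii during factorization, so replaying the swaps in order reproduces the full permutation. A tiny standalone illustration on a plain index array (not part of the library):

// Apply a LAPACK-style pivot vector to an array of row labels,
// mirroring what drowpe_libstr does on a panel-major matrix.
static void apply_pivots(int kmax, const int *ipiv, int *rows)
	{
	int ii, tmp;
	for(ii = 0; ii < kmax; ii++)
		{
		if(ipiv[ii] != ii)
			{
			tmp = rows[ii];
			rows[ii] = rows[ipiv[ii]];
			rows[ipiv[ii]] = tmp;
			}
		}
	}

// e.g. ipiv = {2, 2, 2} applied to rows {0, 1, 2} yields {2, 0, 1}:
// step 0 swaps rows 0 and 2, step 1 swaps rows 1 and 2, step 2 is a no-op.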
-
-
-
-// copy a generic strmat into a generic strmat
-void dgecp_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dgecp_lib(m, n, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// scale a generic strmat
-void dgesc_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	dgecp_lib(m, n, alpha, ai%bs, pA, sda, ai%bs, pA, sda);
-	return;
-	}
-
-
-
-// copy a strvec into a strvec
-void dveccp_libstr(int m, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci)
-	{
-	double *pa = sa->pa + ai;
-	double *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] = pa[ii+0];
-		pc[ii+1] = pa[ii+1];
-		pc[ii+2] = pa[ii+2];
-		pc[ii+3] = pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] = pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// scale a strvec
-void dvecsc_libstr(int m, double alpha, struct d_strvec *sa, int ai)
-	{
-	double *pa = sa->pa + ai;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pa[ii+0] *= alpha;
-		pa[ii+1] *= alpha;
-		pa[ii+2] *= alpha;
-		pa[ii+3] *= alpha;
-		}
-	for(; ii<m; ii++)
-		{
-		pa[ii+0] *= alpha;
-		}
-	return;
-	}
-
-
-
-// copy a lower triangular strmat into a lower triangular strmat
-void dtrcp_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dtrcp_l_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// scale and add a generic strmat into a generic strmat
-void dgead_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dgead_lib(m, n, alpha, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// copy and transpose a generic strmat into a generic strmat
-void dgetr_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dgetr_lib(m, n, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// copy and transpose a lower triangular strmat into an upper triangular strmat
-void dtrtr_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dtrtr_l_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// copy and transpose an upper triangular strmat into a lower triangular strmat
-void dtrtr_u_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	double *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	dtrtr_u_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// insert a strvec to diagonal of strmat, sparse formulation
-void ddiain_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	int sdd = sD->cn;
-	double *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract a vector from diagonal
-void ddiaex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	int offsetA = ai%bs;
-
-	int kna = (bs-offsetA%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			x[ll] = alpha*pA[ll+bs*ll];
-			}
-		pA += kna + bs*(sda-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = alpha*pA[jj*sda+(jj+0)*bs+0];
-		x[jj+1] = alpha*pA[jj*sda+(jj+1)*bs+1];
-		x[jj+2] = alpha*pA[jj*sda+(jj+2)*bs+2];
-		x[jj+3] = alpha*pA[jj*sda+(jj+3)*bs+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[jj+ll] = alpha*pA[jj*sda+(jj+ll)*bs+ll];
-		}
-	return;
-	}
-
-
-
-// extract the diagonal of a strmat to a strvec, sparse formulation
-void ddiaex_sp_libstr(int kmax, double alpha, int *idx, struct d_strmat *sD, int di, int dj, struct d_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	int sdd = sD->cn;
-	double *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs];
-		}
-	return;
-	}
-
-
-
-// add a vector to diagonal
-void ddiaad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	double *x = sx->pa + xi;
-	int offsetA = ai%bs;
-
-	int kna = (bs-offsetA%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pA[ll+bs*ll] += alpha*x[ll];
-			}
-		pA += kna + bs*(sda-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pA[jj*sda+(jj+0)*bs+0] += alpha*x[jj+0];
-		pA[jj*sda+(jj+1)*bs+1] += alpha*x[jj+1];
-		pA[jj*sda+(jj+2)*bs+2] += alpha*x[jj+2];
-		pA[jj*sda+(jj+3)*bs+3] += alpha*x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pA[jj*sda+(jj+ll)*bs+ll] += alpha*x[jj+ll];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to diagonal of strmat, sparse formulation
-void ddiaad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	int sdd = sD->cn;
-	double *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to another strvec and insert to diagonal of strmat, sparse formulation
-void ddiaadin_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	int sdd = sD->cn;
-	double *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to row of strmat, sparse formulation
-void drowad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	double *x = sx->pa + xi;
-	int sdd = sD->cn;
-	double *pD = sD->pA + di/bs*bs*sdd + di%bs + dj*bs;
-	drowad_libsp(kmax, idx, alpha, x, pD);
-	return;
-	}
-
-
-
-void dvecad_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] += alpha * x[ii];
-	return;
-	}
-
-
-
-void dvecin_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] = alpha * x[ii];
-	return;
-	}
-
-
-
-void dvecex_sp_libstr(int m, double alpha, int *idx, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[ii] = alpha * x[idx[ii]];
-	return;
-	}
-
-
-
-void dveccl_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi)
-	{
-
-	double *xm = sxm->pa + xim;
-	double *x  = sx->pa + xi;
-	double *xp = sxp->pa + xip;
-	double *z  = sz->pa + zi;
-
-	int ii;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	double d0;
-
-	__m256d
-		xm0, x0, xp0, z0, tmp0, tmp1, ones, mones, mask1, mask2;
-
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-	mones = _mm256_set_pd( -1.0, -1.0, -1.0, -1.0 );
-	mask1 = _mm256_set_pd( 3.5, 2.5, 1.5, 0.5 );
-
-	for(ii=0; ii<m-3; ii+=4)
-		{
-		x0  = _mm256_loadu_pd( &x[ii] );
-		xp0 = _mm256_loadu_pd( &xp[ii] );
-		xm0 = _mm256_loadu_pd( &xm[ii] );
-		tmp0 = _mm256_cmp_pd( xp0, x0, 0x2 );
-		tmp1 = _mm256_cmp_pd( x0, xm0, 0x2 );
-		z0 = _mm256_blendv_pd( x0, xp0, tmp0 );
-		z0 = _mm256_blendv_pd( z0, xm0, tmp1 );
-		_mm256_storeu_pd( &z[ii], z0 );
-		}
-	if(ii<m)
-		{
-		d0 = (double) m-ii;
-		mask2 = _mm256_broadcast_sd( &d0 );
-		mask2 = _mm256_sub_pd( mask1, mask2 );
-		x0  = _mm256_loadu_pd( &x[ii] );
-		xp0 = _mm256_loadu_pd( &xp[ii] );
-		xm0 = _mm256_loadu_pd( &xm[ii] );
-		tmp0 = _mm256_cmp_pd( xp0, x0, 0x2 );
-		tmp1 = _mm256_cmp_pd( x0, xm0, 0x2 );
-		z0 = _mm256_blendv_pd( x0, xp0, tmp0 );
-		z0 = _mm256_blendv_pd( z0, xm0, tmp1 );
-		_mm256_maskstore_pd( &z[ii], _mm256_castpd_si256( mask2 ), z0 );
-		}
-#else
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			}
-		else
-			{
-			z[ii] = x[ii];
-			}
-		}
-#endif
-
-	return;
-
-	}
-
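The remainder handling in the SIMD branch above (and in the two routines that follow) relies on a small trick: mask1 holds the lane "indices" {0.5, 1.5, 2.5, 3.5}, and subtracting the count of remaining elements leaves a negative value, hence a set sign bit, exactly in the lanes that are still in range, which is what _mm256_maskstore_pd keys on. A minimal sketch of the trick in isolation (assumes AVX; the helper name is illustrative):

#include <immintrin.h>

// Store only the first n (0 < n < 4) doubles of v to dst, using the
// lane-index-minus-count trick from the loops above.
static void masked_tail_store(double *dst, __m256d v, int n)
	{
	double d0 = (double) n;
	__m256d idx  = _mm256_set_pd(3.5, 2.5, 1.5, 0.5); // lane "indices" 0.5 .. 3.5
	__m256d mask = _mm256_sub_pd(idx, _mm256_broadcast_sd(&d0));
	// lanes with index < n become negative -> sign bit set -> stored
	_mm256_maskstore_pd(dst, _mm256_castpd_si256(mask), v);
	}

With n = 2 the mask becomes {-1.5, -0.5, 0.5, 1.5}, so only the first two lanes are written back.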
-
-
-void dveccl_mask_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi, struct d_strvec *sm, int mi)
-	{
-
-	double *xm = sxm->pa + xim;
-	double *x  = sx->pa + xi;
-	double *xp = sxp->pa + xip;
-	double *z  = sz->pa + zi;
-	double *mask  = sm->pa + mi;
-
-	int ii;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	double d0;
-
-	__m256d
-		xm0, x0, xp0, z0, mask0, tmp0, tmp1, ones, mones, mask1, mask2;
-
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-	mones = _mm256_set_pd( -1.0, -1.0, -1.0, -1.0 );
-	mask1 = _mm256_set_pd( 3.5, 2.5, 1.5, 0.5 );
-
-	for(ii=0; ii<m-3; ii+=4)
-		{
-		mask0 = _mm256_setzero_pd();
-		x0  = _mm256_loadu_pd( &x[ii] );
-		xp0 = _mm256_loadu_pd( &xp[ii] );
-		xm0 = _mm256_loadu_pd( &xm[ii] );
-		tmp0 = _mm256_cmp_pd( xp0, x0, 0x2 );
-		tmp1 = _mm256_cmp_pd( x0, xm0, 0x2 );
-		z0 = _mm256_blendv_pd( x0, xp0, tmp0 );
-		z0 = _mm256_blendv_pd( z0, xm0, tmp1 );
-		mask0 = _mm256_blendv_pd( mask0, ones, tmp0 );
-		mask0 = _mm256_blendv_pd( mask0, mones, tmp1 );
-		_mm256_storeu_pd( &z[ii], z0 );
-		_mm256_storeu_pd( &mask[ii], mask0 );
-		}
-	if(ii<m)
-		{
-		d0 = (double) m-ii;
-		mask2 = _mm256_broadcast_sd( &d0 );
-		mask2 = _mm256_sub_pd( mask1, mask2 );
-		mask0 = _mm256_setzero_pd();
-		x0  = _mm256_loadu_pd( &x[ii] );
-		xp0 = _mm256_loadu_pd( &xp[ii] );
-		xm0 = _mm256_loadu_pd( &xm[ii] );
-		tmp0 = _mm256_cmp_pd( xp0, x0, 0x2 );
-		tmp1 = _mm256_cmp_pd( x0, xm0, 0x2 );
-		z0 = _mm256_blendv_pd( x0, xp0, tmp0 );
-		z0 = _mm256_blendv_pd( z0, xm0, tmp1 );
-		mask0 = _mm256_blendv_pd( mask0, ones, tmp0 );
-		mask0 = _mm256_blendv_pd( mask0, mones, tmp1 );
-		_mm256_maskstore_pd( &z[ii], _mm256_castpd_si256( mask2 ), z0 );
-		_mm256_maskstore_pd( &mask[ii], _mm256_castpd_si256( mask2 ), mask0 );
-		}
-#else
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			mask[ii] = 1.0;
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			mask[ii] = -1.0;
-			}
-		else
-			{
-			z[ii] = x[ii];
-			mask[ii] = 0.0;
-			}
-		}
-#endif
-
-	return;
-
-	}
-
-
-
-void dvecze_libstr(int m, struct d_strvec *sm, int mi, struct d_strvec *sv, int vi, struct d_strvec *se, int ei)
-	{
-	double *mask = sm->pa + mi;
-	double *v = sv->pa + vi;
-	double *e = se->pa + ei;
-
-	int ii;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	double d0;
-
-	__m256d
-		mask0, mask1, mask2, mask3, fives, zeros, e0, v0;
-
-	fives = _mm256_set_pd( 0.5, 0.5, 0.5, 0.5 );
-	zeros = _mm256_setzero_pd();
-	mask3 = _mm256_set_pd( 3.5, 2.5, 1.5, 0.5 );
-
-	for(ii=0; ii<m-3; ii+=4)
-		{
-		v0 = _mm256_loadu_pd( &v[ii] );
-		mask0 = _mm256_loadu_pd( &mask[ii] );
-		mask1 = mask0;
-		mask0 = _mm256_sub_pd( mask0, fives);
-		mask1 = _mm256_add_pd( mask1, fives);
-		mask0 = _mm256_xor_pd( mask0, mask1);
-		e0 = _mm256_blendv_pd( zeros, v0, mask0 );
-		_mm256_storeu_pd( &e[ii], e0 );
-		}
-	if(ii<m)
-		{
-		d0 = (double) m-ii;
-		mask2 = _mm256_broadcast_sd( &d0 );
-		mask2 = _mm256_sub_pd( mask3, mask2 );
-		v0 = _mm256_loadu_pd( &v[ii] );
-		mask0 = _mm256_loadu_pd( &mask[ii] );
-		mask1 = mask0;
-		mask0 = _mm256_sub_pd( mask0, fives);
-		mask1 = _mm256_add_pd( mask1, fives);
-		mask0 = _mm256_xor_pd( mask0, mask1);
-		e0 = _mm256_blendv_pd( zeros, v0, mask0 );
-		_mm256_maskstore_pd( &e[ii], _mm256_castpd_si256( mask2 ), e0 );
-		}
-#else
-	for(ii=0; ii<m; ii++)
-		{
-		if(mask[ii]==0)
-			{
-			e[ii] = v[ii];
-			}
-		else
-			{
-			e[ii] = 0;
-			}
-		}
-#endif
-
-	}
-
-
-
-void dvecnrm_inf_libstr(int m, struct d_strvec *sx, int xi, double *ptr_norm)
-	{
-	int ii;
-	double *x = sx->pa + xi;
-	double norm = 0.0;
-	for(ii=0; ii<m; ii++)
-		norm = fmax(norm, fabs(x[ii]));
-	*ptr_norm = norm;
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/auxiliary/i_aux_ext_dep_lib.c b/third_party/blasfeo/auxiliary/i_aux_ext_dep_lib.c
deleted file mode 100644
index 1ca2292..0000000
--- a/third_party/blasfeo/auxiliary/i_aux_ext_dep_lib.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#if 0
-#include <malloc.h>
-#endif
-
-#if ! defined(OS_WINDOWS)
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-#endif
-
-
-
-/* creates a zero matrix */
-void int_zeros(int **pA, int row, int col)
-	{
-	void *temp = malloc((row*col)*sizeof(int));
-	*pA = temp;
-	int *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0;
-	}
-
-
-
-/* creates a zero matrix aligned to a cache line */
-void int_zeros_align(int **pA, int row, int col)
-	{
-#if defined(OS_WINDOWS)
-	*pA = (int *) _aligned_malloc( (row*col)*sizeof(int), 64 );
-#else
-	void *temp;
-	int err = posix_memalign(&temp, 64, (row*col)*sizeof(int));
-	if(err!=0)
-		{
-		printf("Memory allocation error");
-		exit(1);
-		}
-	*pA = temp;
-#endif
-	int *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0;
-	}
-
-
-
-/* frees matrix */
-void int_free(int *pA)
-	{
-	free( pA );
-	}
-
-
-
-/* frees aligned matrix */
-void int_free_align(int *pA)
-	{
-#if defined(OS_WINDOWS)
-	_aligned_free( pA );
-#else
-	free( pA );
-#endif
-	}
-
-
-
-/* prints a matrix in column-major format */
-void int_print_mat(int row, int col, int *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<row; i++)
-		{
-		for(j=0; j<col; j++)
-			{
-			printf("%d ", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
diff --git a/third_party/blasfeo/auxiliary/m_aux_lib.c b/third_party/blasfeo/auxiliary/m_aux_lib.c
deleted file mode 100644
index 30cb333..0000000
--- a/third_party/blasfeo/auxiliary/m_aux_lib.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if defined(LA_REFERENCE) | defined(LA_BLAS)
-
-
-
-
-void m_cvt_d2s_strvec(int m, struct d_strvec *vd, int vdi, struct s_strvec *vs, int vsi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		ps[ii] = (float) pd[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_s2d_strvec(int m, struct s_strvec *vs, int vsi, struct d_strvec *vd, int vdi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		pd[ii] = (double) ps[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_d2s_strmat(int m, int n, struct d_strmat *Md, int mid, int nid, struct s_strmat *Ms, int mis, int nis)
-	{
-	int lda = Md->m;
-	int ldb = Ms->m;
-	double *pA = Md->pA+mid+nid*lda;
-	float *pB = Ms->pA+mis+nis*ldb;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=0; ii<m; ii++)
-			{
-			pB[ii+jj*ldb] = (float) pA[ii+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-void m_cvt_s2d_strmat(int m, int n, struct s_strmat *Ms, int mis, int nis, struct d_strmat *Md, int mid, int nid)
-	{
-	int lda = Ms->m;
-	int ldb = Md->m;
-	float *pA = Ms->pA+mis+nis*lda;
-	double *pB = Md->pA+mid+nid*ldb;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=0; ii<m; ii++)
-			{
-			pB[ii+jj*ldb] = (double) pA[ii+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/auxiliary/m_aux_lib44.c b/third_party/blasfeo/auxiliary/m_aux_lib44.c
deleted file mode 100644
index a17d545..0000000
--- a/third_party/blasfeo/auxiliary/m_aux_lib44.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-
-void m_cvt_d2s_strvec(int m, struct d_strvec *vd, int vdi, struct s_strvec *vs, int vsi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		ps[ii] = (float) pd[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_s2d_strvec(int m, struct s_strvec *vs, int vsi, struct d_strvec *vd, int vdi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		pd[ii] = (double) ps[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_d2s_strmat(int m, int n, struct d_strmat *Md, int mid, int nid, struct s_strmat *Ms, int mis, int nis)
-	{
-	printf("\nm_cvt_d2s_strmat: feature not implemented yet\n\n");
-	exit(1);
-	return;
-	}
-
-
-
-void m_cvt_s2d_strmat(int m, int n, struct s_strmat *Ms, int mis, int nis, struct d_strmat *Md, int mid, int nid)
-	{
-	printf("\nm_cvt_s2d_strmat: feature not implemented yet\n\n");
-	exit(1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
diff --git a/third_party/blasfeo/auxiliary/m_aux_lib48.c b/third_party/blasfeo/auxiliary/m_aux_lib48.c
deleted file mode 100644
index e9fdcd2..0000000
--- a/third_party/blasfeo/auxiliary/m_aux_lib48.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-
-void m_cvt_d2s_strvec(int m, struct d_strvec *vd, int vdi, struct s_strvec *vs, int vsi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		ps[ii] = (float) pd[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_s2d_strvec(int m, struct s_strvec *vs, int vsi, struct d_strvec *vd, int vdi)
-	{
-	double *pd = vd->pa+vdi;
-	float *ps = vs->pa+vsi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		pd[ii] = (double) ps[ii];
-		}
-	return;
-	}
-
-
-
-void m_cvt_d2s_strmat(int m, int n, struct d_strmat *Md, int mid, int nid, struct s_strmat *Ms, int mis, int nis)
-	{
-//	printf("\nm_cvt_d2s_strmat: feature not implemented yet\n\n");
-//	exit(1);
-	if(mid!=0 || mis!=0)
-		{
-		printf("\nm_cvt_d2s_strmat: feature not implemented yet: mid=%d, mis=%d\n\n", mid, mis);
-		exit(1);
-		}
-	const int psd = 4;
-	const int pss = 8;
-	const int sdd = Md->cn;
-	double *D0 = Md->pA + nid*psd;
-	double *D1;
-	const int sds = Ms->cn;
-	float *S = Ms->pA + nis*pss;
-	int ii, jj, ll;
-	for(ii=0; ii<m-7; ii+=8)
-		{
-		D1 = D0 + psd*sdd;
-		for(jj=0; jj<n; jj++)
-			{
-			S[0+jj*pss] = (float) D0[0+jj*psd];
-			S[1+jj*pss] = (float) D0[1+jj*psd];
-			S[2+jj*pss] = (float) D0[2+jj*psd];
-			S[3+jj*pss] = (float) D0[3+jj*psd];
-			S[4+jj*pss] = (float) D1[0+jj*psd];
-			S[5+jj*pss] = (float) D1[1+jj*psd];
-			S[6+jj*pss] = (float) D1[2+jj*psd];
-			S[7+jj*pss] = (float) D1[3+jj*psd];
-			}
-		D0 += 8*sdd;
-		S  += 8*sds;
-		}
-	if(m-ii>0)
-		{
-		if(m-ii<4)
-			{
-			for(jj=0; jj<n; jj++)
-				{
-				for(ll=0; ll<m-ii; ll++)
-					{
-					S[ll+jj*pss] = (float) D0[ll+jj*psd];
-					}
-				}
-			return;
-			}
-		else
-			{
-			D1 = D0 + psd*sdd;
-			for(jj=0; jj<n; jj++)
-				{
-				S[0+jj*pss] = (float) D0[0+jj*psd];
-				S[1+jj*pss] = (float) D0[1+jj*psd];
-				S[2+jj*pss] = (float) D0[2+jj*psd];
-				S[3+jj*pss] = (float) D0[3+jj*psd];
-				for(ll=0; ll<m-ii-4; ll++)
-					{
-					S[4+ll+jj*pss] = (float) D1[ll+jj*psd];
-					}
-				}
-			}
-		}
-	return;
-	}
-
-
-
-void m_cvt_s2d_strmat(int m, int n, struct s_strmat *Ms, int mis, int nis, struct d_strmat *Md, int mid, int nid)
-	{
-	printf("\nm_cvt_s2d_strmat: feature not implemented yet\n\n");
-	exit(1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
diff --git a/third_party/blasfeo/auxiliary/s_aux_ext_dep_lib.c b/third_party/blasfeo/auxiliary/s_aux_ext_dep_lib.c
deleted file mode 100644
index 85f7ebc..0000000
--- a/third_party/blasfeo/auxiliary/s_aux_ext_dep_lib.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#if 0
-#include <malloc.h>
-#endif
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if ! defined(OS_WINDOWS)
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-#endif
-
-
-
-/* creates a zero matrix */
-void s_zeros(float **pA, int row, int col)
-	{
-	*pA = malloc((row*col)*sizeof(float));
-	float *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0.0;
-	}
-
-
-
-/* creates a zero matrix aligned to a cache line */
-void s_zeros_align(float **pA, int row, int col)
-	{
-#if defined(OS_WINDOWS)
-	*pA = (float *) _aligned_malloc( (row*col)*sizeof(float), 64 );
-#else
-	void *temp;
-	int err = posix_memalign(&temp, 64, (row*col)*sizeof(float));
-	if(err!=0)
-		{
-		printf("Memory allocation error");
-		exit(1);
-		}
-	*pA = temp;
-#endif
-	float *A = *pA;
-	int i;
-	for(i=0; i<row*col; i++) A[i] = 0.0;
-	}
-
-
-
-/* frees matrix */
-void s_free(float *pA)
-	{
-	free( pA );
-	}
-
-
-
-/* frees aligned matrix */
-void s_free_align(float *pA)
-	{
-#if defined(OS_WINDOWS)
-	_aligned_free( pA );
-#else
-	free( pA );
-#endif
-	}
-
-
-
-/* prints a matrix in column-major format */
-void s_print_mat(int m, int n, float *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<m; i++)
-		{
-		for(j=0; j<n; j++)
-			{
-			printf("%9.5f ", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints the transposed of a matrix in column-major format */
-void s_print_tran_mat(int row, int col, float *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			printf("%9.5f ", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints a matrix in column-major format to a file */
-void s_print_to_file_mat(FILE *file, int row, int col, float *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<row; i++)
-		{
-		for(j=0; j<col; j++)
-			{
-			fprintf(file, "%9.5f ", A[i+lda*j]);
-			}
-		fprintf(file, "\n");
-		}
-	fprintf(file, "\n");
-	}	
-
-
-
-/* prints the transposed of a matrix in column-major format to a file */
-void s_print_tran_to_file_mat(FILE *file, int row, int col, float *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			fprintf(file, "%9.5f ", A[i+lda*j]);
-			}
-		fprintf(file, "\n");
-		}
-	fprintf(file, "\n");
-	}	
-
-
-
-/* prints a matrix in column-major format (exponential notation) */
-void s_print_e_mat(int m, int n, float *A, int lda)
-	{
-	int i, j;
-	for(i=0; i<m; i++)
-		{
-		for(j=0; j<n; j++)
-			{
-			printf("%e\t", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/* prints the transposed of a matrix in column-major format (exponential notation) */
-void s_print_e_tran_mat(int row, int col, float *A, int lda)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			printf("%e\t", A[i+lda*j]);
-			}
-		printf("\n");
-		}
-	printf("\n");
-	}	
-
-
-
-/****************************
-* new interface
-****************************/
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-#include "../include/blasfeo_block_size.h"
-
-
-
-// create a matrix structure for a matrix of size m*n by dynamically allocating the memory
-void s_allocate_strmat(int m, int n, struct s_strmat *sA)
-	{
-	const int bs = S_PS;
-	int nc = S_NC;
-	int al = bs*nc;
-	sA->m = m;
-	sA->n = n;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	sA->pm = pm;
-	sA->cn = cn;
-	s_zeros_align(&(sA->pA), sA->pm, sA->cn);
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	s_zeros_align(&(sA->dA), tmp, 1);
-	sA->use_dA = 0;
-	sA->memory_size = (pm*cn+tmp)*sizeof(float);
-	return;
-	}
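s_allocate_strmat pads the row count up to a multiple of the panel size bs and the column count up to a multiple of nc, so that every panel is fully backed by allocated, cache-line-aligned memory; memory_size then reports the padded footprint plus the dA scratch. A small worked example of the same rounding (the concrete values of S_PS and S_NC depend on the build target; 8 and 4 below are assumed only for illustration):

// Round-up helper matching the (x+a-1)/a*a expressions used above.
static int round_up(int x, int a)
	{
	return (x + a - 1) / a * a;
	}

// With an assumed bs = 8 and nc = 4, a 10 x 7 single-precision matrix gets
//   pm = round_up(10, 8) = 16 padded rows
//   cn = round_up(7, 4)  =  8 padded columns
// so the panel-major buffer holds pm*cn = 128 floats, plus the dA scratch vector.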
-
-
-
-// free memory of a matrix structure
-void s_free_strmat(struct s_strmat *sA)
-	{
-	s_free_align(sA->pA);
-	s_free_align(sA->dA);
-	return;
-	}
-
-
-
-// create a vector structure for a vector of size m by dynamically allocating the memory
-void s_allocate_strvec(int m, struct s_strvec *sa)
-	{
-	const int bs = S_PS;
-//	int nc = S_NC;
-//	int al = bs*nc;
-	sa->m = m;
-	int pm = (m+bs-1)/bs*bs;
-	sa->pm = pm;
-	s_zeros_align(&(sa->pa), sa->pm, 1);
-	sa->memory_size = pm*sizeof(float);
-	return;
-	}
-
-
-
-// free memory of a vector structure
-void s_free_strvec(struct s_strvec *sa)
-	{
-	s_free_align(sa->pa);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_strmat(int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = S_PS;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j]);
-				}
-			printf("\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	printf("\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transposed of a vector structure
-void s_print_tran_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_to_file_strmat(FILE * file, int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = S_PS;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j]);
-				}
-			fprintf(file, "\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			fprintf(file, "\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				fprintf(file, "%9.5f ", pA[i+bs*j+sda*ii]);
-				}
-			fprintf(file, "\n");
-			}
-		}
-	fprintf(file, "\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_to_file_strvec(FILE * file, int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_to_file_mat(file, m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transposed of a vector structure
-void s_print_tran_to_file_strvec(FILE * file, int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_to_file_mat(file, 1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_e_strmat(int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = S_PS;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int ii, i, j, tmp;
-	ii = 0;
-	if(ai%bs>0)
-		{
-		tmp = bs-ai%bs;
-		tmp = m<tmp ? m : tmp;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j]);
-				}
-			printf("\n");
-			}
-		pA += tmp + bs*(sda-1);
-		m -= tmp;
-		}
-	for( ; ii<m-(bs-1); ii+=bs)
-		{
-		for(i=0; i<bs; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	if(ii<m)
-		{
-		tmp = m-ii;
-		for(i=0; i<tmp; i++)
-			{
-			for(j=0; j<n; j++)
-				{
-				printf("%e\t", pA[i+bs*j+sda*ii]);
-				}
-			printf("\n");
-			}
-		}
-	printf("\n");
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_e_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_e_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transposed of a vector structure
-void s_print_e_tran_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_e_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-#elif defined(LA_BLAS) | defined(LA_REFERENCE)
-
-
-
-// create a matrix structure for a matrix of size m*n
-void s_allocate_strmat(int m, int n, struct s_strmat *sA)
-	{
-	sA->m = m;
-	sA->n = n;
-	s_zeros(&(sA->pA), sA->m, sA->n);
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	s_zeros(&(sA->dA), tmp, 1);
-	sA->memory_size = (m*n+tmp)*sizeof(float);
-	return;
-	}
-
-
-
-// free memory of a matrix structure
-void s_free_strmat(struct s_strmat *sA)
-	{
-	free(sA->pA);
-	free(sA->dA);
-	return;
-	}
-
-
-
-// create a vector structure for a vector of size m
-void s_allocate_strvec(int m, struct s_strvec *sa)
-	{
-	sa->m = m;
-	s_zeros(&(sa->pa), sa->m, 1);
-	sa->memory_size = m*sizeof(float);
-	return;
-	}
-
-
-
-// free memory of a vector structure
-void s_free_strvec(struct s_strvec *sa)
-	{
-	free(sa->pa);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_strmat(int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	s_print_mat(m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print the transposed of a vector structure
-void s_print_tran_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_to_file_strmat(FILE *file, int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	s_print_to_file_mat(file, m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_to_file_strvec(FILE *file, int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_to_file_mat(file, m, 1, pa, m);
-	return;
-	}
-
-
-
-// print and transpose a vector structure
-void s_print_to_file_tran_strvec(FILE *file, int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_to_file_mat(file, 1, m, pa, 1);
-	return;
-	}
-
-
-
-// print a matrix structure
-void s_print_e_strmat(int m, int n, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	s_print_e_mat(m, n, pA, lda);
-	return;
-	}
-
-
-
-// print a vector structure
-void s_print_e_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_e_mat(m, 1, pa, m);
-	return;
-	}
-
-
-
-// print and transpose a vector structure
-void s_print_e_tran_strvec(int m, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	s_print_e_mat(1, m, pa, 1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
diff --git a/third_party/blasfeo/auxiliary/s_aux_lib.c b/third_party/blasfeo/auxiliary/s_aux_lib.c
deleted file mode 100644
index 978eb9a..0000000
--- a/third_party/blasfeo/auxiliary/s_aux_lib.c
+++ /dev/null
@@ -1,956 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-
-
-
-#if defined(LA_REFERENCE) | defined(LA_BLAS)
-
-
-
-// return memory size (in bytes) needed for a strmat
-int s_size_strmat(int m, int n)
-	{
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	int size = (m*n+tmp)*sizeof(float);
-	return size;
-	}
-
-
-
-// return memory size (in bytes) needed for the diagonal of a strmat
-int s_size_diag_strmat(int m, int n)
-	{
-	int size = 0;
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	size = tmp*sizeof(float);
-	return size;
-	}
-
-
-
-// create a matrix structure for a matrix of size m*n by using memory passed by a pointer
-void s_create_strmat(int m, int n, struct s_strmat *sA, void *memory)
-	{
-	sA->m = m;
-	sA->n = n;
-	float *ptr = (float *) memory;
-	sA->pA = ptr;
-	ptr += m*n;
-	int tmp = m<n ? m : n; // al(min(m,n)) // XXX max ???
-	sA->dA = ptr;
-	ptr += tmp;
-	sA->use_dA = 0;
-	sA->memory_size = (m*n+tmp)*sizeof(float);
-	return;
-	}
-
-
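The intended pairing of s_size_strmat() and s_create_strmat() is: query the byte count, hand the routine a buffer of at least that size, and the strmat fields are mapped onto it. A hedged sketch follows (example_user_memory() is hypothetical; error handling and alignment concerns are omitted).

#include <stdlib.h>
#include "blasfeo_common.h"  // assumed path: declares struct s_strmat

void example_user_memory(int m, int n)
	{
	struct s_strmat sA;
	void *mem = malloc(s_size_strmat(m, n));  // m*n floats plus min(m,n) for the diagonal workspace
	s_create_strmat(m, n, &sA, mem);          // sA->pA and sA->dA now point into mem
	/* ... use sA ... */
	free(mem);
	}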
-
-// return memory size (in bytes) needed for a strvec
-int s_size_strvec(int m)
-	{
-	int size = m*sizeof(float);
-	return size;
-	}
-
-
-
-// create a vector structure for a vector of size m by using memory passed by a pointer
-void s_create_strvec(int m, struct s_strvec *sa, void *memory)
-	{
-	sa->m = m;
-	float *ptr = (float *) memory;
-	sa->pa = ptr;
-//	ptr += m * n;
-	sa->memory_size = m*sizeof(float);
-	return;
-	}
-
-
-
-// convert a matrix into a matrix structure
-void s_cvt_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	float *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[ii+0+jj*lda2] = A[ii+0+jj*lda];
-			pA[ii+1+jj*lda2] = A[ii+1+jj*lda];
-			pA[ii+2+jj*lda2] = A[ii+2+jj*lda];
-			pA[ii+3+jj*lda2] = A[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pA[ii+0+jj*lda2] = A[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix into a matrix structure
-void s_cvt_tran_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	float *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[jj+(ii+0)*lda2] = A[ii+0+jj*lda];
-			pA[jj+(ii+1)*lda2] = A[ii+1+jj*lda];
-			pA[jj+(ii+2)*lda2] = A[ii+2+jj*lda];
-			pA[jj+(ii+3)*lda2] = A[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pA[jj+(ii+0)*lda2] = A[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector into a vector structure
-void s_cvt_vec2strvec(int m, float *a, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		pa[ii] = a[ii];
-	return;
-	}
-
-
-
-// convert a matrix structure into a matrix
-void s_cvt_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	float *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			A[ii+0+jj*lda] = pA[ii+0+jj*lda2];
-			A[ii+1+jj*lda] = pA[ii+1+jj*lda2];
-			A[ii+2+jj*lda] = pA[ii+2+jj*lda2];
-			A[ii+3+jj*lda] = pA[ii+3+jj*lda2];
-			}
-		for(; ii<m; ii++)
-			{
-			A[ii+0+jj*lda] = pA[ii+0+jj*lda2];
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix structure into a matrix
-void s_cvt_tran_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	int ii, jj;
-	int lda2 = sA->m;
-	float *pA = sA->pA + ai + aj*lda2;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			A[jj+(ii+0)*lda] = pA[ii+0+jj*lda2];
-			A[jj+(ii+1)*lda] = pA[ii+1+jj*lda2];
-			A[jj+(ii+2)*lda] = pA[ii+2+jj*lda2];
-			A[jj+(ii+3)*lda] = pA[ii+3+jj*lda2];
-			}
-		for(; ii<m; ii++)
-			{
-			A[jj+(ii+0)*lda] = pA[ii+0+jj*lda2];
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector structure into a vector
-void s_cvt_strvec2vec(int m, struct s_strvec *sa, int ai, float *a)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		a[ii] = pa[ii];
-	return;
-	}
-
-
-
-// cast a matrix into a matrix structure
-void s_cast_mat2strmat(float *A, struct s_strmat *sA)
-	{
-	sA->pA = A;
-	return;
-	}
-
-
-
-// cast a matrix into the diagonal of a matrix structure
-void s_cast_diag_mat2strmat(float *dA, struct s_strmat *sA)
-	{
-	sA->dA = dA;
-	return;
-	}
-
-
-
-// cast a vector into a vector structure
-void s_cast_vec2vecmat(float *a, struct s_strvec *sa)
-	{
-	sa->pa = a;
-	return;
-	}
-
-
-
-// insert element into strmat
-void sgein1_libstr(float a, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	pA[0] = a;
-	return;
-	}
-
-
-
-// extract element from strmat
-float sgeex1_libstr(struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	return pA[0];
-	}
-
-
-
-// insert element into strvec
-void svecin1_libstr(float a, struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	x[0] = a;
-	return;
-	}
-
-
-
-// extract element from strvec
-float svecex1_libstr(struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	return x[0];
-	}
-
-
-
-// set all elements of a strmat to a value
-void sgese_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=0; ii<m; ii++)
-			{
-			pA[ii+lda*jj] = alpha;
-			}
-		}
-	return;
-	}
-
-
-
-// set all elements of a strvec to a value
-void svecse_libstr(int m, float alpha, struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		x[ii] = alpha;
-	return;
-	}
-
-
-
-// extract diagonal to vector
-void sdiaex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		x[ii] = alpha*pA[ii*(lda+1)];
-	return;
-	}
-
-
-
-// insert a vector into diagonal
-void sdiain_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*(lda+1)] = alpha*x[ii];
-	return;
-	}
-
-
-
-// extract a row into a vector
-void srowex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		x[ii] = alpha*pA[ii*lda];
-	return;
-	}
-
-
-
-// insert a vector  into a row
-void srowin_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*lda] = alpha*x[ii];
-	return;
-	}
-
-
-
-// add a vector to a row
-void srowad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*lda] += alpha*x[ii];
-	return;
-	}
-
-
-
-// swap two rows of a matrix struct
-void srowsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*lda;
-	int ii;
-	float tmp;
-	for(ii=0; ii<kmax; ii++)
-		{
-		tmp = pA[ii*lda];
-		pA[ii*lda] = pC[ii*ldc];
-		pC[ii*ldc] = tmp;
-		}
-	return;
-	}
-
-
-
-// permute the rows of a matrix struct
-void srowpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			srowsw_libstr(sA->n, sA, ii, 0, sA, ipiv[ii], 0);
-		}
-	return;
-	}
-
-
-
-// insert a vector into a col
-void scolin_libstr(int kmax, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii] = x[ii];
-	return;
-	}
-
-
-
-// swap two cols of a matrix struct
-void scolsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*lda;
-	int ii;
-	float tmp;
-	for(ii=0; ii<kmax; ii++)
-		{
-		tmp = pA[ii];
-		pA[ii] = pC[ii];
-		pC[ii] = tmp;
-		}
-	return;
-	}
-
-
-
-// permute the cols of a matrix struct
-void scolpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			scolsw_libstr(sA->m, sA, 0, ii, sA, 0, ipiv[ii]);
-		}
-	return;
-	}
-
-
-
-// copy a generic strmat into a generic strmat
-void sgecp_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			pC[ii+1+jj*ldc] = pA[ii+1+jj*lda];
-			pC[ii+2+jj*ldc] = pA[ii+2+jj*lda];
-			pC[ii+3+jj*ldc] = pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scale a generic strmat
-void sgesc_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pA[ii+0+jj*lda] *= alpha;
-			pA[ii+1+jj*lda] *= alpha;
-			pA[ii+2+jj*lda] *= alpha;
-			pA[ii+3+jj*lda] *= alpha;
-			}
-		for(; ii<m; ii++)
-			{
-			pA[ii+0+jj*lda] *= alpha;
-			}
-		}
-	return;
-	}
-
-
-
-// copy a strvec into a strvec
-void sveccp_libstr(int m, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] = pa[ii+0];
-		pc[ii+1] = pa[ii+1];
-		pc[ii+2] = pa[ii+2];
-		pc[ii+3] = pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] = pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// scale a strvec
-void svecsc_libstr(int m, float alpha, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pa[ii+0] *= alpha;
-		pa[ii+1] *= alpha;
-		pa[ii+2] *= alpha;
-		pa[ii+3] *= alpha;
-		}
-	for(; ii<m; ii++)
-		{
-		pa[ii+0] *= alpha;
-		}
-	return;
-	}
-
-
-
-// copy a lower triangular strmat into a lower triangular strmat
-void strcp_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = jj;
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scale and add a generic strmat into a generic strmat
-void sgead_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[ii+0+jj*ldc] += alpha*pA[ii+0+jj*lda];
-			pC[ii+1+jj*ldc] += alpha*pA[ii+1+jj*lda];
-			pC[ii+2+jj*ldc] += alpha*pA[ii+2+jj*lda];
-			pC[ii+3+jj*ldc] += alpha*pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[ii+0+jj*ldc] += alpha*pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// scales and adds a strvec into a strvec
-void svecad_libstr(int m, float alpha, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		pc[ii+1] += alpha*pa[ii+1];
-		pc[ii+2] += alpha*pa[ii+2];
-		pc[ii+3] += alpha*pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// copy and transpose a generic strmat into a generic strmat
-void sgetr_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-3; ii+=4)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			pC[jj+(ii+1)*ldc] = pA[ii+1+jj*lda];
-			pC[jj+(ii+2)*ldc] = pA[ii+2+jj*lda];
-			pC[jj+(ii+3)*ldc] = pA[ii+3+jj*lda];
-			}
-		for(; ii<m; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// copy and transpose a lower triangular strmat into an upper triangular strmat
-void strtr_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = jj;
-		for(; ii<m; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// copy and transpose an upper triangular strmat into a lower triangular strmat
-void strtr_u_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	int ldc = sC->m;
-	float *pC = sC->pA + ci + cj*ldc;
-	int ii, jj;
-	for(jj=0; jj<m; jj++)
-		{
-		ii = 0;
-		for(; ii<=jj; ii++)
-			{
-			pC[jj+(ii+0)*ldc] = pA[ii+0+jj*lda];
-			}
-		}
-	return;
-	}
-
-
-
-// insert a strvec to the diagonal of a strmat, sparse formulation
-void sdiain_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	float *x = sx->pa + xi;
-	int ldd = sD->m;
-	float *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract the diagonal of a strmat into a strvec, sparse formulation
-void sdiaex_sp_libstr(int kmax, float alpha, int *idx, struct s_strmat *sD, int di, int dj, struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	int ldd = sD->m;
-	float *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[ii*(ldd+1)];
-		}
-	return;
-	}
-
-
-
-// add a vector to diagonal
-void sdiaad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	int lda = sA->m;
-	float *pA = sA->pA + ai + aj*lda;
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		pA[ii*(lda+1)] += alpha*x[ii];
-	return;
-	}
-
-
-
-// add scaled strvec to diagonal of strmat, sparse formulation
-void sdiaad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	float *x = sx->pa + xi;
-	int ldd = sD->m;
-	float *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to another strvec and insert to diagonal of strmat, sparse formulation
-void sdiaadin_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	int ldd = sD->m;
-	float *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*(ldd+1)] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to row of strmat, sparse formulation
-void srowad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	float *x = sx->pa + xi;
-	int ldd = sD->m;
-	float *pD = sD->pA + di + dj*ldd;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*ldd] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-
-void svecad_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] += alpha * x[ii];
-	return;
-	}
-
-
-
-void svecin_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] = alpha * x[ii];
-	return;
-	}
-
-
-
-void svecex_sp_libstr(int m, float alpha, int *idx, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[ii] = alpha * x[idx[ii]];
-	return;
-	}
-
-
-// clip without mask return
-void sveccl_libstr(int m, struct s_strvec *sxm, int xim, struct s_strvec *sx, int xi, struct s_strvec *sxp, int xip, struct s_strvec *sz, int zi)
-	{
-	float *xm = sxm->pa + xim;
-	float *x  = sx->pa + xi;
-	float *xp = sxp->pa + xip;
-	float *z  = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			}
-		else
-			{
-			z[ii] = x[ii];
-			}
-		}
-	return;
-	}
-
-
-
-// clip with mask return
-void sveccl_mask_libstr(int m, struct s_strvec *sxm, int xim, struct s_strvec *sx, int xi, struct s_strvec *sxp, int xip, struct s_strvec *sz, int zi, struct s_strvec *sm, int mi)
-	{
-	float *xm = sxm->pa + xim;
-	float *x  = sx->pa + xi;
-	float *xp = sxp->pa + xip;
-	float *z  = sz->pa + zi;
-	float *mask  = sm->pa + mi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(x[ii]>=xp[ii])
-			{
-			z[ii] = xp[ii];
-			mask[ii] = 1.0;
-			}
-		else if(x[ii]<=xm[ii])
-			{
-			z[ii] = xm[ii];
-			mask[ii] = -1.0;
-			}
-		else
-			{
-			z[ii] = x[ii];
-			mask[ii] = 0.0;
-			}
-		}
-	return;
-	}
-
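For reference, both clip routines above apply the same element-wise rule, z = min(max(x, xm), xp); the mask variant additionally records which bound was active (-1.0 lower, +1.0 upper, 0.0 inactive). A scalar sketch of the rule (clip1() is illustrative, not part of the API):

// Scalar equivalent of the element-wise clip above.
static float clip1(float xm, float x, float xp)
	{
	return x >= xp ? xp : (x <= xm ? xm : x);
	}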
-
-// zero out components using mask
-void svecze_libstr(int m, struct s_strvec *sm, int mi, struct s_strvec *sv, int vi, struct s_strvec *se, int ei)
-	{
-	float *mask = sm->pa + mi;
-	float *v = sv->pa + vi;
-	float *e = se->pa + ei;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		{
-		if(mask[ii]==0)
-			{
-			e[ii] = v[ii];
-			}
-		else
-			{
-			e[ii] = 0;
-			}
-		}
-	return;
-	}
-
-
-
-void svecnrm_inf_libstr(int m, struct s_strvec *sx, int xi, float *ptr_norm)
-	{
-	int ii;
-	float *x = sx->pa + xi;
-	float norm = 0.0;
-	for(ii=0; ii<m; ii++)
-		norm = fmax(norm, fabs(x[ii]));
-	*ptr_norm = norm;
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
diff --git a/third_party/blasfeo/auxiliary/s_aux_lib4.c b/third_party/blasfeo/auxiliary/s_aux_lib4.c
deleted file mode 100644
index 12acc47..0000000
--- a/third_party/blasfeo/auxiliary/s_aux_lib4.c
+++ /dev/null
@@ -1,3107 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_block_size.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-// scales and adds a strvec into a strvec
-void svecad_libstr(int m, float *alphap, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float alpha = alphap[0];
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		pc[ii+1] += alpha*pa[ii+1];
-		pc[ii+2] += alpha*pa[ii+2];
-		pc[ii+3] += alpha*pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// transpose general matrix; m and n refer to the original matrix
-void sgetr_lib(int m, int n, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-
-/*
-
-m = 5
-n = 3
-offsetA = 1
-offsetC = 2
-
-A = 
- x x x
- -
- x x x
- x x x
- x x x
- x x x
-
-C =
- x x x x x
- x x x x x
- -
- x x x x x
-
-*/
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-	
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			kernel_sgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-		else if(mna==2)
-			kernel_sgetr_2_lib4(0, n, nna, alpha, pA, pC, sdc);
-		else //if(mna==3)
-			kernel_sgetr_3_lib4(0, n, nna, alpha, pA, pC, sdc);
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-	for( ; ii<m-3; ii+=4)
-//	for( ; ii<m; ii+=4)
-		{
-		kernel_sgetr_4_lib4(0, n, nna, alpha, pA, pC, sdc);
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-
-	// clean-up at the end using smaller kernels
-	if(ii==m)
-		return;
-	
-	if(m-ii==1)
-		kernel_sgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-	else if(m-ii==2)
-		kernel_sgetr_2_lib4(0, n, nna, alpha, pA, pC, sdc);
-	else if(m-ii==3)
-		kernel_sgetr_3_lib4(0, n, nna, alpha, pA, pC, sdc);
-		
-	return;
-	
-	}	
-
-
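The offset handling in sgetr_lib() (and in the other panel-major routines below) hinges on mna/nna, the number of rows still belonging to the first, partially used panel. A small worked example of that arithmetic, assuming bs = 4 as in the code above; the program is illustrative only.

#include <stdio.h>

int main(void)
	{
	const int bs = 4;
	int offsetA = 1, m = 5;
	int mna = (bs - offsetA % bs) % bs;  // rows left in the first panel: (4-1)%4 = 3
	if(m < mna)
		mna = m;
	printf("mna = %d\n", mna);           // prints: mna = 3, matching the diagram above
	return 0;
	}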
-
-// transpose lower triangular matrix
-void strtr_l_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-
-/*
-
-A = 
- x
- x x
- x x x
- x x x x
-  
- x x x x x
- x x x x x x
- x x x x x x x
- x x x x x x x x
-
-C =
- x x x x x x x x
-  
-   x x x x x x x
-     x x x x x x
-	   x x x x x
-	     x x x x
-
-	       x x x
-	         x x
-	           x
-
-*/
-
-	int n = m;
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-	
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			{
-			pC[0] = alpha * pA[0];
-			}
-		else if(mna==2)
-			{
-			if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*(0+sdc)] = alpha * pA[1+bs*1];
-				}
-			else
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				}
-			}
-		else //if(mna==3)
-			{
-			if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*(0+sdc)] = alpha * pA[1+bs*1];
-				pC[1+bs*(1+sdc)] = alpha * pA[2+bs*1];
-				pC[2+bs*(1+sdc)] = alpha * pA[2+bs*2];
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pC[2+bs*(1+sdc)] = alpha * pA[2+bs*2];
-				}
-			else
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[0+bs*2] = alpha * pA[2+bs*0];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				}
-			}
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-	for( ; ii<m-3; ii+=4)
-		{
-		kernel_sgetr_4_lib4(1, ii, nna, alpha, pA, pC, sdc);
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-	
-	// clean-up at the end using smaller kernels
-	if(ii==m)
-		return;
-	
-	if(m-ii==1)
-		kernel_sgetr_1_lib4(1, ii, nna, alpha, pA, pC, sdc);
-	else if(m-ii==2)
-		kernel_sgetr_2_lib4(1, ii, nna, alpha, pA, pC, sdc);
-	else if(m-ii==3)
-		kernel_sgetr_3_lib4(1, ii, nna, alpha, pA, pC, sdc);
-		
-	return;
-
-	}
-
-
-
-// transpose an aligned upper triangular matrix into an aligned lower triangular matrix
-void strtr_u_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-
-/*
-
-A = 
- x x x x x x x x
-   x x x x x x x
-
-     x x x x x x
-       x x x x x
-         x x x x
-           x x x
-             x x
-               x
-
-C = 
- x
-
- x x
- x x x
- x x x x
- x x x x x
- x x x x x x
- x x x x x x x
- x x x x x x x x
-
-*/
-
-	int n = m;
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int mna = (bs-offsetA%bs)%bs;
-	mna = m<mna ? m : mna;
-	int nna = (bs-offsetC%bs)%bs;
-	nna = n<nna ? n : nna;
-	int tna = nna;
-	
-	int ii;
-
-	ii = 0;
-
-	if(mna>0)
-		{
-		if(mna==1)
-			{
-			kernel_sgetr_1_lib4(0, n, nna, alpha, pA, pC, sdc);
-			if(nna!=1)
-				{
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1;
-				tna = (bs-(offsetC+1)%bs)%bs;
-				}
-			else //if(nna==1)
-				{
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1 + (sdc-1)*bs;
-				tna = 0; //(bs-(offsetC+1)%bs)%bs;
-				}
-//			kernel_sgetr_1_lib4(0, n-1, tna, alpha, pA, pC, sdc);
-			}
-		else if(mna==2)
-			{
-			if(nna==0 || nna==3)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2;
-				tna = (bs-(offsetC+2)%bs)%bs;
-				kernel_sgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += 1*bs;
-				pC += 1 + (sdc-1)*bs;
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-//				pC[0+bs*1] = alpha * pA[1+bs*0];
-				kernel_sgetr_2_lib4(0, n-1, 0, alpha, pA, pC, sdc);
-				pA += 1*bs;
-				pC += 1;
-				tna = 3; //(bs-(offsetC+2)%bs)%bs;
-//				kernel_sgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2 + (sdc-1)*bs;
-				tna = 0; //(bs-(offsetC+2)%bs)%bs;
-				kernel_sgetr_2_lib4(0, n-2, tna, alpha, pA, pC, sdc);
-				}
-			}
-		else //if(mna==3)
-			{
-			if(nna==0)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[2+bs*0] = alpha * pA[0+bs*2];
-				pC[2+bs*1] = alpha * pA[1+bs*2];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				pA += 3*bs;
-				pC += 3;
-				tna = 1;
-				kernel_sgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==1)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pA += bs;
-				pC += 1 + (sdc-1)*bs;
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[0+bs*1] = alpha * pA[1+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[1+bs*2] = alpha * pA[2+bs*1];
-				pA += 2*bs;
-				pC += 2;
-				tna = 2;
-				kernel_sgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else if(nna==2)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pA += 2*bs;
-				pC += 2 + (sdc-1)*bs;
-//				pC[0+bs*0] = alpha * pA[0+bs*0];
-//				pC[0+bs*1] = alpha * pA[1+bs*0];
-//				pC[0+bs*2] = alpha * pA[2+bs*0];
-				kernel_sgetr_3_lib4(0, n-2, 0, alpha, pA, pC, sdc);
-				pA += 1*bs;
-				pC += 1;
-				tna = 3;
-//				kernel_sgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			else //if(nna==3)
-				{
-				pC[0+bs*0] = alpha * pA[0+bs*0];
-				pC[1+bs*0] = alpha * pA[0+bs*1];
-				pC[1+bs*1] = alpha * pA[1+bs*1];
-				pC[2+bs*0] = alpha * pA[0+bs*2];
-				pC[2+bs*1] = alpha * pA[1+bs*2];
-				pC[2+bs*2] = alpha * pA[2+bs*2];
-				pA += 3*bs;
-				pC += 3 + (sdc-1)*bs;
-				tna = 0;
-				kernel_sgetr_3_lib4(0, n-3, tna, alpha, pA, pC, sdc);
-				}
-			}
-		ii += mna;
-		pA += mna + bs*(sda-1);
-		pC += mna*bs;
-		}
-	for( ; ii<m-3; ii+=4)
-		{
-		if(tna==0)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pC[3+bs*0] = alpha * pA[0+bs*3];
-			pC[3+bs*1] = alpha * pA[1+bs*3];
-			pC[3+bs*2] = alpha * pA[2+bs*3];
-			pC[3+bs*3] = alpha * pA[3+bs*3];
-			pA += 4*bs;
-			pC += sdc*bs;
-			kernel_sgetr_4_lib4(0, n-ii-4, 0, alpha, pA, pC, sdc);
-			}
-		else if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pC[2+bs*3] = alpha * pA[3+bs*2];
-			pA += 3*bs;
-			pC += 3;
-			kernel_sgetr_4_lib4(0, n-ii-4, 1, alpha, pA, pC, sdc);
-			}
-		else if(tna==2)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pA += 2*bs;
-			pC += 2 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[0+bs*2] = alpha * pA[2+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			pC[1+bs*3] = alpha * pA[3+bs*1];
-			pA += 2*bs;
-			pC += 2;
-			kernel_sgetr_4_lib4(0, n-ii-4, 2, alpha, pA, pC, sdc);
-			}
-		else //if(tna==3)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			pA += 3*bs;
-			pC += 3 + (sdc-1)*bs;
-			kernel_sgetr_4_lib4(0, n-ii-3, 0, alpha, pA, pC, sdc);
-//			pC[0+bs*0] = alpha * pA[0+bs*0];
-//			pC[0+bs*1] = alpha * pA[1+bs*0];
-//			pC[0+bs*2] = alpha * pA[2+bs*0];
-//			pC[0+bs*3] = alpha * pA[3+bs*0];
-			pA += bs;
-			pC += 1;
-//			kernel_sgetr_4_lib4(0, n-ii-4, tna, alpha, pA, pC, sdc);
-			}
-		pA += bs*sda;
-		pC += bs*bs;
-		}
-
-	// clean-up at the end
-	if(ii==m)
-		return;
-	
-	if(m-ii==1)
-		{
-		pC[0+bs*0] = alpha * pA[0+bs*0];
-		}
-	else if(m-ii==2)
-		{
-		if(tna!=1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			}
-		else //if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			}
-		}
-	else if(m-ii==3)
-		{
-		if(tna==0 || tna==3)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[2+bs*0] = alpha * pA[0+bs*2];
-			pC[2+bs*1] = alpha * pA[1+bs*2];
-			pC[2+bs*2] = alpha * pA[2+bs*2];
-			}
-		else if(tna==1)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pA += bs;
-			pC += 1 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pC[1+bs*2] = alpha * pA[2+bs*1];
-			}
-		else //if(tna==2)
-			{
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[1+bs*0] = alpha * pA[0+bs*1];
-			pC[1+bs*1] = alpha * pA[1+bs*1];
-			pA += 2*bs;
-			pC += 2 + (sdc-1)*bs;
-			pC[0+bs*0] = alpha * pA[0+bs*0];
-			pC[0+bs*1] = alpha * pA[1+bs*0];
-			pC[0+bs*2] = alpha * pA[2+bs*0];
-			}
-		}
-		
-	return;
-
-	}
-
-
-
-// regularize diagonal 
-void sdiareg_lib(int kmax, float reg, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += reg;
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] += reg;
-		pD[jj*sdd+(jj+1)*bs+1] += reg;
-		pD[jj*sdd+(jj+2)*bs+2] += reg;
-		pD[jj*sdd+(jj+3)*bs+3] += reg;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += reg;
-		}
-	
-	}
-
-
-
-// insert vector to diagonal 
-void sdiain_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] = alpha*x[ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] = alpha*x[jj+0];
-		pD[jj*sdd+(jj+1)*bs+1] = alpha*x[jj+1];
-		pD[jj*sdd+(jj+2)*bs+2] = alpha*x[jj+2];
-		pD[jj*sdd+(jj+3)*bs+3] = alpha*x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] = alpha*x[jj+ll];
-		}
-	
-	}
-
-
-
-// insert sqrt of vector to diagonal 
-void sdiain_sqrt_lib(int kmax, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] = sqrt(x[ll]);
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] = sqrt(x[jj+0]);
-		pD[jj*sdd+(jj+1)*bs+1] = sqrt(x[jj+1]);
-		pD[jj*sdd+(jj+2)*bs+2] = sqrt(x[jj+2]);
-		pD[jj*sdd+(jj+3)*bs+3] = sqrt(x[jj+3]);
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] = sqrt(x[jj+ll]);
-		}
-	
-	}
-
-
-
-// extract diagonal to vector 
-void sdiaex_lib(int kmax, float alpha, int offset, float *pD, int sdd, float *x)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			x[ll] = alpha * pD[ll+bs*ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = alpha * pD[jj*sdd+(jj+0)*bs+0];
-		x[jj+1] = alpha * pD[jj*sdd+(jj+1)*bs+1];
-		x[jj+2] = alpha * pD[jj*sdd+(jj+2)*bs+2];
-		x[jj+3] = alpha * pD[jj*sdd+(jj+3)*bs+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[jj+ll] = alpha * pD[jj*sdd+(jj+ll)*bs+ll];
-		}
-	
-	}
-
-
-
-// add scaled vector to diagonal 
-void sdiaad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+(jj+0)*bs+0] += alpha * x[jj+0];
-		pD[jj*sdd+(jj+1)*bs+1] += alpha * x[jj+1];
-		pD[jj*sdd+(jj+2)*bs+2] += alpha * x[jj+2];
-		pD[jj*sdd+(jj+3)*bs+3] += alpha * x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += alpha * x[jj+ll];
-		}
-	
-	}
-
-
-
-// insert vector to diagonal, sparse formulation 
-void sdiain_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = alpha * x[jj];
-		}
-	
-	}
-
-
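The index expression used above for the diagonal element (i,i) of a panel-major matrix combines three terms: the panel (i/bs*bs*sdd), the row within the panel (i%bs), and the column offset (i*bs). A tiny helper, shown only to make the formula explicit (panel_diag_offset() is not a library function):

// Offset of element (i,i) in panel-major storage (illustration only).
static inline int panel_diag_offset(int i, int bs, int sdd)
	{
	return i/bs*bs*sdd + i%bs + i*bs;
	}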
-
-// extract diagonal to vector, sparse formulation 
-void sdiaex_libsp(int kmax, int *idx, float alpha, float *pD, int sdd, float *x)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[ii/bs*bs*sdd+ii%bs+ii*bs];
-		}
-	
-	}
-
-
-
-// add scaled vector to diagonal, sparse formulation 
-void sdiaad_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-// add scaled vector to another vector and insert to diagonal, sparse formulation 
-void sdiaadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = y[jj] + alpha * x[jj];
-		}
-	
-	}
-
-
-
-// insert vector to row 
-void srowin_lib(int kmax, float alpha, float *x, float *pD)
-	{
-	
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[(jj+0)*bs] = alpha*x[jj+0];
-		pD[(jj+1)*bs] = alpha*x[jj+1];
-		pD[(jj+2)*bs] = alpha*x[jj+2];
-		pD[(jj+3)*bs] = alpha*x[jj+3];
-		}
-	for(; jj<kmax; jj++)
-		{
-		pD[(jj)*bs] = alpha*x[jj];
-		}
-	
-	}
-
-
-
-// extract row to vector
-void srowex_lib(int kmax, float alpha, float *pD, float *x)
-	{
-	
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[jj+0] = alpha*pD[(jj+0)*bs];
-		x[jj+1] = alpha*pD[(jj+1)*bs];
-		x[jj+2] = alpha*pD[(jj+2)*bs];
-		x[jj+3] = alpha*pD[(jj+3)*bs];
-		}
-	for(; jj<kmax; jj++)
-		{
-		x[jj] = alpha*pD[(jj)*bs];
-		}
-	
-	}
-
-
-
-// add scaled vector to row 
-void srowad_lib(int kmax, float alpha, float *x, float *pD)
-	{
-
-	const int bs = 4;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[(jj+0)*bs] += alpha * x[jj+0];
-		pD[(jj+1)*bs] += alpha * x[jj+1];
-		pD[(jj+2)*bs] += alpha * x[jj+2];
-		pD[(jj+3)*bs] += alpha * x[jj+3];
-		}
-	for(; jj<kmax; jj++)
-		{
-		pD[(jj)*bs] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-// insert vector to row, sparse formulation 
-void srowin_libsp(int kmax, float alpha, int *idx, float *x, float *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = alpha*x[jj];
-		}
-	
-	}
-
-
-
-// add scaled vector to row, sparse formulation 
-void srowad_libsp(int kmax, int *idx, float alpha, float *x, float *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-// add scaled vector to another vector and insert to row, sparse formulation 
-void srowadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = y[jj] + alpha * x[jj];
-		}
-	
-	}
-
-
-
-// swap two rows
-void srowsw_lib(int kmax, float *pA, float *pC)
-	{
-
-	const int bs = 4;
-
-	int ii;
-	float tmp;
-
-	for(ii=0; ii<kmax-3; ii+=4)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		tmp = pA[0+bs*1];
-		pA[0+bs*1] = pC[0+bs*1];
-		pC[0+bs*1] = tmp;
-		tmp = pA[0+bs*2];
-		pA[0+bs*2] = pC[0+bs*2];
-		pC[0+bs*2] = tmp;
-		tmp = pA[0+bs*3];
-		pA[0+bs*3] = pC[0+bs*3];
-		pC[0+bs*3] = tmp;
-		pA += 4*bs;
-		pC += 4*bs;
-		}
-	for( ; ii<kmax; ii++)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		pA += 1*bs;
-		pC += 1*bs;
-		}
-	
-	}
-
-
-
-// insert vector to column 
-void scolin_lib(int kmax, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] = x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+0] = x[jj+0];
-		pD[jj*sdd+1] = x[jj+1];
-		pD[jj*sdd+2] = x[jj+2];
-		pD[jj*sdd+3] = x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+ll] = x[jj+ll];
-		}
-	
-	}
-
-
-
-// add scaled vector to column 
-void scolad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[jj*sdd+0] += alpha * x[jj+0];
-		pD[jj*sdd+1] += alpha * x[jj+1];
-		pD[jj*sdd+2] += alpha * x[jj+2];
-		pD[jj*sdd+3] += alpha * x[jj+3];
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+ll] += alpha * x[jj+ll];
-		}
-	
-	}
-
-
-
-// insert vector to column, sparse formulation 
-void scolin_libsp(int kmax, int *idx, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] = x[jj];
-		}
-	
-	}
-
-
-
-// add scaled vector to column, sparse formulation 
-void scolad_libsp(int kmax, float alpha, int *idx, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 4;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-// swaps two cols
-void scolsw_lib(int kmax, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-
-	const int bs = 4;
-
-	int ii;
-
-	float tmp;
-
-	if(offsetA==offsetC)
-		{
-		if(offsetA>0)
-			{
-			ii = 0;
-			for(; ii<bs-offsetA; ii++)
-				{
-				tmp = pA[0+bs*0];
-				pA[0+bs*0] = pC[0+bs*0];
-				pC[0+bs*0] = tmp;
-				pA += 1;
-				pC += 1;
-				}
-			pA += bs*(sda-1);
-			pC += bs*(sdc-1);
-			kmax -= bs-offsetA;
-			}
-		ii = 0;
-		for(; ii<kmax-3; ii+=4)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			tmp = pA[1+bs*0];
-			pA[1+bs*0] = pC[1+bs*0];
-			pC[1+bs*0] = tmp;
-			tmp = pA[2+bs*0];
-			pA[2+bs*0] = pC[2+bs*0];
-			pC[2+bs*0] = tmp;
-			tmp = pA[3+bs*0];
-			pA[3+bs*0] = pC[3+bs*0];
-			pC[3+bs*0] = tmp;
-			pA += bs*sda;
-			pC += bs*sdc;
-			}
-		for(; ii<kmax; ii++)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			pA += 1;
-			pC += 1;
-			}
-		}
-	else
-		{
-		printf("\nscolsw: feature not implemented yet: offsetA!=offsetC\n\n");
-		exit(1);
-		}
-
-	return;
-
-	}
-
-
-
-// insert vector to vector, sparse formulation
-void svecin_libsp(int kmax, int *idx, float *x, float *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] = x[jj];
-		}
-	
-	}
-
-
-
-// adds vector to vector, sparse formulation
-void svecad_libsp(int kmax, int *idx, float alpha, float *x, float *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// return the memory size (in bytes) needed for a strmat
-int s_size_strmat(int m, int n)
-	{
-	const int bs = 4;
-	int nc = S_NC;
-	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = (pm*cn+tmp)*sizeof(float);
-	return memory_size;
-	}
-
-
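The size computation above rounds the row count up to a whole number of panels (pm) and the column count up to a multiple of S_NC (cn). A worked example with bs = 4 and a stand-in value of 4 for S_NC (the real value comes from blasfeo_block_size.h); the program is illustrative only.

#include <stdio.h>

int main(void)
	{
	const int bs = 4;
	const int nc = 4;                 // stand-in for S_NC
	int m = 5, n = 3;
	int pm = (m + bs - 1) / bs * bs;  // 8: rows padded to full panels
	int cn = (n + nc - 1) / nc * nc;  // 4: columns padded to a multiple of nc
	printf("pm=%d cn=%d panel buffer=%d floats\n", pm, cn, pm * cn);
	return 0;
	}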
-
-// return the memory size (in bytes) needed for the diagonal of a strmat
-int s_size_diag_strmat(int m, int n)
-	{
-	const int bs = 4;
-	int nc = S_NC;
-	int al = bs*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = tmp*sizeof(float);
-	return memory_size;
-	}
-
-
-
-// create a matrix structure for a matrix of size m*n by using memory passed by a pointer
-void s_create_strmat(int m, int n, struct s_strmat *sA, void *memory)
-	{
-	const int bs = 4;
-	int nc = S_NC;
-	int al = bs*nc;
-	sA->m = m;
-	sA->n = n;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	sA->pm = pm;
-	sA->cn = cn;
-	float *ptr = (float *) memory;
-	sA->pA = ptr;
-	ptr += pm*cn;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	sA->dA = ptr;
-	ptr += tmp;
-	sA->use_dA = 0;
-	sA->memory_size = (pm*cn+tmp)*sizeof(float);
-	return;
-	}
-
-
-
-// return memory size (in bytes) needed for a strvec
-int s_size_strvec(int m)
-	{
-	const int bs = 4;
-//	int nc = S_NC;
-//	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int memory_size = pm*sizeof(float);
-	return memory_size;
-	}
-
-
-
-// create a vector structure for a vector of size m by using memory passed by a pointer
-void s_create_strvec(int m, struct s_strvec *sa, void *memory)
-	{
-	const int bs = 4;
-//	int nc = S_NC;
-//	int al = bs*nc;
-	sa->m = m;
-	int pm = (m+bs-1)/bs*bs;
-	sa->pm = pm;
-	float *ptr = (float *) memory;
-	sa->pa = ptr;
-//	ptr += pm;
-	sa->memory_size = pm*sizeof(float);
-	return;
-	}
-
-
-
-// convert a matrix into a matrix structure
-void s_cvt_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, jj, m0, m1, m2;
-	float *B, *pB;
-	m0 = (bs-ai%bs)%bs;
-	if(m0>m)
-		m0 = m;
-	m1 = m - m0;
-	jj = 0;
-	for( ; jj<n-3; jj+=4)
-		{
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				pB[ii+bs*1] = B[ii+lda*1];
-				pB[ii+bs*2] = B[ii+lda*2];
-				pB[ii+bs*3] = B[ii+lda*3];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-		for( ; ii<m-3; ii+=4)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			// col 1
-			pB[0+bs*1] = B[0+lda*1];
-			pB[1+bs*1] = B[1+lda*1];
-			pB[2+bs*1] = B[2+lda*1];
-			pB[3+bs*1] = B[3+lda*1];
-			// col 2
-			pB[0+bs*2] = B[0+lda*2];
-			pB[1+bs*2] = B[1+lda*2];
-			pB[2+bs*2] = B[2+lda*2];
-			pB[3+bs*2] = B[3+lda*2];
-			// col 3
-			pB[0+bs*3] = B[0+lda*3];
-			pB[1+bs*3] = B[1+lda*3];
-			pB[2+bs*3] = B[2+lda*3];
-			pB[3+bs*3] = B[3+lda*3];
-			// update
-			B  += 4;
-			pB += bs*sda;
-			}
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// col 1
-			pB[0+bs*1] = B[0+lda*1];
-			// col 2
-			pB[0+bs*2] = B[0+lda*2];
-			// col 3
-			pB[0+bs*3] = B[0+lda*3];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	for( ; jj<n; jj++)
-		{
-
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-		for( ; ii<m-3; ii+=4)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			// update
-			B  += 4;
-			pB += bs*sda;
-			}
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	return;
-	}
-
-
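A hedged round-trip sketch for the conversion routines in this file: pack a plain column-major array into the panel-major strmat layout with s_cvt_mat2strmat() and unpack it again with s_cvt_strmat2mat() (defined further below). example_roundtrip() is hypothetical, the library headers are assumed to be included, and sA is assumed to have been created with room for at least m x n.

void example_roundtrip(int m, int n, float *A, float *B, struct s_strmat *sA)
	{
	s_cvt_mat2strmat(m, n, A, m, sA, 0, 0);   // A (lda = m) -> panel-major sA at (0,0)
	s_cvt_strmat2mat(m, n, sA, 0, 0, B, m);   // panel-major sA -> B (lda = m)
	}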
-
-// convert and transpose a matrix into a matrix structure
-void s_cvt_tran_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, m0, m1, m2;
-	float 	*B, *pB;
-	m0 = (bs-ai%bs)%bs;
-	if(m0>n)
-		m0 = n;
-	m1 = n - m0;
-	ii = 0;
-	if(m0>0)
-		{
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m0; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		A  += m0*lda;
-		pA += m0 + bs*(sda-1);
-		}
-	ii = 0;
-	for(; ii<m1-3; ii+=bs)
-		{
-		j=0;
-		B  = A + ii*lda;
-		pB = pA + ii*sda;
-		for(; j<m-3; j+=4)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			// unroll 1
-			pB[0+1*bs] = B[1+0*lda];
-			pB[1+1*bs] = B[1+1*lda];
-			pB[2+1*bs] = B[1+2*lda];
-			pB[3+1*bs] = B[1+3*lda];
-			// unroll 2
-			pB[0+2*bs] = B[2+0*lda];
-			pB[1+2*bs] = B[2+1*lda];
-			pB[2+2*bs] = B[2+2*lda];
-			pB[3+2*bs] = B[2+3*lda];
-			// unroll 3
-			pB[0+3*bs] = B[3+0*lda];
-			pB[1+3*bs] = B[3+1*lda];
-			pB[2+3*bs] = B[3+2*lda];
-			pB[3+3*bs] = B[3+3*lda];
-			B  += 4;
-			pB += 4*bs;
-			}
-		for(; j<m; j++)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			B  += 1;
-			pB += 1*bs;
-			}
-		}
-	if(ii<m1)
-		{
-		m2 = m1-ii;
-		if(bs<m2) m2 = bs;
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m2; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector into a vector structure
-void s_cvt_vec2strvec(int m, float *a, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		pa[ii] = a[ii];
-	return;
-	}
-
-
-
-// convert a matrix structure into a matrix
-void s_cvt_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	float *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[0+ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			A[1+ii+lda*(jj+0)] = ptr_pA[1+bs*0];
-			A[2+ii+lda*(jj+0)] = ptr_pA[2+bs*0];
-			A[3+ii+lda*(jj+0)] = ptr_pA[3+bs*0];
-			// unroll 1
-			A[0+ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			A[1+ii+lda*(jj+1)] = ptr_pA[1+bs*1];
-			A[2+ii+lda*(jj+1)] = ptr_pA[2+bs*1];
-			A[3+ii+lda*(jj+1)] = ptr_pA[3+bs*1];
-			// unroll 2
-			A[0+ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			A[1+ii+lda*(jj+2)] = ptr_pA[1+bs*2];
-			A[2+ii+lda*(jj+2)] = ptr_pA[2+bs*2];
-			A[3+ii+lda*(jj+2)] = ptr_pA[3+bs*2];
-			// unroll 3
-			A[0+ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			A[1+ii+lda*(jj+3)] = ptr_pA[1+bs*3];
-			A[2+ii+lda*(jj+3)] = ptr_pA[2+bs*3];
-			A[3+ii+lda*(jj+3)] = ptr_pA[3+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[ii+lda*jj] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			A[0+ii+lda*jj] = ptr_pA[0];
-			A[1+ii+lda*jj] = ptr_pA[1];
-			A[2+ii+lda*jj] = ptr_pA[2];
-			A[3+ii+lda*jj] = ptr_pA[3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[ii+lda*jj] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix structure into a matrix
-void s_cvt_tran_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	float *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[jj+0+lda*(ii+0)] = ptr_pA[0+bs*0];
-			A[jj+0+lda*(ii+1)] = ptr_pA[1+bs*0];
-			A[jj+0+lda*(ii+2)] = ptr_pA[2+bs*0];
-			A[jj+0+lda*(ii+3)] = ptr_pA[3+bs*0];
-			// unroll 1
-			A[jj+1+lda*(ii+0)] = ptr_pA[0+bs*1];
-			A[jj+1+lda*(ii+1)] = ptr_pA[1+bs*1];
-			A[jj+1+lda*(ii+2)] = ptr_pA[2+bs*1];
-			A[jj+1+lda*(ii+3)] = ptr_pA[3+bs*1];
-			// unroll 2
-			A[jj+2+lda*(ii+0)] = ptr_pA[0+bs*2];
-			A[jj+2+lda*(ii+1)] = ptr_pA[1+bs*2];
-			A[jj+2+lda*(ii+2)] = ptr_pA[2+bs*2];
-			A[jj+2+lda*(ii+3)] = ptr_pA[3+bs*2];
-			// unroll 3
-			A[jj+3+lda*(ii+0)] = ptr_pA[0+bs*3];
-			A[jj+3+lda*(ii+1)] = ptr_pA[1+bs*3];
-			A[jj+3+lda*(ii+2)] = ptr_pA[2+bs*3];
-			A[jj+3+lda*(ii+3)] = ptr_pA[3+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[jj+lda*ii] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			i=0;
-			for(; i<bs; i++)
-				{
-				A[jj+lda*(i+ii)] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[jj+lda*ii] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector structure into a vector 
-void s_cvt_strvec2vec(int m, struct s_strvec *sa, int ai, float *a)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		a[ii] = pa[ii];
-	return;
-	}
-
-
-
-// cast a matrix into a matrix structure
-void s_cast_mat2strmat(float *A, struct s_strmat *sA)
-	{
-	sA->pA = A;
-	return;
-	}
-
-
-
-// cast a matrix into the diagonal of a matrix structure
-void s_cast_diag_mat2strmat(float *dA, struct s_strmat *sA)
-	{
-	sA->dA = dA;
-	return;
-	}
-
-
-
-// cast a vector into a vector structure
-void s_cast_vec2vecmat(float *a, struct s_strvec *sa)
-	{
-	sa->pa = a;
-	return;
-	}
-
-
-
-// insert element into strmat
-void sgein1_libstr(float a, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	pA[0] = a;
-	return;
-	}
-
-
-
-// extract element from strmat
-float sgeex1_libstr(struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	return pA[0];
-	}
-
-
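The element insert/extract routines above all use the same 4-wide panel-major indexing, pA[ai/bs*bs*sda + ai%bs + aj*bs]. A minimal standalone sketch of that arithmetic (plain C, bs = 4; the panel_index helper and the toy sizes are illustrative assumptions, not part of BLASFEO):

#include <stdio.h>

/* Hypothetical helper: offset of element (i, j) in a 4-wide panel-major
 * buffer whose padded column count is sda, mirroring the address used by
 * sgein1_libstr / sgeex1_libstr above. */
static int panel_index(int i, int j, int sda)
	{
	const int bs = 4;
	return i/bs*bs*sda + i%bs + j*bs;
	}

int main(void)
	{
	const int bs = 4;
	int m = 6, n = 3;
	int pm = (m+bs-1)/bs*bs; // rows padded to a multiple of bs
	int sda = n;             // assume the padded column count cn equals n here
	float pA[pm*sda];        // pm*sda floats back the panel-major buffer
	for(int j=0; j<n; j++)
		for(int i=0; i<m; i++)
			pA[panel_index(i, j, sda)] = 10.0f*i + j;
	printf("%g\n", pA[panel_index(4, 2, sda)]); // prints 42, i.e. element (4, 2)
	return 0;
	}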
-
-// insert element into strvec
-void svecin1_libstr(float a, struct s_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	x[0] = a;
-	return;
-	}
-
-
-
-// extract element from strvec
-float svecex1_libstr(struct s_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	return x[0];
-	}
-
-
-
-// set all elements of a strmat to a value
-void sgese_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai%bs + ai/bs*bs*sda + aj*bs;
-	int m0 = m<(bs-ai%bs)%bs ? m : (bs-ai%bs)%bs;
-	int ii, jj;
-	if(m0>0)
-		{
-		for(ii=0; ii<m0; ii++)
-			{
-			for(jj=0; jj<n; jj++)
-				{
-				pA[jj*bs] = alpha;
-				}
-			pA += 1;
-			}
-		pA += bs*(sda-1);
-		m -= m0;
-		}
-	for(ii=0; ii<m-3; ii+=4)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[0+jj*bs] = alpha;
-			pA[1+jj*bs] = alpha;
-			pA[2+jj*bs] = alpha;
-			pA[3+jj*bs] = alpha;
-			}
-		pA += bs*sda;
-		}
-	for( ; ii<m; ii++)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[jj*bs] = alpha;
-			}
-		pA += 1;
-		}
-	return;
-	}
-
-
-
-// set all elements of a strvec to a value
-void svecse_libstr(int m, float alpha, struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		x[ii] = alpha;
-	return;
-	}
-
-
-
-// extract diagonal to vector
-void sdiaex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	sdiaex_lib(kmax, alpha, ai%bs, pA, sda, x);
-	return;
-	}
-
-
-
-// insert a vector into diagonal
-void sdiain_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	sdiain_lib(kmax, alpha, x, ai%bs, pA, sda);
-	return;
-	}
-
-
-
-// swap two rows of a matrix struct
-void srowsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	srowsw_lib(kmax, pA, pC);
-	return;
-	}
-
-
-
-// permute the rows of a matrix struct
-void srowpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			srowsw_libstr(sA->n, sA, ii, 0, sA, ipiv[ii], 0);
-		}
-	return;
-	}
-
-
-// extract a row into a vector
-void srowex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowex_lib(kmax, alpha, pA, x);
-	return;
-	}
-
-
-
-// insert a vector into a row
-void srowin_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowin_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// add a vector to a row
-void srowad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowad_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// swap two cols of a matrix struct
-void scolsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	scolsw_lib(kmax, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// permute the cols of a matrix struct
-void scolpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			scolsw_libstr(sA->m, sA, 0, ii, sA, 0, ipiv[ii]);
-		}
-	return;
-	}
-
-
-
-// scale a generic strmat
-void sgesc_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgesc_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgesc_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgesc_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgesc_libstr : aj<0 : %d<0 *****\n", aj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgesc_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgesc_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-#endif
-
-	const int bs = 4;
-
-	int mna, ii;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	int offA = ai%bs;
-
-	// same alignment
-	ii = 0;
-	// clean up at the beginning
-	mna = (4-offA)%bs;
-	if(mna>0)
-		{
-		if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-			{
-			if(m==1)
-				{
-				kernel_sgesc_1_lib4(n, &alpha, pA+offA);
-				return;
-				}
-			else //if(m==2 && mna==3)
-				{
-				kernel_sgesc_2_lib4(n, &alpha, pA+offA);
-				return;
-				}
-			}
-		if(mna==1)
-			{
-			kernel_sgesc_1_lib4(n, &alpha, pA+offA);
-			pA += 4*sda;
-			ii += 1;
-			}
-		else if(mna==2)
-			{
-			kernel_sgesc_2_lib4(n, &alpha, pA+offA);
-			pA += 4*sda;
-			ii += 2;
-			}
-		else // if(mna==3)
-			{
-			kernel_sgesc_3_lib4(n, &alpha, pA+offA);
-			pA += 4*sda;
-			ii += 3;
-			}
-		}
-	// main loop
-	for(; ii<m-3; ii+=4)
-		{
-		kernel_sgesc_4_lib4(n, &alpha, pA);
-		pA += 4*sda;
-		}
-	// clean up at the end
-	if(ii<m)
-		{
-		if(m-ii==1)
-			kernel_sgesc_1_lib4(n, &alpha, pA);
-		else if(m-ii==2)
-			kernel_sgesc_2_lib4(n, &alpha, pA);
-		else // if(m-ii==3)
-			kernel_sgesc_3_lib4(n, &alpha, pA);
-		}
-
-	return;
-
-	}
-
-
-
-// copy a generic strmat into a generic strmat
-void sgecp_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgecp_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgecp_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgecp_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgecp_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgecp_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgecp_libstr : bj<0 : %d<0 *****\n", bj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgecp_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgecp_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// B: m x n
-	if(bi+m > sB->m) printf("\n***** sgecp_libstr : bi+m > row(B) : %d+%d > %d *****\n", bi, m, sB->m);
-	if(bj+n > sB->n) printf("\n***** sgecp_libstr : bj+n > col(B) : %d+%d > %d *****\n", bj, n, sB->n);
-#endif
-
-	const int bs = 4;
-
-	int mna, ii;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offA = ai%bs;
-	int offB = bi%bs;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_sgecp_2_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgecp_2_0_lib4(n, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgecp_3_0_lib4(n, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgecp_4_0_lib4(n, pA, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgecp_1_0_lib4(n, pA, pB);
-			else if(m-ii==2)
-				kernel_sgecp_2_0_lib4(n, pA, pB);
-			else // if(m-ii==3)
-				kernel_sgecp_3_0_lib4(n, pA, pB);
-			}
-		}
-	// skip one element of pA
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_sgecp_2_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-				//pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgecp_2_3_lib4(n, pA, sda, pB+2);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgecp_3_2_lib4(n, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_sgecp_4_1_lib4(n, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgecp_1_0_lib4(n, pA+1, pB);
-			else if(m-ii==2)
-				kernel_sgecp_2_0_lib4(n, pA+1, pB);
-			else // if(m-ii==3)
-				kernel_sgecp_3_0_lib4(n, pA+1, pB);
-			}
-		}
-	// skip 2 elements of pA
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_sgecp_2_3_lib4(n, pA, sda, pB+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgecp_1_0_lib4(n, pA+1, pB+3);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgecp_2_0_lib4(n, pA, pB+2);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgecp_3_3_lib4(n, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgecp_4_2_lib4(n, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgecp_1_0_lib4(n, pA+2, pB);
-			else if(m-ii==2)
-				kernel_sgecp_2_0_lib4(n, pA+2, pB);
-			else // if(m-ii==3)
-				kernel_sgecp_3_2_lib4(n, pA, sda, pB);
-			}
-		}
-	// skip 3 elements of pA
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_sgecp_2_0_lib4(n, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgecp_1_0_lib4(n, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgecp_2_0_lib4(n, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgecp_3_0_lib4(n, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgecp_4_3_lib4(n, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgecp_1_0_lib4(n, pA+3, pB);
-			else if(m-ii==2)
-				kernel_sgecp_2_3_lib4(n, pA, sda, pB);
-			else // if(m-ii==3)
-				kernel_sgecp_3_3_lib4(n, pA, sda, pB);
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// scale a strvec
-void svecsc_libstr(int m, float alpha, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pa[ii+0] *= alpha;
-		pa[ii+1] *= alpha;
-		pa[ii+2] *= alpha;
-		pa[ii+3] *= alpha;
-		}
-	for(; ii<m; ii++)
-		{
-		pa[ii+0] *= alpha;
-		}
-	return;
-	}
-
-
-
-// copy a strvec into a strvec
-void sveccp_libstr(int m, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] = pa[ii+0];
-		pc[ii+1] = pa[ii+1];
-		pc[ii+2] = pa[ii+2];
-		pc[ii+3] = pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] = pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// copy a lower triangular strmat into a lower triangular strmat
-void strcp_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offA = ai%bs;
-	int offB = bi%bs;
-
-	int ii, mna;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_strcp_l_2_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_strcp_l_2_0_lib4(ii, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_strcp_l_3_0_lib4(ii, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_strcp_l_4_0_lib4(ii, pA, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_strcp_l_1_0_lib4(ii, pA, pB);
-			else if(m-ii==2)
-				kernel_strcp_l_2_0_lib4(ii, pA, pB);
-			else // if(m-ii==3)
-				kernel_strcp_l_3_0_lib4(ii, pA, pB);
-			}
-		}
-	// skip one element of pA
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_strcp_l_2_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-				//pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_strcp_l_2_3_lib4(ii, pA, sda, pB+2);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_strcp_l_3_2_lib4(ii, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_strcp_l_4_1_lib4(ii, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_strcp_l_1_0_lib4(ii, pA+1, pB);
-			else if(m-ii==2)
-				kernel_strcp_l_2_0_lib4(ii, pA+1, pB);
-			else // if(m-ii==3)
-				kernel_strcp_l_3_0_lib4(ii, pA+1, pB);
-			}
-		}
-	// skip 2 elements of pA
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_strcp_l_2_3_lib4(ii, pA, sda, pB+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_strcp_l_1_0_lib4(ii, pA+1, pB+3);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_strcp_l_2_0_lib4(ii, pA, pB+2);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_strcp_l_3_3_lib4(ii, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_strcp_l_4_2_lib4(ii, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_strcp_l_1_0_lib4(ii, pA+2, pB);
-			else if(m-ii==2)
-				kernel_strcp_l_2_0_lib4(ii, pA+2, pB);
-			else // if(m-ii==3)
-				kernel_strcp_l_3_2_lib4(ii, pA, sda, pB);
-			}
-		}
-	// skip 3 elements of pA
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_strcp_l_2_0_lib4(ii, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_strcp_l_1_0_lib4(ii, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_strcp_l_2_0_lib4(ii, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_strcp_l_3_0_lib4(ii, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_strcp_l_4_3_lib4(ii, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_strcp_l_1_0_lib4(ii, pA+3, pB);
-			else if(m-ii==2)
-				kernel_strcp_l_2_3_lib4(ii, pA, sda, pB);
-			else // if(m-ii==3)
-				kernel_strcp_l_3_3_lib4(ii, pA, sda, pB);
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// scale and add a generic strmat into a generic strmat
-void sgead_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offA = ai%bs;
-	int offB = bi%bs;
-
-	int ii, mna;
-
-	// same alignment
-	if(offA==offB)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_sgead_2_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgead_2_0_lib4(n, &alpha, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgead_3_0_lib4(n, &alpha, pA+offA, pB+offB);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgead_4_0_lib4(n, &alpha, pA, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgead_1_0_lib4(n, &alpha, pA, pB);
-			else if(m-ii==2)
-				kernel_sgead_2_0_lib4(n, &alpha, pA, pB);
-			else // if(m-ii==3)
-				kernel_sgead_3_0_lib4(n, &alpha, pA, pB);
-			}
-		}
-	// skip one element of pA
-	else if(offA==(offB+1)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna) // mna<=3  ==>  m = { 1, 2 }
-				{
-				if(m==1)
-					{
-					kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				else //if(m==2 && mna==3)
-					{
-					kernel_sgead_2_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-				//pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgead_2_3_lib4(n, &alpha, pA, sda, pB+2);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgead_3_2_lib4(n, &alpha, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_sgead_4_1_lib4(n, &alpha, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgead_1_0_lib4(n, &alpha, pA+1, pB);
-			else if(m-ii==2)
-				kernel_sgead_2_0_lib4(n, &alpha, pA+1, pB);
-			else // if(m-ii==3)
-				kernel_sgead_3_0_lib4(n, &alpha, pA+1, pB);
-			}
-		}
-	// skip 2 elements of pA
-	else if(offA==(offB+2)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_sgead_2_3_lib4(n, &alpha, pA, sda, pB+1);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgead_1_0_lib4(n, &alpha, pA+1, pB+3);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgead_2_0_lib4(n, &alpha, pA, pB+2);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgead_3_3_lib4(n, &alpha, pA, sda, pB+1);
-				pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgead_4_2_lib4(n, &alpha, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgead_1_0_lib4(n, &alpha, pA+2, pB);
-			else if(m-ii==2)
-				kernel_sgead_2_0_lib4(n, &alpha, pA+2, pB);
-			else // if(m-ii==3)
-				kernel_sgead_3_2_lib4(n, &alpha, pA, sda, pB);
-			}
-		}
-	// skip 3 elements of pA
-	else // if(offA==(offB+3)%bs)
-		{
-		ii = 0;
-		// clean up at the beginning
-		mna = (4-offB)%bs;
-		if(mna>0)
-			{
-			if(m<mna)
-				{
-				if(m==1)
-					{
-					kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				else // if(m==2 && mna==3)
-					{
-					kernel_sgead_2_0_lib4(n, &alpha, pA+offA, pB+offB);
-					return;
-					}
-				}
-			if(mna==1)
-				{
-				kernel_sgead_1_0_lib4(n, &alpha, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 1;
-				}
-			else if(mna==2)
-				{
-				kernel_sgead_2_0_lib4(n, &alpha, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 2;
-				}
-			else // if(mna==3)
-				{
-				kernel_sgead_3_0_lib4(n, &alpha, pA+offA, pB+offB);
-				// pA += 4*sda;
-				pB += 4*sdb;
-				ii += 3;
-				}
-			}
-		// main loop
-		for(; ii<m-3; ii+=4)
-			{
-			kernel_sgead_4_3_lib4(n, &alpha, pA, sda, pB);
-			pA += 4*sda;
-			pB += 4*sdb;
-			}
-		// clean up at the end
-		if(ii<m)
-			{
-			if(m-ii==1)
-				kernel_sgead_1_0_lib4(n, &alpha, pA+3, pB);
-			else if(m-ii==2)
-				kernel_sgead_2_3_lib4(n, &alpha, pA, sda, pB);
-			else // if(m-ii==3)
-				kernel_sgead_3_3_lib4(n, &alpha, pA, sda, pB);
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// copy and transpose a generic strmat into a generic strmat
-void sgetr_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	sgetr_lib(m, n, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// copy and transpose a lower triangular strmat into an upper triangular strmat
-void strtr_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	strtr_l_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// copy and transpose an upper triangular strmat into a lower triangular strmat
-void strtr_u_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 4;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	strtr_u_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// insert a strvec to diagonal of strmat, sparse formulation 
-void sdiain_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract the diagonal of a strmat to a strvec, sparse formulation 
-void sdiaex_sp_libstr(int kmax, float alpha, int *idx, struct s_strmat *sD, int di, int dj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to diagonal of strmat, sparse formulation 
-void sdiaad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to another strvec and insert to diagonal of strmat, sparse formulation 
-void sdiaadin_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to row of strmat, sparse formulation 
-void srowad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 4;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA + di/bs*bs*sdd + di%bs + dj*bs;
-	srowad_libsp(kmax, idx, alpha, x, pD);
-	return;
-	}
-
-
-
-// adds strvec to strvec, sparse formulation
-void svecad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sy, int yi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	svecad_libsp(kmax, idx, alpha, x, y);
-	return;
-	}
-
-
-
-void svecin_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] = alpha * x[ii];
-	return;
-	}
-
-
-
-void svecex_sp_libstr(int m, float alpha, int *idx, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[ii] = alpha * x[idx[ii]];
-	return;
-	}
-
-
-
-void svecnrm_inf_libstr(int m, struct s_strvec *sx, int xi, float *ptr_norm)
-	{
-	int ii;
-	float *x = sx->pa + xi;
-	float norm = 0.0;
-	for(ii=0; ii<m; ii++)
-		norm = fmax(norm, fabs(x[ii]));
-	*ptr_norm = norm;
-	return;
-	}
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
diff --git a/third_party/blasfeo/auxiliary/s_aux_lib8.c b/third_party/blasfeo/auxiliary/s_aux_lib8.c
deleted file mode 100644
index 94ba22d..0000000
--- a/third_party/blasfeo/auxiliary/s_aux_lib8.c
+++ /dev/null
@@ -1,2647 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_block_size.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-// copies a lower triangular packed matrix into a lower triangular packed matrix
-void strcp_l_lib(int m, int offsetA, float *A, int sda, int offsetB, float *B, int sdb)
-	{
-	printf("\nstrcp_l_lib: feature not implemented yet\n");
-	exit(1);
-	}
-
-
-
-// scales and adds a strvec into a strvec
-void svecad_libstr(int m, float alpha, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		pc[ii+1] += alpha*pa[ii+1];
-		pc[ii+2] += alpha*pa[ii+2];
-		pc[ii+3] += alpha*pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] += alpha*pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// transpose lower triangular matrix
-void strtr_l_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-	printf("\nstrtr_l_lib: feature not implemented yet\n");
-	exit(1);
-	}
-
-
-
-// transpose an aligned upper triangular matrix into an aligned lower triangular matrix
-void strtr_u_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-	printf("\nstrtr_u_lib: feature not implemented yet\n");
-	exit(1);
-	}
-
-
-
-// regularize diagonal 
-void sdiareg_lib(int kmax, float reg, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	float *pD2;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += reg;
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		kmax -= kna;
-		}
-	pD2 = pD;
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD2[0+0*bs] += reg;
-		pD2[1+1*bs] += reg;
-		pD2[2+2*bs] += reg;
-		pD2[3+3*bs] += reg;
-		pD2[4+4*bs] += reg;
-		pD2[5+5*bs] += reg;
-		pD2[6+6*bs] += reg;
-		pD2[7+7*bs] += reg;
-		pD2 += bs*sdd+bs*bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += reg;
-		}
-	
-	}
-
-
-
-// insert vector to diagonal 
-void sdiain_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	float *pD2, *x2;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] = alpha*x[ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	pD2 = pD;
-	x2 = x;
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD2[0+bs*0] = alpha*x2[0];
-		pD2[1+bs*1] = alpha*x2[1];
-		pD2[2+bs*2] = alpha*x2[2];
-		pD2[3+bs*3] = alpha*x2[3];
-		pD2[4+bs*4] = alpha*x2[4];
-		pD2[5+bs*5] = alpha*x2[5];
-		pD2[6+bs*6] = alpha*x2[6];
-		pD2[7+bs*7] = alpha*x2[7];
-		pD2 += bs*sdd+bs*bs;
-		x2 += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] = alpha*x[jj+ll];
-		}
-	
-	}
-
-
-
-// insert sqrt of vector to diagonal 
-void sdiain_sqrt_lib(int kmax, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	float *pD2, *x2;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] = sqrt(x[ll]);
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	pD2 = pD;
-	x2 = x;
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD2[0+bs*0] = sqrt(x2[0]);
-		pD2[1+bs*1] = sqrt(x2[1]);
-		pD2[2+bs*2] = sqrt(x2[2]);
-		pD2[3+bs*3] = sqrt(x2[3]);
-		pD2[4+bs*4] = sqrt(x2[4]);
-		pD2[5+bs*5] = sqrt(x2[5]);
-		pD2[6+bs*6] = sqrt(x2[6]);
-		pD2[7+bs*7] = sqrt(x2[7]);
-		pD2 += bs*sdd+bs*bs;
-		x2 += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] = sqrt(x[jj+ll]);
-		}
-	
-	}
-
-
-
-// extract diagonal to vector 
-void sdiaex_lib(int kmax, float alpha, int offset, float *pD, int sdd, float *x)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	float *pD2, *x2;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			x[ll] = alpha * pD[ll+bs*ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	pD2 = pD;
-	x2 = x;
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		x2[0] = alpha * pD2[0+bs*0];
-		x2[1] = alpha * pD2[1+bs*1];
-		x2[2] = alpha * pD2[2+bs*2];
-		x2[3] = alpha * pD2[3+bs*3];
-		x2[4] = alpha * pD2[4+bs*4];
-		x2[5] = alpha * pD2[5+bs*5];
-		x2[6] = alpha * pD2[6+bs*6];
-		x2[7] = alpha * pD2[7+bs*7];
-		pD2 += bs*sdd+bs*bs;
-		x2 += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[jj+ll] = alpha * pD[jj*sdd+(jj+ll)*bs+ll];
-		}
-	
-	}
-
-
-
-// add scaled vector to diagonal 
-void sdiaad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	float *pD2, *x2;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll+bs*ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1) + kna*bs;
-		x  += kna;
-		kmax -= kna;
-		}
-	pD2 = pD;
-	x2 = x;
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD2[0+bs*0] += alpha * x2[0];
-		pD2[1+bs*1] += alpha * x2[1];
-		pD2[2+bs*2] += alpha * x2[2];
-		pD2[3+bs*3] += alpha * x2[3];
-		pD2[4+bs*4] += alpha * x2[4];
-		pD2[5+bs*5] += alpha * x2[5];
-		pD2[6+bs*6] += alpha * x2[6];
-		pD2[7+bs*7] += alpha * x2[7];
-		pD2 += bs*sdd+bs*bs;
-		x2 += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[jj*sdd+(jj+ll)*bs+ll] += alpha * x[jj+ll];
-		}
-	return;
-	}
-
-
-
-// insert vector to diagonal, sparse formulation 
-void sdiain_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract diagonal to vector, sparse formulation 
-void sdiaex_libsp(int kmax, int *idx, float alpha, float *pD, int sdd, float *x)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[ii/bs*bs*sdd+ii%bs+ii*bs];
-		}
-	return;
-	}
-
-
-
-// add scaled vector to diagonal, sparse formulation 
-void sdiaad_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled vector to another vector and insert to diagonal, sparse formulation 
-void sdiaadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs+ii*bs] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// insert vector to row 
-void srowin_lib(int kmax, float alpha, float *x, float *pD)
-	{
-	
-	const int bs = 8;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[0*bs] = alpha * x[0];
-		pD[1*bs] = alpha * x[1];
-		pD[2*bs] = alpha * x[2];
-		pD[3*bs] = alpha * x[3];
-		pD += 4*bs;
-		x += 4;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[ll*bs] = alpha*x[ll];
-		}
-	return;
-	}
-
-
-
-// extract row to vector
-void srowex_lib(int kmax, float alpha, float *pD, float *x)
-	{
-	
-	const int bs = 8;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		x[0] = alpha * pD[0*bs];
-		x[1] = alpha * pD[1*bs];
-		x[2] = alpha * pD[2*bs];
-		x[3] = alpha * pD[3*bs];
-		pD += 4*bs;
-		x += 4;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		x[ll] = alpha*pD[ll*bs];
-		}
-	return;
-	}
-
-
-
-// add scaled vector to row 
-void srowad_lib(int kmax, float alpha, float *x, float *pD)
-	{
-
-	const int bs = 8;
-
-	int jj, ll;
-
-	for(jj=0; jj<kmax-3; jj+=4)
-		{
-		pD[0*bs] += alpha * x[0];
-		pD[1*bs] += alpha * x[1];
-		pD[2*bs] += alpha * x[2];
-		pD[3*bs] += alpha * x[3];
-		pD += 4*bs;
-		x += 4;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[ll*bs] += alpha * x[ll];
-		}
-	return;
-	}
-
-
-
-// insert vector to row, sparse formulation 
-void srowin_libsp(int kmax, float alpha, int *idx, float *x, float *pD)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = alpha*x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled vector to row, sparse formulation 
-void srowad_libsp(int kmax, int *idx, float alpha, float *x, float *pD)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled vector to another vector and insert to row, sparse formulation 
-void srowadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii*bs] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// swap two rows
-void srowsw_lib(int kmax, float *pA, float *pC)
-	{
-
-	const int bs = 8;
-
-	int ii;
-	float tmp;
-
-	for(ii=0; ii<kmax-3; ii+=4)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		tmp = pA[0+bs*1];
-		pA[0+bs*1] = pC[0+bs*1];
-		pC[0+bs*1] = tmp;
-		tmp = pA[0+bs*2];
-		pA[0+bs*2] = pC[0+bs*2];
-		pC[0+bs*2] = tmp;
-		tmp = pA[0+bs*3];
-		pA[0+bs*3] = pC[0+bs*3];
-		pC[0+bs*3] = tmp;
-		pA += 4*bs;
-		pC += 4*bs;
-		}
-	for( ; ii<kmax; ii++)
-		{
-		tmp = pA[0+bs*0];
-		pA[0+bs*0] = pC[0+bs*0];
-		pC[0+bs*0] = tmp;
-		pA += 1*bs;
-		pC += 1*bs;
-		}
-	return;
-	}
-
-
-
-// insert vector to column 
-void scolin_lib(int kmax, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] = x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD[0] = x[0];
-		pD[1] = x[1];
-		pD[2] = x[2];
-		pD[3] = x[3];
-		pD[4] = x[4];
-		pD[5] = x[5];
-		pD[6] = x[6];
-		pD[7] = x[7];
-		pD += bs*sdd;
-		x += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[ll] = x[ll];
-		}
-	
-	}
-
-
-
-// add scaled vector to column 
-void scolad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int kna = (bs-offset%bs)%bs;
-	kna = kmax<kna ? kmax : kna;
-
-	int jj, ll;
-
-	if(kna>0)
-		{
-		for(ll=0; ll<kna; ll++)
-			{
-			pD[ll] += alpha * x[ll];
-			}
-		pD += kna + bs*(sdd-1);
-		x  += kna;
-		kmax -= kna;
-		}
-	for(jj=0; jj<kmax-7; jj+=8)
-		{
-		pD[0] += alpha * x[0];
-		pD[1] += alpha * x[1];
-		pD[2] += alpha * x[2];
-		pD[3] += alpha * x[3];
-		pD[4] += alpha * x[4];
-		pD[5] += alpha * x[5];
-		pD[6] += alpha * x[6];
-		pD[7] += alpha * x[7];
-		pD += bs*sdd;
-		x += bs;
-		}
-	for(ll=0; ll<kmax-jj; ll++)
-		{
-		pD[ll] += alpha * x[ll];
-		}
-	
-	}
-
-
-
-// insert vector to column, sparse formulation
-void scolin_libsp(int kmax, int *idx, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] = x[jj];
-		}
-	
-	}
-
-
-
-// add scaled vector to column, sparse formulation
-void scolad_libsp(int kmax, float alpha, int *idx, float *x, float *pD, int sdd)
-	{
-
-	const int bs = 8;
-
-	int ii, jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[ii/bs*bs*sdd+ii%bs] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-// swaps two cols
-void scolsw_lib(int kmax, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc)
-	{
-
-	const int bs = 8;
-
-	int ii;
-
-	float tmp;
-
-	if(offsetA==offsetC)
-		{
-		if(offsetA>0)
-			{
-			ii = 0;
-			for(; ii<bs-offsetA; ii++)
-				{
-				tmp = pA[0+bs*0];
-				pA[0+bs*0] = pC[0+bs*0];
-				pC[0+bs*0] = tmp;
-				pA += 1;
-				pC += 1;
-				}
-			pA += bs*(sda-1);
-			pC += bs*(sdc-1);
-			kmax -= bs-offsetA;
-			}
-		ii = 0;
-		for(; ii<kmax-7; ii+=8)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			tmp = pA[1+bs*0];
-			pA[1+bs*0] = pC[1+bs*0];
-			pC[1+bs*0] = tmp;
-			tmp = pA[2+bs*0];
-			pA[2+bs*0] = pC[2+bs*0];
-			pC[2+bs*0] = tmp;
-			tmp = pA[3+bs*0];
-			pA[3+bs*0] = pC[3+bs*0];
-			pC[3+bs*0] = tmp;
-			tmp = pA[4+bs*0];
-			pA[4+bs*0] = pC[4+bs*0];
-			pC[4+bs*0] = tmp;
-			tmp = pA[5+bs*0];
-			pA[5+bs*0] = pC[5+bs*0];
-			pC[5+bs*0] = tmp;
-			tmp = pA[6+bs*0];
-			pA[6+bs*0] = pC[6+bs*0];
-			pC[6+bs*0] = tmp;
-			tmp = pA[7+bs*0];
-			pA[7+bs*0] = pC[7+bs*0];
-			pC[7+bs*0] = tmp;
-			pA += bs*sda;
-			pC += bs*sdc;
-			}
-		for(; ii<kmax; ii++)
-			{
-			tmp = pA[0+bs*0];
-			pA[0+bs*0] = pC[0+bs*0];
-			pC[0+bs*0] = tmp;
-			pA += 1;
-			pC += 1;
-			}
-		}
-	else
-		{
-		printf("\nscolsw: feature not implemented yet: offsetA!=offsetC\n\n");
-		exit(1);
-		}
-
-	return;
-
-	}
-
-
-
-// insert vector to vector, sparse formulation
-void svecin_libsp(int kmax, int *idx, float *x, float *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] = x[jj];
-		}
-	
-	}
-
-
-
-// adds vector to vector, sparse formulation
-void svecad_libsp(int kmax, int *idx, float alpha, float *x, float *y)
-	{
-
-	int jj;
-
-	for(jj=0; jj<kmax; jj++)
-		{
-		y[idx[jj]] += alpha * x[jj];
-		}
-	
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// return the memory size (in bytes) needed for a strmat
-int s_size_strmat(int m, int n)
-	{
-	const int bs = 8;
-	int nc = S_NC;
-	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = (pm*cn+tmp)*sizeof(float);
-	return memory_size;
-	}
-
-
-
-// return the memory size (in bytes) needed for the diagonal of a strmat
-int s_size_diag_strmat(int m, int n)
-	{
-	const int bs = 8;
-	int nc = S_NC;
-	int al = bs*nc;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	int memory_size = tmp*sizeof(float);
-	return memory_size;
-	}
-
-
-
-// create a matrix structure for a matrix of size m*n by using memory passed by a pointer
-void s_create_strmat(int m, int n, struct s_strmat *sA, void *memory)
-	{
-	const int bs = 8;
-	int nc = S_NC;
-	int al = bs*nc;
-	sA->m = m;
-	sA->n = n;
-	int pm = (m+bs-1)/bs*bs;
-	int cn = (n+nc-1)/nc*nc;
-	sA->pm = pm;
-	sA->cn = cn;
-	float *ptr = (float *) memory;
-	sA->pA = ptr;
-	ptr += pm*cn;
-	int tmp = m<n ? (m+al-1)/al*al : (n+al-1)/al*al; // al(min(m,n)) // XXX max ???
-	sA->dA = ptr;
-	ptr += tmp;
-	sA->use_dA = 0;
-	sA->memory_size = (pm*cn+tmp)*sizeof(float);
-	return;
-	}
-
-
-
-// return memory size (in bytes) needed for a strvec
-int s_size_strvec(int m)
-	{
-	const int bs = 8;
-//	int nc = S_NC;
-//	int al = bs*nc;
-	int pm = (m+bs-1)/bs*bs;
-	int memory_size = pm*sizeof(float);
-	return memory_size;
-	}
-
-
-
-// create a vector structure for a vector of size m by using memory passed by a pointer
-void s_create_strvec(int m, struct s_strvec *sa, void *memory)
-	{
-	const int bs = 8;
-//	int nc = S_NC;
-//	int al = bs*nc;
-	sa->m = m;
-	int pm = (m+bs-1)/bs*bs;
-	sa->pm = pm;
-	float *ptr = (float *) memory;
-	sa->pa = ptr;
-//	ptr += pm;
-	sa->memory_size = pm*sizeof(float);
-	return;
-	}
-
-
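A brief usage sketch of the sizing/creation pair above (s_size_strmat / s_create_strmat), assuming the declarations come from the BLASFEO headers used elsewhere in this file and that malloc alignment is adequate for the target kernels:

#include <stdlib.h>

#include "../include/blasfeo_common.h" // struct s_strmat, as included above
#include "../include/blasfeo_s_aux.h"  // assumed header declaring s_size_strmat / s_create_strmat

// Allocate backing memory for an m-by-n single-precision strmat, map the
// structure onto it, and release it again once the s_* routines are done.
void example_alloc_strmat(int m, int n)
	{
	struct s_strmat sA;
	void *mem = malloc(s_size_strmat(m, n));
	s_create_strmat(m, n, &sA, mem);
	// ... use sA with the conversion / copy routines above ...
	free(mem);
	}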
-
-// convert a matrix into a matrix structure
-void s_cvt_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, jj, m0, m1, m2;
-	float *B, *pB;
-	m0 = (bs-ai%bs)%bs;
-	if(m0>m)
-		m0 = m;
-	m1 = m - m0;
-	jj = 0;
-	for( ; jj<n-3; jj+=4)
-		{
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				pB[ii+bs*1] = B[ii+lda*1];
-				pB[ii+bs*2] = B[ii+lda*2];
-				pB[ii+bs*3] = B[ii+lda*3];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-		for( ; ii<m-7; ii+=8)
-			{
-			// unroll 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			pB[4+bs*0] = B[4+lda*0];
-			pB[5+bs*0] = B[5+lda*0];
-			pB[6+bs*0] = B[6+lda*0];
-			pB[7+bs*0] = B[7+lda*0];
-			// unroll 1
-			pB[0+bs*1] = B[0+lda*1];
-			pB[1+bs*1] = B[1+lda*1];
-			pB[2+bs*1] = B[2+lda*1];
-			pB[3+bs*1] = B[3+lda*1];
-			pB[4+bs*1] = B[4+lda*1];
-			pB[5+bs*1] = B[5+lda*1];
-			pB[6+bs*1] = B[6+lda*1];
-			pB[7+bs*1] = B[7+lda*1];
-			// unroll 2
-			pB[0+bs*2] = B[0+lda*2];
-			pB[1+bs*2] = B[1+lda*2];
-			pB[2+bs*2] = B[2+lda*2];
-			pB[3+bs*2] = B[3+lda*2];
-			pB[4+bs*2] = B[4+lda*2];
-			pB[5+bs*2] = B[5+lda*2];
-			pB[6+bs*2] = B[6+lda*2];
-			pB[7+bs*2] = B[7+lda*2];
-			// unroll 3
-			pB[0+bs*3] = B[0+lda*3];
-			pB[1+bs*3] = B[1+lda*3];
-			pB[2+bs*3] = B[2+lda*3];
-			pB[3+bs*3] = B[3+lda*3];
-			pB[4+bs*3] = B[4+lda*3];
-			pB[5+bs*3] = B[5+lda*3];
-			pB[6+bs*3] = B[6+lda*3];
-			pB[7+bs*3] = B[7+lda*3];
-			// update
-			B  += 8;
-			pB += bs*sda;
-			}
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// col 1
-			pB[0+bs*1] = B[0+lda*1];
-			// col 2
-			pB[0+bs*2] = B[0+lda*2];
-			// col 3
-			pB[0+bs*3] = B[0+lda*3];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	for( ; jj<n; jj++)
-		{
-
-		B  =  A + jj*lda;
-		pB = pA + jj*bs;
-
-		ii = 0;
-		if(m0>0)
-			{
-			for( ; ii<m0; ii++)
-				{
-				pB[ii+bs*0] = B[ii+lda*0];
-				}
-			B  += m0;
-			pB += m0 + bs*(sda-1);
-			}
-		for( ; ii<m-7; ii+=8)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			pB[1+bs*0] = B[1+lda*0];
-			pB[2+bs*0] = B[2+lda*0];
-			pB[3+bs*0] = B[3+lda*0];
-			pB[4+bs*0] = B[4+lda*0];
-			pB[5+bs*0] = B[5+lda*0];
-			pB[6+bs*0] = B[6+lda*0];
-			pB[7+bs*0] = B[7+lda*0];
-			// update
-			B  += 8;
-			pB += bs*sda;
-			}
-		for( ; ii<m; ii++)
-			{
-			// col 0
-			pB[0+bs*0] = B[0+lda*0];
-			// update
-			B  += 1;
-			pB += 1;
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix into a matrix structure
-void s_cvt_tran_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, j, m0, m1, m2;
-	float *B, *pB;
-	m0 = (bs-ai%bs)%bs;
-	if(m0>n)
-		m0 = n;
-	m1 = n - m0;
-	ii = 0;
-	if(m0>0)
-		{
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m0; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		A  += m0*lda;
-		pA += m0 + bs*(sda-1);
-		}
-	ii = 0;
-	for(; ii<m1-7; ii+=bs)
-		{
-		j=0;
-		B  = A + ii*lda;
-		pB = pA + ii*sda;
-		for(; j<m-3; j+=4)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			pB[4+0*bs] = B[0+4*lda];
-			pB[5+0*bs] = B[0+5*lda];
-			pB[6+0*bs] = B[0+6*lda];
-			pB[7+0*bs] = B[0+7*lda];
-			// unroll 1
-			pB[0+1*bs] = B[1+0*lda];
-			pB[1+1*bs] = B[1+1*lda];
-			pB[2+1*bs] = B[1+2*lda];
-			pB[3+1*bs] = B[1+3*lda];
-			pB[4+1*bs] = B[1+4*lda];
-			pB[5+1*bs] = B[1+5*lda];
-			pB[6+1*bs] = B[1+6*lda];
-			pB[7+1*bs] = B[1+7*lda];
-			// unroll 2
-			pB[0+2*bs] = B[2+0*lda];
-			pB[1+2*bs] = B[2+1*lda];
-			pB[2+2*bs] = B[2+2*lda];
-			pB[3+2*bs] = B[2+3*lda];
-			pB[4+2*bs] = B[2+4*lda];
-			pB[5+2*bs] = B[2+5*lda];
-			pB[6+2*bs] = B[2+6*lda];
-			pB[7+2*bs] = B[2+7*lda];
-			// unroll 3
-			pB[0+3*bs] = B[3+0*lda];
-			pB[1+3*bs] = B[3+1*lda];
-			pB[2+3*bs] = B[3+2*lda];
-			pB[3+3*bs] = B[3+3*lda];
-			pB[4+3*bs] = B[3+4*lda];
-			pB[5+3*bs] = B[3+5*lda];
-			pB[6+3*bs] = B[3+6*lda];
-			pB[7+3*bs] = B[3+7*lda];
-			B  += 4;
-			pB += 4*bs;
-			}
-		for(; j<m; j++)
-			{
-			// unroll 0
-			pB[0+0*bs] = B[0+0*lda];
-			pB[1+0*bs] = B[0+1*lda];
-			pB[2+0*bs] = B[0+2*lda];
-			pB[3+0*bs] = B[0+3*lda];
-			pB[4+0*bs] = B[0+4*lda];
-			pB[5+0*bs] = B[0+5*lda];
-			pB[6+0*bs] = B[0+6*lda];
-			pB[7+0*bs] = B[0+7*lda];
-			B  += 1;
-			pB += 1*bs;
-			}
-		}
-	if(ii<m1)
-		{
-		m2 = m1-ii;
-		if(bs<m2) m2 = bs;
-		for(j=0; j<m; j++)
-			{
-			for(i=0; i<m2; i++)
-				{
-				pA[i+j*bs+ii*sda] = A[j+(i+ii)*lda];
-				}
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector into a vector structure
-void s_cvt_vec2strvec(int m, float *a, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		pa[ii] = a[ii];
-	return;
-	}
-
-
-
-// convert a matrix structure into a matrix
-void s_cvt_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	float *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		// TODO update A !!!!!
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[0+ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			A[1+ii+lda*(jj+0)] = ptr_pA[1+bs*0];
-			A[2+ii+lda*(jj+0)] = ptr_pA[2+bs*0];
-			A[3+ii+lda*(jj+0)] = ptr_pA[3+bs*0];
-			A[4+ii+lda*(jj+0)] = ptr_pA[4+bs*0];
-			A[5+ii+lda*(jj+0)] = ptr_pA[5+bs*0];
-			A[6+ii+lda*(jj+0)] = ptr_pA[6+bs*0];
-			A[7+ii+lda*(jj+0)] = ptr_pA[7+bs*0];
-			// unroll 1
-			A[0+ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			A[1+ii+lda*(jj+1)] = ptr_pA[1+bs*1];
-			A[2+ii+lda*(jj+1)] = ptr_pA[2+bs*1];
-			A[3+ii+lda*(jj+1)] = ptr_pA[3+bs*1];
-			A[4+ii+lda*(jj+1)] = ptr_pA[4+bs*1];
-			A[5+ii+lda*(jj+1)] = ptr_pA[5+bs*1];
-			A[6+ii+lda*(jj+1)] = ptr_pA[6+bs*1];
-			A[7+ii+lda*(jj+1)] = ptr_pA[7+bs*1];
-			// unroll 2
-			A[0+ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			A[1+ii+lda*(jj+2)] = ptr_pA[1+bs*2];
-			A[2+ii+lda*(jj+2)] = ptr_pA[2+bs*2];
-			A[3+ii+lda*(jj+2)] = ptr_pA[3+bs*2];
-			A[4+ii+lda*(jj+2)] = ptr_pA[4+bs*2];
-			A[5+ii+lda*(jj+2)] = ptr_pA[5+bs*2];
-			A[6+ii+lda*(jj+2)] = ptr_pA[6+bs*2];
-			A[7+ii+lda*(jj+2)] = ptr_pA[7+bs*2];
-			// unroll 3
-			A[0+ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			A[1+ii+lda*(jj+3)] = ptr_pA[1+bs*3];
-			A[2+ii+lda*(jj+3)] = ptr_pA[2+bs*3];
-			A[3+ii+lda*(jj+3)] = ptr_pA[3+bs*3];
-			A[4+ii+lda*(jj+3)] = ptr_pA[4+bs*3];
-			A[5+ii+lda*(jj+3)] = ptr_pA[5+bs*3];
-			A[6+ii+lda*(jj+3)] = ptr_pA[6+bs*3];
-			A[7+ii+lda*(jj+3)] = ptr_pA[7+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[ii+lda*(jj+1)] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[ii+lda*(jj+2)] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[ii+lda*(jj+3)] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[ii+lda*jj] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			A[0+ii+lda*(jj+0)] = ptr_pA[0+bs*0];
-			A[1+ii+lda*(jj+0)] = ptr_pA[1+bs*0];
-			A[2+ii+lda*(jj+0)] = ptr_pA[2+bs*0];
-			A[3+ii+lda*(jj+0)] = ptr_pA[3+bs*0];
-			A[4+ii+lda*(jj+0)] = ptr_pA[4+bs*0];
-			A[5+ii+lda*(jj+0)] = ptr_pA[5+bs*0];
-			A[6+ii+lda*(jj+0)] = ptr_pA[6+bs*0];
-			A[7+ii+lda*(jj+0)] = ptr_pA[7+bs*0];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[ii+lda*jj] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert and transpose a matrix structure into a matrix
-void s_cvt_tran_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	int i, ii, jj;
-	int m0 = (bs-ai%bs)%bs;
-	float *ptr_pA;
-	jj=0;
-	for(; jj<n-3; jj+=4)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				// unroll 0
-				A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-				// unroll 1
-				A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-				// unroll 2
-				A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-				// unroll 3
-				A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		// TODO update A !!!!!
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			// unroll 0
-			A[jj+0+lda*(ii+0)] = ptr_pA[0+bs*0];
-			A[jj+0+lda*(ii+1)] = ptr_pA[1+bs*0];
-			A[jj+0+lda*(ii+2)] = ptr_pA[2+bs*0];
-			A[jj+0+lda*(ii+3)] = ptr_pA[3+bs*0];
-			A[jj+0+lda*(ii+4)] = ptr_pA[4+bs*0];
-			A[jj+0+lda*(ii+5)] = ptr_pA[5+bs*0];
-			A[jj+0+lda*(ii+6)] = ptr_pA[6+bs*0];
-			A[jj+0+lda*(ii+7)] = ptr_pA[7+bs*0];
-			// unroll 1
-			A[jj+1+lda*(ii+0)] = ptr_pA[0+bs*1];
-			A[jj+1+lda*(ii+1)] = ptr_pA[1+bs*1];
-			A[jj+1+lda*(ii+2)] = ptr_pA[2+bs*1];
-			A[jj+1+lda*(ii+3)] = ptr_pA[3+bs*1];
-			A[jj+1+lda*(ii+4)] = ptr_pA[4+bs*1];
-			A[jj+1+lda*(ii+5)] = ptr_pA[5+bs*1];
-			A[jj+1+lda*(ii+6)] = ptr_pA[6+bs*1];
-			A[jj+1+lda*(ii+7)] = ptr_pA[7+bs*1];
-			// unroll 2
-			A[jj+2+lda*(ii+0)] = ptr_pA[0+bs*2];
-			A[jj+2+lda*(ii+1)] = ptr_pA[1+bs*2];
-			A[jj+2+lda*(ii+2)] = ptr_pA[2+bs*2];
-			A[jj+2+lda*(ii+3)] = ptr_pA[3+bs*2];
-			A[jj+2+lda*(ii+4)] = ptr_pA[4+bs*2];
-			A[jj+2+lda*(ii+5)] = ptr_pA[5+bs*2];
-			A[jj+2+lda*(ii+6)] = ptr_pA[6+bs*2];
-			A[jj+2+lda*(ii+7)] = ptr_pA[7+bs*2];
-			// unroll 3
-			A[jj+3+lda*(ii+0)] = ptr_pA[0+bs*3];
-			A[jj+3+lda*(ii+1)] = ptr_pA[1+bs*3];
-			A[jj+3+lda*(ii+2)] = ptr_pA[2+bs*3];
-			A[jj+3+lda*(ii+3)] = ptr_pA[3+bs*3];
-			A[jj+3+lda*(ii+4)] = ptr_pA[4+bs*3];
-			A[jj+3+lda*(ii+5)] = ptr_pA[5+bs*3];
-			A[jj+3+lda*(ii+6)] = ptr_pA[6+bs*3];
-			A[jj+3+lda*(ii+7)] = ptr_pA[7+bs*3];
-			ptr_pA += sda*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			// unroll 0
-			A[jj+0+lda*ii] = ptr_pA[0+bs*0];
-			// unroll 1
-			A[jj+1+lda*ii] = ptr_pA[0+bs*1];
-			// unroll 2
-			A[jj+2+lda*ii] = ptr_pA[0+bs*2];
-			// unroll 3
-			A[jj+3+lda*ii] = ptr_pA[0+bs*3];
-			ptr_pA++;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ptr_pA = pA + jj*bs;
-		ii = 0;
-		if(m0>0)
-			{
-			for(; ii<m0; ii++)
-				{
-				A[jj+lda*ii] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m-bs+1; ii+=bs)
-			{
-			i=0;
-			// TODO update A !!!!!
-			// TODO unroll !!!!!!
-			for(; i<bs; i++)
-				{
-				A[jj+lda*(i+ii)] = ptr_pA[0];
-				ptr_pA++;
-				}
-			ptr_pA += (sda-1)*bs;
-			}
-		for(; ii<m; ii++)
-			{
-			A[jj+lda*ii] = ptr_pA[0];
-			ptr_pA++;
-			}
-		}
-	return;
-	}
-
-
-
-// convert a vector structure into a vector 
-void s_cvt_strvec2vec(int m, struct s_strvec *sa, int ai, float *a)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		a[ii] = pa[ii];
-	return;
-	}
-
-
-
-// cast a matrix into a matrix structure
-void s_cast_mat2strmat(float *A, struct s_strmat *sA)
-	{
-	sA->pA = A;
-	return;
-	}
-
-
-
-// cast a matrix into the diagonal of a matrix structure
-void s_cast_diag_mat2strmat(float *dA, struct s_strmat *sA)
-	{
-	sA->dA = dA;
-	return;
-	}
-
-
-
-// cast a vector into a vector structure
-void s_cast_vec2vecmat(float *a, struct s_strvec *sa)
-	{
-	sa->pa = a;
-	return;
-	}
-
-
-
-// insert element into strmat
-void sgein1_libstr(float a, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	pA[0] = a;
-	return;
-	}
-
-
-
-// extract element from strmat
-float sgeex1_libstr(struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	return pA[0];
-	}
-
-
-
-// insert element into strvec
-void svecin1_libstr(float a, struct s_strvec *sx, int xi)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	x[0] = a;
-	return;
-	}
-
-
-
-// extract element from strvec
-float svecex1_libstr(struct s_strvec *sx, int xi)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	return x[0];
-	}
-
-
-
-// set all elements of a strmat to a value
-void sgese_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai%bs + ai/bs*bs*sda + aj*bs;
-	int m0 = m<(bs-ai%bs)%bs ? m : (bs-ai%bs)%bs;
-	int ii, jj;
-	if(m0>0)
-		{
-		for(ii=0; ii<m0; ii++)
-			{
-			for(jj=0; jj<n; jj++)
-				{
-				pA[jj*bs] = alpha;
-				}
-			pA += 1;
-			}
-		pA += bs*(sda-1);
-		m -= m0;
-		}
-	for(ii=0; ii<m-7; ii+=8)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[0+jj*bs] = alpha;
-			pA[1+jj*bs] = alpha;
-			pA[2+jj*bs] = alpha;
-			pA[3+jj*bs] = alpha;
-			pA[4+jj*bs] = alpha;
-			pA[5+jj*bs] = alpha;
-			pA[6+jj*bs] = alpha;
-			pA[7+jj*bs] = alpha;
-			}
-		pA += bs*sda;
-		}
-	for( ; ii<m; ii++)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			pA[jj*bs] = alpha;
-			}
-		pA += 1;
-		}
-	return;
-	}
-
-
-
-// set all elements of a strvec to a value
-void svecse_libstr(int m, float alpha, struct s_strvec *sx, int xi)
-	{
-	float *x = sx->pa + xi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		x[ii] = alpha;
-	return;
-	}
-
-
-
-// extract diagonal to vector
-void sdiaex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	sdiaex_lib(kmax, alpha, ai%bs, pA, sda, x);
-	return;
-	}
-
-
-
-// insert a vector into diagonal
-void sdiain_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	sdiain_lib(kmax, alpha, x, ai%bs, pA, sda);
-	return;
-	}
-
-
-
-// swap two rows of a matrix struct
-void srowsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	srowsw_lib(kmax, pA, pC);
-	return;
-	}
-
-
-
-// permute the rows of a matrix struct
-void srowpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			srowsw_libstr(sA->n, sA, ii, 0, sA, ipiv[ii], 0);
-		}
-	return;
-	}
-
-
-// extract a row into a vector
-void srowex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowex_lib(kmax, alpha, pA, x);
-	return;
-	}
-
-
-
-// insert a vector into a row
-void srowin_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowin_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// add a vector to a row
-void srowad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	float *x = sx->pa + xi;
-	srowad_lib(kmax, alpha, x, pA);
-	return;
-	}
-
-
-
-// swap two cols of a matrix struct
-void scolsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	scolsw_lib(kmax, ai%bs, pA, sda, ci%bs, pC, sdc);
-	return;
-	}
-
-
-
-// permute the cols of a matrix struct
-void scolpe_libstr(int kmax, int *ipiv, struct s_strmat *sA)
-	{
-	int ii;
-	for(ii=0; ii<kmax; ii++)
-		{
-		if(ipiv[ii]!=ii)
-			scolsw_libstr(sA->m, sA, 0, ii, sA, 0, ipiv[ii]);
-		}
-	return;
-	}
-
-
-
-// scale a generic strmat
-void sgesc_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj)
-	{
-
-	// early return
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgesc_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgesc_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgesc_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgesc_libstr : aj<0 : %d<0 *****\n", aj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgesc_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgesc_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	int offsetA = ai%bs;
-
-	int ii, mna;
-
-	if(offsetA>0)
-		{
-		mna = bs-offsetA;
-		mna = m<mna ? m : mna;
-		kernel_sgesc_8_gen_lib8(n, &alpha, &pA[offsetA], mna);
-		m -= mna;
-		pA += 8*sda;
-		}
-	ii = 0;
-	for( ; ii<m-7; ii+=8)
-		{
-		kernel_sgesc_8_lib8(n, &alpha, &pA[0]);
-		pA += 8*sda;
-		}
-	if(ii<m)
-		{
-		kernel_sgesc_8_gen_lib8(n, &alpha, &pA[0], m-ii);
-		}
-
-	return;
-
-	}
-
-
-
-// copy a generic strmat into a generic strmat
-void sgecp_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	// early return
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgecp_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgecp_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgecp_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgecp_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgecp_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgecp_libstr : bj<0 : %d<0 *****\n", bj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgecp_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgecp_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// B: m x n
-	if(bi+m > sB->m) printf("\n***** sgecp_libstr : bi+m > row(B) : %d+%d > %d *****\n", bi, m, sB->m);
-	if(bj+n > sB->n) printf("\n***** sgecp_libstr : bj+n > col(B) : %d+%d > %d *****\n", bj, n, sB->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offsetA = ai%bs;
-	int offsetB = bi%bs;
-
-	int ii, mna;
-
-#if 1
-	if(offsetB>0)
-		{
-		if(offsetB>offsetA)
-			{
-			mna = bs-offsetB;
-			mna = m<mna ? m : mna;
-			kernel_sgecp_8_0_gen_lib8(n, &pA[offsetA], &pB[offsetB], mna);
-			m -= mna;
-			//pA += 8*sda;
-			pB += 8*sdb;
-			}
-		else
-			{
-			if(offsetA==0)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_0_gen_lib8(n, &pA[0], &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==1)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_1_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==2)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_2_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==3)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_3_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==4)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_4_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==5)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_5_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==6)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_6_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==7)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgecp_8_7_gen_lib8(n, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			}
-		}
-#endif
-
-	// same alignment
-	if(offsetA==offsetB)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_0_lib8(n, pA, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_0_gen_lib8(n, pA, pB, m-ii);
-			}
-		return;
-		}
-	// XXX different alignment: search tree ???
-	// skip one element of A
-	else if(offsetA==(offsetB+1)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_1_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_1_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		}
-	// skip two elements of A
-	else if(offsetA==(offsetB+2)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_2_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_2_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip three elements of A
-	else if(offsetA==(offsetB+3)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_3_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_3_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip four elements of A
-	else if(offsetA==(offsetB+4)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_4_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_4_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip five elements of A
-	else if(offsetA==(offsetB+5)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_5_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_5_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip six elements of A
-	else if(offsetA==(offsetB+6)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_6_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_6_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip seven elements of A
-	else //if(offsetA==(offsetB+7)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgecp_8_7_lib8(n, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgecp_8_7_gen_lib8(n, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	
-	return;
-
-	}
-
-
-
-// scale a strvec
-void svecsc_libstr(int m, float alpha, struct s_strvec *sa, int ai)
-	{
-	float *pa = sa->pa + ai;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pa[ii+0] *= alpha;
-		pa[ii+1] *= alpha;
-		pa[ii+2] *= alpha;
-		pa[ii+3] *= alpha;
-		}
-	for(; ii<m; ii++)
-		{
-		pa[ii+0] *= alpha;
-		}
-	return;
-	}
-
-
-
-// copy a strvec into a strvec
-void sveccp_libstr(int m, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci)
-	{
-	float *pa = sa->pa + ai;
-	float *pc = sc->pa + ci;
-	int ii;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		pc[ii+0] = pa[ii+0];
-		pc[ii+1] = pa[ii+1];
-		pc[ii+2] = pa[ii+2];
-		pc[ii+3] = pa[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		pc[ii+0] = pa[ii+0];
-		}
-	return;
-	}
-
-
-
-// copy a lower triangular strmat into a lower triangular strmat
-void strcp_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	strcp_l_lib(m, ai%bs, pA, sda, ci%bs, pC, sdc);
-	// XXX uses full matrix copy !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-//	sgecp_libstr(m, m, sA, ai, aj, sC, ci, cj);
-	return;
-	}
-
-
-
-// scale and add a generic strmat into a generic strmat
-void sgead_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	// early return
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgead_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgead_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgead_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgead_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgead_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgead_libstr : bj<0 : %d<0 *****\n", bj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgead_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgead_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// B: m x n
-	if(bi+m > sB->m) printf("\n***** sgead_libstr : bi+m > row(B) : %d+%d > %d *****\n", bi, m, sB->m);
-	if(bj+n > sB->n) printf("\n***** sgead_libstr : bj+n > col(B) : %d+%d > %d *****\n", bj, n, sB->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offsetA = ai%bs;
-	int offsetB = bi%bs;
-
-	int ii, mna;
-
-#if 1
-	if(offsetB>0)
-		{
-		if(offsetB>offsetA)
-			{
-			mna = bs-offsetB;
-			mna = m<mna ? m : mna;
-			kernel_sgead_8_0_gen_lib8(n, &alpha, &pA[offsetA], &pB[offsetB], mna);
-			m -= mna;
-			//pA += 8*sda;
-			pB += 8*sdb;
-			}
-		else
-			{
-			if(offsetA==0)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_0_gen_lib8(n, &alpha, &pA[0], &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==1)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_1_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==2)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_2_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==3)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_3_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==4)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_4_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==5)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_5_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==6)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_6_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			else if(offsetA==7)
-				{
-				mna = bs-offsetB;
-				mna = m<mna ? m : mna;
-				kernel_sgead_8_7_gen_lib8(n, &alpha, &pA[0], sda, &pB[offsetB], mna);
-				m -= mna;
-				pA += 8*sda;
-				pB += 8*sdb;
-				}
-			}
-		}
-#endif
-
-	// same alignment
-	if(offsetA==offsetB)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_0_lib8(n, &alpha, pA, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_0_gen_lib8(n, &alpha, pA, pB, m-ii);
-			}
-		return;
-		}
-	// XXX different alignment: search tree ???
-	// skip one element of A
-	else if(offsetA==(offsetB+1)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_1_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_1_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		}
-	// skip two elements of A
-	else if(offsetA==(offsetB+2)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_2_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_2_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip three elements of A
-	else if(offsetA==(offsetB+3)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_3_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_3_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip four elements of A
-	else if(offsetA==(offsetB+4)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_4_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_4_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip five elements of A
-	else if(offsetA==(offsetB+5)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_5_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_5_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip six elements of A
-	else if(offsetA==(offsetB+6)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_6_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_6_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	// skip seven elements of A
-	else //if(offsetA==(offsetB+7)%bs)
-		{
-		ii = 0;
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_sgead_8_7_lib8(n, &alpha, pA, sda, pB);
-			pA += 8*sda;
-			pB += 8*sdb;
-			}
-		if(ii<m)
-			{
-			kernel_sgead_8_7_gen_lib8(n, &alpha, pA, sda, pB, m-ii);
-			}
-		return;
-		}
-	
-	return;
-
-	}
-
-
-
-// copy and transpose a generic strmat into a generic strmat
-void sgetr_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj)
-	{
-
-	// early return
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgetr_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgetr_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgetr_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgetr_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgetr_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgetr_libstr : bj<0 : %d<0 *****\n", bj);
-	// inside matrix
-	// A: m x n
-	if(ai+m > sA->m) printf("\n***** sgetr_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** sgetr_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// B: n x m
-	if(bi+n > sB->m) printf("\n***** sgetr_libstr : bi+n > row(B) : %d+%d > %d *****\n", bi, n, sB->m);
-	if(bj+m > sB->n) printf("\n***** sgetr_libstr : bj+m > col(B) : %d+%d > %d *****\n", bj, m, sB->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + aj*bs;
-	float *pB = sB->pA + bi/bs*bs*sdb + bj*bs;
-	int offsetA = ai%bs;
-	int offsetB = bi%bs;
-
-	int ii, nna;
-
-	if(offsetA==0)
-		{
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_0_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for(ii=0; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_0_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_0_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	// TODO log search for offsetA>0 ???
-	else if(offsetA==1)
-		{
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_1_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for(ii=0; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_1_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_1_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==2)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_2_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_2_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_2_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==3)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_3_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_3_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_3_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==4)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_4_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_4_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_4_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==5)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_5_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_5_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_5_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==6)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_6_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_6_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_6_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-	else if(offsetA==7)
-		{
-		ii = 0;
-		if(offsetB>0)
-			{
-			nna = bs-offsetB;
-			nna = n<nna ? n : nna;
-			kernel_sgetr_8_7_gen_lib8(m, &pA[0], sda, &pB[offsetB], nna);
-			n -= nna;
-			pA += nna*bs;
-			pB += 8*sdb;
-			}
-		for( ; ii<n-7; ii+=8)
-			{
-			kernel_sgetr_8_7_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb]);
-			}
-		if(ii<n)
-			{
-			kernel_sgetr_8_7_gen_lib8(m, &pA[ii*bs], sda, &pB[ii*sdb], n-ii);
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// copy and transpose a lower triangular strmat into an upper triangular strmat
-void strtr_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	strtr_l_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// copy and transpose an upper triangular strmat into a lower triangular strmat
-void strtr_u_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj)
-	{
-	const int bs = 8;
-	int sda = sA->cn;
-	float *pA = sA->pA + ai/bs*bs*sda + ai%bs + aj*bs;
-	int sdc = sC->cn;
-	float *pC = sC->pA + ci/bs*bs*sdc + ci%bs + cj*bs;
-	strtr_u_lib(m, 1.0, ai%bs, pA, sda, ci%bs, pC, sdc); // TODO remove alpha !!!
-	return;
-	}
-
-
-
-// insert a strvec to diagonal of strmat, sparse formulation 
-void sdiain_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// extract the diagonal of a strmat to a strvec, sparse formulation 
-void sdiaex_sp_libstr(int kmax, float alpha, int *idx, struct s_strmat *sD, int di, int dj, struct s_strvec *sx, int xi)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		x[jj] = alpha * pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to diagonal of strmat, sparse formulation 
-void sdiaad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] += alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to another strvec and insert to diagonal of strmat, sparse formulation 
-void sdiaadin_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	int sdd = sD->cn;
-	float *pD = sD->pA;
-	int ii, jj;
-	for(jj=0; jj<kmax; jj++)
-		{
-		ii = idx[jj];
-		pD[(ii+di)/bs*bs*sdd+(ii+di)%bs+(ii+dj)*bs] = y[jj] + alpha * x[jj];
-		}
-	return;
-	}
-
-
-
-// add scaled strvec to row of strmat, sparse formulation 
-void srowad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj)
-	{
-	const int bs = 8;
-	float *x = sx->pa + xi;
-	int sdd = sD->cn;
-	float *pD = sD->pA + di/bs*bs*sdd + di%bs + dj*bs;
-	srowad_libsp(kmax, idx, alpha, x, pD);
-	return;
-	}
-
-
-
-// adds strvec to strvec, sparse formulation
-void svecad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sy, int yi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	svecad_libsp(kmax, idx, alpha, x, y);
-	return;
-	}
-
-
-
-void svecin_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[idx[ii]] = alpha * x[ii];
-	return;
-	}
-
-
-
-void svecex_sp_libstr(int m, float alpha, int *idx, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-	int ii;
-	for(ii=0; ii<m; ii++)
-		z[ii] = alpha * x[idx[ii]];
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
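The routines deleted above all address elements through the same panel-major formula: with panel height bs and panel stride sda, element (ai, aj) of a strmat lives at offset ai/bs*bs*sda + ai%bs + aj*bs. A minimal standalone sketch of that index computation (hypothetical helper name, not part of BLASFEO) is:

/* Panel-major addressing used by the lib8 routines removed above:
 * rows are grouped into panels of bs=8; each panel holds sda columns
 * of bs consecutive floats, so a full panel occupies bs*sda floats. */
#include <stdio.h>

static int panel_offset(int i, int j, int bs, int sda)
	{
	return i/bs*bs*sda + i%bs + j*bs;
	}

int main(void)
	{
	const int bs = 8, sda = 16; /* hypothetical panel stride */
	/* element (10, 3): 10/8*8*16 + 10%8 + 3*8 = 128 + 2 + 24 = 154 */
	printf("%d\n", panel_offset(10, 3, bs, sda));
	return 0;
	}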
diff --git a/third_party/blasfeo/auxiliary/v_aux_ext_dep_lib.c b/third_party/blasfeo/auxiliary/v_aux_ext_dep_lib.c
deleted file mode 100644
index 3bf5f90..0000000
--- a/third_party/blasfeo/auxiliary/v_aux_ext_dep_lib.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#if 0
-#include <malloc.h>
-#endif
-
-
-
-/* creates a zero matrix given the size in bytes */
-void v_zeros(void **ptrA, int size)
-	{
-	*ptrA = (void *) malloc(size);
-	char *A = *ptrA;
-	int i;
-	for(i=0; i<size; i++) A[i] = 0;
-	}
-
-
-
-/* creates a zero matrix aligned to a cache line given the size in bytes */
-void v_zeros_align(void **ptrA, int size)
-	{
-#if defined(OS_WINDOWS)
-	*ptrA = _aligned_malloc( size, 64 );
-#else
-	int err = posix_memalign(ptrA, 64, size);
-	if(err!=0)
-		{
-		printf("Memory allocation error");
-		exit(1);
-		}
-#endif
-	char *A = *ptrA;
-	int i;
-	for(i=0; i<size; i++) A[i] = 0;
-	}
-
-
-
-/* frees matrix */
-void v_free(void *pA)
-	{
-	free( pA );
-	}
-
-
-
-/* frees aligned matrix */
-void v_free_align(void *pA)
-	{
-#if defined(OS_WINDOWS)
-	_aligned_free( pA );
-#else
-	free( pA );
-#endif
-	}
-
-
-
-/* creates a zero matrix given the size in bytes */
-void c_zeros(char **ptrA, int size)
-	{
-	*ptrA = malloc(size);
-	char *A = *ptrA;
-	int i;
-	for(i=0; i<size; i++) A[i] = 0;
-	}
-
-
-
-/* creates a zero matrix aligned to a cache line given the size in bytes */
-void c_zeros_align(char **ptrA, int size)
-	{
-#if defined(OS_WINDOWS)
-	*ptrA = _aligned_malloc( size, 64 );
-#else
-	void *temp;
-	int err = posix_memalign(&temp, 64, size);
-	if(err!=0)
-		{
-		printf("Memory allocation error");
-		exit(1);
-		}
-	*ptrA = temp;
-#endif
-	char *A = *ptrA;
-	int i;
-	for(i=0; i<size; i++) A[i] = 0;
-	}
-
-
-
-/* frees matrix */
-void c_free(char *pA)
-	{
-	free( pA );
-	}
-
-
-
-/* frees aligned matrix */
-void c_free_align(char *pA)
-	{
-#if defined(OS_WINDOWS)
-	_aligned_free( pA );
-#else
-	free( pA );
-#endif
-	}
-
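The allocators deleted above pair posix_memalign (or _aligned_malloc on Windows) with a matching aligned free. A usage sketch against the removed prototypes (the driver function is hypothetical; link against BLASFEO):

/* Sketch: pairing the removed aligned allocators from
 * v_aux_ext_dep_lib.c above. */
#include <stddef.h>

void v_zeros_align(void **ptrA, int size);
void v_free_align(void *pA);

int example(void) /* hypothetical driver, not part of BLASFEO */
	{
	void *buf;
	v_zeros_align(&buf, 64 * (int) sizeof(double)); /* zeroed, 64-byte aligned */
	/* ... use buf as scratch space ... */
	v_free_align(buf); /* must pair with the aligned allocator */
	return 0;
	}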
diff --git a/third_party/blasfeo/blas/Makefile b/third_party/blasfeo/blas/Makefile
deleted file mode 100644
index 304b448..0000000
--- a/third_party/blasfeo/blas/Makefile
+++ /dev/null
@@ -1,88 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../Makefile.rule
-
-OBJS =
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib8.o s_blas2_lib8.o s_blas2_diag_lib.o s_blas3_lib8.o s_blas3_diag_lib8.o s_lapack_lib8.o
-endif
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib8.o s_blas2_lib8.o s_blas2_diag_lib.o s_blas3_lib8.o s_blas3_diag_lib8.o s_lapack_lib8.o
-endif
-ifeq ($(TARGET), X64_INTEL_CORE)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib4.o s_blas2_lib4.o s_blas2_diag_lib.o s_blas3_lib4.o s_blas3_diag_lib4.o s_lapack_lib4.o
-endif
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib4.o s_blas2_lib4.o s_blas2_diag_lib.o s_blas3_lib4.o s_blas3_diag_lib4.o s_lapack_lib4.o
-endif
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib4.o s_blas2_lib4.o s_blas2_diag_lib.o s_blas3_lib4.o s_blas3_diag_lib4.o s_lapack_lib4.o
-endif
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib4.o s_blas2_lib4.o s_blas2_diag_lib.o s_blas3_lib4.o s_blas3_diag_lib4.o s_lapack_lib4.o
-endif
-ifeq ($(TARGET), GENERIC)
-OBJS += d_blas1_lib4.o d_blas2_lib4.o d_blas2_diag_lib.o d_blas3_lib4.o d_blas3_diag_lib4.o d_lapack_lib4.o
-OBJS += s_blas1_lib4.o s_blas2_lib4.o s_blas2_diag_lib.o s_blas3_lib4.o s_blas3_diag_lib4.o s_lapack_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-OBJS += d_blas1_lib.o d_blas2_lib.o d_blas2_diag_lib.o d_blas3_lib.o d_blas3_diag_lib.o d_lapack_lib.o
-OBJS += s_blas1_lib.o s_blas2_lib.o s_blas2_diag_lib.o s_blas3_lib.o s_blas3_diag_lib.o s_lapack_lib.o
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
-d_blas1_lib.o: d_blas1_lib.c x_blas1_lib.c
-s_blas1_lib.o: s_blas1_lib.c x_blas1_lib.c
-d_blas2_lib.o: d_blas2_lib.c x_blas2_lib.c
-s_blas2_lib.o: s_blas2_lib.c x_blas2_lib.c
-d_blas2_diag_lib.o: d_blas2_diag_lib.c x_blas2_diag_lib.c
-s_blas2_diag_lib.o: s_blas2_diag_lib.c x_blas2_diag_lib.c
-d_blas3_lib.o: d_blas3_lib.c x_blas3_lib.c
-s_blas3_lib.o: s_blas3_lib.c x_blas3_lib.c
-d_blas3_diag_lib.o: d_blas3_diag_lib.c x_blas3_diag_lib.c
-s_blas3_diag_lib.o: s_blas3_diag_lib.c x_blas3_diag_lib.c
-d_lapack_lib.o: d_lapack_lib.c x_lapack_lib.c
-s_lapack_lib.o: s_lapack_lib.c x_lapack_lib.c
diff --git a/third_party/blasfeo/blas/d_blas.h b/third_party/blasfeo/blas/d_blas.h
deleted file mode 100644
index fc5058b..0000000
--- a/third_party/blasfeo/blas/d_blas.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// headers to reference BLAS and LAPACK routines employed in BLASFEO WR
-
-// level 1
-void dcopy_(int *m, double *x, int *incx, double *y, int *incy);
-void daxpy_(int *m, double *alpha, double *x, int *incx, double *y, int *incy);
-void dscal_(int *m, double *alpha, double *x, int *incx);
-
-// level 2
-void dgemv_(char *ta, int *m, int *n, double *alpha, double *A, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
-void dsymv_(char *uplo, int *m, double *alpha, double *A, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
-void dtrmv_(char *uplo, char *trans, char *diag, int *n, double *A, int *lda, double *x, int *incx);
-void dtrsv_(char *uplo, char *trans, char *diag, int *n, double *A, int *lda, double *x, int *incx);
-void dger_(int *m, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *A, int *lda);
-
-// level 3
-void dgemm_(char *ta, char *tb, int *m, int *n, int *k, double *alpha, double *A, int *lda, double *B, int *ldb, double *beta, double *C, int *ldc);
-void dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha, double *A, int *lda, double *beta, double *C, int *ldc);
-void dtrmm_(char *side, char *uplo, char *trans, char *diag, int *m, int *n, double *alpha, double *A, int *lda, double *B, int *ldb);
-void dtrsm_(char *side, char *uplo, char *trans, char *diag, int *m, int *n, double *alpha, double *A, int *lda, double *B, int *ldb);
-
-// lapack
-int dpotrf_(char *uplo, int *m, double *A, int *lda, int *info);
-int dgetrf_(int *m, int *n, double *A, int *lda, int *ipiv, int *info);
-void dgeqrf_(int *m, int *n, double *A, int *lda, double *tau, double *work, int *lwork, int *info);
-void dgeqr2_(int *m, int *n, double *A, int *lda, double *tau, double *work, int *info);
-void dgelqf_(int *m, int *n, double *A, int *lda, double *tau, double *work, int *lwork, int *info);
-
-
-
-#ifdef __cplusplus
-}
-#endif
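The prototypes above follow the Fortran calling convention: every argument is passed by pointer and matrices are column-major. A minimal sketch of driving dgemm_ from C under those declarations (link against any BLAS providing the underscored symbols):

/* Sketch: calling the Fortran-convention dgemm_ declared above.
 * Computes C = 1.0*A*B + 0.0*C for 2x2 column-major matrices. */
#include <stdio.h>

void dgemm_(char *ta, char *tb, int *m, int *n, int *k, double *alpha,
            double *A, int *lda, double *B, int *ldb,
            double *beta, double *C, int *ldc);

int main(void)
	{
	double A[4] = {1, 2, 3, 4}; /* column-major: [1 3; 2 4] */
	double B[4] = {5, 6, 7, 8}; /* column-major: [5 7; 6 8] */
	double C[4] = {0, 0, 0, 0};
	int n = 2;
	double one = 1.0, zero = 0.0;
	dgemm_("N", "N", &n, &n, &n, &one, A, &n, B, &n, &zero, C, &n);
	printf("%g %g %g %g\n", C[0], C[1], C[2], C[3]); /* 23 34 31 46 */
	return 0;
	}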
diff --git a/third_party/blasfeo/blas/d_blas1_lib.c b/third_party/blasfeo/blas/d_blas1_lib.c
deleted file mode 100644
index 1fd19d3..0000000
--- a/third_party/blasfeo/blas/d_blas1_lib.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#include "d_blas.h"
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#define REAL double
-
-#define STRMAT d_strmat
-#define STRVEC d_strvec
-
-#define AXPY_LIBSTR daxpy_libstr
-#define VECMULDOT_LIBSTR dvecmuldot_libstr
-#define DOT_LIBSTR ddot_libstr
-
-#define AXPY daxpy_
-#define COPY dcopy_
-
-
-#include "x_blas1_lib.c"
diff --git a/third_party/blasfeo/blas/d_blas1_lib4.c b/third_party/blasfeo/blas/d_blas1_lib4.c
deleted file mode 100644
index a4155a9..0000000
--- a/third_party/blasfeo/blas/d_blas1_lib4.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-void daxpy_libstr(int m, double alpha, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	double *z = sz->pa + zi;
-
-	int ii;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	__m256d
-		v_alpha, v_tmp,
-		v_x0, v_y0,
-		v_x1, v_y1;
-#endif
-
-	ii = 0;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	v_alpha = _mm256_broadcast_sd( &alpha );
-	for( ; ii<m-7; ii+=8)
-		{
-		v_x0  = _mm256_loadu_pd( &x[ii+0] );
-		v_x1  = _mm256_loadu_pd( &x[ii+4] );
-		v_y0  = _mm256_loadu_pd( &y[ii+0] );
-		v_y1  = _mm256_loadu_pd( &y[ii+4] );
-#if defined(TARGET_X64_INTEL_HASWELL)
-		v_y0  = _mm256_fmadd_pd( v_alpha, v_x0, v_y0 );
-		v_y1  = _mm256_fmadd_pd( v_alpha, v_x1, v_y1 );
-#else // sandy bridge
-		v_tmp = _mm256_mul_pd( v_alpha, v_x0 );
-		v_y0  = _mm256_add_pd( v_tmp, v_y0 );
-		v_tmp = _mm256_mul_pd( v_alpha, v_x1 );
-		v_y1  = _mm256_add_pd( v_tmp, v_y1 );
-#endif
-		_mm256_storeu_pd( &z[ii+0], v_y0 );
-		_mm256_storeu_pd( &z[ii+4], v_y1 );
-		}
-	for( ; ii<m-3; ii+=4)
-		{
-		v_x0  = _mm256_loadu_pd( &x[ii] );
-		v_y0  = _mm256_loadu_pd( &y[ii] );
-#if defined(TARGET_X64_INTEL_HASWELL)
-		v_y0  = _mm256_fmadd_pd( v_alpha, v_x0, v_y0 );
-#else // sandy bridge
-		v_tmp = _mm256_mul_pd( v_alpha, v_x0 );
-		v_y0  = _mm256_add_pd( v_tmp, v_y0 );
-#endif
-		_mm256_storeu_pd( &z[ii], v_y0 );
-		}
-#else
-	for( ; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-#endif
-	for( ; ii<m; ii++)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		}
-
-	return;
-	}
-
-
-
-// multiply two vectors and compute dot product
-double dvecmuldot_libstr(int m, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return 0.0;
-
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	double *z = sz->pa + zi;
-	int ii;
-	double dot = 0.0;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	__m128d
-		u_tmp, u_dot;
-	__m256d
-		v_tmp,
-		v_x0, v_y0, v_z0;
-	
-	v_tmp = _mm256_setzero_pd();
-#endif
-
-	ii = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; ii<m-3; ii+=4)
-		{
-		v_x0 = _mm256_loadu_pd( &x[ii+0] );
-		v_y0 = _mm256_loadu_pd( &y[ii+0] );
-		v_z0 = _mm256_mul_pd( v_x0, v_y0 );
-		_mm256_storeu_pd( &z[ii+0], v_z0 );
-		v_tmp = _mm256_add_pd( v_tmp, v_z0 );
-		}
-#endif
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		dot += z[ii+0];
-		}
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	// dot product
-	u_tmp = _mm_add_pd( _mm256_castpd256_pd128( v_tmp ), _mm256_extractf128_pd( v_tmp, 0x1 ) );
-	u_tmp = _mm_hadd_pd( u_tmp, u_tmp);
-	u_dot = _mm_load_sd( &dot );
-	u_dot = _mm_add_sd( u_dot, u_tmp );
-	_mm_store_sd( &dot, u_dot );
-#endif
-	return dot;
-	}
-
-
-
-// compute dot product of two vectors
-double ddot_libstr(int m, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi)
-	{
-
-	if(m<=0)
-		return 0.0;
-
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	int ii;
-	double dot = 0.0;
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	__m128d
-		u_dot0, u_x0, u_y0, u_tmp;
-	__m256d
-		v_dot0, v_dot1, v_x0, v_x1, v_y0, v_y1, v_tmp;
-	
-	v_dot0 = _mm256_setzero_pd();
-	v_dot1 = _mm256_setzero_pd();
-	u_dot0 = _mm_setzero_pd();
-
-	ii = 0;
-	for(; ii<m-7; ii+=8)
-		{
-		v_x0 = _mm256_loadu_pd( &x[ii+0] );
-		v_x1 = _mm256_loadu_pd( &x[ii+4] );
-		v_y0 = _mm256_loadu_pd( &y[ii+0] );
-		v_y1 = _mm256_loadu_pd( &y[ii+4] );
-#if defined(TARGET_X64_INTEL_HASWELL)
-		v_dot0  = _mm256_fmadd_pd( v_x0, v_y0, v_dot0 );
-		v_dot1  = _mm256_fmadd_pd( v_x1, v_y1, v_dot1 );
-#else // sandy bridge
-		v_tmp = _mm256_mul_pd( v_x0, v_y0 );
-		v_dot0 = _mm256_add_pd( v_dot0, v_tmp );
-		v_tmp = _mm256_mul_pd( v_x1, v_y1 );
-		v_dot1 = _mm256_add_pd( v_dot1, v_tmp );
-#endif
-		}
-	for(; ii<m-3; ii+=4)
-		{
-		v_x0 = _mm256_loadu_pd( &x[ii+0] );
-		v_y0 = _mm256_loadu_pd( &y[ii+0] );
-#if defined(TARGET_X64_INTEL_HASWELL)
-		v_dot0  = _mm256_fmadd_pd( v_x0, v_y0, v_dot0 );
-#else // sandy bridge
-		v_tmp = _mm256_mul_pd( v_x0, v_y0 );
-		v_dot0 = _mm256_add_pd( v_dot0, v_tmp );
-#endif
-		}
-	for(; ii<m; ii++)
-		{
-		u_x0 = _mm_load_sd( &x[ii+0] );
-		u_y0 = _mm_load_sd( &y[ii+0] );
-#if defined(TARGET_X64_INTEL_HASWELL)
-		u_dot0  = _mm_fmadd_sd( u_x0, u_y0, u_dot0 );
-#else // sandy bridge
-		u_tmp = _mm_mul_sd( u_x0, u_y0 );
-		u_dot0 = _mm_add_sd( u_dot0, u_tmp );
-#endif
-		}
-	// reduce
-	v_dot0 = _mm256_add_pd( v_dot0, v_dot1 );
-	u_tmp = _mm_add_pd( _mm256_castpd256_pd128( v_dot0 ), _mm256_extractf128_pd( v_dot0, 0x1 ) );
-	u_tmp = _mm_hadd_pd( u_tmp, u_tmp);
-	u_dot0 = _mm_add_sd( u_dot0, u_tmp );
-	_mm_store_sd( &dot, u_dot0 );
-#else // no haswell, no sandy bridge
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		dot += x[ii+0] * y[ii+0];
-		dot += x[ii+1] * y[ii+1];
-		dot += x[ii+2] * y[ii+2];
-		dot += x[ii+3] * y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		dot += x[ii+0] * y[ii+0];
-		}
-#endif // haswell, sandy bridge
-	return dot;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/blas/d_blas2_diag_lib.c b/third_party/blasfeo/blas/d_blas2_diag_lib.c
deleted file mode 100644
index 8bc3f68..0000000
--- a/third_party/blasfeo/blas/d_blas2_diag_lib.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#define REAL double
-
-#define STRVEC d_strvec
-
-#define GEMV_DIAG_LIBSTR dgemv_diag_libstr
-
-
-
-#include "x_blas2_diag_lib.c"
diff --git a/third_party/blasfeo/blas/d_blas2_lib.c b/third_party/blasfeo/blas/d_blas2_lib.c
deleted file mode 100644
index 9c39fe2..0000000
--- a/third_party/blasfeo/blas/d_blas2_lib.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#include "d_blas.h"
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_aux.h"
-
-
-
-#define REAL double
-
-#define STRMAT d_strmat
-#define STRVEC d_strvec
-
-#define GEMV_N_LIBSTR dgemv_n_libstr
-#define GEMV_NT_LIBSTR dgemv_nt_libstr
-#define GEMV_T_LIBSTR dgemv_t_libstr
-#define SYMV_L_LIBSTR dsymv_l_libstr
-#define TRMV_LNN_LIBSTR dtrmv_lnn_libstr
-#define TRMV_LTN_LIBSTR dtrmv_ltn_libstr
-#define TRMV_UNN_LIBSTR dtrmv_unn_libstr
-#define TRMV_UTN_LIBSTR dtrmv_utn_libstr
-#define TRSV_LNN_LIBSTR dtrsv_lnn_libstr
-#define TRSV_LNN_MN_LIBSTR dtrsv_lnn_mn_libstr
-#define TRSV_LNU_LIBSTR dtrsv_lnu_libstr
-#define TRSV_LTN_LIBSTR dtrsv_ltn_libstr
-#define TRSV_LTN_MN_LIBSTR dtrsv_ltn_mn_libstr
-#define TRSV_LTU_LIBSTR dtrsv_ltu_libstr
-#define TRSV_UNN_LIBSTR dtrsv_unn_libstr
-#define TRSV_UTN_LIBSTR dtrsv_utn_libstr
-
-#define COPY dcopy_
-#define GEMV dgemv_
-#define SYMV dsymv_
-#define TRMV dtrmv_
-#define TRSV dtrsv_
-
-
-
-#include "x_blas2_lib.c"
diff --git a/third_party/blasfeo/blas/d_blas2_lib4.c b/third_party/blasfeo/blas/d_blas2_lib4.c
deleted file mode 100644
index cab8e3c..0000000
--- a/third_party/blasfeo/blas/d_blas2_lib4.c
+++ /dev/null
@@ -1,1060 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_aux.h"
-
-
-
-void dtrsv_ln_inv_lib(int m, int n, double *pA, int sda, double *inv_diag_A, double *x, double *y)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	// suppose m>=n
-	if(m<n)
-		m = n;
-
-	const int bs = 4;
-
-	double alpha = -1.0;
-	double beta = 1.0;
-
-	int i;
-
-	if(x!=y)
-		{
-		for(i=0; i<m; i++)
-			y[i] = x[i];
-		}
-	
-	i = 0;
-	for( ; i<n-3; i+=4)
-		{
-		kernel_dtrsv_ln_inv_4_lib4(i, &pA[i*sda], &inv_diag_A[i], y, &y[i], &y[i]);
-		}
-	if(i<n)
-		{
-		kernel_dtrsv_ln_inv_4_vs_lib4(i, &pA[i*sda], &inv_diag_A[i], y, &y[i], &y[i], m-i, n-i);
-		i+=4;
-		}
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	for( ; i<m-7; i+=8)
-		{
-		kernel_dgemv_n_8_lib4(n, &alpha, &pA[i*sda], sda, y, &beta, &y[i], &y[i]);
-		}
-	if(i<m-3)
-		{
-		kernel_dgemv_n_4_lib4(n, &alpha, &pA[i*sda], y, &beta, &y[i], &y[i]);
-		i+=4;
-		}
-#else
-	for( ; i<m-3; i+=4)
-		{
-		kernel_dgemv_n_4_lib4(n, &alpha, &pA[i*sda], y, &beta, &y[i], &y[i]);
-		}
-#endif
-	if(i<m)
-		{
-		kernel_dgemv_n_4_gen_lib4(n, &alpha, &pA[i*sda], y, &beta, &y[i], &y[i], 0, m-i);
-		i+=4;
-		}
-
-	}
-
-
-
-void dtrsv_lt_inv_lib(int m, int n, double *pA, int sda, double *inv_diag_A, double *x, double *y)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	if(n>m)
-		n = m;
-	
-	const int bs = 4;
-	
-	int i;
-	
-	if(x!=y)
-		for(i=0; i<m; i++)
-			y[i] = x[i];
-			
-	i=0;
-	if(n%4==1)
-		{
-		kernel_dtrsv_lt_inv_1_lib4(m-n+i+1, &pA[n/bs*bs*sda+(n-i-1)*bs], sda, &inv_diag_A[n-i-1], &y[n-i-1], &y[n-i-1], &y[n-i-1]);
-		i++;
-		}
-	else if(n%4==2)
-		{
-		kernel_dtrsv_lt_inv_2_lib4(m-n+i+2, &pA[n/bs*bs*sda+(n-i-2)*bs], sda, &inv_diag_A[n-i-2], &y[n-i-2], &y[n-i-2], &y[n-i-2]);
-		i+=2;
-		}
-	else if(n%4==3)
-		{
-		kernel_dtrsv_lt_inv_3_lib4(m-n+i+3, &pA[n/bs*bs*sda+(n-i-3)*bs], sda, &inv_diag_A[n-i-3], &y[n-i-3], &y[n-i-3], &y[n-i-3]);
-		i+=3;
-		}
-	for(; i<n-3; i+=4)
-		{
-		kernel_dtrsv_lt_inv_4_lib4(m-n+i+4, &pA[(n-i-4)/bs*bs*sda+(n-i-4)*bs], sda, &inv_diag_A[n-i-4], &y[n-i-4], &y[n-i-4], &y[n-i-4]);
-		}
-
-	}
-
-
-
-void dgemv_nt_lib(int m, int n, double alpha_n, double alpha_t, double *pA, int sda, double *x_n, double *x_t, double beta_n, double beta_t, double *y_n, double *y_t, double *z_n, double *z_t)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-	const int bs = 4;
-
-	int ii;
-
-	// copy and scale y_n into z_n
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		z_n[ii+1] = beta_n*y_n[ii+1];
-		z_n[ii+2] = beta_n*y_n[ii+2];
-		z_n[ii+3] = beta_n*y_n[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		}
-	
-	ii = 0;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; ii<n-5; ii+=6)
-		{
-		kernel_dgemv_nt_6_lib4(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii);
-		}
-#endif
-	for(; ii<n-3; ii+=4)
-		{
-		kernel_dgemv_nt_4_lib4(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii);
-		}
-	if(ii<n)
-		{
-		kernel_dgemv_nt_4_vs_lib4(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii, n-ii);
-		}
-	
-	return;
-
-	}
-
-
-	
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-void dgemv_n_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<0)
-		return;
-
-	const int bs = 4;
-
-	int i;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda;
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	double *z = sz->pa + zi;
-
-	i = 0;
-	// clean up at the beginning
-	if(ai%bs!=0)
-		{
-		kernel_dgemv_n_4_gen_lib4(n, &alpha, pA, x, &beta, y-ai%bs, z-ai%bs, ai%bs, m+ai%bs);
-		pA += bs*sda;
-		y += 4 - ai%bs;
-		z += 4 - ai%bs;
-		m -= 4 - ai%bs;
-		}
-	// main loop
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for( ; i<m-11; i+=12)
-		{
-		kernel_dgemv_n_12_lib4(n, &alpha, &pA[i*sda], sda, x, &beta, &y[i], &z[i]);
-		}
-#endif
-	for( ; i<m-7; i+=8)
-		{
-		kernel_dgemv_n_8_lib4(n, &alpha, &pA[i*sda], sda, x, &beta, &y[i], &z[i]);
-		}
-	if(i<m-3)
-		{
-		kernel_dgemv_n_4_lib4(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i]);
-		i+=4;
-		}
-#else
-	for( ; i<m-3; i+=4)
-		{
-		kernel_dgemv_n_4_lib4(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i]);
-		}
-#endif
-	if(i<m)
-		{
-		kernel_dgemv_n_4_vs_lib4(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i], m-i);
-		}
-		
-	return;
-
-	}
-
-
-
-void dgemv_t_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi)
-	{
-
-	if(n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int i;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	double *z = sz->pa + zi;
-
-	if(ai%bs==0)
-		{
-		i = 0;
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for( ; i<n-11; i+=12)
-			{
-			kernel_dgemv_t_12_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			}
-#endif
-		for( ; i<n-7; i+=8)
-			{
-			kernel_dgemv_t_8_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			}
-		if(i<n-3)
-			{
-			kernel_dgemv_t_4_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			i+=4;
-			}
-#else
-		for( ; i<n-3; i+=4)
-			{
-			kernel_dgemv_t_4_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			}
-#endif
-		if(i<n)
-			{
-			kernel_dgemv_t_4_vs_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		}
-	else // TODO kernel 8
-		{
-		i = 0;
-		for( ; i<n; i+=4)
-			{
-			kernel_dgemv_t_4_gen_lib4(m, &alpha, ai%bs, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		}
-	
-	return;
-
-	}
-
-
-
-void dgemv_nt_libstr(int m, int n, double alpha_n, double alpha_t, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx_n, int xi_n, struct d_strvec *sx_t, int xi_t, double beta_n, double beta_t, struct d_strvec *sy_n, int yi_n, struct d_strvec *sy_t, int yi_t, struct d_strvec *sz_n, int zi_n, struct d_strvec *sz_t, int zi_t)
-	{
-	if(ai!=0)
-		{
-		printf("\ndgemv_nt_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *x_n = sx_n->pa + xi_n;
-	double *x_t = sx_t->pa + xi_t;
-	double *y_n = sy_n->pa + yi_n;
-	double *y_t = sy_t->pa + yi_t;
-	double *z_n = sz_n->pa + zi_n;
-	double *z_t = sz_t->pa + zi_t;
-	dgemv_nt_lib(m, n, alpha_n, alpha_t, pA, sda, x_n, x_t, beta_n, beta_t, y_n, y_t, z_n, z_t);
-	return;
-	}
-
-
-
-void dsymv_l_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int ii, n1;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	double *x = sx->pa + xi;
-	double *y = sy->pa + yi;
-	double *z = sz->pa + zi;
-
-	// copy and scale y into z
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z[ii+0] = beta*y[ii+0];
-		z[ii+1] = beta*y[ii+1];
-		z[ii+2] = beta*y[ii+2];
-		z[ii+3] = beta*y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = beta*y[ii+0];
-		}
-	
-	// clean up at the beginning
-	if(ai%bs!=0) // 1, 2, 3
-		{
-		n1 = 4-ai%bs;
-		kernel_dsymv_l_4_gen_lib4(m, &alpha, ai%bs, &pA[0], sda, &x[0], &z[0], n<n1 ? n : n1);
-		pA += n1 + n1*bs + (sda-1)*bs;
-		x += n1;
-		z += n1;
-		m -= n1;
-		n -= n1;
-		}
-	// main loop
-	ii = 0;
-	for(; ii<n-3; ii+=4)
-		{
-		kernel_dsymv_l_4_lib4(m-ii, &alpha, &pA[ii*bs+ii*sda], sda, &x[ii], &z[ii]);
-		}
-	// clean up at the end
-	if(ii<n)
-		{
-		kernel_dsymv_l_4_gen_lib4(m-ii, &alpha, 0, &pA[ii*bs+ii*sda], sda, &x[ii], &z[ii], n-ii);
-		}
-	
-	return;
-	}
-
-
-
-// m >= n
-void dtrmv_lnn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-
-	if(m-n>0)
-		dgemv_n_libstr(m-n, n, 1.0, sA, ai+n, aj, sx, xi, 0.0, sz, zi+n, sz, zi+n);
-
-	double *pA2 = pA;
-	double *z2 = z;
-	int m2 = n;
-	int n2 = 0;
-	double *pA3, *x3;
-
-	double alpha = 1.0;
-	double beta = 1.0;
-
-	double zt[4];
-
-	int ii, jj, jj_end;
-
-	ii = 0;
-
-	if(ai%4!=0)
-		{
-		pA2 += sda*bs - ai%bs;
-		z2 += bs-ai%bs;
-		m2 -= bs-ai%bs;
-		n2 += bs-ai%bs;
-		}
-	
-	pA2 += m2/bs*bs*sda;
-	z2 += m2/bs*bs;
-	n2 += m2/bs*bs;
-
-	if(m2%bs!=0)
-		{
-		//
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		zt[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		zt[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		zt[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		zt[0] = pA3[0+bs*0]*x3[0];
-		kernel_dgemv_n_4_lib4(n2, &alpha, pA2, x, &beta, zt, zt);
-		for(jj=0; jj<m2%bs; jj++)
-			z2[jj] = zt[jj];
-		}
-	for(; ii<m2-3; ii+=4)
-		{
-		pA2 -= bs*sda;
-		z2 -= 4;
-		n2 -= 4;
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		z2[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		z2[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		z2[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		z2[0] = pA3[0+bs*0]*x3[0];
-		kernel_dgemv_n_4_lib4(n2, &alpha, pA2, x, &beta, z2, z2);
-		}
-	if(ai%4!=0)
-		{
-		if(ai%bs==1)
-			{
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 4-ai%bs<n ? 4-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==2)
-			{
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 4-ai%bs<n ? 4-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else // if (ai%bs==3)
-			{
-			z[0] = pA[0+bs*0]*x[0];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// m >= n
-void dtrmv_ltn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-
-	double xt[4];
-	double zt[4];
-
-	double alpha = 1.0;
-	double beta = 1.0;
-
-	int ii, jj, ll, ll_max;
-
-	jj = 0;
-
-	if(ai%bs!=0)
-		{
-
-		if(ai%bs==1)
-			{
-			ll_max = m-jj<3 ? m-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<3; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2];
-			zt[2] = pA[2+bs*2]*xt[2];
-			pA += bs*sda - 1;
-			x += 3;
-			kernel_dgemv_t_4_lib4(m-3-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<3 ? n-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*3;
-			z += 3;
-			jj += 3;
-			}
-		else if(ai%bs==2)
-			{
-			ll_max = m-jj<2 ? m-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<2; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1];
-			zt[1] = pA[1+bs*1]*xt[1];
-			pA += bs*sda - 2;
-			x += 2;
-			kernel_dgemv_t_4_lib4(m-2-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<2 ? n-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*2;
-			z += 2;
-			jj += 2;
-			}
-		else // if(ai%bs==3)
-			{
-			ll_max = m-jj<1 ? m-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<1; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0];
-			pA += bs*sda - 3;
-			x += 1;
-			kernel_dgemv_t_4_lib4(m-1-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<1 ? n-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*1;
-			z += 1;
-			jj += 1;
-			}
-
-		}
-	
-	for(; jj<n-3; jj+=4)
-		{
-		zt[0] = pA[0+bs*0]*x[0] + pA[1+bs*0]*x[1] + pA[2+bs*0]*x[2] + pA[3+bs*0]*x[3];
-		zt[1] = pA[1+bs*1]*x[1] + pA[2+bs*1]*x[2] + pA[3+bs*1]*x[3];
-		zt[2] = pA[2+bs*2]*x[2] + pA[3+bs*2]*x[3];
-		zt[3] = pA[3+bs*3]*x[3];
-		pA += bs*sda;
-		x += 4;
-		kernel_dgemv_t_4_lib4(m-4-jj, &alpha, pA, sda, x, &beta, zt, z);
-		pA += bs*4;
-		z += 4;
-		}
-	if(jj<n)
-		{
-		ll_max = m-jj<4 ? m-jj : 4;
-		for(ll=0; ll<ll_max; ll++)
-			xt[ll] = x[ll];
-		for(; ll<4; ll++)
-			xt[ll] = 0.0;
-		zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3];
-		zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3];
-		zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3];
-		zt[3] = pA[3+bs*3]*xt[3];
-		pA += bs*sda;
-		x += 4;
-		kernel_dgemv_t_4_lib4(m-4-jj, &alpha, pA, sda, x, &beta, zt, zt);
-		for(ll=0; ll<n-jj; ll++)
-			z[ll] = zt[ll];
-//		pA += bs*4;
-//		z += 4;
-		}
-
-	return;
-
-	}
-
-
-
-void dtrmv_unn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ai!=0)
-		{
-		printf("\ndtrmv_unn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-
-	int i;
-	
-	i=0;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		kernel_dtrmv_un_8_lib4(m-i, pA, sda, x, z);
-		pA += 8*sda+8*bs;
-		x  += 8;
-		z  += 8;
-		}
-#endif
-	for(; i<m-3; i+=4)
-		{
-		kernel_dtrmv_un_4_lib4(m-i, pA, x, z);
-		pA += 4*sda+4*bs;
-		x  += 4;
-		z  += 4;
-		}
-	if(m>i)
-		{
-		if(m-i==1)
-			{
-			z[0] = pA[0+bs*0]*x[0];
-			}
-		else if(m-i==2)
-			{
-			z[0] = pA[0+bs*0]*x[0] + pA[0+bs*1]*x[1];
-			z[1] = pA[1+bs*1]*x[1];
-			}
-		else // if(m-i==3)
-			{
-			z[0] = pA[0+bs*0]*x[0] + pA[0+bs*1]*x[1] + pA[0+bs*2]*x[2];
-			z[1] = pA[1+bs*1]*x[1] + pA[1+bs*2]*x[2];
-			z[2] = pA[2+bs*2]*x[2];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-void dtrmv_utn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ai!=0)
-		{
-		printf("\ndtrmv_utn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-
-	int ii, idx;
-	
-	double *ptrA;
-	
-	ii=0;
-	idx = m/bs*bs;
-	if(m%bs!=0)
-		{
-		kernel_dtrmv_ut_4_vs_lib4(m, pA+idx*bs, sda, x, z+idx, m%bs);
-		ii += m%bs;
-		}
-	idx -= 4;
-	for(; ii<m; ii+=4)
-		{
-		kernel_dtrmv_ut_4_lib4(idx+4, pA+idx*bs, sda, x, z+idx);
-		idx -= 4;
-		}
-
-	return;
-
-	}
-
-
-
-void dtrsv_lnn_mn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0 | n==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_lnn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** dtrsv_lnn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_lnn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_lnn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_lnn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_lnn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_lnn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** dtrsv_lnn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_lnn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_lnn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	if(ai!=0 | xi%4!=0)
-		{
-		printf("\ndtrsv_lnn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *dA = sA->dA;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsv_ln_inv_lib(m, n, pA, sda, dA, x, z);
-	return;
-	}
-
-
-
-void dtrsv_ltn_mn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_ltn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** dtrsv_ltn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_ltn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_ltn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_ltn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_ltn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_ltn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** dtrsv_ltn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_ltn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_ltn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	if(ai!=0 | xi%4!=0)
-		{
-		printf("\ndtrsv_ltn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *dA = sA->dA;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsv_lt_inv_lib(m, n, pA, sda, dA, x, z);
-	return;
-	}
-
-
-
-void dtrsv_lnn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_lnn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_lnn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_lnn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_lnn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_lnn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_lnn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_lnn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_lnn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_lnn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	if(ai!=0 | xi%4!=0)
-		{
-		printf("\ndtrsv_lnn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *dA = sA->dA;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsv_ln_inv_lib(m, m, pA, sda, dA, x, z);
-	return;
-	}
-
-
-
-void dtrsv_lnu_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_lnu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_lnu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_lnu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_lnu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_lnu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_lnu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_lnu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_lnu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_lnu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** dtrsv_lnu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void dtrsv_ltn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_ltn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_ltn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_ltn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_ltn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_ltn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_ltn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_ltn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_ltn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_ltn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	if(ai!=0 | xi%4!=0)
-		{
-		printf("\ndtrsv_ltn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	double *pA = sA->pA + aj*bs; // TODO ai
-	double *dA = sA->dA;
-	double *x = sx->pa + xi;
-	double *z = sz->pa + zi;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsv_lt_inv_lib(m, m, pA, sda, dA, x, z);
-	return;
-	}
-
-
-
-void dtrsv_ltu_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_ltu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_ltu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_ltu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_ltu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_ltu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_ltu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_ltu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_ltu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_ltu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** dtrsv_ltu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void dtrsv_unn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_unn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_unn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_unn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_unn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_unn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_unn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_unn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_unn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_unn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** dtrsv_unn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void dtrsv_utn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** dtrsv_utn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** dtrsv_utn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** dtrsv_utn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** dtrsv_utn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** dtrsv_utn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** dtrsv_utn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** dtrsv_utn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** dtrsv_utn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** dtrsv_utn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** dtrsv_utn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/blas/d_blas3_diag_lib.c b/third_party/blasfeo/blas/d_blas3_diag_lib.c
deleted file mode 100644
index ff69317..0000000
--- a/third_party/blasfeo/blas/d_blas3_diag_lib.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#define REAL double
-
-#define STRMAT d_strmat
-#define STRVEC d_strvec
-
-#define GEMM_R_DIAG_LIBSTR dgemm_r_diag_libstr
-#define GEMM_L_DIAG_LIBSTR dgemm_l_diag_libstr
-
-
-
-#include "x_blas3_diag_lib.c"
diff --git a/third_party/blasfeo/blas/d_blas3_diag_lib4.c b/third_party/blasfeo/blas/d_blas3_diag_lib4.c
deleted file mode 100644
index 2731d1f..0000000
--- a/third_party/blasfeo/blas/d_blas3_diag_lib4.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-/****************************
-* old interface
-****************************/
-
-void dgemm_diag_left_lib(int m, int n, double alpha, double *dA, double *pB, int sdb, double beta, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int ii;
-
-	ii = 0;
-	if(beta==0.0)
-		{
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgemm_diag_left_4_a0_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &pD[ii*sdd]);
-			}
-		}
-	else
-		{
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgemm_diag_left_4_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-			}
-		}
-	if(m-ii>0)
-		{
-		if(m-ii==1)
-			kernel_dgemm_diag_left_1_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		else if(m-ii==2)
-			kernel_dgemm_diag_left_2_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		else // if(m-ii==3)
-			kernel_dgemm_diag_left_3_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		}
-	
-	}
-
-
-
-void dgemm_diag_right_lib(int m, int n, double alpha, double *pA, int sda, double *dB, double beta, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	int ii;
-
-	ii = 0;
-	if(beta==0.0)
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_dgemm_diag_right_4_a0_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &pD[ii*bs], sdd);
-			}
-		}
-	else
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_dgemm_diag_right_4_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-			}
-		}
-	if(n-ii>0)
-		{
-		if(n-ii==1)
-			kernel_dgemm_diag_right_1_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else if(n-ii==2)
-			kernel_dgemm_diag_right_2_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else // if(n-ii==3)
-			kernel_dgemm_diag_right_3_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		}
-	
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dgemm with A diagonal matrix (stored as strvec)
-void dgemm_l_diag_libstr(int m, int n, double alpha, struct d_strvec *sA, int ai, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	if(bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\ndgemm_l_diag_libstr: feature not implemented yet: bi=%d, ci=%d, di=%d\n", bi, ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *dA = sA->pa + ai;
-	double *pB = sB->pA + bj*bs;
-	double *pC = sC->pA + cj*bs;
-	double *pD = sD->pA + dj*bs;
-	dgemm_diag_left_lib(m, n, alpha, dA, pB, sdb, beta, pC, sdc, pD, sdd);
-	return;
-	}
-
-
-
-// dgemm with B diagonal matrix (stored as strvec)
-void dgemm_r_diag_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sB, int bi, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	if(ai!=0 | ci!=0 | di!=0)
-		{
-		printf("\ndgemm_r_diag_libstr: feature not implemented yet: ai=%d, ci=%d, di=%d\n", ai, ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*bs;
-	double *dB = sB->pa + bi;
-	double *pC = sC->pA + cj*bs;
-	double *pD = sD->pA + dj*bs;
-	dgemm_diag_right_lib(m, n, alpha, pA, sda, dB, beta, pC, sdc, pD, sdd);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
diff --git a/third_party/blasfeo/blas/d_blas3_lib.c b/third_party/blasfeo/blas/d_blas3_lib.c
deleted file mode 100644
index 27c20ab..0000000
--- a/third_party/blasfeo/blas/d_blas3_lib.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#if defined(REF_BLAS_BLIS)
-#include "d_blas_64.h"
-#else
-#include "d_blas.h"
-#endif
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_aux.h"
-
-
-
-#define REAL double
-
-#define STRMAT d_strmat
-
-#define GEMM_NN_LIBSTR dgemm_nn_libstr
-#define GEMM_NT_LIBSTR dgemm_nt_libstr
-#define SYRK_LN_LIBSTR dsyrk_ln_libstr
-#define SYRK_LN_MN_LIBSTR dsyrk_ln_mn_libstr
-#define TRMM_RLNN_LIBSTR dtrmm_rlnn_libstr
-#define TRMM_RUTN_LIBSTR dtrmm_rutn_libstr
-#define TRSM_LLNU_LIBSTR dtrsm_llnu_libstr
-#define TRSM_LUNN_LIBSTR dtrsm_lunn_libstr
-#define TRSM_RLTN_LIBSTR dtrsm_rltn_libstr
-#define TRSM_RLTU_LIBSTR dtrsm_rltu_libstr
-#define TRSM_RUTN_LIBSTR dtrsm_rutn_libstr
-
-#define COPY dcopy_
-#define GEMM dgemm_
-#define SYRK dsyrk_
-#define TRMM dtrmm_
-#define TRSM dtrsm_
-
-
-
-#include "x_blas3_lib.c"
diff --git a/third_party/blasfeo/blas/d_blas3_lib4.c b/third_party/blasfeo/blas/d_blas3_lib4.c
deleted file mode 100644
index dfa3cb8..0000000
--- a/third_party/blasfeo/blas/d_blas3_lib4.c
+++ /dev/null
@@ -1,2728 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_aux.h"
-
-
-
-/****************************
-* old interface
-****************************/
-
-void dgemm_nt_lib(int m, int n, int k, double alpha, double *pA, int sda, double *pB, int sdb, double beta, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nt_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nt_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nt_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#elif defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nt_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*ps+(i+4)*sdc], &pD[j*ps+(i+4)*sdd], m-(i+4), n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nt_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<n-8; j+=12)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		kernel_dgemm_nt_8x8u_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, m-i, n-(j+4));
-		}
-	
-	if(j<n-4)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], m-i, n-(j+4));
-		}
-	else if(j<n)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-#if defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*ps+(i+4)*sdc], &pD[j*ps+(i+4)*sdd], m-(i+4), n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<n-8; j+=12)
-		{
-		kernel_dgemm_nt_4x12_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	if(j<n-4)
-		{
-		kernel_dgemm_nt_4x8_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	else if(j<n)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_4:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_dgemm_nt_4x8_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-#endif
-
-	}
-
-
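For orientation: dgemm_nt_lib above computes D = alpha * A * B^T + beta * C on 4-row panel-major operands, working in 12-, 8- or 4-row slabs depending on the target and falling through to the left_* labels for the remaining rows. A minimal reference sketch of the same operation on plain row-major arrays (the helper name and the row-major layout are illustrative assumptions, not part of this library) pins down the semantics those kernels implement:

// Reference sketch only: D = alpha*A*B^T + beta*C on plain row-major storage.
// A is m-by-k, B is n-by-k (hence "nt": A not transposed, B transposed),
// C and D are m-by-n with leading dimension n.
static void ref_dgemm_nt(int m, int n, int k, double alpha, double *A, double *B, double beta, double *C, double *D)
	{
	int i, j, l;
	double acc;
	for(i=0; i<m; i++)
		{
		for(j=0; j<n; j++)
			{
			acc = 0.0;
			for(l=0; l<k; l++)
				acc += A[i*k+l] * B[j*k+l]; // row i of A dot row j of B (= column j of B^T)
			D[i*n+j] = alpha*acc + beta*C[i*n+j];
			}
		}
	}

The _vs_ ("variable size") kernels used in the clean-up paths do the same work as this inner loop but mask the trailing rows and columns of the block, which is why they receive m-i and n-j as extra arguments.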
-
-#if 0
-void dgemm_nn_lib(int m, int n, int k, double alpha, double *pA, int sda, double *pB, int sdb, double beta, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nn_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nn_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nn_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nn_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dgemm_nn_4x4_lib4(k, &alpha, &pA[i*sda], 0, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-			}
-		if(j<n)
-			{
-			kernel_dgemm_nn_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nn_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nn_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nn_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-
-	}
-#endif
-
-
-
-void dtrmm_nt_ru_lib(int m, int n, double alpha, double *pA, int sda, double *pB, int sdb, double beta, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-	
-	int i, j;
-	
-	i = 0;
-// XXX there is a bug here !!!!!!
-#if 0//defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrmm_nt_ru_12x4_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n) // TODO specialized edge routine
-			{
-			kernel_dtrmm_nt_ru_12x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		if(m-i<5)
-			{
-			goto left_4;
-			}
-		if(m-i<9)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrmm_nt_ru_8x4_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n) // TODO specialized edge routine
-			{
-			kernel_dtrmm_nt_ru_8x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		if(m-i<5)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrmm_nt_ru_4x4_lib4(n-j, &alpha, &pA[j*ps+i*sda], &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-			}
-		if(j<n) // TODO specialized edge routine
-			{
-			kernel_dtrmm_nt_ru_4x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		goto left_4;
-		}
-#endif
-	
-	// common return
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	// clean up
-	left_12:
-	j = 0;
-//	for(; j<n-3; j+=4)
-	for(; j<n; j+=4)
-		{
-		kernel_dtrmm_nt_ru_12x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-//	if(j<n) // TODO specialized edge routine
-//		{
-//		kernel_dtrmm_nt_ru_8x4_vs_lib4(n-j, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-//		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	// clean up
-	left_8:
-	j = 0;
-//	for(; j<n-3; j+=4)
-	for(; j<n; j+=4)
-		{
-		kernel_dtrmm_nt_ru_8x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-//	if(j<n) // TODO specialized edge routine
-//		{
-//		kernel_dtrmm_nt_ru_8x4_vs_lib4(n-j, &pA[j*ps+i*sda], sda, &pB[j*ps+j*sdb], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-//		}
-	return;
-#endif
-
-	left_4:
-	j = 0;
-//	for(; j<n-3; j+=4)
-	for(; j<n; j+=4)
-		{
-		kernel_dtrmm_nt_ru_4x4_vs_lib4(n-j, &alpha, &pA[j*ps+i*sda], &pB[j*ps+j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-//	if(j<n) // TODO specialized edge routine
-//		{
-//		kernel_dtrmm_nt_ru_4x4_vs_lib4(n-j, &pA[j*ps+i*sda], &pB[j*ps+j*sdb], alg, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-//		}
-	return;
-
-	}
-
-
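dtrmm_nt_ru_lib above computes D = alpha * A * B^T + beta * C with the right operand B upper triangular (not unit). Because B^T is then lower triangular, column j of the result only needs columns l >= j of A and the tail of row j of B, which is why every kernel call gets n-j as its inner length and operands offset by j*ps. A minimal sketch on plain row-major arrays, under the same illustrative naming and layout assumptions as the sketch above:

// Reference sketch only: D = alpha*A*B^T + beta*C, B upper triangular.
// A, C, D are m-by-n (leading dimension n); B is n-by-n and only its upper
// triangle (entries B[j][l] with l >= j) is referenced.
static void ref_dtrmm_nt_ru(int m, int n, double alpha, double *A, double *B, double beta, double *C, double *D)
	{
	int i, j, l;
	double acc;
	for(i=0; i<m; i++)
		{
		for(j=0; j<n; j++)
			{
			acc = 0.0;
			for(l=j; l<n; l++) // B[j][l] == 0 for l < j
				acc += A[i*n+l] * B[j*n+l];
			D[i*n+j] = alpha*acc + beta*C[i*n+j];
			}
		}
	}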
-
-// D <= B * A^{-T} , with A lower triangular with unit diagonal
-void dtrsm_nt_rl_one_lib(int m, int n, double *pA, int sda, double *pB, int sdb, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-	
-	int i, j;
-	
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_one_12x4_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_one_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_one_8x4_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_one_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_one_4x4_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_one_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_one_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_one_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], m-i, n-j);
-		}
-	return;
-#endif
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_one_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], m-i, n-j);
-		}
-	return;
-
-	}
-
-
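dtrsm_nt_rl_one_lib above solves D * A^T = B for D with A lower triangular and unit diagonal, so no divisions are needed; each kernel call receives j as the length of the already-computed part of the block row of D that must be subtracted out before the triangular solve of the current 4-column block. A minimal per-element sketch on plain row-major arrays (illustrative naming and layout, as above):

// Reference sketch only: solve D * A^T = B, i.e. D = B * A^{-T}, with A
// (n-by-n) lower triangular, unit diagonal. B and D are m-by-n, row-major.
static void ref_dtrsm_nt_rl_one(int m, int n, double *A, double *B, double *D)
	{
	int i, j, l;
	double acc;
	for(i=0; i<m; i++)
		{
		for(j=0; j<n; j++) // columns left to right: forward substitution
			{
			acc = B[i*n+j];
			for(l=0; l<j; l++)
				acc -= D[i*n+l] * A[j*n+l]; // (A^T)[l][j] == A[j][l]
			D[i*n+j] = acc; // unit diagonal: no division
			}
		}
	}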
-
-// D <= B * A^{-T} , with A upper triangular employing explicit inverse of diagonal
-void dtrsm_nt_ru_inv_lib(int m, int n, double *pA, int sda, double *inv_diag_A, double *pB, int sdb, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-	
-	int i, j, idx;
-
-	int rn = n%4;
-
-	double *dummy;
-	
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		// clean at the end
-		if(rn>0)
-			{
-			idx = n-rn;
-			kernel_dtrsm_nt_ru_inv_12x4_vs_lib4(0, dummy, 0, dummy, &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-			j += rn;
-			}
-		for(; j<n; j+=4)
-			{
-			idx = n-j-4;
-			kernel_dtrsm_nt_ru_inv_12x4_lib4(j, &pD[i*sdd+(idx+4)*ps], sdd, &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx]);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		// clean at the end
-		if(rn>0)
-			{
-			idx = n-rn;
-			kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(0, dummy, 0, dummy, &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-			j += rn;
-			}
-		for(; j<n; j+=4)
-			{
-			idx = n-j-4;
-			kernel_dtrsm_nt_ru_inv_8x4_lib4(j, &pD[i*sdd+(idx+4)*ps], sdd, &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx]);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		// clean at the end
-		if(rn>0)
-			{
-			idx = n-rn;
-			kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(0, dummy, dummy, &pB[i*sdb+idx*ps], &pD[i*sdd+idx*ps], &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-			j += rn;
-			}
-		for(; j<n; j+=4)
-			{
-			idx = n-j-4;
-			kernel_dtrsm_nt_ru_inv_4x4_lib4(j, &pD[i*sdd+(idx+4)*ps], &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], &pD[i*sdd+idx*ps], &pA[idx*sda+idx*ps], &inv_diag_A[idx]);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	// TODO
-	// clean at the end
-	if(rn>0)
-		{
-		idx = n-rn;
-		kernel_dtrsm_nt_ru_inv_12x4_vs_lib4(0, dummy, 0, dummy, &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-		j += rn;
-		}
-	for(; j<n; j+=4)
-		{
-		idx = n-j-4;
-		kernel_dtrsm_nt_ru_inv_12x4_vs_lib4(j, &pD[i*sdd+(idx+4)*ps], sdd, &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, 4);
-		}
-	return;
-
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	// TODO
-	// clean at the end
-	if(rn>0)
-		{
-		idx = n-rn;
-		kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(0, dummy, 0, dummy, &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-		j += rn;
-		}
-	for(; j<n; j+=4)
-		{
-		idx = n-j-4;
-		kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(j, &pD[i*sdd+(idx+4)*ps], sdd, &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], sdb, &pD[i*sdd+idx*ps], sdd, &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, 4);
-		}
-	return;
-
-#endif
-
-	left_4:
-	j = 0;
-	// TODO
-	// clean at the end
-	if(rn>0)
-		{
-		idx = n-rn;
-		kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(0, dummy, dummy, &pB[i*sdb+idx*ps], &pD[i*sdd+idx*ps], &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, rn);
-		j += rn;
-		}
-	for(; j<n; j+=4)
-		{
-		idx = n-j-4;
-		kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(j, &pD[i*sdd+(idx+4)*ps], &pA[idx*sda+(idx+4)*ps], &pB[i*sdb+idx*ps], &pD[i*sdd+idx*ps], &pA[idx*sda+idx*ps], &inv_diag_A[idx], m-i, 4);
-		}
-	return;
-
-	}
-
-
-
-// D <= A^{-1} * B , with A lower triangular with unit diagonal
-void dtrsm_nn_ll_one_lib(int m, int n, double *pA, int sda, double *pB, int sdb, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-	
-	int i, j;
-	
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for( ; i<m-11; i+=12)
-		{
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_vs_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		if(m-i<=4)
-			goto left_4;
-		if(m-i<=8)
-			goto left_8;
-		else
-			goto left_12;
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for( ; i<m-7; i+=8)
-		{
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_vs_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		if(m-i<=4)
-			goto left_4;
-		else
-			goto left_8;
-		}
-#else
-	for( ; i<m-3; i+=4)
-		{
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_lib4(i, pA+i*sda, pD+j*ps, sdd, pB+i*sdb+j*ps, pD+i*sdd+j*ps, pA+i*sda+i*ps);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_vs_lib4(i, pA+i*sda, pD+j*ps, sdd, pB+i*sdb+j*ps, pD+i*sdd+j*ps, pA+i*sda+i*ps, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for( ; j<n; j+=4)
-		{
-		kernel_dtrsm_nn_ll_one_12x4_vs_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda, m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for( ; j<n; j+=4)
-		{
-		kernel_dtrsm_nn_ll_one_8x4_vs_lib4(i, pA+i*sda, sda, pD+j*ps, sdd, pB+i*sdb+j*ps, sdb, pD+i*sdd+j*ps, sdd, pA+i*sda+i*ps, sda, m-i, n-j);
-		}
-	return;
-#endif
-
-	left_4:
-	j = 0;
-	for( ; j<n; j+=4)
-		{
-		kernel_dtrsm_nn_ll_one_4x4_vs_lib4(i, pA+i*sda, pD+j*ps, sdd, pB+i*sdb+j*ps, pD+i*sdd+j*ps, pA+i*sda+i*ps, m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-// D <= A^{-1} * B , with A upper triangular employing explicit inverse of diagonal
-void dtrsm_nn_lu_inv_lib(int m, int n, double *pA, int sda, double *inv_diag_A, double *pB, int sdb, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int ps = 4;
-	
-	int i, j, idx;
-	double *dummy;
-	
-	i = 0;
-	int rm = m%4;
-	if(rm>0)
-		{
-		// TODO code the final case explicitly
-		idx = m-rm; // position of the part to do
-		j = 0;
-		for( ; j<n; j+=4)
-			{
-			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(0, dummy, dummy, 0, pB+idx*sdb+j*ps, pD+idx*sdd+j*ps, pA+idx*sda+idx*ps, inv_diag_A+idx, rm, n-j);
-			}
-		// TODO
-		i += rm;
-		}
-//	int em = m-rm;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for( ; i<m-8; i+=12)
-		{
-		idx = m-i; // position of already done part
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_lu_inv_12x4_lib4(i, pA+(idx-12)*sda+idx*ps, sda, pD+idx*sdd+j*ps, sdd, pB+(idx-12)*sdb+j*ps, sdb, pD+(idx-12)*sdd+j*ps, sdd, pA+(idx-12)*sda+(idx-12)*ps, sda, inv_diag_A+(idx-12));
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_lu_inv_12x4_vs_lib4(i, pA+(idx-12)*sda+idx*ps, sda, pD+idx*sdd+j*ps, sdd, pB+(idx-12)*sdb+j*ps, sdb, pD+(idx-12)*sdd+j*ps, sdd, pA+(idx-12)*sda+(idx-12)*ps, sda, inv_diag_A+(idx-12), 12, n-j);
-//			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i, pA+(idx-4)*sda+idx*ps, pD+idx*sdd+j*ps, sdd, pB+(idx-4)*sdb+j*ps, pD+(idx-4)*sdd+j*ps, pA+(idx-4)*sda+(idx-4)*ps, inv_diag_A+(idx-4), 4, n-j);
-//			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i+4, pA+(idx-8)*sda+(idx-4)*ps, pD+(idx-4)*sdd+j*ps, sdd, pB+(idx-8)*sdb+j*ps, pD+(idx-8)*sdd+j*ps, pA+(idx-8)*sda+(idx-8)*ps, inv_diag_A+(idx-8), 4, n-j);
-//			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i+8, pA+(idx-12)*sda+(idx-8)*ps, pD+(idx-8)*sdd+j*ps, sdd, pB+(idx-12)*sdb+j*ps, pD+(idx-12)*sdd+j*ps, pA+(idx-12)*sda+(idx-12)*ps, inv_diag_A+(idx-12), 4, n-j);
-			}
-		}
-#endif
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	for( ; i<m-4; i+=8)
-		{
-		idx = m-i; // position of already done part
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_lu_inv_8x4_lib4(i, pA+(idx-8)*sda+idx*ps, sda, pD+idx*sdd+j*ps, sdd, pB+(idx-8)*sdb+j*ps, sdb, pD+(idx-8)*sdd+j*ps, sdd, pA+(idx-8)*sda+(idx-8)*ps, sda, inv_diag_A+(idx-8));
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_lu_inv_8x4_vs_lib4(i, pA+(idx-8)*sda+idx*ps, sda, pD+idx*sdd+j*ps, sdd, pB+(idx-8)*sdb+j*ps, sdb, pD+(idx-8)*sdd+j*ps, sdd, pA+(idx-8)*sda+(idx-8)*ps, sda, inv_diag_A+(idx-8), 8, n-j);
-//			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i, pA+(idx-4)*sda+idx*ps, pD+idx*sdd+j*ps, sdd, pB+(idx-4)*sdb+j*ps, pD+(idx-4)*sdd+j*ps, pA+(idx-4)*sda+(idx-4)*ps, inv_diag_A+(idx-4), 4, n-j);
-//			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i+4, pA+(idx-8)*sda+(idx-4)*ps, pD+(idx-4)*sdd+j*ps, sdd, pB+(idx-8)*sdb+j*ps, pD+(idx-8)*sdd+j*ps, pA+(idx-8)*sda+(idx-8)*ps, inv_diag_A+(idx-8), 4, n-j);
-			}
-		}
-#endif
-	for( ; i<m; i+=4)
-		{
-		idx = m-i; // position of already done part
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nn_lu_inv_4x4_lib4(i, pA+(idx-4)*sda+idx*ps, pD+idx*sdd+j*ps, sdd, pB+(idx-4)*sdb+j*ps, pD+(idx-4)*sdd+j*ps, pA+(idx-4)*sda+(idx-4)*ps, inv_diag_A+(idx-4));
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(i, pA+(idx-4)*sda+idx*ps, pD+idx*sdd+j*ps, sdd, pB+(idx-4)*sdb+j*ps, pD+(idx-4)*sdd+j*ps, pA+(idx-4)*sda+(idx-4)*ps, inv_diag_A+(idx-4), 4, n-j);
-			}
-		}
-
-	// common return
-	return;
-
-	}
-
-
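dtrsm_nn_lu_inv_lib above solves A * D = B with A upper triangular, proceeding block row by block row from the bottom (idx counts down from m) and multiplying by the precomputed reciprocals in inv_diag_A instead of dividing. A minimal per-element sketch on plain row-major arrays (illustrative naming and layout, as above):

// Reference sketch only: solve A * D = B, i.e. D = A^{-1} * B, with A (m-by-m)
// upper triangular; inv_diag_A[i] holds 1.0/A[i][i], precomputed by the caller.
// B and D are m-by-n, row-major. Rows are resolved bottom-up (back substitution).
static void ref_dtrsm_nn_lu_inv(int m, int n, double *A, double *inv_diag_A, double *B, double *D)
	{
	int i, j, l;
	double acc;
	for(j=0; j<n; j++)
		{
		for(i=m-1; i>=0; i--)
			{
			acc = B[i*n+j];
			for(l=i+1; l<m; l++)
				acc -= A[i*m+l] * D[l*n+j];
			D[i*n+j] = acc * inv_diag_A[i]; // multiply by the reciprocal instead of dividing
			}
		}
	}

Precomputing 1.0/A[i][i] once (as the libstr wrappers further down do before calling into these routines) replaces the per-element divisions with multiplications at the cost of m up-front divisions.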
-
-#if 0
-void dlauum_blk_nt_l_lib(int m, int n, int nv, int *rv, int *cv, double *pA, int sda, double *pB, int sdb, int alg, double *pC, int sdc, double *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	// TODO remove
-	double alpha, beta;
-	if(alg==0)
-		{
-		alpha = 1.0;
-		beta = 0.0;
-		}
-	else if(alg==1)
-		{
-		alpha = 1.0;
-		beta = 1.0;
-		}
-	else
-		{
-		alpha = -1.0;
-		beta = 1.0;
-		}
-
-	// TODO remove
-	int k = cv[nv-1];
-
-	const int ps = 4;
-
-	int i, j, l;
-	int ii, iii, jj, kii, kiii, kjj, k0, k1;
-
-	i = 0;
-	ii = 0;
-	iii = 0;
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-7; i+=8)
-		{
-
-		while(ii<nv && rv[ii]<i+8)
-			ii++;
-		if(ii<nv)
-			kii = cv[ii];
-		else
-			kii = cv[ii-1];
-
-		j = 0;
-		jj = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			kernel_dgemm_nt_8x4_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			if(j<i) // dgemm
-				{
-				kernel_dgemm_nt_8x4_vs_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, 8, n-j);
-				}
-			else // dsyrk
-				{
-				kernel_dsyrk_nt_l_8x4_vs_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, 8, n-j);
-				if(j<n-4)
-					{
-					kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], 4, n-j-4); // TODO
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-
-		while(ii<nv && rv[ii]<i+4)
-			ii++;
-		if(ii<nv)
-			kii = cv[ii];
-		else
-			kii = cv[ii-1];
-//		k0 = kii;
-//		printf("\nii %d %d %d %d %d\n", i, ii, rv[ii], cv[ii], kii);
-
-		j = 0;
-		jj = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-//			printf("\njj %d %d %d %d %d\n", j, jj, rv[jj], cv[jj], kjj);
-
-			kernel_dgemm_nt_4x4_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-			}
-		if(j<n)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-//			printf("\njj %d %d %d %d %d\n", j, jj, rv[jj], cv[jj], kjj);
-
-			if(i<j) // dgemm
-				{
-				kernel_dgemm_nt_4x4_vs_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], 4, n-j);
-				}
-			else // dsyrk
-				{
-				kernel_dsyrk_nt_l_4x4_vs_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], 4, n-j);
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-
-	kii = cv[nv-1];
-
-	j = 0;
-	jj = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		kernel_dgemm_nt_8x4_vs_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	if(j<n)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		if(j<i) // dgemm
-			{
-			kernel_dgemm_nt_8x4_vs_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			}
-		else // dsyrk
-			{
-			kernel_dsyrk_nt_l_8x4_vs_lib4(k0, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-			if(j<n-4)
-				{
-				kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, n-j-4); // TODO
-				}
-			}
-		}
-	return;
-#endif
-
-	left_4:
-
-	kii = cv[nv-1];
-
-	j = 0;
-	jj = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		kernel_dgemm_nt_4x4_vs_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		if(j<i) // dgemm
-			{
-			kernel_dgemm_nt_4x4_vs_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			}
-		else // dsyrk
-			{
-			kernel_dsyrk_nt_l_4x4_vs_lib4(k0, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-			}
-		}
-	return;
-
-	}
-#endif
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dgemm nt
-void dgemm_nt_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-	
-	const int ps = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	int air = ai & (ps-1);
-	int bir = bi & (ps-1);
-	double *pA = sA->pA + aj*ps + (ai-air)*sda;
-	double *pB = sB->pA + bj*ps + (bi-bir)*sdb;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-
-	if(ai==0 & bi==0 & ci==0 & di==0)
-		{
-		dgemm_nt_lib(m, n, k, alpha, pA, sda, pB, sdb, beta, pC, sdc, pD, sdd); 
-		return;
-		}
-	
-	int ci0 = ci-air;
-	int di0 = di-air;
-	int offsetC;
-	int offsetD;
-	if(ci0>=0)
-		{
-		pC += ci0/ps*ps*sdc;
-		offsetC = ci0%ps;
-		}
-	else
-		{
-		pC += -4*sdc;
-		offsetC = ps+ci0;
-		}
-	if(di0>=0)
-		{
-		pD += di0/ps*ps*sdd;
-		offsetD = di0%ps;
-		}
-	else
-		{
-		pD += -4*sdd;
-		offsetD = ps+di0;
-		}
-	
-	int i, j, l;
-
-	int idxB;
-
-	// clean up at the beginning
-	if(air!=0)
-		{
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		if(m>5)
-			{
-			j = 0;
-			idxB = 0;
-			// clean up at the beginning
-			if(bir!=0)
-				{
-				kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[0], sda, &pB[idxB*sdb], &beta, offsetC, &pC[j*ps]-bir*ps, sdc, offsetD, &pD[j*ps]-bir*ps, sdd, air, air+m, bir, n-j);
-				j += ps-bir;
-				idxB += 4;
-				}
-			// main loop
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[0], sda, &pB[idxB*sdb], &beta, offsetC, &pC[j*ps], sdc, offsetD, &pD[j*ps], sdd, air, air+m, 0, n-j);
-				idxB += 4;
-				}
-			m -= 2*ps-air;
-			pA += 2*ps*sda;
-			pC += 2*ps*sdc;
-			pD += 2*ps*sdd;
-			}
-		else // m<=4
-			{
-#endif
-			j = 0;
-			idxB = 0;
-			// clean up at the beginning
-			if(bir!=0)
-				{
-				kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[0], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps]-bir*ps, sdc, offsetD, &pD[j*ps]-bir*ps, sdd, air, air+m, bir, n-j);
-				j += ps-bir;
-				idxB += 4;
-				}
-			// main loop
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[0], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps], sdc, offsetD, &pD[j*ps], sdd, air, air+m, 0, n-j);
-				idxB += 4;
-				}
-			m -= ps-air;
-			pA += ps*sda;
-			pC += ps*sdc;
-			pD += ps*sdd;
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-			// nothing more to do
-			return;
-			}
-#endif
-		}
-	i = 0;
-	// main loop
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-4; i+=8)
-		{
-		j = 0;
-		idxB = 0;
-		// clean up at the beginning
-		if(bir!=0)
-			{
-			kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc]-bir*ps, sdc, offsetD, &pD[j*ps+i*sdd]-bir*ps, sdd, 0, m-i, bir, n-j);
-			j += ps-bir;
-			idxB += 4;
-			}
-		// main loop
-		for(; j<n; j+=4)
-			{
-			kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-			idxB += 4;
-			}
-		}
-	if(i<m)
-		{
-		j = 0;
-		idxB = 0;
-		// clean up at the beginning
-		if(bir!=0)
-			{
-			kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc]-bir*ps, sdc, offsetD, &pD[j*ps+i*sdd]-bir*ps, sdd, 0, m-i, bir, n-j);
-			j += ps-bir;
-			idxB += 4;
-			}
-		// main loop
-		for(; j<n; j+=4)
-			{
-			kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-			idxB += 4;
-			}
-		}
-#else
-	for(; i<m; i+=4)
-		{
-		j = 0;
-		idxB = 0;
-		// clean up at the beginning
-		if(bir!=0)
-			{
-			kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc]-bir*ps, sdc, offsetD, &pD[j*ps+i*sdd]-bir*ps, sdd, 0, m-i, bir, n-j);
-			j += ps-bir;
-			idxB += 4;
-			}
-		// main loop
-		for(; j<n; j+=4)
-			{
-			kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-			idxB += 4;
-			}
-		}
-#endif
-
-	return;
-
-	}
-
-
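dgemm_nt_libstr above is the strmat front end for dgemm_nt_lib at the top of the file: it pulls the panel pointers and panel strides (the cn fields) out of the structures, splits each row index into a panel-aligned part and a remainder within the 4-row panel (air = ai & (ps-1), and likewise bir, offsetC, offsetD), takes the plain dgemm_nt_lib fast path when every offset is zero, and otherwise uses the _gen_ kernels, which accept explicit row and column offsets. The address arithmetic corresponds to the following panel-major element address (a sketch of the convention implied by the indexing above; the helper name is illustrative):

// Element (i, j) of a panel-major matrix: panels are ps rows tall and stored
// one after another with a stride of cn doubles; within a panel, each column
// is a contiguous group of ps values.
static inline double *panel_major_elem(double *pA, int ps, int cn, int i, int j)
	{
	return pA + (i/ps)*ps*cn + j*ps + i%ps;
	}

With ps = 4, air = ai & (ps-1) is i % ps for the first requested row, and sA->pA + aj*ps + (ai-air)*sda points at the top of the panel that contains it, which is how pA and pB are formed above; the row parts of pC and pD are applied afterwards through ci0, di0, offsetC and offsetD.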
-
-// dgemm nn
-void dgemm_nn_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int ps = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	int air = ai & (ps-1);
-	int bir = bi & (ps-1);
-	double *pA = sA->pA + aj*ps + (ai-air)*sda;
-	double *pB = sB->pA + bj*ps + (bi-bir)*sdb;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-
-	int offsetB = bir;
-
-	int ci0 = ci-air;
-	int di0 = di-air;
-	int offsetC;
-	int offsetD;
-	if(ci0>=0)
-		{
-		pC += ci0/ps*ps*sdc;
-		offsetC = ci0%ps;
-		}
-	else
-		{
-		pC += -4*sdc;
-		offsetC = ps+ci0;
-		}
-	if(di0>=0)
-		{
-		pD += di0/ps*ps*sdd;
-		offsetD = di0%ps;
-		}
-	else
-		{
-		pD += -4*sdd;
-		offsetD = ps+di0;
-		}
-	
-	int i, j, l;
-
-	// clean up at the beginning
-	if(air!=0)
-		{
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-		if(m>5)
-			{
-			j = 0;
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[0], sda, offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps], sdc, offsetD, &pD[j*ps], sdd, air, air+m, 0, n-j);
-				}
-			m -= 2*ps-air;
-			pA += 2*ps*sda;
-			pC += 2*ps*sdc;
-			pD += 2*ps*sdd;
-			}
-		else // m-i<=4
-			{
-#endif
-			j = 0;
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[0], offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps], sdc, offsetD, &pD[j*ps], sdd, air, air+m, 0, n-j);
-				}
-			m -= ps-air;
-			pA += ps*sda;
-			pC += ps*sdc;
-			pD += ps*sdd;
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-			// nothing more to do
-			return;
-			}
-#endif
-		}
-	// main loop
-	i = 0;
-	if(offsetC==0 & offsetD==0)
-		{
-#if defined(TARGET_X64_INTEL_HASWELL)
-		for(; i<m-11; i+=12)
-			{
-			j = 0;
-			for(; j<n-3; j+=4)
-				{
-				kernel_dgemm_nn_12x4_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			if(j<n)
-				{
-//				kernel_dgemm_nn_12x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, 0, &pC[j*ps+i*sdc], sdc, 0, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-				kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, 0, &pC[j*ps+i*sdc], sdc, 0, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[(i+8)*sda], offsetB, &pB[j*ps], sdb, &beta, 0, &pC[j*ps+(i+8)*sdc], sdc, 0, &pD[j*ps+(i+8)*sdd], sdd, 0, m-(i+8), 0, n-j);
-				}
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else if(m-i<=8)
-				{
-				goto left_8;
-				}
-			else
-				{
-				goto left_12;
-				}
-			}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-7; i+=8)
-			{
-			j = 0;
-			for(; j<n-3; j+=4)
-				{
-				kernel_dgemm_nn_8x4_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			if(j<n)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, 0, &pC[j*ps+i*sdc], sdc, 0, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else
-				{
-				goto left_8;
-				}
-			}
-#else
-		for(; i<m-3; i+=4)
-			{
-			j = 0;
-			for(; j<n-3; j+=4)
-				{
-				kernel_dgemm_nn_4x4_lib4(k, &alpha, &pA[i*sda], offsetB, &pB[j*ps], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-				}
-			if(j<n)
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[i*sda], offsetB, &pB[j*ps], sdb, &beta, 0, &pC[j*ps+i*sdc], sdc, 0, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			}
-		if(m>i)
-			{
-			goto left_4;
-			}
-#endif
-		}
-	else
-		{
-// TODO 12x4
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-4; i+=8)
-			{
-			j = 0;
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			}
-		if(m>i)
-			{
-			goto left_4;
-			}
-#else
-		for(; i<m; i+=4)
-			{
-			j = 0;
-			for(; j<n; j+=4)
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[i*sda], offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			}
-#endif
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-//		kernel_dgemm_nn_12x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*sdb], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*sdb], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[(i+8)*sda], offsetB, &pB[j*sdb], sdb, &beta, offsetC, &pC[j*ps+(i+8)*sdc], sdc, offsetD, &pD[j*ps+(i+8)*sdd], sdd, 0, m-(i+8), 0, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		}
-	return;
-#endif
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4(k, &alpha, &pA[i*sda], offsetB, &pB[j*ps], sdb, &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		}
-	return;
-
-	}
-	
-
-
-// dtrsm_nn_llu
-void dtrsm_llnu_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\ndtrsm_llnu_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int ps = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	dtrsm_nn_ll_one_lib(m, n, pA, sda, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrsm_nn_lun
-void dtrsm_lunn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\ndtrsm_lunn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int ps = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dA = sA->dA;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsm_nn_lu_inv_lib(m, n, pA, sda, dA, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrsm_right_lower_transposed_notunit
-void dtrsm_rltn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\ndtrsm_rltn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	// TODO alpha !!!!!
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dA = sA->dA;
-
-	int i, j;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(i=0; i<n; i++)
-				dA[i] = 1.0 / dA[i];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(i=0; i<n; i++)
-			dA[i] = 1.0 / dA[i];
-		sA->use_dA = 0;
-		}
-
-//	dtrsm_nt_rl_inv_lib(m, n, pA, sda, dA, pB, sdb, pD, sdd); 
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_12x4_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_8x4_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], &dA[j]);
-			}
-		if(j<n)
-			{
-			kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<n-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], sda, &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], sda, &dA[j], m-i, n-j);
-		kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4((j+4), &pD[i*sdd], sdd, &pA[(j+4)*sda], sda, &pB[(j+4)*ps+i*sdb], sdb, &pD[(j+4)*ps+i*sdd], sdd, &pA[(j+4)*ps+(j+4)*sda], sda, &dA[(j+4)], m-i, n-(j+4));
-		}
-	if(j<n-4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], sda, &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], sda, &dA[j], m-i, n-j);
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4((j+4), &pD[i*sdd], &pA[(j+4)*sda], &pB[(j+4)*ps+i*sdb], &pD[(j+4)*ps+i*sdd], &pA[(j+4)*ps+(j+4)*sda], &dA[(j+4)], m-i, n-(j+4));
-		j += 8;
-		}
-	else if(j<n)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-		j += 4;
-		}
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pA[j*sda], &pB[j*ps+i*sdb], sdb, &pD[j*ps+i*sdd], sdd, &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<n-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_4x12_vs_lib4(j, &pD[i*sdd], &pA[j*sda], sda, &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], sda, &dA[j], m-i, n-j);
-		}
-	if(j<n-4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x8_vs_lib4(j, &pD[i*sdd], &pA[j*sda], sda, &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], sda, &dA[j], m-i, n-j);
-		j += 8;
-		}
-	else if(j<n)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-		j += 4;
-		}
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*ps+i*sdb], &pD[j*ps+i*sdd], &pA[j*ps+j*sda], &dA[j], m-i, n-j);
-		}
-	return;
-#endif
-
-	}
-
-
-
-// dtrsm_right_lower_transposed_unit
-void dtrsm_rltu_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\ndtrsm_rltu_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int ps = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	dtrsm_nt_rl_one_lib(m, n, pA, sda, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrsm_right_upper_transposed_notunit
-void dtrsm_rutn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\ndtrsm_rutn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int ps = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dA = sA->dA;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		ddiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	dtrsm_nt_ru_inv_lib(m, n, pA, sda, dA, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrmm_right_upper_transposed_notunit (B, i.e. the first matrix, is triangular !!!)
-void dtrmm_rutn_libstr(int m, int n, double alpha, struct d_strmat *sB, int bi, int bj, struct d_strmat *sA, int ai, int aj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0)
-		{
-		printf("\ndtrmm_rutn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d\n", ai, bi, di);
-		exit(1);
-		}
-	const int ps = 4;
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pD = sD->pA + dj*ps;
-	dtrmm_nt_ru_lib(m, n, alpha, pA, sda, pB, sdb, 0.0, pD, sdd, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrmm_right_lower_nottransposed_notunit (B, i.e. the first matrix, is triangular !!!)
-void dtrmm_rlnn_libstr(int m, int n, double alpha, struct d_strmat *sB, int bi, int bj, struct d_strmat *sA, int ai, int aj, struct d_strmat *sD, int di, int dj)
-	{
-
-	const int ps = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	int air = ai & (ps-1);
-	int bir = bi & (ps-1);
-	double *pA = sA->pA + aj*ps + (ai-air)*sda;
-	double *pB = sB->pA + bj*ps + (bi-bir)*sdb;
-	double *pD = sD->pA + dj*ps;
-
-	int offsetB = bir;
-
-	int di0 = di-air;
-	int offsetD;
-	if(di0>=0)
-		{
-		pD += di0/ps*ps*sdd;
-		offsetD = di0%ps;
-		}
-	else
-		{
-		pD += -4*sdd;
-		offsetD = ps+di0;
-		}
-	
-	int ii, jj;
-
-	if(air!=0)
-		{
-		jj = 0;
-		for(; jj<n; jj+=4)
-			{
-			kernel_dtrmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, offsetD, &pD[jj*ps], sdd, air, air+m, 0, n-jj);
-			}
-		m -= ps-air;
-		pA += ps*sda;
-		pD += ps*sdd;
-		}
-	ii = 0;
-	if(offsetD==0)
-		{
-#if defined(TARGET_X64_INTEL_HASWELL)
-		for(; ii<m-11; ii+=12)
-			{
-			jj = 0;
-			for(; jj<n-5; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_12x4_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd); // n-j>=6 !!!!!
-				}
-			for(; jj<n; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_12x4_vs_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd, 12, n-jj);
-//				kernel_dtrmm_nn_rl_8x4_vs_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd, 8, n-jj);
-//				kernel_dtrmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[(ii+8)*sda+jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, 0, &pD[(ii+8)*sdd+jj*ps], sdd, 0, 4, 0, n-jj);
-				}
-			}
-		if(ii<m)
-			{
-			if(ii<m-8)
-				goto left_12;
-			else if(ii<m-4)
-				goto left_8;
-			else
-				goto left_4_gen;
-			}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; ii<m-7; ii+=8)
-			{
-			jj = 0;
-			for(; jj<n-5; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_8x4_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd);
-				}
-			for(; jj<n; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_8x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, 0, &pD[ii*sdd+jj*ps], sdd, 0, 8, 0, n-jj);
-				}
-			}
-		if(ii<m)
-			{
-			if(ii<m-4)
-				goto left_8_gen;
-			else
-				goto left_4_gen;
-			}
-#else
-		for(; ii<m-3; ii+=4)
-			{
-			jj = 0;
-			for(; jj<n-5; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_4x4_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps]);
-				}
-			for(; jj<n; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, 0, &pD[ii*sdd+jj*ps], sdd, 0, 4, 0, n-jj);
-				}
-			}
-		if(ii<m)
-			{
-			goto left_4_gen;
-			}
-#endif
-		}
-	else
-		{
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; ii<m-4; ii+=8)
-			{
-			jj = 0;
-			for(; jj<n; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_8x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, offsetD, &pD[ii*sdd+jj*ps], sdd, 0, m-ii, 0, n-jj);
-				}
-			}
-		if(ii<m)
-			{
-			goto left_4_gen;
-			}
-#else
-		for(; ii<m; ii+=4)
-			{
-			jj = 0;
-			for(; jj<n; jj+=4)
-				{
-				kernel_dtrmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, offsetD, &pD[ii*sdd+jj*ps], sdd, 0, m-ii, 0, n-jj);
-				}
-			}
-#endif
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	jj = 0;
-	for(; jj<n; jj+=4)
-		{
-		kernel_dtrmm_nn_rl_12x4_vs_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd, m-ii, n-jj);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	jj = 0;
-	for(; jj<n; jj+=4)
-		{
-		kernel_dtrmm_nn_rl_8x4_vs_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, &pD[ii*sdd+jj*ps], sdd, m-ii, n-jj);
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8_gen:
-	jj = 0;
-	for(; jj<n; jj+=4)
-		{
-		kernel_dtrmm_nn_rl_8x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], sda, offsetB, &pB[jj*sdb+jj*ps], sdb, offsetD, &pD[ii*sdd+jj*ps], sdd, 0, m-ii, 0, n-jj);
-		}
-	return;
-#endif
-
-	left_4_gen:
-	jj = 0;
-	for(; jj<n; jj+=4)
-		{
-		kernel_dtrmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*ps], offsetB, &pB[jj*sdb+jj*ps], sdb, offsetD, &pD[ii*sdd+jj*ps], sdd, 0, m-ii, 0, n-jj);
-		}
-	return;
-
-	}
-
-
-
-void dsyrk_ln_libstr(int m, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-	
-	if(m<=0)
-		return;
-
-	if(ai!=0 | bi!=0)
-		{
-		printf("\ndsyrk_ln_libstr: feature not implemented yet: ai=%d, bi=%d\n", ai, bi);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	int i, j;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pC = sC->pA + cj*ps + (ci-(ci&(ps-1)))*sdc;
-	double *pD = sD->pA + dj*ps + (di-(di&(ps-1)))*sdd;
-
-	// TODO ai and bi
-	int offsetC;
-	int offsetD;
-	offsetC = ci&(ps-1);
-	offsetD = di&(ps-1);
-
-	// main loop
-	i = 0;
-	if(offsetC==0 & offsetD==0)
-		{
-#if defined(TARGET_X64_INTEL_HASWELL)
-		for(; i<m-11; i+=12)
-			{
-			j = 0;
-			for(; j<i; j+=4)
-				{
-				kernel_dgemm_nt_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			kernel_dsyrk_nt_l_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			kernel_dsyrk_nt_l_8x8_lib4(k, &alpha, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd);
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else if(m-i<=8)
-				{
-				goto left_8;
-				}
-			else
-				{
-				goto left_12;
-				}
-			}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-7; i+=8)
-			{
-			j = 0;
-			for(; j<i; j+=4)
-				{
-				kernel_dgemm_nt_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			kernel_dsyrk_nt_l_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-			kernel_dsyrk_nt_l_4x4_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd]);
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else
-				{
-				goto left_8;
-				}
-			}
-#else
-		for(; i<m-3; i+=4)
-			{
-			j = 0;
-			for(; j<i; j+=4)
-				{
-				kernel_dgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-				}
-			kernel_dsyrk_nt_l_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-			}
-		if(m>i)
-			{
-			goto left_4;
-			}
-#endif
-		}
-	else
-		{
-#if defined(TARGET_X64_INTEL_HASWELL) | defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-4; i+=8)
-			{
-			j = 0;
-			for(; j<i; j+=4)
-				{
-				kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-				}
-			kernel_dsyrk_nt_l_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-			kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, offsetC, &pC[(j+4)*ps+(i+4)*sdc], sdc, offsetD, &pD[(j+4)*ps+(i+4)*sdd], sdd, 0, m-i-4, 0, m-j-4);
-			}
-		if(m>i)
-			{
-			goto left_4_gen;
-			}
-#else
-		for(; i<m; i+=4)
-			{
-			j = 0;
-			for(; j<i; j+=4)
-				{
-				kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-				}
-			kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-			}
-#endif
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dgemm_nt_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-		}
-	kernel_dsyrk_nt_l_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-	kernel_dsyrk_nt_l_8x8_vs_lib4(k, &alpha, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, m-i-4, m-j-4);
-//	kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], m-i-8, n-j-8);
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<i-8; j+=12)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-		kernel_dgemm_nt_8x8u_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, m-i, m-(j+4));
-		}
-	if(j<i-4)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], m-i, m-(j+4));
-		j += 8;
-		}
-	else if(j<i)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-		j += 4;
-		}
-	kernel_dsyrk_nt_l_8x8_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-//	kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, n-j-4);
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-		}
-	kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, m-j);
-	kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, m-j-4);
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<i-8; j+=12)
-		{
-		kernel_dgemm_nt_4x12_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-		}
-	if(j<i-4)
-		{
-		kernel_dgemm_nt_4x8_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-		j += 8;
-		}
-	else if(j<i)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-		j += 4;
-		}
-	kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-		}
-	kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, m-j);
-	return;
-#endif
-
-	left_4_gen:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-		}
-	kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, m-j);
-	return;
-
-	}
-
-
-
-void dsyrk_ln_mn_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-	
-	if(m<=0 | n<=0)
-		return;
-
-	if(ai!=0 | bi!=0)
-		{
-		printf("\ndsyrk_ln_mn_libstr: feature not implemented yet: ai=%d, bi=%d\n", ai, bi);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	int i, j;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pC = sC->pA + cj*ps + (ci-(ci&(ps-1)))*sdc;
-	double *pD = sD->pA + dj*ps + (di-(di&(ps-1)))*sdd;
-
-	// TODO ai and bi
-	int offsetC;
-	int offsetD;
-	offsetC = ci&(ps-1);
-	offsetD = di&(ps-1);
-
-	// main loop
-	i = 0;
-	if(offsetC==0 & offsetD==0)
-		{
-#if defined(TARGET_X64_INTEL_HASWELL)
-		for(; i<m-11; i+=12)
-			{
-			j = 0;
-			for(; j<i & j<n-3; j+=4)
-				{
-				kernel_dgemm_nt_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			if(j<n)
-				{
-				if(j<i) // dgemm
-					{
-					kernel_dgemm_nt_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-					}
-				else // dsyrk
-					{
-					if(j<n-11)
-						{
-						kernel_dsyrk_nt_l_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-						kernel_dsyrk_nt_l_8x8_lib4(k, &alpha, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd);
-						}
-					else
-						{
-						kernel_dsyrk_nt_l_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-						if(j<n-4)
-							{
-							kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, m-i-4, n-j-4);
-							if(j<n-8)
-								{
-								kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], m-i-8, n-j-8);
-								}
-							}
-						}
-					}
-				}
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else if(m-i<=8)
-				{
-				goto left_8;
-				}
-			else
-				{
-				goto left_12;
-				}
-			}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-7; i+=8)
-			{
-			j = 0;
-			for(; j<i & j<n-3; j+=4)
-				{
-				kernel_dgemm_nt_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-				}
-			if(j<n)
-				{
-				if(j<i) // dgemm
-					{
-					kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-					}
-				else // dsyrk
-					{
-					if(j<n-7)
-						{
-						kernel_dsyrk_nt_l_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd);
-						kernel_dsyrk_nt_l_4x4_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd]);
-						}
-					else
-						{
-						kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-						if(j<n-4)
-							{
-							kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, n-j-4);
-							}
-						}
-					}
-				}
-			}
-		if(m>i)
-			{
-			if(m-i<=4)
-				{
-				goto left_4;
-				}
-			else
-				{
-				goto left_8;
-				}
-			}
-#else
-		for(; i<m-3; i+=4)
-			{
-			j = 0;
-			for(; j<i & j<n-3; j+=4)
-				{
-				kernel_dgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-				}
-			if(j<n)
-				{
-				if(j<i) // dgemm
-					{
-					kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-					}
-				else // dsyrk
-					{
-					if(j<n-3)
-						{
-						kernel_dsyrk_nt_l_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd]);
-						}
-					else
-						{
-						kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-						}
-					}
-				}
-			}
-		if(m>i)
-			{
-			goto left_4;
-			}
-#endif
-		}
-	else
-		{
-#if defined(TARGET_X64_INTEL_HASWELL) | defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; i<m-4; i+=8)
-			{
-			j = 0;
-			for(; j<i & j<n; j+=4)
-				{
-				kernel_dgemm_nt_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			if(j<n)
-				{
-				kernel_dsyrk_nt_l_8x4_gen_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				if(j<n-4)
-					{
-					kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, offsetC, &pC[(j+4)*ps+(i+4)*sdc], sdc, offsetD, &pD[(j+4)*ps+(i+4)*sdd], sdd, 0, m-i-4, 0, n-j-4);
-					}
-				}
-			}
-		if(m>i)
-			{
-			goto left_4_gen;
-			}
-#else
-		for(; i<m; i+=4)
-			{
-			j = 0;
-			for(; j<i & j<n; j+=4)
-				{
-				kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			if(j<n)
-				{
-				kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-				}
-			}
-#endif
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_nt_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_12x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, m-i-4, n-j-4);
-			if(j<n-8)
-				{
-				kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], m-i-8, n-j-8);
-				}
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		kernel_dgemm_nt_8x8u_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[(j+4)*sdb], sdb, &beta, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<i-4 & j<n-4)
-		{
-		kernel_dgemm_nt_8x8l_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], m-i, n-(j+4));
-		j += 8;
-		}
-	if(j<i & j<n)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, n-j-4);
-			}
-		}
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_8x4_vs_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[(j+4)*sdb], &beta, &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], m-i-4, n-j-4);
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dgemm_nt_4x12_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	if(j<i-4 & j<n-4)
-		{
-		kernel_dgemm_nt_4x8_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], sdb, &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		j += 8;
-		}
-	else if(j<i & j<n)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], m-i, n-j);
-		}
-	return;
-#endif
-
-	left_4_gen:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_nt_l_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, offsetC, &pC[j*ps+i*sdc], sdc, offsetD, &pD[j*ps+i*sdd], sdd, 0, m-i, 0, n-j);
-		}
-	return;
-
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
diff --git a/third_party/blasfeo/blas/d_blas_64.h b/third_party/blasfeo/blas/d_blas_64.h
deleted file mode 100644
index 8e6aba2..0000000
--- a/third_party/blasfeo/blas/d_blas_64.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// headers to reference BLAS and LAPACK routines employed in BLASFEO WR
-
-// level 1
-void dcopy_(long long *m, double *x, long long *incx, double *y, long long *incy);
-void daxpy_(long long *m, double *alpha, double *x, long long *incx, double *y, long long *incy);
-void dscal_(long long *m, double *alpha, double *x, long long *incx);
-
-// level 2
-void dgemv_(char *ta, long long *m, long long *n, double *alpha, double *A, long long *lda, double *x, long long *incx, double *beta, double *y, long long *incy);
-void dsymv_(char *uplo, long long *m, double *alpha, double *A, long long *lda, double *x, long long *incx, double *beta, double *y, long long *incy);
-void dtrmv_(char *uplo, char *trans, char *diag, long long *n, double *A, long long *lda, double *x, long long *incx);
-void dtrsv_(char *uplo, char *trans, char *diag, long long *n, double *A, long long *lda, double *x, long long *incx);
-void dger_(long long *m, long long *n, double *alpha, double *x, long long *incx, double *y, long long *incy, double *A, long long *lda);
-
-// level 3
-void dgemm_(char *ta, char *tb, long long *m, long long *n, long long *k, double *alpha, double *A, long long *lda, double *B, long long *ldb, double *beta, double *C, long long *ldc);
-void dsyrk_(char *uplo, char *trans, long long *n, long long *k, double *alpha, double *A, long long *lda, double *beta, double *C, long long *ldc);
-void dtrmm_(char *side, char *uplo, char *trans, char *diag, long long *m, long long *n, double *alpha, double *A, long long *lda, double *B, long long *ldb);
-void dtrsm_(char *side, char *uplo, char *trans, char *diag, long long *m, long long *n, double *alpha, double *A, long long *lda, double *B, long long *ldb);
-
-// lapack
-long long dpotrf_(char *uplo, long long *m, double *A, long long *lda, long long *info);
-long long dgetrf_(long long *m, long long *n, double *A, long long *lda, long long *ipiv, long long *info);
-void dgeqrf_(long long *m, long long *n, double *A, long long *lda, double *tau, double *work, long long *lwork, long long *info);
-void dgeqr2_(long long *m, long long *n, double *A, long long *lda, double *tau, double *work, long long *info);
-
-
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/third_party/blasfeo/blas/d_lapack_lib.c b/third_party/blasfeo/blas/d_lapack_lib.c
deleted file mode 100644
index ce68c3d..0000000
--- a/third_party/blasfeo/blas/d_lapack_lib.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#if defined(LA_BLAS)
-#if defined(REF_BLAS_BLIS)
-#include "d_blas_64.h"
-#else
-#include "d_blas.h"
-#endif
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_aux.h"
-
-
-
-#define REAL double
-
-#define STRMAT d_strmat
-#define STRVEC d_strvec
-
-#define GELQF_LIBSTR dgelqf_libstr
-#define GELQF_WORK_SIZE_LIBSTR dgelqf_work_size_libstr
-#define GEQRF_LIBSTR dgeqrf_libstr
-#define GEQRF_WORK_SIZE_LIBSTR dgeqrf_work_size_libstr
-#define GETF2_NOPIVOT dgetf2_nopivot
-#define GETRF_NOPIVOT_LIBSTR dgetrf_nopivot_libstr
-#define GETRF_LIBSTR dgetrf_libstr
-#define POTRF_L_LIBSTR dpotrf_l_libstr
-#define POTRF_L_MN_LIBSTR dpotrf_l_mn_libstr
-#define SYRK_POTRF_LN_LIBSTR dsyrk_dpotrf_ln_libstr
-
-#define COPY dcopy_
-#define GELQF dgelqf_
-#define GEMM dgemm_
-#define GER dger_
-#define GEQRF dgeqrf_
-#define GEQR2 dgeqr2_
-#define GETRF dgetrf_
-#define POTRF dpotrf_
-#define SCAL dscal_
-#define SYRK dsyrk_
-#define TRSM dtrsm_
-
-
-#include "x_lapack_lib.c"
diff --git a/third_party/blasfeo/blas/d_lapack_lib4.c b/third_party/blasfeo/blas/d_lapack_lib4.c
deleted file mode 100644
index 75a4a4f..0000000
--- a/third_party/blasfeo/blas/d_lapack_lib4.c
+++ /dev/null
@@ -1,2671 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-/****************************
-* old interface
-****************************/
-
-
-
-void dgetrf_nn_nopivot_lib(int m, int n, double *pC, int sdc, double *pD, int sdd, double *inv_diag_D)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int ps = 4;
-
-	int ii, jj, ie;
-
-	// main loop
-	ii = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for( ; ii<m-11; ii+=12)
-		{
-		jj = 0;
-		// solve lower
-		ie = n<ii ? n : ii; // ie is multiple of 4
-		for( ; jj<ie-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ru_inv_12x4_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj]);
-			}
-		if(jj<ie)
-			{
-			kernel_dtrsm_nn_ru_inv_12x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-			jj+=4;
-			}
-		// factorize
-		if(jj<n-3)
-			{
-			kernel_dgetrf_nn_l_12x4_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dgetrf_nn_l_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-			jj+=4;
-			}
-		if(jj<n-3)
-			{
-			kernel_dgetrf_nn_m_12x4_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dgetrf_nn_m_12x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-			jj+=4;
-			}
-		if(jj<n-3)
-			{
-			kernel_dgetrf_nn_r_12x4_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dgetrf_nn_r_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-			jj+=4;
-			}
-		// solve upper
-		for( ; jj<n-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd);
-			}
-		if(jj<n)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, m-ii, n-jj);
-			}
-		}
-	if(m>ii)
-		{
-		if(m-ii<=4)
-			{
-			goto left_4;
-			}
-		else if(m-ii<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for( ; ii<m-7; ii+=8)
-		{
-		jj = 0;
-		// solve lower
-		ie = n<ii ? n : ii; // ie is multiple of 4
-		for( ; jj<ie-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ru_inv_8x4_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj]);
-			}
-		if(jj<ie)
-			{
-			kernel_dtrsm_nn_ru_inv_8x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-			jj+=4;
-			}
-		// factorize
-		if(jj<n-3)
-			{
-			kernel_dgetrf_nn_l_8x4_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj]);
-//			kernel_dgetrf_nn_4x4_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj]);
-//			kernel_dtrsm_nn_ru_inv_4x4_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dgetrf_nn_l_8x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-//			kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-//			kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-(ii+4), n-jj);
-			jj+=4;
-			}
-		if(jj<n-3)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd]);
-			kernel_dgetrf_nn_4x4_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd], m-ii, n-jj);
-			kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &inv_diag_D[jj], m-(ii+4), n-jj);
-			jj+=4;
-			}
-		// solve upper
-		for( ; jj<n-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd],sdd,  &pD[ii*ps+ii*sdd], sdd);
-			}
-		if(jj<n)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, m-ii, n-jj);
-			}
-		}
-	if(m>ii)
-		{
-		if(m-ii<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for( ; ii<m-3; ii+=4)
-		{
-		jj = 0;
-		// solve lower
-		ie = n<ii ? n : ii; // ie is multiple of 4
-		for( ; jj<ie-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ru_inv_4x4_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj]);
-			}
-		if(jj<ie)
-			{
-			kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-			jj+=4;
-			}
-		// factorize
-		if(jj<n-3)
-			{
-			kernel_dgetrf_nn_4x4_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-			jj+=4;
-			}
-		// solve upper
-		for( ; jj<n-3; jj+=4)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd]);
-			}
-		if(jj<n)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd], m-ii, n-jj);
-			}
-		}
-	if(m>ii)
-		{
-		goto left_4;
-		}
-
-#endif
-
-	// common return if i==m
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	jj = 0;
-	// solve lower
-	ie = n<ii ? n : ii; // ie is multiple of 4
-	for( ; jj<ie; jj+=4)
-		{
-		kernel_dtrsm_nn_ru_inv_12x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-		}
-	// factorize
-	if(jj<n)
-		{
-		kernel_dgetrf_nn_l_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-		jj+=4;
-		}
-	if(jj<n)
-		{
-		kernel_dgetrf_nn_l_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-		jj+=4;
-		}
-	if(jj<n)
-		{
-		kernel_dgetrf_nn_r_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-		jj+=4;
-		}
-	// solve upper
-	for( ; jj<n; jj+=4)
-		{
-		kernel_dtrsm_nn_ll_one_12x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, m-ii, n-jj);
-		}
-	return;
-
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	jj = 0;
-	// solve lower
-	ie = n<ii ? n : ii; // ie is multiple of 4
-	for( ; jj<ie; jj+=4)
-		{
-		kernel_dtrsm_nn_ru_inv_8x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-		}
-	// factorize
-	if(jj<n)
-		{
-		kernel_dgetrf_nn_l_8x4_vs_lib4(jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &inv_diag_D[jj], m-ii, n-jj);
-//		kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-//		kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-(ii+4), n-jj);
-		jj+=4;
-		}
-	if(jj<n)
-		{
-		kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd], m-ii, n-jj);
-		kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[(ii+4)*sdd], &pD[jj*ps], sdd, &pC[jj*ps+(ii+4)*sdc], &pD[jj*ps+(ii+4)*sdd], &inv_diag_D[jj], m-(ii+4), n-jj);
-		jj+=4;
-		}
-	// solve upper
-	for( ; jj<n; jj+=4)
-		{
-		kernel_dtrsm_nn_ll_one_8x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], sdc, &pD[jj*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, m-ii, n-jj);
-		}
-	return;
-
-#endif
-
-	left_4:
-	jj = 0;
-	// solve lower
-	ie = n<ii ? n : ii; // ie is multiple of 4
-	for( ; jj<ie; jj+=4)
-		{
-		kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[jj*ps+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-		}
-	// factorize
-	if(jj<n)
-		{
-		kernel_dgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-		jj+=4;
-		}
-	// solve upper
-	for( ; jj<n; jj+=4)
-		{
-		kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*ps], sdd, &pC[jj*ps+ii*sdc], &pD[jj*ps+ii*sdd], &pD[ii*ps+ii*sdd], m-ii, n-jj);
-		}
-	return;
-
-	}
-
-
-
-void dgetrf_nn_lib(int m, int n, double *pC, int sdc, double *pD, int sdd, double *inv_diag_D, int *ipiv)
-	{
-
-	if(m<=0)
-		return;
-
-	const int ps = 4;
-
-	int ii, jj, i0, i1, j0, ll, p;
-
-	double d1 = 1.0;
-	double dm1 = -1.0;
-
-	// needs to perform row-exchanges on the yet-to-be-factorized matrix too
-	if(pC!=pD)
-		dgecp_lib(m, n, 1.0, 0, pC, sdc, 0, pD, sdd);
-
-	// minimum matrix size
-	p = n<m ? n : m; // XXX
-
-	// main loop
-#if defined(TARGET_X64_INTEL_HASWELL)
-	// 12 columns at a time
-	jj = 0;
-	for(; jj<p-11; jj+=12)
-		{
-		// pivot & factorize & solve lower
-		// left block-column
-		ii = jj;
-		i0 = ii;
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4(jj, &dm1, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-		kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-		ipiv[i0+0] += i0;
-		if(ipiv[i0+0]!=i0+0)
-			{
-			drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+2] += i0;
-		if(ipiv[i0+2]!=i0+2)
-			{
-			drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+3] += i0;
-		if(ipiv[i0+3]!=i0+3)
-			{
-			drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-			}
-		// middle block-column
-		ii = i0;
-		kernel_dtrsm_nn_ll_one_4x4_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd]);
-		ii += 4;
-		i1 = ii;
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-		kernel_dgetrf_pivot_4_lib4(m-i1, &pD[(jj+4)*ps+i1*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i1]);
-		ipiv[i1+0] += i1;
-		if(ipiv[i1+0]!=i1+0)
-			{
-			drowsw_lib(jj+4, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+8)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+8)*ps);
-			}
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+4, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+8)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+8)*ps);
-			}
-		ipiv[i1+2] += i1;
-		if(ipiv[i1+2]!=i1+2)
-			{
-			drowsw_lib(jj+4, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+8)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+8)*ps);
-			}
-		ipiv[i1+3] += i1;
-		if(ipiv[i1+3]!=i1+3)
-			{
-			drowsw_lib(jj+4, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+8)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+8)*ps);
-			}
-		// right block-column
-		ii = i0;
-		kernel_dtrsm_nn_ll_one_8x4_lib4(ii, &pD[ii*sdd], sdd, &pD[(jj+8)*ps], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd);
-		ii += 8;
-		i1 = ii;
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4((jj+8), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+8)*ps], sdd, &d1, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4((jj+8), &dm1, &pD[ii*sdd], sdd, &pD[(jj+8)*ps], sdd, &d1, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4((jj+8), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+8)*ps], sdd, &d1, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4((jj+8), &dm1, &pD[ii*sdd], 0, &pD[(jj+8)*ps], sdd, &d1, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-		kernel_dgetrf_pivot_4_lib4(m-i1, &pD[(jj+8)*ps+i1*sdd], sdd, &inv_diag_D[(jj+8)], &ipiv[i1]);
-		ipiv[i1+0] += i1;
-		if(ipiv[i1+0]!=i1+0)
-			{
-			drowsw_lib(jj+8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+12)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+12)*ps);
-			}
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+12)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+12)*ps);
-			}
-		ipiv[i1+2] += i1;
-		if(ipiv[i1+2]!=i1+2)
-			{
-			drowsw_lib(jj+8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+12)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+12)*ps);
-			}
-		ipiv[i1+3] += i1;
-		if(ipiv[i1+3]!=i1+3)
-			{
-			drowsw_lib(jj+8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+12)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+12)*ps);
-			}
-
-		// solve upper
-//		i0 -= 8; // 4 ???
-		ll = jj+12;
-		for( ; ll<n-3; ll+=4)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd);
-			}
-		if(ll<n)
-			{
-			kernel_dtrsm_nn_ll_one_12x4_vs_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd, 12, n-ll);
-			}
-		}
-	if(m>=n)
-		{
-		if(n-jj>0)
-			{
-			if(n-jj<=4)
-				goto left_n_4;
-			else if(n-jj<=8)
-				goto left_n_8;
-			else
-				goto left_n_12;
-			}
-		}
-	else // n>m
-		{
-		if(m-jj>0)
-			{
-			if(m-jj<=4)
-				goto left_m_4;
-			else if(m-jj<=8)
-				goto left_m_8;
-			else
-				goto left_m_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	// 8 columns at a time
-	jj = 0;
-	for(; jj<p-7; jj+=8)
-		{
-		// pivot & factorize & solve lower
-		// left block-column
-		ii = jj;
-		i0 = ii;
-#if defined(TARGET_X64_INTEL_HASWELL) // XXX
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4(jj, &dm1, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#else // SANDY_BRIDGE
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgemm_nn_8x4_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#endif
-		kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-		ipiv[i0+0] += i0;
-		if(ipiv[i0+0]!=i0+0)
-			{
-			drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+2] += i0;
-		if(ipiv[i0+2]!=i0+2)
-			{
-			drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+3] += i0;
-		if(ipiv[i0+3]!=i0+3)
-			{
-			drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-			}
-		// right block-column
-		ii = i0;
-		kernel_dtrsm_nn_ll_one_4x4_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd]);
-		ii += 4;
-		i0 = ii;
-#if defined(TARGET_X64_INTEL_HASWELL) // XXX
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#else // SANDY_BRIDGE
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgemm_nn_8x4_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#endif
-		kernel_dgetrf_pivot_4_lib4(m-i0, &pD[(jj+4)*ps+i0*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i0]);
-		ipiv[i0+0] += i0;
-		if(ipiv[i0+0]!=i0+0)
-			{
-			drowsw_lib(jj+4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+8)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+8)*ps);
-			}
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj+4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+8)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+8)*ps);
-			}
-		ipiv[i0+2] += i0;
-		if(ipiv[i0+2]!=i0+2)
-			{
-			drowsw_lib(jj+4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+8)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+8)*ps);
-			}
-		ipiv[i0+3] += i0;
-		if(ipiv[i0+3]!=i0+3)
-			{
-			drowsw_lib(jj+4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+8)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+8)*ps);
-			}
-
-		// solve upper
-		i0 -= 4;
-		ll = jj+8;
-		for( ; ll<n-3; ll+=4)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd);
-			}
-		if(ll<n)
-			{
-			kernel_dtrsm_nn_ll_one_8x4_vs_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd, 8, n-ll);
-			}
-		}
-	if(m>=n)
-		{
-		if(n-jj>0)
-			{
-			if(n-jj<=4) // (m>=1 && n==1) || (m>=2 && n==2) || m>=3 && n==3
-				{
-				goto left_n_4;
-				}
-			else // (m>=5 && n==5) || (m>=6 && n==6) || (m>=7 && n==7)
-				goto left_n_8;
-			}
-		}
-	else // n>m
-		{
-		if(m-jj>0)
-			{
-			if(m-jj<=4) // (m==1 && n>=2) || (m==2 && n>=3) || (m==3 && n>=4) || (m==4 && n>=5)
-				goto left_m_4;
-			else // (m==5 && n>=6) || (m==6 && n>=7) || (m==7 && n>=8)
-				{
-				goto left_m_8;
-				}
-			}
-		}
-#else
-	// 4 columns at a time
-	jj = 0;
-	for(; jj<p-3; jj+=4) // XXX
-		{
-		// pivot & factorize & solve lower
-		ii = jj;
-		i0 = ii;
-#if defined(TARGET_X64_INTEL_HASWELL) // XXX
-		for( ; ii<m-11; ii+=12)
-			{
-			kernel_dgemm_nn_12x4_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>8)
-				{
-				kernel_dgemm_nn_12x4_vs_lib4(jj, &dm1, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, m-ii, 4);
-				}
-			else if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE) // XXX
-		for( ; ii<m-7; ii+=8)
-			{
-			kernel_dgemm_nn_8x4_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd);
-			}
-		if(m-ii>0)
-			{
-			if(m-ii>4)
-				{
-				kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			else
-				{
-				kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-				}
-			}
-#else
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_dgemm_nn_4x4_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], &pD[jj*ps+ii*sdd]);
-			}
-		if(m-ii>0)
-			{
-			kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-			}
-#endif
-		kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-		ipiv[i0+0] += i0;
-		if(ipiv[i0+0]!=i0+0)
-			{
-			drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+2] += i0;
-		if(ipiv[i0+2]!=i0+2)
-			{
-			drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-			}
-		ipiv[i0+3] += i0;
-		if(ipiv[i0+3]!=i0+3)
-			{
-			drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-			}
-
-		// solve upper
-		ll = jj+4;
-		for( ; ll<n-3; ll+=4)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_lib4(i0, &pD[i0*sdd], &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], &pD[ll*ps+i0*sdd], &pD[i0*ps+i0*sdd]);
-			}
-		if(n-ll>0)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], &pD[ll*ps+i0*sdd], &pD[i0*ps+i0*sdd], 4, n-ll);
-			}
-		}
-	if(m>=n)
-		{
-		if(n-jj>0)
-			{
-			goto left_n_4;
-			}
-		}
-	else
-		{
-		if(m-jj>0)
-			{
-			goto left_m_4;
-			}
-		}
-#endif
-
-	// common return if jj==n
-	return;
-
-
-	// clean up
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_n_12:
-	// 9-12 columns at a time
-	// pivot & factorize & solve lower
-	// left block-column
-	ii = jj;
-	i0 = ii;
-	for( ; ii<m-8; ii+=12)
-		{
-		kernel_dgemm_nn_12x4_vs_lib4(jj, &dm1, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, m-ii, 4);
-		}
-	if(m-ii>4)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-//		ii+=8;
-		}
-	else if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-//		ii+=4;
-		}
-	kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+1] += i0;
-	if(ipiv[i0+1]!=i0+1)
-		{
-		drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+2] += i0;
-	if(ipiv[i0+2]!=i0+2)
-		{
-		drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+3] += i0;
-	if(ipiv[i0+3]!=i0+3)
-		{
-		drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-		}
-	// middle block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd], 4, n-jj-4);
-	ii += 4;
-	i1 = ii;
-	for( ; ii<m-8; ii+=12)
-		{
-		kernel_dgemm_nn_12x4_vs_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, &pD[(jj+4)*ps], sdd, &d1, &pD[(jj+4)*ps+ii*sdd], sdd, &pD[(jj+4)*ps+ii*sdd], sdd, m-ii, n-jj-4);
-		}
-	if(m-ii>4)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-4);
-		}
-	else if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-4);
-		}
-	kernel_dgetrf_pivot_4_vs_lib4(m-i1, n-jj-4, &pD[(jj+4)*ps+i1*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i1]);
-	ipiv[i1+0] += i1;
-	if(ipiv[i1+0]!=i1+0)
-		{
-		drowsw_lib(jj+4, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-		drowsw_lib(n-jj-8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+8)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+8)*ps);
-		}
-	if(n-jj-4>1)
-		{
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+4, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+8)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+8)*ps);
-			}
-		if(n-jj-4>2)
-			{
-			ipiv[i1+2] += i1;
-			if(ipiv[i1+2]!=i1+2)
-				{
-				drowsw_lib(jj+4, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-				drowsw_lib(n-jj-8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+8)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+8)*ps);
-				}
-			if(n-jj-4>3)
-				{
-				ipiv[i1+3] += i1;
-				if(ipiv[i1+3]!=i1+3)
-					{
-					drowsw_lib(jj+4, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-					drowsw_lib(n-jj-8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+8)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+8)*ps);
-					}
-				}
-			}
-		}
-	// right block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_8x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[(jj+8)*ps], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, 8, n-jj-8);
-	ii += 8;
-	i1 = ii;
-	for( ; ii<m-8; ii+=12)
-		{
-		kernel_dgemm_nn_12x4_vs_lib4((jj+8), &dm1, &pD[ii*sdd], sdd, &pD[(jj+8)*ps], sdd, &d1, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, m-ii, n-jj-8);
-		}
-	if(m-ii>4)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4((jj+8), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+8)*ps], sdd, &d1, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-8);
-		}
-	else if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4((jj+8), &dm1, &pD[ii*sdd], 0, &pD[(jj+8)*ps], sdd, &d1, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-8);
-		}
-	kernel_dgetrf_pivot_4_vs_lib4(m-i1, n-jj-8, &pD[(jj+8)*ps+i1*sdd], sdd, &inv_diag_D[(jj+8)], &ipiv[i1]);
-	ipiv[i1+0] += i1;
-	if(ipiv[i1+0]!=i1+0)
-		{
-		drowsw_lib(jj+8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-		drowsw_lib(n-jj-12, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+12)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+12)*ps);
-		}
-	if(n-jj-8>1)
-		{
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+12)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+12)*ps);
-			}
-		if(n-jj-8>2)
-			{
-			ipiv[i1+2] += i1;
-			if(ipiv[i1+2]!=i1+2)
-				{
-				drowsw_lib(jj+8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-				drowsw_lib(n-jj-12, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+12)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+12)*ps);
-				}
-			if(n-jj-8>3)
-				{
-				ipiv[i1+3] += i1;
-				if(ipiv[i1+3]!=i1+3)
-					{
-					drowsw_lib(jj+8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-					drowsw_lib(n-jj-12, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+12)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+12)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	// there is no upper
-	return;
-#endif
-
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_m_12:
-	// 9-12 rows at a time
-	// pivot & factorize & solve lower
-	// left block-column
-	ii = jj;
-	i0 = ii;
-	kernel_dgemm_nn_12x4_vs_lib4(jj, &dm1, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, &d1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, m-ii, 4);
-	kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+1] += i0;
-	if(ipiv[i0+1]!=i0+1)
-		{
-		drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+2] += i0;
-	if(ipiv[i0+2]!=i0+2)
-		{
-		drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+3] += i0;
-	if(ipiv[i0+3]!=i0+3)
-		{
-		drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-		}
-	// middle block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd], 4, n-jj-4);
-	ii += 4;
-	i1 = ii;
-	kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-4);
-	kernel_dgetrf_pivot_4_vs_lib4(m-i1, n-jj-4, &pD[(jj+4)*ps+i1*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i1]);
-	ipiv[i1+0] += i1;
-	if(ipiv[i1+0]!=i1+0)
-		{
-		drowsw_lib(jj+4, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-		drowsw_lib(n-jj-8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+8)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+8)*ps);
-		}
-	if(m-jj-4>1)
-		{
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+4, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+8)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+8)*ps);
-			}
-		if(m-jj-4>2)
-			{
-			ipiv[i1+2] += i1;
-			if(ipiv[i1+2]!=i1+2)
-				{
-				drowsw_lib(jj+4, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-				drowsw_lib(n-jj-8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+8)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+8)*ps);
-				}
-			if(m-jj-4>3)
-				{
-				ipiv[i1+3] += i1;
-				if(ipiv[i1+3]!=i1+3)
-					{
-					drowsw_lib(jj+4, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-					drowsw_lib(n-jj-8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+8)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+8)*ps);
-					}
-				}
-			}
-		}
-	// right block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_8x4_vs_lib4(ii, &pD[ii*sdd], sdd, &pD[(jj+8)*ps], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[(jj+8)*ps+ii*sdd], sdd, &pD[ii*ps+ii*sdd], sdd, 8, n-jj-8);
-	ii += 8;
-	i1 = ii;
-	kernel_dgemm_nn_4x4_gen_lib4((jj+8), &dm1, &pD[ii*sdd], 0, &pD[(jj+8)*ps], sdd, &d1, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, &pD[(jj+8)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-8);
-	kernel_dgetrf_pivot_4_vs_lib4(m-i1, n-jj-8, &pD[(jj+8)*ps+i1*sdd], sdd, &inv_diag_D[(jj+8)], &ipiv[i1]);
-	ipiv[i1+0] += i1;
-	if(ipiv[i1+0]!=i1+0)
-		{
-		drowsw_lib(jj+8, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps);
-		drowsw_lib(n-jj-12, pD+(i1+0)/ps*ps*sdd+(i1+0)%ps+(jj+12)*ps, pD+(ipiv[i1+0])/ps*ps*sdd+(ipiv[i1+0])%ps+(jj+12)*ps);
-		}
-	if(m-jj-8>1)
-		{
-		ipiv[i1+1] += i1;
-		if(ipiv[i1+1]!=i1+1)
-			{
-			drowsw_lib(jj+8, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps);
-			drowsw_lib(n-jj-12, pD+(i1+1)/ps*ps*sdd+(i1+1)%ps+(jj+12)*ps, pD+(ipiv[i1+1])/ps*ps*sdd+(ipiv[i1+1])%ps+(jj+12)*ps);
-			}
-		if(m-jj-8>2)
-			{
-			ipiv[i1+2] += i1;
-			if(ipiv[i1+2]!=i1+2)
-				{
-				drowsw_lib(jj+8, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps);
-				drowsw_lib(n-jj-12, pD+(i1+2)/ps*ps*sdd+(i1+2)%ps+(jj+12)*ps, pD+(ipiv[i1+2])/ps*ps*sdd+(ipiv[i1+2])%ps+(jj+12)*ps);
-				}
-			if(m-jj-8>3)
-				{
-				ipiv[i1+3] += i1;
-				if(ipiv[i1+3]!=i1+3)
-					{
-					drowsw_lib(jj+8, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps);
-					drowsw_lib(n-jj-12, pD+(i1+3)/ps*ps*sdd+(i1+3)%ps+(jj+12)*ps, pD+(ipiv[i1+3])/ps*ps*sdd+(ipiv[i1+3])%ps+(jj+12)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-//	i0 -= 8;
-	ll = jj+12;
-	for( ; ll<n; ll+=4)
-		{
-		kernel_dtrsm_nn_ll_one_12x4_vs_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd, m-i0, n-ll);
-		}
-	return;
-#endif
-
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_n_8:
-	// 5-8 columns at a time
-	// pivot & factorize & solve lower
-	// left block-column
-	ii = jj;
-	i0 = ii;
-	for( ; ii<m-4; ii+=8)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-		}
-	if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-//		ii+=4;
-		}
-	kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+1] += i0;
-	if(ipiv[i0+1]!=i0+1)
-		{
-		drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+2] += i0;
-	if(ipiv[i0+2]!=i0+2)
-		{
-		drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+3] += i0;
-	if(ipiv[i0+3]!=i0+3)
-		{
-		drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-		}
-	// right block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd], 4, n-jj-4);
-	ii += 4;
-	i0 = ii;
-	for( ; ii<m-4; ii+=8)
-		{
-		kernel_dgemm_nn_8x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], sdd, 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], 0, sdd, m-ii, 0, n-jj-4);
-		}
-	if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-4);
-		}
-	kernel_dgetrf_pivot_4_vs_lib4(m-i0, n-jj-4, &pD[(jj+4)*ps+i0*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj+4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-8, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+8)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+8)*ps);
-		}
-	if(n-jj-4>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj+4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+8)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+8)*ps);
-			}
-		if(n-jj-4>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				drowsw_lib(jj+4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-				drowsw_lib(n-jj-8, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+8)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+8)*ps);
-				}
-			if(n-jj-4>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					drowsw_lib(jj+4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-					drowsw_lib(n-jj-8, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+8)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+8)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	// there is no upper
-	return;
-#endif
-
-
-#if defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_m_8:
-	// 5-8 rows at a time
-	// pivot & factorize & solve lower
-	// left block-column
-	ii = jj;
-	i0 = ii;
-	kernel_dgemm_nn_8x4_gen_lib4(jj, &dm1, &pD[ii*sdd], sdd, 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, 4);
-	kernel_dgetrf_pivot_4_lib4(m-i0, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+1] += i0;
-	if(ipiv[i0+1]!=i0+1)
-		{
-		drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+2] += i0;
-	if(ipiv[i0+2]!=i0+2)
-		{
-		drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-		}
-	ipiv[i0+3] += i0;
-	if(ipiv[i0+3]!=i0+3)
-		{
-		drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-		}
-	// right block-column
-	ii = i0;
-	kernel_dtrsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[(jj+4)*ps], sdd, &pD[(jj+4)*ps+ii*sdd], &pD[(jj+4)*ps+ii*sdd], &pD[ii*ps+ii*sdd], 4, n-jj-4);
-	ii += 4;
-	i0 = ii;
-	kernel_dgemm_nn_4x4_gen_lib4((jj+4), &dm1, &pD[ii*sdd], 0, &pD[(jj+4)*ps], sdd, &d1, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, &pD[(jj+4)*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj-4);
-	kernel_dgetrf_pivot_4_vs_lib4(m-i0, n-jj-4, &pD[(jj+4)*ps+i0*sdd], sdd, &inv_diag_D[(jj+4)], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj+4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-8, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+8)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+8)*ps);
-		}
-	if(m-jj-4>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj+4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-8, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+8)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+8)*ps);
-			}
-		if(m-jj-4>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				drowsw_lib(jj+4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-				drowsw_lib(n-jj-8, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+8)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+8)*ps);
-				}
-			if(m-jj-4>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					drowsw_lib(jj+4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-					drowsw_lib(n-jj-8, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+8)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+8)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	i0 -= 4;
-	ll = jj+8;
-	for( ; ll<n; ll+=4)
-		{
-		kernel_dtrsm_nn_ll_one_8x4_vs_lib4(i0, &pD[i0*sdd], sdd, &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[ll*ps+i0*sdd], sdd, &pD[i0*ps+i0*sdd], sdd, m-i0, n-ll);
-		}
-	return;
-#endif
-
-
-	left_n_4:
-	// 1-4 columns at a time
-	// pivot & factorize & solve lower
-	ii = jj;
-	i0 = ii;
-#if 0//defined(TARGET_X64_AVX2) || defined(TARGET_X64_AVX)
-	for( ; ii<m-4; ii+=8)
-		{
-		kernel_dgemm_nn_8x4_vs_lib4(m-ii, n-jj, jj, &pD[ii*sdd], sdd, &pD[jj*ps], sdd, -1, &pD[jj*ps+ii*sdd], sdd, &pD[jj*ps+ii*sdd], sdd, 0, 0);
-		}
-	if(m-ii>0)
-		{
-		kernel_dgemm_nn_4x4_vs_lib4(m-ii, n-jj, jj, &pD[ii*sdd], &pD[jj*ps], sdd, -1, &pD[jj*ps+ii*sdd], &pD[jj*ps+ii*sdd], 0, 0);
-//		ii+=4;
-		}
-#else
-	for( ; ii<m; ii+=4)
-		{
-		kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj);
-		}
-#endif
-	kernel_dgetrf_pivot_4_vs_lib4(m-i0, n-jj, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	if(n-jj>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-			}
-		if(n-jj>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-				drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-				}
-			if(n-jj>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-					drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	if(0) // there is no upper
-		{
-		ll = jj+4;
-		for( ; ll<n; ll+=4)
-			{
-			kernel_dtrsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], &pD[ll*ps+i0*sdd], &pD[i0*ps+i0*sdd], m-i0, n-ll);
-			}
-		}
-	return;
-
-
-	left_m_4:
-	// 1-4 rows at a time
-	// pivot & factorize & solve lower
-	ii = jj;
-	i0 = ii;
-	kernel_dgemm_nn_4x4_gen_lib4(jj, &dm1, &pD[ii*sdd], 0, &pD[jj*ps], sdd, &d1, 0, &pD[jj*ps+ii*sdd], sdd, 0, &pD[jj*ps+ii*sdd], sdd, 0, m-ii, 0, n-jj);
-	kernel_dgetrf_pivot_4_vs_lib4(m-i0, n-jj, &pD[jj*ps+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		drowsw_lib(jj, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps);
-		drowsw_lib(n-jj-4, pD+(i0+0)/ps*ps*sdd+(i0+0)%ps+(jj+4)*ps, pD+(ipiv[i0+0])/ps*ps*sdd+(ipiv[i0+0])%ps+(jj+4)*ps);
-		}
-	if(m-i0>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			drowsw_lib(jj, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps);
-			drowsw_lib(n-jj-4, pD+(i0+1)/ps*ps*sdd+(i0+1)%ps+(jj+4)*ps, pD+(ipiv[i0+1])/ps*ps*sdd+(ipiv[i0+1])%ps+(jj+4)*ps);
-			}
-		if(m-i0>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				drowsw_lib(jj, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps);
-				drowsw_lib(n-jj-4, pD+(i0+2)/ps*ps*sdd+(i0+2)%ps+(jj+4)*ps, pD+(ipiv[i0+2])/ps*ps*sdd+(ipiv[i0+2])%ps+(jj+4)*ps);
-				}
-			if(m-i0>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					drowsw_lib(jj, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps);
-					drowsw_lib(n-jj-4, pD+(i0+3)/ps*ps*sdd+(i0+3)%ps+(jj+4)*ps, pD+(ipiv[i0+3])/ps*ps*sdd+(ipiv[i0+3])%ps+(jj+4)*ps);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	ll = jj+4;
-	for( ; ll<n; ll+=4)
-		{
-		kernel_dtrsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*ps], sdd, &pD[ll*ps+i0*sdd], &pD[ll*ps+i0*sdd], &pD[i0*ps+i0*sdd], m-i0, n-ll);
-		}
-	return;
-
-	}
-
-
-# if 0
-void dlauum_dpotrf_blk_nt_l_lib(int m, int n, int nv, int *rv, int *cv, double *pA, int sda, double *pB, int sdb, int alg, double *pC, int sdc, double *pD, int sdd, double *inv_diag_D)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	// TODO remove
-	int k = cv[nv-1];
-
-	const int ps = 4;
-
-	int i, j, l;
-	int ii, iii, jj, kii, kiii, kjj, k0, k1;
-
-	i = 0;
-	ii = 0;
-	iii = 0;
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-7; i+=8)
-		{
-
-		while(ii<nv && rv[ii]<i+8)
-			ii++;
-		if(ii<nv)
-			kii = cv[ii];
-		else
-			kii = cv[ii-1];
-
-		j = 0;
-		jj = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &inv_diag_D[j]);
-			}
-		if(j<n)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			if(j<i) // dgemm
-				{
-				kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &inv_diag_D[j], 8, n-j);
-				}
-			else // dsyrk
-				{
-				kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &inv_diag_D[j], 8, n-j);
-				if(j<n-4)
-					{
-					kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], alg, &pC[(j+4)*ps+(j+4)*sdc], &pD[(j+4)*ps+(j+4)*sdd], &inv_diag_D[j+4], 4, n-j-4); // TODO
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-
-		while(ii<nv && rv[ii]<i+4)
-			ii++;
-		if(ii<nv)
-			kii = cv[ii];
-		else
-			kii = cv[ii-1];
-
-		j = 0;
-		jj = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &inv_diag_D[j]);
-			}
-		if(j<n)
-			{
-
-			while(jj<nv && rv[jj]<j+4)
-				jj++;
-			if(jj<nv)
-				kjj = cv[jj];
-			else
-				kjj = cv[jj-1];
-			k0 = kii<kjj ? kii : kjj;
-
-			if(i<j) // dgemm
-				{
-				kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &inv_diag_D[j], 4, n-j);
-				}
-			else // dsyrk
-				{
-				kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &inv_diag_D[j], 4, n-j);
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-
-	kii = cv[nv-1];
-
-	j = 0;
-	jj = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &inv_diag_D[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		if(j<i) // dgemm
-			{
-			kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &inv_diag_D[j], m-i, n-j);
-			}
-		else // dsyrk
-			{
-			kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k0, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], alg, &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &inv_diag_D[j], m-i, n-j);
-			if(j<n-4)
-				{
-				kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], alg, &pC[(j+4)*ps+(j+4)*sdc], &pD[(j+4)*ps+(j+4)*sdd], &inv_diag_D[j+4], m-i-4, n-j-4); // TODO
-				}
-			}
-		}
-	return;
-#endif
-
-	left_4:
-
-	kii = cv[nv-1];
-
-	j = 0;
-	jj = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &inv_diag_D[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-
-		while(jj<nv && rv[jj]<j+4)
-			jj++;
-		if(jj<nv)
-			kjj = cv[jj];
-		else
-			kjj = cv[jj-1];
-		k0 = kii<kjj ? kii : kjj;
-
-		if(j<i) // dgemm
-			{
-			kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &inv_diag_D[j], m-i, n-j);
-			}
-		else // dsyrk
-			{
-			kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k0, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], alg, &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &inv_diag_D[j], m-i, n-j);
-			}
-		}
-	return;
-
-	}
-#endif
-
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dpotrf
-void dpotrf_l_libstr(int m, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ci!=0 | di!=0)
-		{
-		printf("\ndpotrf_l_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dD = sD->dA;
-
-	if(di==0 & dj==0) // XXX what to do if di and dj are not zero
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	int i, j, l;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<i; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_12x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		kernel_dpotrf_nt_l_12x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-		kernel_dpotrf_nt_l_8x8_lib4(j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4]);
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<i; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_8x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		kernel_dpotrf_nt_l_8x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-		kernel_dpotrf_nt_l_4x4_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4]);
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j]);
-			}
-		kernel_dpotrf_nt_l_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j]);
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12: // 9 - 12
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-		}
-	kernel_dpotrf_nt_l_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-	kernel_dpotrf_nt_l_8x8_vs_lib4(j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, m-j-4);
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<i-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-		kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4((j+4), &pD[i*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, &pD[(j+4)*ps+(j+4)*sdd], sdd, &dD[(j+4)], m-i, m-(j+4));
-		}
-	if(j<i-4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4((j+4), &pD[i*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], &pD[(j+4)*ps+(j+4)*sdd], &dD[(j+4)], m-i, m-(j+4));
-		j += 8;
-		}
-	else if(j<i)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-		j += 4;
-		}
-	kernel_dpotrf_nt_l_8x8_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-	return;
-#endif
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-		}
-	kernel_dpotrf_nt_l_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-	kernel_dpotrf_nt_l_4x4_vs_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, m-j-4);
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<i-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_4x12_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-		}
-	if(j<i-4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x8_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, m-j);
-		j += 8;
-		}
-	else if(j<i)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-		j += 4;
-		}
-	kernel_dpotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-		}
-	kernel_dpotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, m-j);
-	return;
-#endif
-
-	}
-
-
-
-// dpotrf
-void dpotrf_l_mn_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	if(ci!=0 | di!=0)
-		{
-		printf("\ndpotrf_l_mn_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dD = sD->dA;
-
-	if(di==0 & dj==0) // XXX what to do if di and dj are not zero
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	int i, j, l;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_12x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(j<i) // dtrsm
-				{
-				kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dpptrf
-				{
-				if(n<j-11)
-					{
-					kernel_dpotrf_nt_l_12x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-					kernel_dpotrf_nt_l_8x8_lib4(j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4]);
-					}
-				else
-					{
-					kernel_dpotrf_nt_l_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-					if(j<n-4)
-						{
-						kernel_dpotrf_nt_l_8x4_vs_lib4(j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, n-j-4);
-						if(j<n-8)
-							{
-							kernel_dpotrf_nt_l_4x4_vs_lib4(j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], &dD[j+8], m-i-8, n-j-8);
-							}
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_8x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(j<i) // dtrsm
-				{
-				kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dpotrf
-				{
-				if(j<n-7)
-//				if(0)
-					{
-					kernel_dpotrf_nt_l_8x4_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-					kernel_dpotrf_nt_l_4x4_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4]);
-					}
-				else
-					{
-					kernel_dpotrf_nt_l_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-					if(j<n-4)
-						{
-						kernel_dpotrf_nt_l_4x4_vs_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dtrsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dpotrf
-				{
-				if(j<n-3)
-					{
-					kernel_dpotrf_nt_l_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j]);
-					}
-				else
-					{
-					kernel_dpotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dpotrf_nt_l_12x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dpotrf_nt_l_8x4_vs_lib4(j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, n-j-4);
-			if(j<n-8)
-				{
-				kernel_dpotrf_nt_l_4x4_vs_lib4(j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], &dD[j+8], m-i-8, n-j-8);
-				}
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4((j+4), &pD[i*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, &pD[(j+4)*ps+(j+4)*sdd], sdd, &dD[(j+4)], m-i, n-(j+4));
-		}
-	if(j<i-4 & j<n-4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4((j+4), &pD[i*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], &pD[(j+4)*ps+(j+4)*sdd], &dD[(j+4)], m-i, n-(j+4));
-		j += 8;
-		}
-	else if(j<i & j<n)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dpotrf_nt_l_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dpotrf_nt_l_4x4_vs_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-			}
-		}
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dpotrf_nt_l_8x4_vs_lib4(j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dpotrf_nt_l_4x4_vs_lib4(j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dtrsm_nt_rl_inv_4x12_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		}
-	if(j<i-4 & j<n-4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x8_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		j += 8;
-		}
-	else if(j<i & j<n)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dpotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	return;
-#else
-	left_4:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dpotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	return;
-#endif
-
-	}
-
-
-
-// dsyrk dpotrf
-void dsyrk_dpotrf_ln_libstr(int m, int n, int k, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\ndsyrk_dpotrf_ln_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-
-	const int ps = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pA = sA->pA + aj*ps;
-	double *pB = sB->pA + bj*ps;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dD = sD->dA; // XXX what to do if di and dj are not zero
-
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(j<i) // dgemm
-				{
-				kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dsyrk
-				{
-				if(j<n-11)
-					{
-					kernel_dsyrk_dpotrf_nt_l_12x4_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-					kernel_dsyrk_dpotrf_nt_l_8x8_lib4(k, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], sdb, j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4]);
-					}
-				else
-					{
-					kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-					if(j<n-4)
-						{
-						if(j<n-8)
-							{
-							kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4(k, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], sdb, j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, n-j-4);
-							}
-						else
-							{
-							kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, n-j-4);
-							}
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_12;
-			}
-		}
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(j<i) // dgemm
-				{
-				kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dsyrk
-				{
-				if(j<n-7)
-//				if(0)
-					{
-					kernel_dsyrk_dpotrf_nt_l_8x4_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j]);
-					kernel_dsyrk_dpotrf_nt_l_4x4_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4]);
-					}
-				else
-					{
-					kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-					if(j<n-4)
-						{
-						kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else
-			{
-			goto left_8;
-			}
-		}
-#else
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i & j<n-3; j+=4)
-			{
-			kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dgemm
-				{
-				kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dsyrk
-				{
-				if(j<n-3)
-					{
-					kernel_dsyrk_dpotrf_nt_l_4x4_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j]);
-					}
-				else
-					{
-					kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k, &pA[(i+4)*sda], sda, &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], sdd, &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], sdc, &pD[(j+4)*ps+(i+4)*sdd], sdd, &dD[j+4], m-i-4, n-j-4);
-			if(j<n-8)
-				{
-				kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*ps+(i+8)*sdc], &pD[(j+8)*ps+(i+8)*sdd], &dD[j+8], m-i-8, n-j-8);
-				}
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_8:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], sdb, j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4(k, &pA[i*sda], sda, &pB[(j+4)*sdb], sdb, (j+4), &pD[i*sdd], sdd, &pD[(j+4)*sdd], sdd, &pC[(j+4)*ps+i*sdc], sdc, &pD[(j+4)*ps+i*sdd], sdd, &pD[(j+4)*ps+(j+4)*sdd], sdd, &dD[(j+4)], m-i, n-(j+4));
-		}
-	if(j<i-3 & j<n-3)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], sdb, j, &pD[i*sdd], sdd, &pD[j*sdd], sdd, &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[(j+4)*sdb], (j+4), &pD[i*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+i*sdc], &pD[(j+4)*ps+i*sdd], &pD[(j+4)*ps+(j+4)*sdd], &dD[(j+4)], m-i, n-(j+4));
-		j += 8;
-		}
-	else if(j<i & j<n)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-			}
-		}
-	return;
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_8:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+i*sdc], sdc, &pD[j*ps+i*sdd], sdd, &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(k, &pA[i*sda], sda, &pB[j*sdb], j, &pD[i*sdd], sdd, &pD[j*sdd], &pC[j*ps+j*sdc], sdc, &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		if(j<n-4)
-			{
-			kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[(i+4)*sda], &pB[(j+4)*sdb], j+4, &pD[(i+4)*sdd], &pD[(j+4)*sdd], &pC[(j+4)*ps+(i+4)*sdc], &pD[(j+4)*ps+(i+4)*sdd], &dD[j+4], m-i-4, n-j-4);
-			}
-		}
-	return;
-#endif
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4:
-	j = 0;
-	for(; j<i-8 & j<n-8; j+=12)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4(k, &pA[i*sda], &pB[j*sdb], sdb, j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		}
-	if(j<i-4 & j<n-4)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4(k, &pA[i*sda], &pB[j*sdb], sdb, j, &pD[i*sdd], &pD[j*sdd], sdd, &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], sdd, &dD[j], m-i, n-j);
-		j += 8;
-		}
-	else if(j<i & j<n)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		j += 4;
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-#else
-	left_4:
-	j = 0;
-	for(; j<i & j<n; j+=4)
-		{
-		kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+i*sdc], &pD[j*ps+i*sdd], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*ps+j*sdc], &pD[j*ps+j*sdd], &dD[j], m-i, n-j);
-		}
-#endif
-
-	return;
-
-	}
-
-
-
-// dgetrf without pivoting
-void dgetrf_nopivot_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj)
-	{
-	if(ci!=0 | di!=0)
-		{
-		printf("\ndgetf_nopivot_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-	const int ps = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dD = sD->dA; // XXX what to do if di and dj are not zero
-	dgetrf_nn_nopivot_lib(m, n, pC, sdc, pD, sdd, dD);
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	return;
-	}
-
-
-
-
-// dgetrf pivoting
-void dgetrf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, int *ipiv)
-	{
-	if(ci!=0 | di!=0)
-		{
-		printf("\ndgetrf_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-	const int ps = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = sC->pA + cj*ps;
-	double *pD = sD->pA + dj*ps;
-	double *dD = sD->dA; // XXX what to do if di and dj are not zero
-	dgetrf_nn_lib(m, n, pC, sdc, pD, sdd, dD, ipiv);
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	return;
-	}
-
-
-
-int dgeqrf_work_size_libstr(int m, int n)
-	{
-	const int ps = 4;
-	int cm = (m+ps-1)/ps*ps;
-	int cn = (n+ps-1)/ps*ps;
-	return ps*(cm+cn)*sizeof(double);
-//	return 0;
-	}
-
-
-
-void dgeqrf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, void *v_work)
-	{
-	char *work = (char *) v_work;
-	if(m<=0 | n<=0)
-		return;
-	const int ps = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = &(DMATEL_LIBSTR(sC,ci,cj));
-	double *pD = &(DMATEL_LIBSTR(sD,di,dj));
-	double *dD = sD->dA + di;
-	int cm = (m+ps-1)/ps*ps;
-	int cn = (n+ps-1)/ps*ps;
-	double *pVt = (double *) work;
-	work += ps*cm*sizeof(double);
-	double *pW = (double *) work;
-	work += ps*cn*sizeof(double);
-	if(pC!=pD)
-		dgecp_lib(m, n, 1.0, ci&(ps-1), pC, sdc, di&(ps-1), pD, sdd);
-	int ii;
-	int imax0 = (ps-(di&(ps-1)))&(ps-1);
-	int imax = m<n ? m : n;
-	imax0 = imax<imax0 ? imax : imax0;
-	if(imax0>0)
-		{
-		kernel_dgeqrf_vs_lib4(m, n, imax0, di&(ps-1), pD, sdd, dD);
-		pD += imax0-ps+ps*sdd+imax0*ps;
-		dD += imax0;
-		m -= imax0;
-		n -= imax0;
-		imax -= imax0;
-		}
-	for(ii=0; ii<imax-3; ii+=4)
-		{
-		kernel_dgeqrf_4_lib4(m-ii, pD+ii*sdd+ii*ps, sdd, dD+ii);
-#if 0
-		kernel_dlarf_4_lib4(m-ii, n-ii-4, pD+ii*sdd+ii*ps, sdd, dD+ii, pD+ii*sdd+(ii+4)*ps, sdd);
-#else
-		kernel_dgetr_4_0_lib4(m-ii, pD+ii*sdd+ii*ps, sdd, pVt);
-		pVt[0+ps*0] = 1.0;
-		pVt[1+ps*0] = 0.0;
-		pVt[2+ps*0] = 0.0;
-		pVt[3+ps*0] = 0.0;
-		pVt[1+ps*1] = 1.0;
-		pVt[2+ps*1] = 0.0;
-		pVt[3+ps*1] = 0.0;
-		pVt[2+ps*2] = 1.0;
-		pVt[3+ps*2] = 0.0;
-		pVt[3+ps*3] = 1.0;
-		kernel_dlarf_t_4_lib4(m-ii, n-ii-4, pD+ii*sdd+ii*ps, sdd, pVt, dD+ii, pD+ii*sdd+(ii+4)*ps, sdd, pW);
-#endif
-		}
-	if(ii<imax)
-		{
-		kernel_dgeqrf_vs_lib4(m-ii, n-ii, imax-ii, ii&(ps-1), pD+ii*sdd+ii*ps, sdd, dD+ii);
-		}
-	return;
-	}
-
-
-
-int dgelqf_work_size_libstr(int m, int n)
-	{
-	return 0;
-	}
-
-
-
-void dgelqf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	const int ps = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	double *pC = &(DMATEL_LIBSTR(sC,ci,cj));
-	double *pD = &(DMATEL_LIBSTR(sD,di,dj));
-	double *dD = sD->dA + di;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	double pT[144] __attribute__ ((aligned (64))) = {0};
-	double pK[96] __attribute__ ((aligned (64))) = {0};
-#else
-	double pT[144] = {0};
-	double pK[96] = {0};
-#endif
-	if(pC!=pD)
-		dgecp_lib(m, n, 1.0, ci&(ps-1), pC, sdc, di&(ps-1), pD, sdd);
-	int ii, jj, ll;
-	int imax0 = (ps-(di&(ps-1)))&(ps-1);
-	int imax = m<n ? m : n;
-#if 0
-	kernel_dgelqf_vs_lib4(m, n, imax, di&(ps-1), pD, sdd, dD);
-#else
-	imax0 = imax<imax0 ? imax : imax0;
-	if(imax0>0)
-		{
-		kernel_dgelqf_vs_lib4(m, n, imax0, di&(ps-1), pD, sdd, dD);
-		pD += imax0-ps+ps*sdd+imax0*ps;
-		dD += imax0;
-		m -= imax0;
-		n -= imax0;
-		imax -= imax0;
-		}
-	ii = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-//	for(; ii<imax-11; ii+=12)
-	for(; ii<imax-127; ii+=12) // crossover point ~ ii=128
-		{
-		kernel_dgelqf_dlarft12_12_lib4(n-(ii+0), pD+(ii+0)*sdd+(ii+0)*ps, sdd, dD+(ii+0), &pT[0+0*12+0*ps]);
-		jj = ii+12;
-		for(; jj<m; jj+=4)
-			{
-			kernel_dlarfb12_r_4_lib4(n-ii, pD+ii*sdd+ii*ps, sdd, pT, pD+jj*sdd+ii*ps, pK, m-jj);
-			}
-		}
-	for(; ii<imax-11; ii+=4)
-		{
-		kernel_dgelqf_dlarft4_12_lib4(n-ii, pD+ii*sdd+ii*ps, sdd, dD+ii, pT);
-		jj = ii+12;
-		for(; jj<m-11; jj+=12)
-			{
-			kernel_dlarfb4_r_12_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-			}
-		for(; jj<m-7; jj+=8)
-			{
-			kernel_dlarfb4_r_8_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-			}
-		for(; jj<m-3; jj+=4)
-			{
-			kernel_dlarfb4_r_4_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps);
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			kernel_dlarfb4_r_1_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+ll+jj*sdd+ii*ps);
-			}
-		}
-	// 8 9 10 11
-	if(ii<imax-7)
-		{
-		kernel_dgelqf_dlarft4_8_lib4(n-ii, pD+ii*sdd+ii*ps, sdd, dD+ii, pT);
-		jj = ii+8;
-		if(jj<m)
-			{
-			for(; jj<m-11; jj+=12)
-				{
-				kernel_dlarfb4_r_12_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-				}
-			for(; jj<m-7; jj+=8)
-				{
-				kernel_dlarfb4_r_8_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-				}
-			for(; jj<m-3; jj+=4)
-				{
-				kernel_dlarfb4_r_4_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps);
-				}
-			for(ll=0; ll<m-jj; ll++)
-				{
-				kernel_dlarfb4_r_1_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+ll+jj*sdd+ii*ps);
-				}
-			}
-		ii += 4;
-		}
-	// 4 5 6 7
-	if(ii<imax-3)
-		{
-		kernel_dgelqf_dlarft4_4_lib4(n-ii, pD+ii*sdd+ii*ps, dD+ii, pT);
-		jj = ii+4;
-		if(jj<m)
-			{
-			for(; jj<m-11; jj+=12)
-				{
-				kernel_dlarfb4_r_12_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-				}
-			for(; jj<m-7; jj+=8)
-				{
-				kernel_dlarfb4_r_8_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-				}
-			for(; jj<m-3; jj+=4)
-				{
-				kernel_dlarfb4_r_4_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps);
-				}
-			for(ll=0; ll<m-jj; ll++)
-				{
-				kernel_dlarfb4_r_1_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+ll+jj*sdd+ii*ps);
-				}
-			}
-		ii += 4;
-		}
-	// 1 2 3
-	if(ii<imax)
-		{
-		kernel_dgelqf_vs_lib4(m-ii, n-ii, imax-ii, ii&(ps-1), pD+ii*sdd+ii*ps, sdd, dD+ii);
-		}
-#else // no haswell
-	for(ii=0; ii<imax-4; ii+=4)
-		{
-//		kernel_dgelqf_vs_lib4(4, n-ii, 4, 0, pD+ii*sdd+ii*ps, sdd, dD+ii);
-//		kernel_dgelqf_4_lib4(n-ii, pD+ii*sdd+ii*ps, dD+ii);
-//		kernel_dlarft_4_lib4(n-ii, pD+ii*sdd+ii*ps, dD+ii, pT);
-		kernel_dgelqf_dlarft4_4_lib4(n-ii, pD+ii*sdd+ii*ps, dD+ii, pT);
-		jj = ii+4;
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-		for(; jj<m-7; jj+=8)
-			{
-			kernel_dlarfb4_r_8_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps, sdd);
-			}
-#endif
-		for(; jj<m-3; jj+=4)
-			{
-			kernel_dlarfb4_r_4_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+jj*sdd+ii*ps);
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			kernel_dlarfb4_r_1_lib4(n-ii, pD+ii*sdd+ii*ps, pT, pD+ll+jj*sdd+ii*ps);
-			}
-		}
-	if(ii<imax)
-		{
-		if(ii==imax-4)
-			{
-			kernel_dgelqf_4_lib4(n-ii, pD+ii*sdd+ii*ps, dD+ii);
-			}
-		else
-			{
-			kernel_dgelqf_vs_lib4(m-ii, n-ii, imax-ii, ii&(ps-1), pD+ii*sdd+ii*ps, sdd, dD+ii);
-			}
-		}
-#endif // no haswell
-#endif
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/blas/s_blas.h b/third_party/blasfeo/blas/s_blas.h
deleted file mode 100644
index b6a92a7..0000000
--- a/third_party/blasfeo/blas/s_blas.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// headers to reference BLAS and LAPACK routines employed in BLASFEO WR
-
-// level 1
-void scopy_(int *m, float *x, int *incx, float *y, int *incy);
-void saxpy_(int *m, float *alpha, float *x, int *incx, float *y, int *incy);
-void sscal_(int *m, float *alpha, float *x, int *incx);
-
-// level 2
-void sgemv_(char *ta, int *m, int *n, float *alpha, float *A, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
-void ssymv_(char *uplo, int *m, float *alpha, float *A, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
-void strmv_(char *uplo, char *trans, char *diag, int *n, float *A, int *lda, float *x, int *incx);
-void strsv_(char *uplo, char *trans, char *diag, int *n, float *A, int *lda, float *x, int *incx);
-void sger_(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *A, int *lda);
-
-// level 3
-void sgemm_(char *ta, char *tb, int *m, int *n, int *k, float *alpha, float *A, int *lda, float *B, int *ldb, float *beta, float *C, int *ldc);
-void ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha, float *A, int *lda, float *beta, float *C, int *ldc);
-void strmm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *A, int *lda, float *B, int *ldb);
-void strsm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *A, int *lda, float *B, int *ldb);
-
-// lapack
-int spotrf_(char *uplo, int *m, float *A, int *lda, int *info);
-int sgetrf_(int *m, int *n, float *A, int *lda, int *ipiv, int *info);
-void sgeqrf_(int *m, int *n, float *A, int *lda, float *tau, float *work, int *lwork, int *info);
-void sgeqr2_(int *m, int *n, float *A, int *lda, float *tau, float *work, int *info);
-void sgelqf_(int *m, int *n, float *A, int *lda, float *tau, float *work, int *lwork, int *info);
-
-
-
-#ifdef __cplusplus
-}
-#endif
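
The prototypes above are the plain Fortran BLAS/LAPACK entry points (trailing underscore, every argument passed by pointer, column-major matrices) that the LA_BLAS build of BLASFEO forwards to. A minimal sketch of calling one of them directly from C, assuming the program is linked against some BLAS implementation (e.g. with -lblas):

#include <stdio.h>

// prototype as in the deleted header: y = alpha*op(A)*x + beta*y, A column-major
void sgemv_(char *ta, int *m, int *n, float *alpha, float *A, int *lda, float *x, int *incx, float *beta, float *y, int *incy);

int main()
	{
	// A = [1 3; 2 4] stored column by column, x = [1 1]'
	float A[4] = {1.0f, 2.0f, 3.0f, 4.0f};
	float x[2] = {1.0f, 1.0f};
	float y[2] = {0.0f, 0.0f};
	char ta = 'N';
	int m = 2, n = 2, lda = 2, incx = 1, incy = 1;
	float alpha = 1.0f, beta = 0.0f;
	sgemv_(&ta, &m, &n, &alpha, A, &lda, x, &incx, &beta, y, &incy);
	printf("%f %f\n", y[0], y[1]); // prints 4 and 6
	return 0;
	}
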
diff --git a/third_party/blasfeo/blas/s_blas1_lib.c b/third_party/blasfeo/blas/s_blas1_lib.c
deleted file mode 100644
index 67fec77..0000000
--- a/third_party/blasfeo/blas/s_blas1_lib.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#include "s_blas.h"
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-#define REAL float
-
-#define STRVEC s_strvec
-
-#define AXPY_LIBSTR saxpy_libstr
-#define VECMULDOT_LIBSTR svecmuldot_libstr
-#define DOT_LIBSTR sdot_libstr
-
-#define AXPY saxpy_
-#define COPY scopy_
-
-
-#include "x_blas1_lib.c"
-
diff --git a/third_party/blasfeo/blas/s_blas1_lib4.c b/third_party/blasfeo/blas/s_blas1_lib4.c
deleted file mode 100644
index 8588020..0000000
--- a/third_party/blasfeo/blas/s_blas1_lib4.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// z = y + alpha*x, with increments equal to 1
-void saxpy_libstr(int m, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	ii = 0;
-	for( ; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-	for( ; ii<m; ii++)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		}
-	return;
-	}
-
-
-
-void saxpy_bkp_libstr(int m, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	ii = 0;
-	for( ; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0];
-		y[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1];
-		y[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2];
-		y[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3];
-		y[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-	for( ; ii<m; ii++)
-		{
-		z[ii+0] = y[ii+0];
-		y[ii+0] = y[ii+0] + alpha*x[ii+0];
-		}
-	return;
-	}
-
-
-
-// multiply two vectors and compute dot product
-float svecmuldot_libstr(int m, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return 0.0;
-
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	float dot = 0.0;
-
-	ii = 0;
-
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		dot += z[ii+0];
-		}
-	return dot;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
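
The LA_HIGH_PERFORMANCE level-1 routines above are straight C loops, hand-unrolled by the panel height (4 in this lib4 variant) with a scalar tail loop for the remainder. The same pattern, as a distilled sketch on raw float arrays that leaves the s_strvec wrapper and offsets out:

#include <stdio.h>

// z = y + alpha*x, unrolled by 4 with a scalar clean-up loop for the last m%4 entries
static void saxpy_ref(int m, float alpha, const float *x, const float *y, float *z)
	{
	int ii = 0;
	for( ; ii<m-3; ii+=4)
		{
		z[ii+0] = y[ii+0] + alpha*x[ii+0];
		z[ii+1] = y[ii+1] + alpha*x[ii+1];
		z[ii+2] = y[ii+2] + alpha*x[ii+2];
		z[ii+3] = y[ii+3] + alpha*x[ii+3];
		}
	for( ; ii<m; ii++)
		z[ii] = y[ii] + alpha*x[ii];
	}

int main()
	{
	int ii;
	float x[5] = {1, 2, 3, 4, 5}, y[5] = {1, 1, 1, 1, 1}, z[5];
	saxpy_ref(5, 2.0f, x, y, z);
	for(ii=0; ii<5; ii++)
		printf("%f\n", z[ii]); // 3 5 7 9 11
	return 0;
	}
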
diff --git a/third_party/blasfeo/blas/s_blas1_lib8.c b/third_party/blasfeo/blas/s_blas1_lib8.c
deleted file mode 100644
index 538c012..0000000
--- a/third_party/blasfeo/blas/s_blas1_lib8.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// z = y + alpha*x, with increments equal to 1
-void saxpy_libstr(int m, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	ii = 0;
-	for( ; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-	for( ; ii<m; ii++)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		}
-	return;
-	}
-
-
-
-void saxpy_bkp_libstr(int m, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	ii = 0;
-	for( ; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0];
-		y[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1];
-		y[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2];
-		y[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3];
-		y[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-	for( ; ii<m; ii++)
-		{
-		z[ii+0] = y[ii+0];
-		y[ii+0] = y[ii+0] + alpha*x[ii+0];
-		}
-	return;
-	}
-
-
-
-// multiply two vectors and compute dot product
-float svecmuldot_libstr(int m, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return 0.0;
-
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-	int ii;
-	float dot = 0.0;
-
-	ii = 0;
-
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		dot += z[ii+0];
-		}
-	return dot;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
diff --git a/third_party/blasfeo/blas/s_blas2_diag_lib.c b/third_party/blasfeo/blas/s_blas2_diag_lib.c
deleted file mode 100644
index 1dde42f..0000000
--- a/third_party/blasfeo/blas/s_blas2_diag_lib.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#define REAL float
-
-#define STRVEC s_strvec
-
-#define GEMV_DIAG_LIBSTR sgemv_diag_libstr
-
-
-
-#include "x_blas2_diag_lib.c"
-
diff --git a/third_party/blasfeo/blas/s_blas2_lib.c b/third_party/blasfeo/blas/s_blas2_lib.c
deleted file mode 100644
index 7ab8dc2..0000000
--- a/third_party/blasfeo/blas/s_blas2_lib.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#include "s_blas.h"
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-#define REAL float
-
-#define STRMAT s_strmat
-#define STRVEC s_strvec
-
-#define GEMV_N_LIBSTR sgemv_n_libstr
-#define GEMV_NT_LIBSTR sgemv_nt_libstr
-#define GEMV_T_LIBSTR sgemv_t_libstr
-#define SYMV_L_LIBSTR ssymv_l_libstr
-#define TRMV_LNN_LIBSTR strmv_lnn_libstr
-#define TRMV_LTN_LIBSTR strmv_ltn_libstr
-#define TRMV_UNN_LIBSTR strmv_unn_libstr
-#define TRMV_UTN_LIBSTR strmv_utn_libstr
-#define TRSV_LNN_LIBSTR strsv_lnn_libstr
-#define TRSV_LNN_MN_LIBSTR strsv_lnn_mn_libstr
-#define TRSV_LNU_LIBSTR strsv_lnu_libstr
-#define TRSV_LTN_LIBSTR strsv_ltn_libstr
-#define TRSV_LTN_MN_LIBSTR strsv_ltn_mn_libstr
-#define TRSV_LTU_LIBSTR strsv_ltu_libstr
-#define TRSV_UNN_LIBSTR strsv_unn_libstr
-#define TRSV_UTN_LIBSTR strsv_utn_libstr
-
-#define COPY scopy_
-#define GEMV sgemv_
-#define SYMV ssymv_
-#define TRMV strmv_
-#define TRSV strsv_
-
-
-
-#include "x_blas2_lib.c"
-
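
The s_blas2_lib.c wrapper above contains no loops of its own: it fixes the precision (REAL), the structure types, and the exported/backing routine names through macros, then pulls in the shared x_blas2_lib.c body, so the same generic source can be instantiated once per precision. A single-file sketch of that technique (sveccp_ref and VECCP_REF are hypothetical names standing in for the *_LIBSTR and BLAS macros; the real generic body lives in the x_*.c files, which are not shown here):

#include <stdio.h>

#define REAL float
#define VECCP_REF sveccp_ref	// hypothetical name, chosen per precision like AXPY/COPY above

// generic body: would normally sit in a shared x_*.c file included by each wrapper
static void VECCP_REF(int m, const REAL *x, REAL *y)
	{
	int ii;
	for(ii=0; ii<m; ii++)
		y[ii] = x[ii];
	}

int main()
	{
	REAL x[3] = {1.0, 2.0, 3.0};
	REAL y[3];
	VECCP_REF(3, x, y);
	printf("%f %f %f\n", y[0], y[1], y[2]);
	return 0;
	}
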
diff --git a/third_party/blasfeo/blas/s_blas2_lib4.c b/third_party/blasfeo/blas/s_blas2_lib4.c
deleted file mode 100644
index b7a947d..0000000
--- a/third_party/blasfeo/blas/s_blas2_lib4.c
+++ /dev/null
@@ -1,1045 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-void sgemv_n_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<0)
-		return;
-
-	const int bs = 4;
-
-	int i;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	i = 0;
-	// clean up at the beginning
-	if(ai%bs!=0)
-		{
-		kernel_sgemv_n_4_gen_lib4(n, &alpha, pA, x, &beta, y-ai%bs, z-ai%bs, ai%bs, m+ai%bs);
-		pA += bs*sda;
-		y += 4 - ai%bs;
-		z += 4 - ai%bs;
-		m -= 4 - ai%bs;
-		}
-	// main loop
-	for( ; i<m-3; i+=4)
-		{
-		kernel_sgemv_n_4_lib4(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_sgemv_n_4_vs_lib4(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i], m-i);
-		}
-		
-	return;
-
-	}
-
-
-
-void sgemv_t_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int i;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	if(ai%bs==0)
-		{
-		i = 0;
-		for( ; i<n-3; i+=4)
-			{
-			kernel_sgemv_t_4_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			}
-		if(i<n)
-			{
-			kernel_sgemv_t_4_vs_lib4(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		}
-	else // TODO kernel 8
-		{
-		i = 0;
-		for( ; i<n; i+=4)
-			{
-			kernel_sgemv_t_4_gen_lib4(m, &alpha, ai%bs, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		}
-	
-	return;
-
-	}
-
-
-
-void sgemv_nt_libstr(int m, int n, float alpha_n, float alpha_t, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx_n, int xi_n, struct s_strvec *sx_t, int xi_t, float beta_n, float beta_t, struct s_strvec *sy_n, int yi_n, struct s_strvec *sy_t, int yi_t, struct s_strvec *sz_n, int zi_n, struct s_strvec *sz_t, int zi_t)
-	{
-
-	if(ai!=0)
-		{
-		printf("\nsgemv_nt_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *x_n = sx_n->pa + xi_n;
-	float *x_t = sx_t->pa + xi_t;
-	float *y_n = sy_n->pa + yi_n;
-	float *y_t = sy_t->pa + yi_t;
-	float *z_n = sz_n->pa + zi_n;
-	float *z_t = sz_t->pa + zi_t;
-
-//	sgemv_nt_lib(m, n, alpha_n, alpha_t, pA, sda, x_n, x_t, beta_n, beta_t, y_n, y_t, z_n, z_t);
-
-//	if(m<=0 | n<=0)
-//		return;
-
-	int ii;
-
-	// copy and scale y_n into z_n
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		z_n[ii+1] = beta_n*y_n[ii+1];
-		z_n[ii+2] = beta_n*y_n[ii+2];
-		z_n[ii+3] = beta_n*y_n[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		}
-	
-	ii = 0;
-	for(; ii<n-3; ii+=4)
-		{
-		kernel_sgemv_nt_4_lib4(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii);
-		}
-	if(ii<n)
-		{
-		kernel_sgemv_nt_4_vs_lib4(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii, n-ii);
-		}
-	
-		return;
-	}
-
-
-
-void ssymv_l_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int ii, n1;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	// copy and scale y into z
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z[ii+0] = beta*y[ii+0];
-		z[ii+1] = beta*y[ii+1];
-		z[ii+2] = beta*y[ii+2];
-		z[ii+3] = beta*y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = beta*y[ii+0];
-		}
-	
-	// clean up at the beginning
-	if(ai%bs!=0) // 1, 2, 3
-		{
-		n1 = 4-ai%bs;
-		kernel_ssymv_l_4_gen_lib4(m, &alpha, ai%bs, &pA[0], sda, &x[0], &z[0], n<n1 ? n : n1);
-		pA += n1 + n1*bs + (sda-1)*bs;
-		x += n1;
-		z += n1;
-		m -= n1;
-		n -= n1;
-		}
-	// main loop
-	ii = 0;
-	for(; ii<n-3; ii+=4)
-		{
-		kernel_ssymv_l_4_lib4(m-ii, &alpha, &pA[ii*bs+ii*sda], sda, &x[ii], &z[ii]);
-		}
-	// clean up at the end
-	if(ii<n)
-		{
-		kernel_ssymv_l_4_gen_lib4(m-ii, &alpha, 0, &pA[ii*bs+ii*sda], sda, &x[ii], &z[ii], n-ii);
-		}
-	
-	return;
-	}
-
-
-
-// m >= n
-void strmv_lnn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	if(m-n>0)
-		sgemv_n_libstr(m-n, n, 1.0, sA, ai+n, aj, sx, xi, 0.0, sz, zi+n, sz, zi+n);
-
-	float *pA2 = pA;
-	float *z2 = z;
-	int m2 = n;
-	int n2 = 0;
-	float *pA3, *x3;
-
-	float alpha = 1.0;
-	float beta = 1.0;
-
-	float zt[4];
-
-	int ii, jj, jj_end;
-
-	ii = 0;
-
-	if(ai%4!=0)
-		{
-		pA2 += sda*bs - ai%bs;
-		z2 += bs-ai%bs;
-		m2 -= bs-ai%bs;
-		n2 += bs-ai%bs;
-		}
-	
-	pA2 += m2/bs*bs*sda;
-	z2 += m2/bs*bs;
-	n2 += m2/bs*bs;
-
-	if(m2%bs!=0)
-		{
-		//
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		zt[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		zt[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		zt[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		zt[0] = pA3[0+bs*0]*x3[0];
-		kernel_sgemv_n_4_lib4(n2, &alpha, pA2, x, &beta, zt, zt);
-		for(jj=0; jj<m2%bs; jj++)
-			z2[jj] = zt[jj];
-		}
-	for(; ii<m2-3; ii+=4)
-		{
-		pA2 -= bs*sda;
-		z2 -= 4;
-		n2 -= 4;
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		z2[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		z2[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		z2[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		z2[0] = pA3[0+bs*0]*x3[0];
-		kernel_sgemv_n_4_lib4(n2, &alpha, pA2, x, &beta, z2, z2);
-		}
-	if(ai%4!=0)
-		{
-		if(ai%bs==1)
-			{
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 4-ai%bs<n ? 4-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==2)
-			{
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 4-ai%bs<n ? 4-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else // if (ai%bs==3)
-			{
-			z[0] = pA[0+bs*0]*x[0];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// m >= n
-void strmv_ltn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	float xt[4];
-	float zt[4];
-
-	float alpha = 1.0;
-	float beta = 1.0;
-
-	int ii, jj, ll, ll_max;
-
-	jj = 0;
-
-	if(ai%bs!=0)
-		{
-
-		if(ai%bs==1)
-			{
-			ll_max = m-jj<3 ? m-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<3; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2];
-			zt[2] = pA[2+bs*2]*xt[2];
-			pA += bs*sda - 1;
-			x += 3;
-			kernel_sgemv_t_4_lib4(m-3-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<3 ? n-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*3;
-			z += 3;
-			jj += 3;
-			}
-		else if(ai%bs==2)
-			{
-			ll_max = m-jj<2 ? m-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<2; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1];
-			zt[1] = pA[1+bs*1]*xt[1];
-			pA += bs*sda - 2;
-			x += 2;
-			kernel_sgemv_t_4_lib4(m-2-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<2 ? n-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*2;
-			z += 2;
-			jj += 2;
-			}
-		else // if(ai%bs==3)
-			{
-			ll_max = m-jj<1 ? m-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<1; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0];
-			pA += bs*sda - 3;
-			x += 1;
-			kernel_sgemv_t_4_lib4(m-1-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<1 ? n-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*1;
-			z += 1;
-			jj += 1;
-			}
-
-		}
-	
-	for(; jj<n-3; jj+=4)
-		{
-		zt[0] = pA[0+bs*0]*x[0] + pA[1+bs*0]*x[1] + pA[2+bs*0]*x[2] + pA[3+bs*0]*x[3];
-		zt[1] = pA[1+bs*1]*x[1] + pA[2+bs*1]*x[2] + pA[3+bs*1]*x[3];
-		zt[2] = pA[2+bs*2]*x[2] + pA[3+bs*2]*x[3];
-		zt[3] = pA[3+bs*3]*x[3];
-		pA += bs*sda;
-		x += 4;
-		kernel_sgemv_t_4_lib4(m-4-jj, &alpha, pA, sda, x, &beta, zt, z);
-		pA += bs*4;
-		z += 4;
-		}
-	if(jj<n)
-		{
-		ll_max = m-jj<4 ? m-jj : 4;
-		for(ll=0; ll<ll_max; ll++)
-			xt[ll] = x[ll];
-		for(; ll<4; ll++)
-			xt[ll] = 0.0;
-		zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3];
-		zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3];
-		zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3];
-		zt[3] = pA[3+bs*3]*xt[3];
-		pA += bs*sda;
-		x += 4;
-		kernel_sgemv_t_4_lib4(m-4-jj, &alpha, pA, sda, x, &beta, zt, zt);
-		for(ll=0; ll<n-jj; ll++)
-			z[ll] = zt[ll];
-//		pA += bs*4;
-//		z += 4;
-		}
-
-	return;
-
-	}
-
-
-
-void strmv_unn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ai!=0)
-		{
-		printf("\nstrmv_unn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int i;
-	
-	i=0;
-	for(; i<m-3; i+=4)
-		{
-		kernel_strmv_un_4_lib4(m-i, pA, x, z);
-		pA += 4*sda+4*bs;
-		x  += 4;
-		z  += 4;
-		}
-	if(m>i)
-		{
-		if(m-i==1)
-			{
-			z[0] = pA[0+bs*0]*x[0];
-			}
-		else if(m-i==2)
-			{
-			z[0] = pA[0+bs*0]*x[0] + pA[0+bs*1]*x[1];
-			z[1] = pA[1+bs*1]*x[1];
-			}
-		else // if(m-i==3)
-			{
-			z[0] = pA[0+bs*0]*x[0] + pA[0+bs*1]*x[1] + pA[0+bs*2]*x[2];
-			z[1] = pA[1+bs*1]*x[1] + pA[1+bs*2]*x[2];
-			z[2] = pA[2+bs*2]*x[2];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-void strmv_utn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ai!=0)
-		{
-		printf("\nstrmv_utn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii, idx;
-	
-	float *ptrA;
-	
-	ii=0;
-	idx = m/bs*bs;
-	if(m%bs!=0)
-		{
-		kernel_strmv_ut_4_vs_lib4(m, pA+idx*bs, sda, x, z+idx, m%bs);
-		ii += m%bs;
-		}
-	idx -= 4;
-	for(; ii<m; ii+=4)
-		{
-		kernel_strmv_ut_4_lib4(idx+4, pA+idx*bs, sda, x, z+idx);
-		idx -= 4;
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_lnn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_lnn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_lnn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_lnn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_lnn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_lnn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_lnn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_lnn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_lnn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_lnn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_lnn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	int i;
-
-	if(x!=z)
-		{
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-		}
-	
-	i = 0;
-	for( ; i<m-3; i+=4)
-		{
-		kernel_strsv_ln_inv_4_lib4(i, &pA[i*sda], &dA[i], z, &z[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_strsv_ln_inv_4_vs_lib4(i, &pA[i*sda], &dA[i], z, &z[i], &z[i], m-i, m-i);
-		i+=4;
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_lnn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0 | n==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_lnn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** strsv_lnn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_lnn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_lnn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_lnn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_lnn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_lnn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** strsv_lnn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_lnn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_lnn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_lnn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	if(m<n)
-		m = n;
-
-	float alpha = -1.0;
-	float beta = 1.0;
-
-	int i;
-
-	if(x!=z)
-		{
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-		}
-	
-	i = 0;
-	for( ; i<n-3; i+=4)
-		{
-		kernel_strsv_ln_inv_4_lib4(i, &pA[i*sda], &dA[i], z, &z[i], &z[i]);
-		}
-	if(i<n)
-		{
-		kernel_strsv_ln_inv_4_vs_lib4(i, &pA[i*sda], &dA[i], z, &z[i], &z[i], m-i, n-i);
-		i+=4;
-		}
-	for( ; i<m-3; i+=4)
-		{
-		kernel_sgemv_n_4_lib4(n, &alpha, &pA[i*sda], z, &beta, &z[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_sgemv_n_4_vs_lib4(n, &alpha, &pA[i*sda], z, &beta, &z[i], &z[i], m-i);
-		i+=4;
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_ltn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_ltn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_ltn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_ltn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_ltn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_ltn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_ltn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_ltn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_ltn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_ltn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_ltn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	int i;
-	
-	if(x!=z)
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-			
-	i=0;
-	if(m%4==1)
-		{
-		kernel_strsv_lt_inv_1_lib4(i+1, &pA[m/bs*bs*sda+(m-i-1)*bs], sda, &dA[m-i-1], &z[m-i-1], &z[m-i-1], &z[m-i-1]);
-		i++;
-		}
-	else if(m%4==2)
-		{
-		kernel_strsv_lt_inv_2_lib4(i+2, &pA[m/bs*bs*sda+(m-i-2)*bs], sda, &dA[m-i-2], &z[m-i-2], &z[m-i-2], &z[m-i-2]);
-		i+=2;
-		}
-	else if(m%4==3)
-		{
-		kernel_strsv_lt_inv_3_lib4(i+3, &pA[m/bs*bs*sda+(m-i-3)*bs], sda, &dA[m-i-3], &z[m-i-3], &z[m-i-3], &z[m-i-3]);
-		i+=3;
-		}
-	for(; i<m-3; i+=4)
-		{
-		kernel_strsv_lt_inv_4_lib4(i+4, &pA[(m-i-4)/bs*bs*sda+(m-i-4)*bs], sda, &dA[m-i-4], &z[m-i-4], &z[m-i-4], &z[m-i-4]);
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_ltn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_ltn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** strsv_ltn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_ltn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_ltn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_ltn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_ltn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_ltn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** strsv_ltn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_ltn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_ltn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_ltn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	if(n>m)
-		n = m;
-	
-	int i;
-	
-	if(x!=z)
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-			
-	i=0;
-	if(n%4==1)
-		{
-		kernel_strsv_lt_inv_1_lib4(m-n+i+1, &pA[n/bs*bs*sda+(n-i-1)*bs], sda, &dA[n-i-1], &z[n-i-1], &z[n-i-1], &z[n-i-1]);
-		i++;
-		}
-	else if(n%4==2)
-		{
-		kernel_strsv_lt_inv_2_lib4(m-n+i+2, &pA[n/bs*bs*sda+(n-i-2)*bs], sda, &dA[n-i-2], &z[n-i-2], &z[n-i-2], &z[n-i-2]);
-		i+=2;
-		}
-	else if(n%4==3)
-		{
-		kernel_strsv_lt_inv_3_lib4(m-n+i+3, &pA[n/bs*bs*sda+(n-i-3)*bs], sda, &dA[n-i-3], &z[n-i-3], &z[n-i-3], &z[n-i-3]);
-		i+=3;
-		}
-	for(; i<n-3; i+=4)
-		{
-		kernel_strsv_lt_inv_4_lib4(m-n+i+4, &pA[(n-i-4)/bs*bs*sda+(n-i-4)*bs], sda, &dA[n-i-4], &z[n-i-4], &z[n-i-4], &z[n-i-4]);
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_lnu_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_lnu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_lnu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_lnu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_lnu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_lnu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_lnu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_lnu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_lnu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_lnu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** strsv_lnu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void strsv_ltu_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_ltu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_ltu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_ltu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_ltu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_ltu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_ltu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_ltu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_ltu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_ltu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** strsv_ltu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void strsv_unn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_unn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_unn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_unn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_unn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_unn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_unn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_unn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_unn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_unn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** strsv_unn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void strsv_utn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_utn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_utn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_utn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_utn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_utn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_utn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_utn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_utn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_utn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** strsv_utn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
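
Throughout the file above, element (ai, aj) is addressed as pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs: the matrix is stored panel-major, i.e. in horizontal panels of bs rows (bs = 4 here), each panel keeping its columns contiguous (bs floats per column, sda = sA->cn columns of capacity per panel). A small sketch of that offset computation, with sda picked arbitrarily for the example:

#include <stdio.h>

// offset of element (i, j) in a panel-major matrix with panel height bs and panel
// column capacity sda: panel i/bs, column j inside it, row i%bs inside that column
static int panel_major_offset(int i, int j, int bs, int sda)
	{
	return i/bs*bs*sda + j*bs + i%bs;
	}

int main()
	{
	int bs = 4, sda = 8;
	printf("%d\n", panel_major_offset(0, 0, bs, sda)); // 0
	printf("%d\n", panel_major_offset(3, 2, bs, sda)); // 11 = 2*4 + 3, still in the first panel
	printf("%d\n", panel_major_offset(4, 0, bs, sda)); // 32 = 4*8, start of the second panel
	return 0;
	}
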
diff --git a/third_party/blasfeo/blas/s_blas2_lib8.c b/third_party/blasfeo/blas/s_blas2_lib8.c
deleted file mode 100644
index 41a78c4..0000000
--- a/third_party/blasfeo/blas/s_blas2_lib8.c
+++ /dev/null
@@ -1,1008 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-void sgemv_n_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<0)
-		return;
-
-	const int bs = 8;
-
-	int i;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	i = 0;
-	// clean up at the beginning
-	if(ai%bs!=0)
-		{
-		kernel_sgemv_n_8_gen_lib8(n, &alpha, pA, x, &beta, y-ai%bs, z-ai%bs, ai%bs, m+ai%bs);
-		pA += bs*sda;
-		y += 8 - ai%bs;
-		z += 8 - ai%bs;
-		m -= 8 - ai%bs;
-		}
-	// main loop
-	for( ; i<m-7; i+=8)
-		{
-		kernel_sgemv_n_8_lib8(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_sgemv_n_8_vs_lib8(n, &alpha, &pA[i*sda], x, &beta, &y[i], &z[i], m-i);
-		}
-		
-	return;
-
-	}
-
-
-
-void sgemv_t_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-	if(n<=0)
-		return;
-	
-	const int bs = 8;
-
-	int i;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	if(ai%bs==0)
-		{
-		i = 0;
-		for( ; i<n-7; i+=8)
-			{
-			kernel_sgemv_t_8_lib8(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i]);
-			}
-		if(i<n)
-			{
-			if(n-i<=4)
-				{
-				kernel_sgemv_t_4_vs_lib8(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-				}
-			else
-				{
-				kernel_sgemv_t_8_vs_lib8(m, &alpha, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-				}
-			}
-		}
-	else
-		{
-		i = 0;
-		for( ; i<n-4; i+=8)
-			{
-			kernel_sgemv_t_8_gen_lib8(m, &alpha, ai%bs, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		if(i<n)
-			{
-			kernel_sgemv_t_4_gen_lib8(m, &alpha, ai%bs, &pA[i*bs], sda, x, &beta, &y[i], &z[i], n-i);
-			}
-		}
-	
-	return;
-
-	}
-
-
-
-// m >= n
-void strmv_lnn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	if(m-n>0)
-		sgemv_n_libstr(m-n, n, 1.0, sA, ai+n, aj, sx, xi, 0.0, sz, zi+n, sz, zi+n);
-
-	float *pA2 = pA;
-	float *z2 = z;
-	int m2 = n;
-	int n2 = 0;
-	float *pA3, *x3;
-
-	float alpha = 1.0;
-	float beta = 1.0;
-
-	float zt[8];
-
-	int ii, jj, jj_end;
-
-	ii = 0;
-
-	if(ai%bs!=0)
-		{
-		pA2 += sda*bs - ai%bs;
-		z2 += bs-ai%bs;
-		m2 -= bs-ai%bs;
-		n2 += bs-ai%bs;
-		}
-	
-	pA2 += m2/bs*bs*sda;
-	z2 += m2/bs*bs;
-	n2 += m2/bs*bs;
-
-	if(m2%bs!=0)
-		{
-		//
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		zt[7] = pA3[7+bs*0]*x3[0] + pA3[7+bs*1]*x3[1] + pA3[7+bs*2]*x3[2] + pA3[7+bs*3]*x3[3] + pA3[7+bs*4]*x3[4] + pA3[7+bs*5]*x3[5] + pA3[7+bs*6]*x3[6] + pA3[7+bs*7]*x3[7];
-		zt[6] = pA3[6+bs*0]*x3[0] + pA3[6+bs*1]*x3[1] + pA3[6+bs*2]*x3[2] + pA3[6+bs*3]*x3[3] + pA3[6+bs*4]*x3[4] + pA3[6+bs*5]*x3[5] + pA3[6+bs*6]*x3[6];
-		zt[5] = pA3[5+bs*0]*x3[0] + pA3[5+bs*1]*x3[1] + pA3[5+bs*2]*x3[2] + pA3[5+bs*3]*x3[3] + pA3[5+bs*4]*x3[4] + pA3[5+bs*5]*x3[5];
-		zt[4] = pA3[4+bs*0]*x3[0] + pA3[4+bs*1]*x3[1] + pA3[4+bs*2]*x3[2] + pA3[4+bs*3]*x3[3] + pA3[4+bs*4]*x3[4];
-		zt[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		zt[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		zt[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		zt[0] = pA3[0+bs*0]*x3[0];
-		kernel_sgemv_n_8_lib8(n2, &alpha, pA2, x, &beta, zt, zt);
-		for(jj=0; jj<m2%bs; jj++)
-			z2[jj] = zt[jj];
-		}
-	for(; ii<m2-7; ii+=8)
-		{
-		pA2 -= bs*sda;
-		z2 -= 8;
-		n2 -= 8;
-		pA3 = pA2 + bs*n2;
-		x3 = x + n2;
-		z2[7] = pA3[7+bs*0]*x3[0] + pA3[7+bs*1]*x3[1] + pA3[7+bs*2]*x3[2] + pA3[7+bs*3]*x3[3] + pA3[7+bs*4]*x3[4] + pA3[7+bs*5]*x3[5] + pA3[7+bs*6]*x3[6] + pA3[7+bs*7]*x3[7];
-		z2[6] = pA3[6+bs*0]*x3[0] + pA3[6+bs*1]*x3[1] + pA3[6+bs*2]*x3[2] + pA3[6+bs*3]*x3[3] + pA3[6+bs*4]*x3[4] + pA3[6+bs*5]*x3[5] + pA3[6+bs*6]*x3[6];
-		z2[5] = pA3[5+bs*0]*x3[0] + pA3[5+bs*1]*x3[1] + pA3[5+bs*2]*x3[2] + pA3[5+bs*3]*x3[3] + pA3[5+bs*4]*x3[4] + pA3[5+bs*5]*x3[5];
-		z2[4] = pA3[4+bs*0]*x3[0] + pA3[4+bs*1]*x3[1] + pA3[4+bs*2]*x3[2] + pA3[4+bs*3]*x3[3] + pA3[4+bs*4]*x3[4];
-		z2[3] = pA3[3+bs*0]*x3[0] + pA3[3+bs*1]*x3[1] + pA3[3+bs*2]*x3[2] + pA3[3+bs*3]*x3[3];
-		z2[2] = pA3[2+bs*0]*x3[0] + pA3[2+bs*1]*x3[1] + pA3[2+bs*2]*x3[2];
-		z2[1] = pA3[1+bs*0]*x3[0] + pA3[1+bs*1]*x3[1];
-		z2[0] = pA3[0+bs*0]*x3[0];
-		kernel_sgemv_n_8_lib8(n2, &alpha, pA2, x, &beta, z2, z2);
-		}
-	if(ai%bs!=0)
-		{
-		if(ai%bs==1)
-			{
-			zt[6] = pA[6+bs*0]*x[0] + pA[6+bs*1]*x[1] + pA[6+bs*2]*x[2] + pA[6+bs*3]*x[3] + pA[6+bs*4]*x[4] + pA[6+bs*5]*x[5] + pA[6+bs*6]*x[6];
-			zt[5] = pA[5+bs*0]*x[0] + pA[5+bs*1]*x[1] + pA[5+bs*2]*x[2] + pA[5+bs*3]*x[3] + pA[5+bs*4]*x[4] + pA[5+bs*5]*x[5];
-			zt[4] = pA[4+bs*0]*x[0] + pA[4+bs*1]*x[1] + pA[4+bs*2]*x[2] + pA[4+bs*3]*x[3] + pA[4+bs*4]*x[4];
-			zt[3] = pA[3+bs*0]*x[0] + pA[3+bs*1]*x[1] + pA[3+bs*2]*x[2] + pA[3+bs*3]*x[3];
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==2)
-			{
-			zt[5] = pA[5+bs*0]*x[0] + pA[5+bs*1]*x[1] + pA[5+bs*2]*x[2] + pA[5+bs*3]*x[3] + pA[5+bs*4]*x[4] + pA[5+bs*5]*x[5];
-			zt[4] = pA[4+bs*0]*x[0] + pA[4+bs*1]*x[1] + pA[4+bs*2]*x[2] + pA[4+bs*3]*x[3] + pA[4+bs*4]*x[4];
-			zt[3] = pA[3+bs*0]*x[0] + pA[3+bs*1]*x[1] + pA[3+bs*2]*x[2] + pA[3+bs*3]*x[3];
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==3)
-			{
-			zt[4] = pA[4+bs*0]*x[0] + pA[4+bs*1]*x[1] + pA[4+bs*2]*x[2] + pA[4+bs*3]*x[3] + pA[4+bs*4]*x[4];
-			zt[3] = pA[3+bs*0]*x[0] + pA[3+bs*1]*x[1] + pA[3+bs*2]*x[2] + pA[3+bs*3]*x[3];
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==4)
-			{
-			zt[3] = pA[3+bs*0]*x[0] + pA[3+bs*1]*x[1] + pA[3+bs*2]*x[2] + pA[3+bs*3]*x[3];
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==5)
-			{
-			zt[2] = pA[2+bs*0]*x[0] + pA[2+bs*1]*x[1] + pA[2+bs*2]*x[2];
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else if(ai%bs==6)
-			{
-			zt[1] = pA[1+bs*0]*x[0] + pA[1+bs*1]*x[1];
-			zt[0] = pA[0+bs*0]*x[0];
-			jj_end = 8-ai%bs<n ? 8-ai%bs : n;
-			for(jj=0; jj<jj_end; jj++)
-				z[jj] = zt[jj];
-			}
-		else // if (ai%bs==7)
-			{
-			z[0] = pA[0+bs*0]*x[0];
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// m >= n
-void strmv_ltn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m<=0)
-		return;
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	float xt[8];
-	float zt[8];
-
-	float alpha = 1.0;
-	float beta = 1.0;
-
-	int ii, jj, ll, ll_max;
-
-	jj = 0;
-
-	if(ai%bs!=0)
-		{
-
-		if(ai%bs==1)
-			{
-			ll_max = m-jj<7 ? m-jj : 7;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<7; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3] + pA[4+bs*0]*xt[4] + pA[5+bs*0]*xt[5] + pA[6+bs*0]*xt[6];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3] + pA[4+bs*1]*xt[4] + pA[5+bs*1]*xt[5] + pA[6+bs*1]*xt[6];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3] + pA[4+bs*2]*xt[4] + pA[5+bs*2]*xt[5] + pA[6+bs*2]*xt[6];
-			zt[3] = pA[3+bs*3]*xt[3] + pA[4+bs*3]*xt[4] + pA[5+bs*3]*xt[5] + pA[6+bs*3]*xt[6];
-			zt[4] = pA[4+bs*4]*xt[4] + pA[5+bs*4]*xt[5] + pA[6+bs*4]*xt[6];
-			zt[5] = pA[5+bs*5]*xt[5] + pA[6+bs*5]*xt[6];
-			zt[6] = pA[6+bs*6]*xt[6];
-			pA += bs*sda - 1;
-			x += 7;
-			kernel_sgemv_t_8_lib8(m-7-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<7 ? n-jj : 7;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*7;
-			z += 7;
-			jj += 7;
-			}
-		else if(ai%bs==2)
-			{
-			ll_max = m-jj<6 ? m-jj : 6;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<6; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3] + pA[4+bs*0]*xt[4] + pA[5+bs*0]*xt[5];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3] + pA[4+bs*1]*xt[4] + pA[5+bs*1]*xt[5];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3] + pA[4+bs*2]*xt[4] + pA[5+bs*2]*xt[5];
-			zt[3] = pA[3+bs*3]*xt[3] + pA[4+bs*3]*xt[4] + pA[5+bs*3]*xt[5];
-			zt[4] = pA[4+bs*4]*xt[4] + pA[5+bs*4]*xt[5];
-			zt[5] = pA[5+bs*5]*xt[5];
-			pA += bs*sda - 2;
-			x += 6;
-			kernel_sgemv_t_8_lib8(m-6-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<6 ? n-jj : 6;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*6;
-			z += 6;
-			jj += 6;
-			}
-		else if(ai%bs==3)
-			{
-			ll_max = m-jj<5 ? m-jj : 5;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<5; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3] + pA[4+bs*0]*xt[4];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3] + pA[4+bs*1]*xt[4];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3] + pA[4+bs*2]*xt[4];
-			zt[3] = pA[3+bs*3]*xt[3] + pA[4+bs*3]*xt[4];
-			zt[4] = pA[4+bs*4]*xt[4];
-			pA += bs*sda - 3;
-			x += 5;
-			kernel_sgemv_t_8_lib8(m-5-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<5 ? n-jj : 5;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*5;
-			z += 5;
-			jj += 5;
-			}
-		else if(ai%bs==4)
-			{
-			ll_max = m-jj<4 ? m-jj : 4;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<4; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3];
-			zt[3] = pA[3+bs*3]*xt[3];
-			pA += bs*sda - 4;
-			x += 4;
-			kernel_sgemv_t_8_lib8(m-4-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<4 ? n-jj : 4;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*4;
-			z += 4;
-			jj += 4;
-			}
-		else if(ai%bs==5)
-			{
-			ll_max = m-jj<3 ? m-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<3; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2];
-			zt[2] = pA[2+bs*2]*xt[2];
-			pA += bs*sda - 5;
-			x += 3;
-			kernel_sgemv_t_8_lib8(m-3-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<3 ? n-jj : 3;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*3;
-			z += 3;
-			jj += 3;
-			}
-		else if(ai%bs==6)
-			{
-			ll_max = m-jj<2 ? m-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<2; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1];
-			zt[1] = pA[1+bs*1]*xt[1];
-			pA += bs*sda - 6;
-			x += 2;
-			kernel_sgemv_t_8_lib8(m-2-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<2 ? n-jj : 2;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*2;
-			z += 2;
-			jj += 2;
-			}
-		else // if(ai%bs==7)
-			{
-			ll_max = m-jj<1 ? m-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<1; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0];
-			pA += bs*sda - 7;
-			x += 1;
-			kernel_sgemv_t_8_lib8(m-1-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			ll_max = n-jj<1 ? n-jj : 1;
-			for(ll=0; ll<ll_max; ll++)
-				z[ll] = zt[ll];
-			pA += bs*1;
-			z += 1;
-			jj += 1;
-			}
-
-		}
-	
-	for(; jj<n-7; jj+=8)
-		{
-		zt[0] = pA[0+bs*0]*x[0] + pA[1+bs*0]*x[1] + pA[2+bs*0]*x[2] + pA[3+bs*0]*x[3] + pA[4+bs*0]*x[4] + pA[5+bs*0]*x[5] + pA[6+bs*0]*x[6] + pA[7+bs*0]*x[7];
-		zt[1] = pA[1+bs*1]*x[1] + pA[2+bs*1]*x[2] + pA[3+bs*1]*x[3] + pA[4+bs*1]*x[4] + pA[5+bs*1]*x[5] + pA[6+bs*1]*x[6] + pA[7+bs*1]*x[7];
-		zt[2] = pA[2+bs*2]*x[2] + pA[3+bs*2]*x[3] + pA[4+bs*2]*x[4] + pA[5+bs*2]*x[5] + pA[6+bs*2]*x[6] + pA[7+bs*2]*x[7];
-		zt[3] = pA[3+bs*3]*x[3] + pA[4+bs*3]*x[4] + pA[5+bs*3]*x[5] + pA[6+bs*3]*x[6] + pA[7+bs*3]*x[7];
-		zt[4] = pA[4+bs*4]*x[4] + pA[5+bs*4]*x[5] + pA[6+bs*4]*x[6] + pA[7+bs*4]*x[7];
-		zt[5] = pA[5+bs*5]*x[5] + pA[6+bs*5]*x[6] + pA[7+bs*5]*x[7];
-		zt[6] = pA[6+bs*6]*x[6] + pA[7+bs*6]*x[7];
-		zt[7] = pA[7+bs*7]*x[7];
-		pA += bs*sda;
-		x += 8;
-		kernel_sgemv_t_8_lib8(m-8-jj, &alpha, pA, sda, x, &beta, zt, z);
-		pA += bs*8;
-		z += 8;
-		}
-	if(jj<n)
-		{
-		if(n-jj<=4)
-			{
-			ll_max = m-jj<4 ? m-jj : 4;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<4; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3];
-			zt[3] = pA[3+bs*3]*xt[3];
-			pA += bs*sda;
-			x += 4;
-			kernel_sgemv_t_4_lib8(m-4-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			for(ll=0; ll<n-jj; ll++)
-				z[ll] = zt[ll];
-//			pA += bs*4;
-//			z += 4;
-			}
-		else
-			{
-			ll_max = m-jj<8 ? m-jj : 8;
-			for(ll=0; ll<ll_max; ll++)
-				xt[ll] = x[ll];
-			for(; ll<8; ll++)
-				xt[ll] = 0.0;
-			zt[0] = pA[0+bs*0]*xt[0] + pA[1+bs*0]*xt[1] + pA[2+bs*0]*xt[2] + pA[3+bs*0]*xt[3] + pA[4+bs*0]*xt[4] + pA[5+bs*0]*xt[5] + pA[6+bs*0]*xt[6] + pA[7+bs*0]*xt[7];
-			zt[1] = pA[1+bs*1]*xt[1] + pA[2+bs*1]*xt[2] + pA[3+bs*1]*xt[3] + pA[4+bs*1]*xt[4] + pA[5+bs*1]*xt[5] + pA[6+bs*1]*xt[6] + pA[7+bs*1]*xt[7];
-			zt[2] = pA[2+bs*2]*xt[2] + pA[3+bs*2]*xt[3] + pA[4+bs*2]*xt[4] + pA[5+bs*2]*xt[5] + pA[6+bs*2]*xt[6] + pA[7+bs*2]*xt[7];
-			zt[3] = pA[3+bs*3]*xt[3] + pA[4+bs*3]*xt[4] + pA[5+bs*3]*xt[5] + pA[6+bs*3]*xt[6] + pA[7+bs*3]*xt[7];
-			zt[4] = pA[4+bs*4]*xt[4] + pA[5+bs*4]*xt[5] + pA[6+bs*4]*xt[6] + pA[7+bs*4]*xt[7];
-			zt[5] = pA[5+bs*5]*xt[5] + pA[6+bs*5]*xt[6] + pA[7+bs*5]*xt[7];
-			zt[6] = pA[6+bs*6]*xt[6] + pA[7+bs*6]*xt[7];
-			zt[7] = pA[7+bs*7]*xt[7];
-			pA += bs*sda;
-			x += 8;
-			kernel_sgemv_t_8_lib8(m-8-jj, &alpha, pA, sda, x, &beta, zt, zt);
-			for(ll=0; ll<n-jj; ll++)
-				z[ll] = zt[ll];
-//			pA += bs*8;
-//			z += 8;
-			}
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_lnn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_lnn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_lnn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_lnn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_lnn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_lnn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_lnn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_lnn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_lnn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_lnn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_lnn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	int i;
-
-	if(x!=z)
-		{
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-		}
-	
-	i = 0;
-	for( ; i<m-7; i+=8)
-		{
-		kernel_strsv_ln_inv_8_lib8(i, &pA[i*sda], &dA[i], z, &z[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_strsv_ln_inv_8_vs_lib8(i, &pA[i*sda], &dA[i], z, &z[i], &z[i], m-i, m-i);
-		i+=8;
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_lnn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0 | n==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_lnn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** strsv_lnn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_lnn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_lnn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_lnn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_lnn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_lnn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** strsv_lnn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_lnn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_lnn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_lnn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	if(m<n)
-		m = n;
-
-	float alpha = -1.0;
-	float beta = 1.0;
-
-	int i;
-
-	if(x!=z)
-		{
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-		}
-	
-	i = 0;
-	for( ; i<n-7; i+=8)
-		{
-		kernel_strsv_ln_inv_8_lib8(i, &pA[i*sda], &dA[i], z, &z[i], &z[i]);
-		}
-	if(i<n)
-		{
-		kernel_strsv_ln_inv_8_vs_lib8(i, &pA[i*sda], &dA[i], z, &z[i], &z[i], m-i, n-i);
-		i+=8;
-		}
-	for( ; i<m-7; i+=8)
-		{
-		kernel_sgemv_n_8_lib8(n, &alpha, &pA[i*sda], z, &beta, &z[i], &z[i]);
-		}
-	if(i<m)
-		{
-		kernel_sgemv_n_8_vs_lib8(n, &alpha, &pA[i*sda], z, &beta, &z[i], &z[i], m-i);
-		i+=8;
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_ltn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_ltn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_ltn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_ltn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_ltn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_ltn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_ltn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** strsv_ltn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_ltn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_ltn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_ltn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(m, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	int i, i1;
-	
-	if(x!=z)
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-			
-	i=0;
-	i1 = m%8;
-	if(i1!=0)
-		{
-		kernel_strsv_lt_inv_8_vs_lib8(i+i1, &pA[m/bs*bs*sda+(m-i-i1)*bs], sda, &dA[m-i-i1], &z[m-i-i1], &z[m-i-i1], &z[m-i-i1], i1, i1);
-		i += i1;
-		}
-	for(; i<m-7; i+=8)
-		{
-		kernel_strsv_lt_inv_8_lib8(i+8, &pA[(m-i-8)/bs*bs*sda+(m-i-8)*bs], sda, &dA[m-i-8], &z[m-i-8], &z[m-i-8], &z[m-i-8]);
-		}
-
-	return;
-
-	}
-
-
-
-void strsv_ltn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi)
-	{
-
-	if(m==0)
-		return;
-
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** strsv_ltn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** strsv_ltn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** strsv_ltn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** strsv_ltn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** strsv_ltn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** strsv_ltn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** strsv_ltn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** strsv_ltn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** strsv_ltn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** strsv_ltn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-
-	if(ai!=0)
-		{
-		printf("\nstrsv_ltn_mn_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *dA = sA->dA;
-	float *x = sx->pa + xi;
-	float *z = sz->pa + zi;
-
-	int ii;
-
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-
-	if(n>m)
-		n = m;
-	
-	int i, i1;
-	
-	if(x!=z)
-		for(i=0; i<m; i++)
-			z[i] = x[i];
-			
-	i=0;
-	i1 = n%8;
-	if(i1!=0)
-		{
-		kernel_strsv_lt_inv_8_vs_lib8(m-n+i1, &pA[n/bs*bs*sda+(n-i1)*bs], sda, &dA[n-i1], &z[n-i1], &z[n-i1], &z[n-i1], m-n+i1, i1);
-		i += i1;
-		}
-	for(; i<n-7; i+=8)
-		{
-		kernel_strsv_lt_inv_8_lib8(m-n+i+8, &pA[(n-i-8)/bs*bs*sda+(n-i-8)*bs], sda, &dA[n-i-8], &z[n-i-8], &z[n-i-8], &z[n-i-8]);
-		}
-
-	return;
-
-	}
-
-
-
-void sgemv_nt_libstr(int m, int n, float alpha_n, float alpha_t, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx_n, int xi_n, struct s_strvec *sx_t, int xi_t, float beta_n, float beta_t, struct s_strvec *sy_n, int yi_n, struct s_strvec *sy_t, int yi_t, struct s_strvec *sz_n, int zi_n, struct s_strvec *sz_t, int zi_t)
-	{
-
-	if(ai!=0)
-		{
-		printf("\nsgemv_nt_libstr: feature not implemented yet: ai=%d\n", ai);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs; // TODO ai
-	float *x_n = sx_n->pa + xi_n;
-	float *x_t = sx_t->pa + xi_t;
-	float *y_n = sy_n->pa + yi_n;
-	float *y_t = sy_t->pa + yi_t;
-	float *z_n = sz_n->pa + zi_n;
-	float *z_t = sz_t->pa + zi_t;
-
-//	if(m<=0 | n<=0)
-//		return;
-
-	int ii;
-
-	// copy and scale y_n into z_n
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		z_n[ii+1] = beta_n*y_n[ii+1];
-		z_n[ii+2] = beta_n*y_n[ii+2];
-		z_n[ii+3] = beta_n*y_n[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z_n[ii+0] = beta_n*y_n[ii+0];
-		}
-	
-	ii = 0;
-	for(; ii<n-3; ii+=4)
-		{
-		kernel_sgemv_nt_4_lib8(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii);
-		}
-	if(ii<n)
-		{
-		kernel_sgemv_nt_4_vs_lib8(m, &alpha_n, &alpha_t, pA+ii*bs, sda, x_n+ii, x_t, &beta_t, y_t+ii, z_n, z_t+ii, n-ii);
-		}
-	
-		return;
-	}
-
-
-
-void ssymv_l_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi)
-	{
-
-//	if(m<=0 | n<=0)
-//		return;
-	
-	const int bs = 8;
-
-	int ii, n1, n2;
-
-	int sda = sA->cn;
-	float *pA = sA->pA + aj*bs + ai/bs*bs*sda + ai%bs;
-	float *x = sx->pa + xi;
-	float *y = sy->pa + yi;
-	float *z = sz->pa + zi;
-
-	// copy and scale y into z
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z[ii+0] = beta*y[ii+0];
-		z[ii+1] = beta*y[ii+1];
-		z[ii+2] = beta*y[ii+2];
-		z[ii+3] = beta*y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = beta*y[ii+0];
-		}
-	
-	// clean up at the beginning
-	if(ai%bs!=0) // 1, 2, 3
-		{
-		n1 = 8-ai%bs;
-		n2 = n<n1 ? n : n1;
-		kernel_ssymv_l_4l_gen_lib8(m-0, &alpha, ai%bs, &pA[0+(0)*bs], sda, &x[0], &z[0], n2-0);
-		kernel_ssymv_l_4r_gen_lib8(m-4, &alpha, ai%bs, &pA[4+(4)*bs], sda, &x[4], &z[4], n2-4);
-		pA += n1 + n1*bs + (sda-1)*bs;
-		x += n1;
-		z += n1;
-		m -= n1;
-		n -= n1;
-		}
-	// main loop
-	ii = 0;
-	for(; ii<n-7; ii+=8)
-		{
-		kernel_ssymv_l_4l_lib8(m-ii-0, &alpha, &pA[0+(ii+0)*bs+ii*sda], sda, &x[ii+0], &z[ii+0]);
-		kernel_ssymv_l_4r_lib8(m-ii-4, &alpha, &pA[4+(ii+4)*bs+ii*sda], sda, &x[ii+4], &z[ii+4]);
-		}
-	// clean up at the end
-	if(ii<n)
-		{
-		kernel_ssymv_l_4l_gen_lib8(m-ii-0, &alpha, 0, &pA[0+(ii+0)*bs+ii*sda], sda, &x[ii+0], &z[ii+0], n-ii-0);
-		kernel_ssymv_l_4r_gen_lib8(m-ii-4, &alpha, 0, &pA[4+(ii+4)*bs+ii*sda], sda, &x[ii+4], &z[ii+4], n-ii-4);
-		}
-	
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
diff --git a/third_party/blasfeo/blas/s_blas3_diag_lib.c b/third_party/blasfeo/blas/s_blas3_diag_lib.c
deleted file mode 100644
index 23f8e0f..0000000
--- a/third_party/blasfeo/blas/s_blas3_diag_lib.c
+++ /dev/null
@@ -1,49 +0,0 @@
-
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_kernel.h"
-
-
-
-#define REAL float
-
-#define STRMAT s_strmat
-#define STRVEC s_strvec
-
-#define GEMM_L_DIAG_LIBSTR sgemm_l_diag_libstr
-#define GEMM_R_DIAG_LIBSTR sgemm_r_diag_libstr
-
-
-
-#include "x_blas3_diag_lib.c"
-
diff --git a/third_party/blasfeo/blas/s_blas3_diag_lib4.c b/third_party/blasfeo/blas/s_blas3_diag_lib4.c
deleted file mode 100644
index 0319212..0000000
--- a/third_party/blasfeo/blas/s_blas3_diag_lib4.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dgemm with A diagonal matrix (stored as strvec)
-void sgemm_l_diag_libstr(int m, int n, float alpha, struct s_strvec *sA, int ai, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-	if(bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsgemm_l_diag_libstr: feature not implemented yet: bi=%d, ci=%d, di=%d\n", bi, ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *dA = sA->pa + ai;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-//	sgemm_diag_left_lib(m, n, alpha, dA, pB, sdb, beta, pC, sdc, pD, sdd);
-	int ii;
-
-	ii = 0;
-	if(beta==0.0)
-		{
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_sgemm_diag_left_4_a0_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &pD[ii*sdd]);
-			}
-		}
-	else
-		{
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_sgemm_diag_left_4_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-			}
-		}
-	if(m-ii>0)
-		{
-		if(m-ii==1)
-			kernel_sgemm_diag_left_1_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		else if(m-ii==2)
-			kernel_sgemm_diag_left_2_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		else // if(m-ii==3)
-			kernel_sgemm_diag_left_3_lib4(n, &alpha, &dA[ii], &pB[ii*sdb], &beta, &pC[ii*sdc], &pD[ii*sdd]);
-		}
-	
-	return;
-
-	}
-
-
-
-// dgemm with B diagonal matrix (stored as strvec)
-void sgemm_r_diag_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sB, int bi, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-	if(ai!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsgemm_r_diag_libstr: feature not implemented yet: ai=%d, ci=%d, di=%d\n", ai, ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *dB = sB->pa + bi;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	int ii;
-
-	ii = 0;
-	if(beta==0.0)
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_sgemm_diag_right_4_a0_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &pD[ii*bs], sdd);
-			}
-		}
-	else
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_sgemm_diag_right_4_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-			}
-		}
-	if(n-ii>0)
-		{
-		if(n-ii==1)
-			kernel_sgemm_diag_right_1_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else if(n-ii==2)
-			kernel_sgemm_diag_right_2_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else // if(n-ii==3)
-			kernel_sgemm_diag_right_3_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		}
-		return;
-
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
diff --git a/third_party/blasfeo/blas/s_blas3_diag_lib8.c b/third_party/blasfeo/blas/s_blas3_diag_lib8.c
deleted file mode 100644
index 8469345..0000000
--- a/third_party/blasfeo/blas/s_blas3_diag_lib8.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dgemm with B diagonal matrix (stored as strvec)
-void sgemm_r_diag_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sB, int bi, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-	if(ai!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsgemm_r_diag_libstr: feature not implemented yet: ai=%d, ci=%d, di=%d\n", ai, ci, di);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *dB = sB->pa + bi;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	int ii;
-
-	ii = 0;
-	if(beta==0.0)
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_sgemm_diag_right_4_a0_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &pD[ii*bs], sdd);
-			}
-		}
-	else
-		{
-		for( ; ii<n-3; ii+=4)
-			{
-			kernel_sgemm_diag_right_4_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-			}
-		}
-	if(n-ii>0)
-		{
-		if(n-ii==1)
-			kernel_sgemm_diag_right_1_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else if(n-ii==2)
-			kernel_sgemm_diag_right_2_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		else // if(n-ii==3)
-			kernel_sgemm_diag_right_3_lib4(m, &alpha, &pA[ii*bs], sda, &dB[ii], &beta, &pC[ii*bs], sdc, &pD[ii*bs], sdd);
-		}
-		return;
-
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
diff --git a/third_party/blasfeo/blas/s_blas3_lib.c b/third_party/blasfeo/blas/s_blas3_lib.c
deleted file mode 100644
index dca98ff..0000000
--- a/third_party/blasfeo/blas/s_blas3_lib.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if defined(LA_BLAS)
-#if defined(REF_BLAS_BLIS)
-#include "s_blas_64.h"
-#else
-#include "s_blas.h"
-#endif
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-#define REAL float
-
-#define STRMAT s_strmat
-
-#define GEMM_NN_LIBSTR sgemm_nn_libstr
-#define GEMM_NT_LIBSTR sgemm_nt_libstr
-#define SYRK_LN_LIBSTR ssyrk_ln_libstr
-#define SYRK_LN_MN_LIBSTR ssyrk_ln_mn_libstr
-#define TRMM_RLNN_LIBSTR strmm_rlnn_libstr
-#define TRMM_RUTN_LIBSTR strmm_rutn_libstr
-#define TRSM_LLNU_LIBSTR strsm_llnu_libstr
-#define TRSM_LUNN_LIBSTR strsm_lunn_libstr
-#define TRSM_RLTN_LIBSTR strsm_rltn_libstr
-#define TRSM_RLTU_LIBSTR strsm_rltu_libstr
-#define TRSM_RUTN_LIBSTR strsm_rutn_libstr
-
-#define COPY scopy_
-#define GEMM sgemm_
-#define SYRK ssyrk_
-#define TRMM strmm_
-#define TRSM strsm_
-
-
-
-#include "x_blas3_lib.c"
-
diff --git a/third_party/blasfeo/blas/s_blas3_lib4.c b/third_party/blasfeo/blas/s_blas3_lib4.c
deleted file mode 100644
index c6be38f..0000000
--- a/third_party/blasfeo/blas/s_blas3_lib4.c
+++ /dev/null
@@ -1,1062 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-/****************************
-* old interface
-****************************/
-
-void sgemm_nt_lib(int m, int n, int k, float alpha, float *pA, int sda, float *pB, int sdb, float beta, float *pC, int sdc, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_sgemm_nt_16x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+i*sdc], sdc, &pD[j*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+0)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+0)*sdc], &pD[j*bs+(i+0)*sdd], m-(i+0), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+4)*sdc], &pD[j*bs+(i+4)*sdd], m-(i+4), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+8)*sdc], &pD[j*bs+(i+8)*sdd], m-(i+8), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+12)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+12)*sdc], &pD[j*bs+(i+12)*sdd], m-(i+12), n-j);
-			}
-		}
-#endif
-#if defined(TARGET_ARMV7A_ARM_CORTEX_A15)  | defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	for(; i<m-11; i+=12)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_sgemm_nt_12x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+i*sdc], sdc, &pD[j*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+0)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+0)*sdc], &pD[j*bs+(i+0)*sdd], m-(i+0), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+4)*sdc], &pD[j*bs+(i+4)*sdd], m-(i+4), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+8)*sdc], &pD[j*bs+(i+8)*sdd], m-(i+8), n-j);
-			}
-		}
-#endif
-#if defined(TARGET_ARMV8A_ARM_CORTEX_A57) | defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-#if defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-		for(; j<n-7; j+=8)
-			{
-			kernel_sgemm_nt_8x8_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], sdb, &beta, &pC[j*bs+i*sdc], sdc, &pD[j*bs+i*sdd], sdd);
-			}
-#endif
-		for(; j<n-3; j+=4)
-			{
-			kernel_sgemm_nt_8x4_lib4(k, &alpha, &pA[i*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+i*sdc], sdc, &pD[j*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+0)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+0)*sdc], &pD[j*bs+(i+0)*sdd], m-(i+0), n-j);
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+4)*sdc], &pD[j*bs+(i+4)*sdd], m-(i+4), n-j);
-			}
-		}
-#endif
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_sgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-			}
-		if(j<n)
-			{
-			kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-	left_12:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+0)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+0)*sdc], &pD[j*bs+(i+0)*sdd], m-(i+0), n-j);
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+4)*sdc], &pD[j*bs+(i+4)*sdd], m-(i+4), n-j);
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+8)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+8)*sdc], &pD[j*bs+(i+8)*sdd], m-(i+8), n-j);
-		}
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+0)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+0)*sdc], &pD[j*bs+(i+0)*sdd], m-(i+0), n-j);
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[(i+4)*sda], &pB[j*sdb], &beta, &pC[j*bs+(i+4)*sdc], &pD[j*bs+(i+4)*sdd], m-(i+4), n-j);
-		}
-	return;
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-void sgemm_nn_lib(int m, int n, int k, float alpha, float *pA, int sda, float *pB, int sdb, float beta, float *pC, int sdc, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int i, j, l;
-
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_sgemm_nn_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*bs], sdb, &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-			}
-		if(j<n)
-			{
-			kernel_sgemm_nn_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*bs], sdb, &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean-up loop definitions
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_sgemm_nn_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*bs], sdb, &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-void strmm_nt_ru_lib(int m, int n, float alpha, float *pA, int sda, float *pB, int sdb, float beta, float *pC, int sdc, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-	
-	int i, j;
-	
-	i = 0;
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_strmm_nt_ru_4x4_lib4(n-j, &alpha, &pA[j*bs+i*sda], &pB[j*bs+j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-			}
-		if(j<n) // TODO specialized edge routine
-			{
-			kernel_strmm_nt_ru_4x4_vs_lib4(n-j, &alpha, &pA[j*bs+i*sda], &pB[j*bs+j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		goto left_4;
-		}
-	
-	// common return
-	return;
-
-	left_4:
-	j = 0;
-//	for(; j<n-3; j+=4)
-	for(; j<n; j+=4)
-		{
-		kernel_strmm_nt_ru_4x4_vs_lib4(n-j, &alpha, &pA[j*bs+i*sda], &pB[j*bs+j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-//	if(j<n) // TODO specialized edge routine
-//		{
-//		kernel_strmm_nt_ru_4x4_vs_lib4(n-j, &pA[j*bs+i*sda], &pB[j*bs+j*sdb], alg, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-//		}
-	return;
-
-	}
-
-
-
-// D <= B * A^{-T} , with A lower triangular with unit diagonal
-void strsm_nt_rl_one_lib(int m, int n, float *pA, int sda, float *pB, int sdb, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-	
-	int i, j;
-	
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_strsm_nt_rl_one_4x4_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda]);
-			}
-		if(j<n)
-			{
-			kernel_strsm_nt_rl_one_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_strsm_nt_rl_one_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-// D <= B * A^{-T} , with A upper triangular employing explicit inverse of diagonal
-void strsm_nt_ru_inv_lib(int m, int n, float *pA, int sda, float *inv_diag_A, float *pB, int sdb, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-	
-	int i, j, idx;
-
-	int rn = n%4;
-
-	float *dummy;
-	
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		// clean at the end
-		if(rn>0)
-			{
-			idx = n-rn;
-			kernel_strsm_nt_ru_inv_4x4_vs_lib4(0, dummy, dummy, &pB[i*sdb+idx*bs], &pD[i*sdd+idx*bs], &pA[idx*sda+idx*bs], &inv_diag_A[idx], m-i, rn);
-			j += rn;
-			}
-		for(; j<n; j+=4)
-			{
-			idx = n-j-4;
-			kernel_strsm_nt_ru_inv_4x4_lib4(j, &pD[i*sdd+(idx+4)*bs], &pA[idx*sda+(idx+4)*bs], &pB[i*sdb+idx*bs], &pD[i*sdd+idx*bs], &pA[idx*sda+idx*bs], &inv_diag_A[idx]);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	left_4:
-	j = 0;
-	// TODO
-	// clean at the end
-	if(rn>0)
-		{
-		idx = n-rn;
-		kernel_strsm_nt_ru_inv_4x4_vs_lib4(0, dummy, dummy, &pB[i*sdb+idx*bs], &pD[i*sdd+idx*bs], &pA[idx*sda+idx*bs], &inv_diag_A[idx], m-i, rn);
-		j += rn;
-		}
-	for(; j<n; j+=4)
-		{
-		idx = n-j-4;
-		kernel_strsm_nt_ru_inv_4x4_vs_lib4(j, &pD[i*sdd+(idx+4)*bs], &pA[idx*sda+(idx+4)*bs], &pB[i*sdb+idx*bs], &pD[i*sdd+idx*bs], &pA[idx*sda+idx*bs], &inv_diag_A[idx], m-i, 4);
-		}
-	return;
-
-	}
-
-
-
-// D <= A^{-1} * B , with A lower triangular with unit diagonal
-void strsm_nn_ll_one_lib(int m, int n, float *pA, int sda, float *pB, int sdb, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-	
-	int i, j;
-	
-	i = 0;
-
-	for( ; i<m-3; i+=4)
-		{
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_strsm_nn_ll_one_4x4_lib4(i, pA+i*sda, pD+j*bs, sdd, pB+i*sdb+j*bs, pD+i*sdd+j*bs, pA+i*sda+i*bs);
-			}
-		if(j<n)
-			{
-			kernel_strsm_nn_ll_one_4x4_vs_lib4(i, pA+i*sda, pD+j*bs, sdd, pB+i*sdb+j*bs, pD+i*sdd+j*bs, pA+i*sda+i*bs, m-i, n-j);
-			}
-		}
-	if(i<m)
-		{
-		goto left_4;
-		}
-
-	// common return
-	return;
-
-	left_4:
-	j = 0;
-	for( ; j<n; j+=4)
-		{
-		kernel_strsm_nn_ll_one_4x4_vs_lib4(i, pA+i*sda, pD+j*bs, sdd, pB+i*sdb+j*bs, pD+i*sdd+j*bs, pA+i*sda+i*bs, m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-// D <= A^{-1} * B , with A upper triangular employing explicit inverse of diagonal
-void strsm_nn_lu_inv_lib(int m, int n, float *pA, int sda, float *inv_diag_A, float *pB, int sdb, float *pD, int sdd)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-	
-	int i, j, idx;
-	float *dummy;
-	
-	i = 0;
-	int rm = m%4;
-	if(rm>0)
-		{
-		// TODO code explicitly the final case
-		idx = m-rm; // position of the part to do
-		j = 0;
-		for( ; j<n; j+=4)
-			{
-			kernel_strsm_nn_lu_inv_4x4_vs_lib4(0, dummy, dummy, 0, pB+idx*sdb+j*bs, pD+idx*sdd+j*bs, pA+idx*sda+idx*bs, inv_diag_A+idx, rm, n-j);
-			}
-		// TODO
-		i += rm;
-		}
-//	int em = m-rm;
-	for( ; i<m; i+=4)
-		{
-		idx = m-i; // position of already done part
-		j = 0;
-		for( ; j<n-3; j+=4)
-			{
-			kernel_strsm_nn_lu_inv_4x4_lib4(i, pA+(idx-4)*sda+idx*bs, pD+idx*sdd+j*bs, sdd, pB+(idx-4)*sdb+j*bs, pD+(idx-4)*sdd+j*bs, pA+(idx-4)*sda+(idx-4)*bs, inv_diag_A+(idx-4));
-			}
-		if(j<n)
-			{
-			kernel_strsm_nn_lu_inv_4x4_vs_lib4(i, pA+(idx-4)*sda+idx*bs, pD+idx*sdd+j*bs, sdd, pB+(idx-4)*sdb+j*bs, pD+(idx-4)*sdd+j*bs, pA+(idx-4)*sda+(idx-4)*bs, inv_diag_A+(idx-4), 4, n-j);
-			}
-		}
-
-	// common return
-	return;
-
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dgemm nt
-void sgemm_nt_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	if(ai==0 & bi==0 & ci==0 & di==0)
-		{
-		sgemm_nt_lib(m, n, k, alpha, pA, sda, pB, sdb, beta, pC, sdc, pD, sdd); 
-		return;
-		}
-	
-	pA += ai/bs*bs*sda;
-	pB += bi/bs*bs*sda;
-	int ci0 = ci-ai%bs;
-	int di0 = di-ai%bs;
-	int offsetC;
-	int offsetD;
-	if(ci0>=0)
-		{
-		pC += ci0/bs*bs*sdd;
-		offsetC = ci0%bs;
-		}
-	else
-		{
-		pC += -4*sdc;
-		offsetC = bs+ci0;
-		}
-	if(di0>=0)
-		{
-		pD += di0/bs*bs*sdd;
-		offsetD = di0%bs;
-		}
-	else
-		{
-		pD += -4*sdd;
-		offsetD = bs+di0;
-		}
-	
-	int i, j, l;
-
-	int idxB;
-
-	i = 0;
-	// clean up at the beginning
-	if(ai%bs!=0)
-		{
-		j = 0;
-		idxB = 0;
-		// clean up at the beginning
-		if(bi%bs!=0)
-			{
-			kernel_sgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*bs+i*sdc]-bi%bs*bs, sdc, offsetD, &pD[j*bs+i*sdd]-bi%bs*bs, sdd, ai%bs, m-i, bi%bs, n-j);
-			j += bs-bi%bs;
-			idxB += 4;
-			}
-		// main loop
-		for(; j<n; j+=4)
-			{
-			kernel_sgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*bs+i*sdc], sdc, offsetD, &pD[j*bs+i*sdd], sdd, ai%bs, m-i, 0, n-j);
-			idxB += 4;
-			}
-		m -= bs-ai%bs;
-		pA += bs*sda;
-		pC += bs*sdc;
-		pD += bs*sdd;
-		}
-	// main loop
-	for(; i<m; i+=4)
-		{
-		j = 0;
-		idxB = 0;
-		// clean up at the beginning
-		if(bi%bs!=0)
-			{
-			kernel_sgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*bs+i*sdc]-bi%bs*bs, sdc, offsetD, &pD[j*bs+i*sdd]-bi%bs*bs, sdd, 0, m-i, bi%bs, n-j);
-			j += bs-bi%bs;
-			idxB += 4;
-			}
-		// main loop
-		for(; j<n; j+=4)
-			{
-			kernel_sgemm_nt_4x4_gen_lib4(k, &alpha, &pA[i*sda], &pB[idxB*sdb], &beta, offsetC, &pC[j*bs+i*sdc], sdc, offsetD, &pD[j*bs+i*sdd], sdd, 0, m-i, 0, n-j);
-			idxB += 4;
-			}
-		}
-
-	return;
-
-	}
-
-
-
-// dgemm nn
-void sgemm_nn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-	if(m<=0 || n<=0)
-		return;
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsgemm_nn_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	sgemm_nn_lib(m, n, k, alpha, pA, sda, pB, sdb, beta, pC, sdc, pD, sdd); 
-	return;
-	}
-	
-
-
-// dtrsm_nn_llu
-void strsm_llnu_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_llnu_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int bs = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	strsm_nn_ll_one_lib(m, n, pA, sda, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// dtrsm_nn_lun
-void strsm_lunn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_lunn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int bs = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dA = sA->dA;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	strsm_nn_lu_inv_lib(m, n, pA, sda, dA, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// strsm_right_lower_transposed_notunit
-void strsm_rltn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_rltn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	// TODO alpha
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dA = sA->dA;
-
-	int i, j;
-	
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(i=0; i<n; i++)
-				dA[i] = 1.0 / dA[i];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(i=0; i<n; i++)
-			dA[i] = 1.0 / dA[i];
-		sA->use_dA = 0;
-		}
-
-	if(m<=0 || n<=0)
-		return;
-	
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<n-3; j+=4)
-			{
-			kernel_strsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda], &dA[j]);
-			}
-		if(j<n)
-			{
-			kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda], &dA[j], m-i, n-j);
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	left_4:
-	j = 0;
-	for(; j<n; j+=4)
-		{
-		kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pA[j*sda], &pB[j*bs+i*sdb], &pD[j*bs+i*sdd], &pA[j*bs+j*sda], &dA[j], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-// strsm_right_lower_transposed_unit
-void strsm_rltu_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_rltu_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int bs = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	strsm_nt_rl_one_lib(m, n, pA, sda, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// strsm_right_upper_transposed_notunit
-void strsm_rutn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_rutn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-	const int bs = 4;
-	// TODO alpha
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dA = sA->dA;
-	int ii;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / dA[ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / dA[ii];
-		sA->use_dA = 0;
-		}
-	strsm_nt_ru_inv_lib(m, n, pA, sda, dA, pB, sdb, pD, sdd); 
-	return;
-	}
-
-
-
-// strmm_right_upper_transposed_notunit (B, i.e. the first matrix, is triangular !!!)
-void strmm_rutn_libstr(int m, int n, float alpha, struct s_strmat *sB, int bi, int bj, struct s_strmat *sA, int ai, int aj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | di!=0)
-		{
-		printf("\nstrmm_rutn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d\n", ai, bi, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	strmm_nt_ru_lib(m, n, alpha, pA, sda, pB, sdb, 0.0, pD, sdd, pD, sdd); 
-	return;
-	}
-
-
-
-// strmm_right_lower_nottransposed_notunit (B, i.e. the first matrix, is triangular !!!)
-void strmm_rlnn_libstr(int m, int n, float alpha, struct s_strmat *sB, int bi, int bj, struct s_strmat *sA, int ai, int aj, struct s_strmat *sD, int di, int dj)
-	{
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	pA += ai/bs*bs*sda;
-	pB += bi/bs*bs*sdb;
-	int offsetB = bi%bs;
-	int di0 = di-ai%bs;
-	int offsetD;
-	if(di0>=0)
-		{
-		pD += di0/bs*bs*sdd;
-		offsetD = di0%bs;
-		}
-	else
-		{
-		pD += -4*sdd;
-		offsetD = bs+di0;
-		}
-	
-	int ii, jj;
-
-	ii = 0;
-	if(ai%bs!=0)
-		{
-		jj = 0;
-		for(; jj<n; jj+=4)
-			{
-			kernel_strmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, ai%bs, m-ii, 0, n-jj);
-			}
-		m -= bs-ai%bs;
-		pA += bs*sda;
-		pD += bs*sdd;
-		}
-	if(offsetD==0)
-		{
-		for(; ii<m-3; ii+=4)
-			{
-			jj = 0;
-			for(; jj<n-5; jj+=4)
-				{
-				kernel_strmm_nn_rl_4x4_lib4(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs]);
-				}
-			for(; jj<n; jj+=4)
-				{
-				kernel_strmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, 0, &pD[ii*sdd+jj*bs], sdd, 0, 4, 0, n-jj);
-				}
-			}
-		if(ii<m)
-			{
-			goto left_4;
-			}
-		}
-	else
-		{
-		for(; ii<m; ii+=4)
-			{
-			jj = 0;
-			for(; jj<n; jj+=4)
-				{
-				kernel_strmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-				}
-			}
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4:
-	jj = 0;
-	for(; jj<n; jj+=4)
-		{
-		kernel_strmm_nn_rl_4x4_gen_lib4(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-		}
-	return;
-
-	}
-
-
-
-void ssyrk_ln_libstr(int m, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsryrk_ln_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-//	ssyrk_nt_l_lib(m, n, k, alpha, pA, sda, pB, sdb, beta, pC, sdc, pD, sdd);
-
-	int i, j, l;
-
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i; j+=4)
-			{
-			kernel_sgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-			}
-		kernel_ssyrk_nt_l_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4:
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, m-j);
-		}
-	kernel_ssyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, m-j);
-	return;
-
-	}
-
-
-
-void ssyrk_ln_mn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nsryrk_ln_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-//	ssyrk_nt_l_lib(m, n, k, alpha, pA, sda, pB, sdb, beta, pC, sdc, pD, sdd);
-
-	int i, j, l;
-
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-			kernel_sgemm_nt_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dgemm
-				{
-				kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-				}
-			else // dsyrk
-				{
-				if(j<n-3)
-					{
-					kernel_ssyrk_nt_l_4x4_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd]);
-					}
-				else
-					{
-					kernel_ssyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4:
-	j = 0;
-	for(; j<i && j<n; j+=4)
-		{
-		kernel_sgemm_nt_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_ssyrk_nt_l_4x4_vs_lib4(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
diff --git a/third_party/blasfeo/blas/s_blas3_lib8.c b/third_party/blasfeo/blas/s_blas3_lib8.c
deleted file mode 100644
index f0f5144..0000000
--- a/third_party/blasfeo/blas/s_blas3_lib8.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-// printf is also used outside DIM_CHECK (in the "feature not implemented"
-// error paths below), so include stdio.h unconditionally.
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-void sgemm_nt_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// TODO check that sA=!sD or that if sA==sD then they do not overlap (same for sB)
-	// non-negative size
-	if(m<0) printf("\n****** sgemm_nt_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgemm_nt_libstr : n<0 : %d<0 *****\n", n);
-	if(k<0) printf("\n****** sgemm_nt_libstr : k<0 : %d<0 *****\n", k);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgemm_nt_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgemm_nt_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgemm_nt_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgemm_nt_libstr : bj<0 : %d<0 *****\n", bj);
-	if(ci<0) printf("\n****** sgemm_nt_libstr : ci<0 : %d<0 *****\n", ci);
-	if(cj<0) printf("\n****** sgemm_nt_libstr : cj<0 : %d<0 *****\n", cj);
-	if(di<0) printf("\n****** sgemm_nt_libstr : di<0 : %d<0 *****\n", di);
-	if(dj<0) printf("\n****** sgemm_nt_libstr : dj<0 : %d<0 *****\n", dj);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** sgemm_nt_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+k > sA->n) printf("\n***** sgemm_nt_libstr : aj+k > col(A) : %d+%d > %d *****\n", aj, k, sA->n);
-	// B: n x k
-	if(bi+n > sB->m) printf("\n***** sgemm_nt_libstr : bi+n > row(B) : %d+%d > %d *****\n", bi, n, sB->m);
-	if(bj+k > sB->n) printf("\n***** sgemm_nt_libstr : bj+k > col(B) : %d+%d > %d *****\n", bj, k, sB->n);
-	// C: m x n
-	if(ci+m > sC->m) printf("\n***** sgemm_nt_libstr : ci+m > row(C) : %d+%d > %d *****\n", ci, m, sC->m);
-	if(cj+n > sC->n) printf("\n***** sgemm_nt_libstr : cj+n > col(C) : %d+%d > %d *****\n", cj, n, sC->n);
-	// D: m x n
-	if(di+m > sD->m) printf("\n***** sgemm_nt_libstr : di+m > row(D) : %d+%d > %d *****\n", di, m, sD->m);
-	if(dj+n > sD->n) printf("\n***** sgemm_nt_libstr : dj+n > col(D) : %d+%d > %d *****\n", dj, n, sD->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(j<n-3)
-				{
-				kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-				if(j<n-4)
-					{
-					kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, 8, n-(j+4));
-					}
-				}
-			else
-				{
-				kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, 8, n-j);
-				}
-			}
-		}
-	if(m-i>0)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=12)
-			{
-			goto left_12;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-//		else if(m-i<=20)
-//			{
-//			goto left_20;
-//			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(j<n-3)
-				{
-				kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-				if(j<n-4)
-					{
-					kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, 8, n-(j+4));
-					}
-				}
-			else
-				{
-				kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, 8, n-j);
-				}
-			}
-		}
-	if(m-i>0)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=12)
-			{
-			goto left_12;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_24:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, 4);
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_20:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, 4);
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(i+16)*sdc], &pD[(j+0)*bs+(i+16)*sdd], m-(i+16), n-j);
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(i+16)*sdc], &pD[(j+0)*bs+(i+16)*sdd], m-(i+16), n-j);
-		}
-	return;
-#endif
-
-	left_16:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, 4);
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL) | defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_12:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(i+8)*sdc], &pD[(j+0)*bs+(i+8)*sdd], m-(i+8), n-j);
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nt_8x4_vs_lib8(k, &alpha, &pA[i*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(i+8)*sdc], &pD[(j+0)*bs+(i+8)*sdd], m-(i+8), n-j);
-		}
-	return;
-#endif
-
-	left_8:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nt_8x4_vs_lib8(k, &alpha, &pA[i*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		}
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL) | defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	left_4:
-	j = 0;
-	for(; j<n; j+=8)
-		{
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		}
-	return;
-#endif
-
-	}
-
-
-
-void sgemm_nn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m==0 | n==0)
-		return;
-	
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** sgemm_nn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** sgemm_nn_libstr : n<0 : %d<0 *****\n", n);
-	if(k<0) printf("\n****** sgemm_nn_libstr : k<0 : %d<0 *****\n", k);
-	// non-negative offset
-	if(ai<0) printf("\n****** sgemm_nn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** sgemm_nn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(bi<0) printf("\n****** sgemm_nn_libstr : bi<0 : %d<0 *****\n", bi);
-	if(bj<0) printf("\n****** sgemm_nn_libstr : bj<0 : %d<0 *****\n", bj);
-	if(ci<0) printf("\n****** sgemm_nn_libstr : ci<0 : %d<0 *****\n", ci);
-	if(cj<0) printf("\n****** sgemm_nn_libstr : cj<0 : %d<0 *****\n", cj);
-	if(di<0) printf("\n****** sgemm_nn_libstr : di<0 : %d<0 *****\n", di);
-	if(dj<0) printf("\n****** sgemm_nn_libstr : dj<0 : %d<0 *****\n", dj);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** sgemm_nn_libstr : ai+m > row(A) : %d+%d > %d *****\n\n", ai, m, sA->m);
-	if(aj+k > sA->n) printf("\n***** sgemm_nn_libstr : aj+k > col(A) : %d+%d > %d *****\n\n", aj, k, sA->n);
-	// B: k x n
-	if(bi+k > sB->m) printf("\n***** sgemm_nn_libstr : bi+k > row(B) : %d+%d > %d *****\n\n", bi, k, sB->m);
-	if(bj+n > sB->n) printf("\n***** sgemm_nn_libstr : bj+n > col(B) : %d+%d > %d *****\n\n", bj, n, sB->n);
-	// C: m x n
-	if(ci+m > sC->m) printf("\n***** sgemm_nn_libstr : ci+m > row(C) : %d+%d > %d *****\n\n", ci, m, sC->m);
-	if(cj+n > sC->n) printf("\n***** sgemm_nn_libstr : cj+n > col(C) : %d+%d > %d *****\n\n", cj, n, sC->n);
-	// D: m x n
-	if(di+m > sD->m) printf("\n***** sgemm_nn_libstr : di+m > row(D) : %d+%d > %d *****\n\n", di, m, sD->m);
-	if(dj+n > sD->n) printf("\n***** sgemm_nn_libstr : dj+n > col(D) : %d+%d > %d *****\n\n", dj, n, sD->n);
-#endif
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs + bi/bs*bs*sdb;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	int offsetB = bi%bs;
-
-	int i, j, l;
-
-	i = 0;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-			kernel_sgemm_nn_24x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nn_24x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(j<n-3)
-				{
-				kernel_sgemm_nn_24x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-				if(j<n-4)
-					{
-					kernel_sgemm_nn_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, 16, n-(j+4));
-					}
-				}
-			else
-				{
-				kernel_sgemm_nn_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, 16, n-j);
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-#if 1
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-			kernel_sgemm_nn_16x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nn_16x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(j<n-3)
-				{
-				kernel_sgemm_nn_16x4_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-				if(j<n-4)
-					{
-					kernel_sgemm_nn_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, 16, n-(j+4));
-					}
-				}
-			else
-				{
-				kernel_sgemm_nn_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, 16, n-j);
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#else
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-#if 1
-			kernel_sgemm_nn_8x8_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd]);
-#else
-			kernel_sgemm_nn_8x4_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd]);
-			kernel_sgemm_nn_8x4_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], &pD[(j+4)*bs+i*sdd]);
-#endif
-			}
-		if(j<n)
-			{
-			if(j<n-3)
-				{
-				kernel_sgemm_nn_8x4_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd]);
-				if(j<n-4)
-					{
-					kernel_sgemm_nn_8x4_gen_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+4)*bs], sdb, &beta, 0, &pC[(j+4)*bs+i*sdc], sdc, 0, &pD[(j+4)*bs+i*sdd], sdd, 0, 8, 0, n-(j+4));
-					}
-				}
-			else
-				{
-				kernel_sgemm_nn_8x4_gen_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, 0, &pC[(j+0)*bs+i*sdc], sdc, 0, &pD[(j+0)*bs+i*sdd], sdd, 0, 8, 0, n-j);
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_8;
-		}
-#endif
-#endif
-
-	// common return if i==m
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nn_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		kernel_sgemm_nn_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nn_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-#endif
-
-	left_16:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nn_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		kernel_sgemm_nn_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+4)*bs], sdb, &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nn_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-j);
-		}
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_sgemm_nn_8x8_vs_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		kernel_sgemm_nn_8x4_vs_lib8(k, &alpha, &pA[i*sda], offsetB, &pB[(j+0)*bs], sdb, &beta, &pC[(j+0)*bs+i*sdc], &pD[(j+0)*bs+i*sdd], m-i, n-j);
-		}
-	return;
-
-	}
-
-
-
-void ssyrk_ln_libstr(int m, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ci>0 | di>0)
-		{
-		printf("\nssyrk_ln_libstr: feature not implemented yet: ci>0, di>0\n");
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int i, j;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<i; j+=8)
-			{
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-
-		kernel_ssyrk_nt_l_24x4_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd);
-		kernel_ssyrk_nt_l_20x4_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd);
-		kernel_ssyrk_nt_l_16x4_lib8(k, &alpha, &pA[(j+8)*sda], sda, &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd);
-		kernel_ssyrk_nt_l_12x4_lib8(k, &alpha, &pA[(j+8)*sda], sda, &pB[4+(j+8)*sdb], &beta, &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd);
-		kernel_ssyrk_nt_l_8x8_lib8(k, &alpha, &pA[(j+16)*sda], &pB[0+(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd]);
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=12)
-			{
-			goto left_12;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<i; j+=8)
-			{
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		kernel_ssyrk_nt_l_16x4_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd);
-		kernel_ssyrk_nt_l_12x4_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd);
-		kernel_ssyrk_nt_l_8x8_lib8(k, &alpha, &pA[(j+8)*sda], &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd]);
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=12)
-			{
-			goto left_12;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24: // 17 <= m <= 23
-	j = 0;
-	for(; j<i & j<m-7; j+=8)
-		{
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, m-(j+0));
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, m-(j+4));
-		}
-	kernel_ssyrk_nt_l_24x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, m-(i+0), m-(j+0));
-	kernel_ssyrk_nt_l_20x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, m-(i+0), m-(j+4));
-	kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(j+8)*sda], sda, &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, m-(i+8), m-(j+8));
-	kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(j+8)*sda], sda, &pB[4+(j+8)*sdb], &beta, &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, m-(i+8), m-(j+12));
-	if(j<m-20) // 21 - 23
-		{
-		kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(j+16)*sda], &pB[0+(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), m-(j+16));
-		}
-	else // 17 18 19 20
-		{
-		kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(j+16)*sda], &pB[0+(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), m-(j+16));
-		}
-	return;
-#endif
-
-	left_16: // 13 <= m <= 16
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, m-(j+0));
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, m-(j+4));
-		}
-	kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, m-(i+0), m-(j+0));
-	kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, m-(i+0), m-(j+4));
-	if(j<m-12) // 13 - 16
-		{
-		kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(j+8)*sda], &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), m-(j+8));
-		}
-	else // 9 - 12
-		{
-		kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(j+8)*sda], &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), m-(j+8));
-		}
-	return;
-
-	left_12: // 9 <= m <= 12
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[(i+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(i+0)*sdc], &pD[(j+0)*bs+(i+0)*sdd], m-(i+0), m-(j+0));
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(i+8)*sdc], &pD[(j+0)*bs+(i+8)*sdd], m-(i+8), m-(j+0));
-		}
-	kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(j+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], &pD[(j+0)*bs+(j+0)*sdd], m-(i+0), m-(j+0));
-	kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(j+8)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+8)*sdc], &pD[(j+0)*bs+(j+8)*sdd], m-(i+8), m-(j+0));
-	if(j<m-8) // 9 - 12
-		{
-		kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(j+8)*sda], &pB[0+(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), m-(j+8));
-		}
-	return;
-
-	left_8: // 5 <= m <= 8
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[(i+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(i+0)*sdc], &pD[(j+0)*bs+(i+0)*sdd], m-(i+0), m-(j+0));
-		}
-	if(j<m-4) // 5 - 8
-		{
-		kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(j+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], &pD[(j+0)*bs+(j+0)*sdd], m-(i+0), m-(j+0));
-		}
-	else // 1 - 4
-		{
-		kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], &pD[(j+0)*bs+(j+0)*sdd], m-(i+0), m-(j+0));
-		}
-	return;
-
-	left_4: // 1 <= m <= 4
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_sgemm_nt_4x8_vs_lib8(k, &alpha, &pA[(i+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(i+0)*sdc], &pD[(j+0)*bs+(i+0)*sdd], m-(i+0), m-(j+0));
-		}
-	kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(j+0)*sda], &pB[0+(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], &pD[(j+0)*bs+(j+0)*sdd], m-(i+0), m-(j+0));
-	return;
-
-	}
-
-
-
-void ssyrk_ln_mn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ci>0 | di>0)
-		{
-		printf("\nssyrk_ln_mn_libstr: feature not implemented yet: ci>0, di>0\n");
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int i, j;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_24x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-23)
-					{
-					kernel_ssyrk_nt_l_24x4_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd);
-					kernel_ssyrk_nt_l_20x4_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd);
-					kernel_ssyrk_nt_l_16x4_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd);
-					kernel_ssyrk_nt_l_12x4_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], &beta, &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd);
-					kernel_ssyrk_nt_l_8x8_lib8(k, &alpha, &pA[(i+16)*sda], &pB[(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 23
-						{
-						kernel_ssyrk_nt_l_24x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+0));
-						kernel_ssyrk_nt_l_20x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+4));
-						if(j==n-8)
-							return;
-						if(j<n-12) // 13 - 23
-							{
-							kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+8));
-							kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], &beta, &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+12));
-							if(j==n-16)
-								return;
-							if(j<n-20) // 21 - 23
-								{
-								kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), n-(j+16));
-								}
-							else // 17 18 19 20
-								{
-								kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), n-(j+16));
-								}
-							}
-						else // 9 10 11 12
-							{
-							kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_ssyrk_nt_l_24x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd);
-			kernel_sgemm_nt_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-15)
-					{
-					kernel_ssyrk_nt_l_16x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd);
-					kernel_ssyrk_nt_l_12x4_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd);
-					kernel_ssyrk_nt_l_8x8_lib8(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 15
-						{
-						kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+0));
-						kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+4));
-						if(j==n-8) // 8
-							return;
-						if(j<n-12) // 13 - 15
-							{
-							kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), n-(j+8));
-							}
-						else // 9 10 11 12
-							{
-							kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-		kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_nt_24x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 23
-				{
-				kernel_ssyrk_nt_l_24x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], &beta, &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+0));
-				kernel_ssyrk_nt_l_20x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], &beta, &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 23
-					{
-					kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+8));
-					kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], &beta, &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+12));
-					if(j>=n-16)
-						return;
-					if(j<n-20) // 21 - 23
-						{
-						kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), n-(j+16));
-						}
-					else // 17 18 19 20
-						{
-						kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(i+16)*sda], &pB[(j+16)*sdb], &beta, &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], m-(i+16), n-(j+16));
-						}
-					}
-				else // 9 10 11 12
-					{
-					kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_nt_l_24x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-#endif
-
-	left_16:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-		kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_nt_16x4_vs_lib8(k, &alpha, &pA[i*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 15
-				{
-				kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[0+j*sdb], &beta, &pC[(j+0)*bs+j*sdc], sdc, &pD[(j+0)*bs+j*sdd], sdd, m-(i+0), n-(j+0));
-				kernel_ssyrk_nt_l_12x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[4+j*sdb], &beta, &pC[(j+4)*bs+j*sdc], sdc, &pD[(j+4)*bs+j*sdd], sdd, m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 15
-					{
-					kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), n-(j+8));
-					}
-				else // 9 - 12
-					{
-					kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[(i+8)*sda], &pB[(j+8)*sdb], &beta, &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_nt_l_16x4_vs_lib8(k, &alpha, &pA[(i+0)*sda], sda, &pB[j*sdb], &beta, &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_nt_8x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_sgemm_nt_8x4_vs_lib8(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], m-i, n-j);
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_ssyrk_nt_l_8x8_vs_lib8(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_nt_l_8x4_vs_lib8(k, &alpha, &pA[i*sda], &pB[j*sdb], &beta, &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], m-i, n-j);
-				}
-			}
-		}
-	return;
-
-	}
-
-
-
-// strmm_right_lower_nottransposed_notunit (B, i.e. the first matrix, is triangular !!!)
-void strmm_rlnn_libstr(int m, int n, float alpha, struct s_strmat *sB, int bi, int bj, struct s_strmat *sA, int ai, int aj, struct s_strmat *sD, int di, int dj)
-	{
-
-	const int bs = 8;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-
-	pA += ai/bs*bs*sda;
-	pB += bi/bs*bs*sdb;
-	int offsetB = bi%bs;
-	int di0 = di-ai%bs;
-	int offsetD;
-	if(di0>=0)
-		{
-		pD += di0/bs*bs*sdd;
-		offsetD = di0%bs;
-		}
-	else
-		{
-		pD += -8*sdd;
-		offsetD = bs+di0;
-		}
-	
-	int ii, jj;
-
-	int offsetB4;
-
-	if(offsetB<4)
-		{
-		offsetB4 = offsetB+4;
-		ii = 0;
-		if(ai%bs!=0)
-			{
-			jj = 0;
-			for(; jj<n-4; jj+=8)
-				{
-				kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, ai%bs, m-ii, 0, n-jj);
-				kernel_strmm_nn_rl_8x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, ai%bs, m-ii, 0, n-jj-4);
-				}
-			m -= bs-ai%bs;
-			pA += bs*sda;
-			pD += bs*sdd;
-			}
-		if(offsetD==0)
-			{
-#if defined(TARGET_X64_INTEL_HASWELL)
-			// XXX create left_24 once the _gen_ kernel exist !!!
-			for(; ii<m-23; ii+=24)
-				{
-				jj = 0;
-				for(; jj<n-7; jj+=8)
-					{
-					kernel_strmm_nn_rl_24x4_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd);
-					kernel_strmm_nn_rl_24x4_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd);
-					}
-				if(n-jj>0)
-					{
-					kernel_strmm_nn_rl_24x4_vs_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd, 24, n-jj);
-					if(n-jj>4)
-						{
-						kernel_strmm_nn_rl_24x4_vs_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd, 24, n-jj-4);
-						}
-					}
-				}
-#endif
-			for(; ii<m-15; ii+=16)
-				{
-				jj = 0;
-				for(; jj<n-7; jj+=8)
-					{
-					kernel_strmm_nn_rl_16x4_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd);
-					kernel_strmm_nn_rl_16x4_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd);
-					}
-				if(n-jj>0)
-					{
-					kernel_strmm_nn_rl_16x4_vs_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd, 16, n-jj);
-					if(n-jj>4)
-						{
-						kernel_strmm_nn_rl_16x4_vs_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd, 16, n-jj-4);
-						}
-					}
-				}
-			if(m-ii>0)
-				{
-				if(m-ii<=8)
-					goto left_8;
-				else
-					goto left_16;
-				}
-			}
-		else
-			{
-			for(; ii<m-8; ii+=16)
-				{
-				jj = 0;
-				for(; jj<n-4; jj+=8)
-					{
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-					}
-				if(n-jj>0)
-					{
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-					}
-				}
-			if(m-ii>0)
-				goto left_8;
-			}
-		}
-	else
-		{
-		offsetB4 = offsetB-4;
-		ii = 0;
-		if(ai%bs!=0)
-			{
-			jj = 0;
-			for(; jj<n-4; jj+=8)
-				{
-				kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, ai%bs, m-ii, 0, n-jj);
-				kernel_strmm_nn_rl_8x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, ai%bs, m-ii, 0, n-jj-4);
-				}
-			m -= bs-ai%bs;
-			pA += bs*sda;
-			pD += bs*sdd;
-			}
-		if(offsetD==0)
-			{
-			for(; ii<m-15; ii+=16)
-				{
-				jj = 0;
-				for(; jj<n-7; jj+=8)
-					{
-					kernel_strmm_nn_rl_16x4_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd);
-					kernel_strmm_nn_rl_16x4_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd);
-					}
-				if(n-jj>0)
-					{
-					kernel_strmm_nn_rl_16x4_vs_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, &pD[ii*sdd+jj*bs], sdd, 8, n-jj);
-					if(n-jj>4)
-						{
-						kernel_strmm_nn_rl_16x4_vs_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, &pD[ii*sdd+(jj+4)*bs], sdd, 8, n-jj-4);
-						}
-					}
-				}
-			if(m-ii>0)
-				{
-				if(m-ii<=8)
-					goto left_8;
-				else
-					goto left_16;
-				}
-			}
-		else
-			{
-			for(; ii<m-8; ii+=16)
-				{
-				jj = 0;
-				for(; jj<n-4; jj+=8)
-					{
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-					}
-				if(n-jj>0)
-					{
-					kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-					}
-				}
-			if(m-ii>0)
-				goto left_8;
-			}
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_16:
-	if(offsetB<4)
-		{
-		jj = 0;
-		for(; jj<n-4; jj+=8)
-			{
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-			}
-		if(n-jj>0)
-			{
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			}
-		}
-	else
-		{
-		jj = 0;
-		for(; jj<n-4; jj+=8)
-			{
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], sda, offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-			}
-		if(n-jj>0)
-			{
-			kernel_strmm_nn_rl_16x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], sda, offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			}
-		}
-	return;
-
-	left_8:
-	if(offsetB<4)
-		{
-		jj = 0;
-		for(; jj<n-4; jj+=8)
-			{
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], offsetB4, &pB[jj*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-			}
-		if(n-jj>0)
-			{
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			}
-		}
-	else
-		{
-		jj = 0;
-		for(; jj<n-4; jj+=8)
-			{
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj-4, &alpha, &pA[ii*sda+(jj+4)*bs], offsetB4, &pB[(jj+8)*sdb+(jj+4)*bs], sdb, offsetD, &pD[ii*sdd+(jj+4)*bs], sdd, 0, m-ii, 0, n-jj-4);
-			}
-		if(n-jj>0)
-			{
-			kernel_strmm_nn_rl_8x4_gen_lib8(n-jj, &alpha, &pA[ii*sda+jj*bs], offsetB, &pB[jj*sdb+jj*bs], sdb, offsetD, &pD[ii*sdd+jj*bs], sdd, 0, m-ii, 0, n-jj);
-			}
-		}
-	return;
-
-	}
-
-
-
-// strsm_right_lower_transposed_notunit
-void strsm_rltn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(ai!=0 | bi!=0 | di!=0 | alpha!=1.0)
-		{
-		printf("\nstrsm_rltn_libstr: feature not implemented yet: ai=%d, bi=%d, di=%d, alpha=%f\n", ai, bi, di, alpha);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	// TODO alpha
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dA = sA->dA;
-
-	int i, j;
-	
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-			for(i=0; i<n; i++)
-				dA[i] = 1.0 / dA[i];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		sdiaex_lib(n, 1.0, ai, pA, sda, dA);
-		for(i=0; i<n; i++)
-			dA[i] = 1.0 / dA[i];
-		sA->use_dA = 0;
-		}
-
-	if(m<=0 || n<=0)
-		return;
-	
-	i = 0;
-
-	for(; i<m-7; i+=8)
-		{
-		j = 0;
-		for(; j<n-7; j+=8)
-			{
-			kernel_strsm_nt_rl_inv_8x4_lib8(j+0, &pD[i*sdd], &pA[0+j*sda], &pB[(j+0)*bs+i*sdb], &pD[(j+0)*bs+i*sdd], &pA[0+(j+0)*bs+j*sda], &dA[j+0]);
-			kernel_strsm_nt_rl_inv_8x4_lib8(j+4, &pD[i*sdd], &pA[4+j*sda], &pB[(j+4)*bs+i*sdb], &pD[(j+4)*bs+i*sdd], &pA[4+(j+4)*bs+j*sda], &dA[j+0]);
-			}
-		if(n-j>0)
-			{
-			kernel_strsm_nt_rl_inv_8x4_vs_lib8(j+0, &pD[i*sdd], &pA[0+j*sda], &pB[(j+0)*bs+i*sdb], &pD[(j+0)*bs+i*sdd], &pA[0+(j+0)*bs+j*sda], &dA[j+0], m-i, n-j-0);
-			if(n-j>4)
-				{
-				kernel_strsm_nt_rl_inv_8x4_vs_lib8(j+4, &pD[i*sdd], &pA[4+j*sda], &pB[(j+4)*bs+i*sdb], &pD[(j+4)*bs+i*sdd], &pA[4+(j+4)*bs+j*sda], &dA[j+4], m-i, n-j-4);
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_8;
-		}
-
-	// common return if i==m
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<n-4; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_8x4_vs_lib8(j+0, &pD[i*sdd], &pA[0+j*sda], &pB[(j+0)*bs+i*sdb], &pD[(j+0)*bs+i*sdd], &pA[0+(j+0)*bs+j*sda], &dA[j+0], m-i, n-j-0);
-		kernel_strsm_nt_rl_inv_8x4_vs_lib8(j+4, &pD[i*sdd], &pA[4+j*sda], &pB[(j+4)*bs+i*sdb], &pD[(j+4)*bs+i*sdd], &pA[4+(j+4)*bs+j*sda], &dA[j+4], m-i, n-j-4);
-		}
-	if(n-j>0)
-		{
-		kernel_strsm_nt_rl_inv_8x4_vs_lib8(j+0, &pD[i*sdd], &pA[0+j*sda], &pB[(j+0)*bs+i*sdb], &pD[(j+0)*bs+i*sdd], &pA[0+(j+0)*bs+j*sda], &dA[j+0], m-i, n-j-0);
-		}
-	return;
-
-	}
-
-
-
-
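
For orientation, the strsm_rltn (right / lower / transposed / not-unit) routine deleted above solves X * A^T = B for X, with A lower triangular and its reciprocal diagonal precomputed into dA. Below is a minimal unblocked, column-major sketch of the same operation (alpha fixed to 1, which is all the deleted routine supports); the function name and argument layout are illustrative only and not part of BLASFEO, which works on packed panel-major storage and multiplies by the reciprocal diagonal instead of dividing.

/* Illustrative, unblocked column-major version of the operation that
 * strsm_rltn_libstr implements: solve X * A^T = B for X, with A an
 * n-by-n lower triangular matrix (not-unit diagonal), B and X m-by-n. */
void strsm_rltn_ref(int m, int n, const float *A, int lda,
                    const float *B, int ldb, float *X, int ldx)
{
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < m; i++) {
			float t = B[i + j * ldb];
			for (int k = 0; k < j; k++)
				t -= X[i + k * ldx] * A[j + k * lda]; /* (A^T)[k][j] == A[j][k] */
			X[i + j * ldx] = t / A[j + j * lda];
		}
	}
}
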
diff --git a/third_party/blasfeo/blas/s_blas_64.h b/third_party/blasfeo/blas/s_blas_64.h
deleted file mode 100644
index 1589867..0000000
--- a/third_party/blasfeo/blas/s_blas_64.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// headers to reference BLAS and LAPACK routines employed in BLASFEO WR
-
-// level 1
-void scopy_(long long *m, float *x, long long *incx, float *y, long long *incy);
-void saxpy_(long long *m, float *alpha, float *x, long long *incx, float *y, long long *incy);
-void sscal_(long long *m, float *alpha, float *x, long long *incx);
-
-// level 2
-void sgemv_(char *ta, long long *m, long long *n, float *alpha, float *A, long long *lda, float *x, long long *incx, float *beta, float *y, long long *incy);
-void ssymv_(char *uplo, long long *m, float *alpha, float *A, long long *lda, float *x, long long *incx, float *beta, float *y, long long *incy);
-void strmv_(char *uplo, char *trans, char *diag, long long *n, float *A, long long *lda, float *x, long long *incx);
-void strsv_(char *uplo, char *trans, char *diag, long long *n, float *A, long long *lda, float *x, long long *incx);
-void sger_(long long *m, long long *n, float *alpha, float *x, long long *incx, float *y, long long *incy, float *A, long long *lda);
-
-// level 3
-void sgemm_(char *ta, char *tb, long long *m, long long *n, long long *k, float *alpha, float *A, long long *lda, float *B, long long *ldb, float *beta, float *C, long long *ldc);
-void ssyrk_(char *uplo, char *trans, long long *n, long long *k, float *alpha, float *A, long long *lda, float *beta, float *C, long long *ldc);
-void strmm_(char *side, char *uplo, char *transa, char *diag, long long *m, long long *n, float *alpha, float *A, long long *lda, float *B, long long *ldb);
-void strsm_(char *side, char *uplo, char *transa, char *diag, long long *m, long long *n, float *alpha, float *A, long long *lda, float *B, long long *ldb);
-
-// lapack
-long long spotrf_(char *uplo, long long *m, float *A, long long *lda, long long *info);
-long long sgetrf_(long long *m, long long *n, float *A, long long *lda, long long *ipiv, long long *info);
-void sgeqrf_(long long *m, long long *n, float *A, long long *lda, float *tau, float *work, long long *lwork, long long *info);
-void sgeqr2_(long long *m, long long *n, float *A, long long *lda, float *tau, float *work, long long *info);
-
-
-
-#ifdef __cplusplus
-}
-#endif
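
The header deleted above re-declares the reference BLAS/LAPACK entry points with long long integer arguments; it is only pulled in when BLASFEO is built in the LA_BLAS configuration against a 64-bit-integer BLAS (the REF_BLAS_BLIS path selected in s_lapack_lib.c below). A hedged sketch of a direct call through these prototypes, assuming such an ILP64 BLAS is linked in:

/* Sketch of calling sgemm_ through the 64-bit-integer prototypes above.
 * Assumes linking against a BLAS built with 64-bit integers; column-major
 * storage, Fortran calling convention (all scalars passed by pointer). */
#include <stdio.h>

void sgemm_(char *ta, char *tb, long long *m, long long *n, long long *k,
            float *alpha, float *A, long long *lda, float *B, long long *ldb,
            float *beta, float *C, long long *ldc);

int main(void)
{
	long long m = 2, n = 2, k = 2, lda = 2, ldb = 2, ldc = 2;
	float alpha = 1.0f, beta = 0.0f;
	float A[4] = {1.0f, 2.0f, 3.0f, 4.0f};  /* column-major 2x2 */
	float B[4] = {1.0f, 0.0f, 0.0f, 1.0f};  /* identity */
	float C[4] = {0.0f, 0.0f, 0.0f, 0.0f};
	sgemm_("N", "N", &m, &n, &k, &alpha, A, &lda, B, &ldb, &beta, C, &ldc);
	printf("%g %g\n%g %g\n", C[0], C[2], C[1], C[3]);  /* reproduces A */
	return 0;
}
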
diff --git a/third_party/blasfeo/blas/s_lapack_lib.c b/third_party/blasfeo/blas/s_lapack_lib.c
deleted file mode 100644
index c7cb56b..0000000
--- a/third_party/blasfeo/blas/s_lapack_lib.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#if defined(LA_BLAS)
-#if defined(REF_BLAS_BLIS)
-#include "s_blas_64.h"
-#else
-#include "s_blas.h"
-#endif
-#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux.h"
-
-
-
-#define REAL float
-
-#define STRMAT s_strmat
-#define STRVEC s_strvec
-
-#define GELQF_LIBSTR sgelqf_libstr
-#define GELQF_WORK_SIZE_LIBSTR sgelqf_work_size_libstr
-#define GEQRF_LIBSTR sgeqrf_libstr
-#define GEQRF_WORK_SIZE_LIBSTR sgeqrf_work_size_libstr
-#define GETF2_NOPIVOT sgetf2_nopivot
-#define GETRF_NOPIVOT_LIBSTR sgetrf_nopivot_libstr
-#define GETRF_LIBSTR sgetrf_libstr
-#define POTRF_L_LIBSTR spotrf_l_libstr
-#define POTRF_L_MN_LIBSTR spotrf_l_mn_libstr
-#define SYRK_POTRF_LN_LIBSTR ssyrk_spotrf_ln_libstr
-
-#define COPY scopy_
-#define GELQF sgelqf_
-#define GEMM sgemm_
-#define GER sger_
-#define GEQRF sgeqrf_
-#define GEQR2 sgeqr2_
-#define GETRF sgetrf_
-#define POTRF spotrf_
-#define SCAL sscal_
-#define SYRK ssyrk_
-#define TRSM strsm_
-
-
-#include "x_lapack_lib.c"
-
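
s_lapack_lib.c, deleted above, contains no algorithm of its own: it binds REAL, the strmat/strvec types, and the routine names to their single-precision spellings and then #includes the shared template x_lapack_lib.c, which the double-precision build instantiates the same way. A minimal stand-alone illustration of that preprocessor-template pattern (the file and function names here are invented for the example):

/* Minimal illustration of the macro-template pattern used above.
 * In BLASFEO the function body below would live in a shared template
 * file and be pulled in with #include after the macros are defined,
 * once per precision. */
#define REAL float
#define AXPY_REF saxpy_ref

/* ---- contents of the hypothetical shared template ---- */
void AXPY_REF(int n, REAL alpha, const REAL *x, REAL *y)
{
	for (int i = 0; i < n; i++)
		y[i] += alpha * x[i];
}
/* ---- end of template: a double-precision build would redefine
 *      REAL as double, AXPY_REF as daxpy_ref, and include it again ---- */
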
diff --git a/third_party/blasfeo/blas/s_lapack_lib4.c b/third_party/blasfeo/blas/s_lapack_lib4.c
deleted file mode 100644
index 7d02d36..0000000
--- a/third_party/blasfeo/blas/s_lapack_lib4.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-/****************************
-* old interface
-****************************/
-
-void ssyrk_spotrf_nt_l_lib(int m, int n, int k, float *pA, int sda, float *pB, int sdb, float *pC, int sdc, float *pD, int sdd, float *inv_diag_D)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	int alg = 1; // XXX
-
-	const int bs = 4;
-
-	int i, j, l;
-
-	i = 0;
-
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-			kernel_sgemm_strsm_nt_rl_inv_4x4_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &inv_diag_D[j]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dgemm
-				{
-				kernel_sgemm_strsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &inv_diag_D[j], m-i, n-j);
-				}
-			else // dsyrk
-				{
-				if(j<n-3)
-					{
-					kernel_ssyrk_spotrf_nt_l_4x4_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &inv_diag_D[j]);
-					}
-				else
-					{
-					kernel_ssyrk_spotrf_nt_l_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &inv_diag_D[j], m-i, n-j);
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4:
-	j = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-		kernel_sgemm_strsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &inv_diag_D[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		if(j<i) // dgemm
-			{
-			kernel_sgemm_strsm_nt_rl_inv_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &inv_diag_D[j], m-i, n-j);
-			}
-		else // dsyrk
-			{
-			kernel_ssyrk_spotrf_nt_l_4x4_vs_lib4(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &inv_diag_D[j], m-i, n-j);
-			}
-		}
-	return;
-
-	}
-
-
-
-void sgetrf_nn_nopivot_lib(int m, int n, float *pC, int sdc, float *pD, int sdd, float *inv_diag_D)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-	
-	const int bs = 4;
-
-	int ii, jj, ie;
-
-	// main loop
-	ii = 0;
-	for( ; ii<m-3; ii+=4)
-		{
-		jj = 0;
-		// solve lower
-		ie = n<ii ? n : ii; // ie is multiple of 4
-		for( ; jj<ie-3; jj+=4)
-			{
-			kernel_strsm_nn_ru_inv_4x4_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[jj*bs+jj*sdd], &inv_diag_D[jj]);
-			}
-		if(jj<ie)
-			{
-			kernel_strsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[jj*bs+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-			jj+=4;
-			}
-		// factorize
-		if(jj<n-3)
-			{
-			kernel_sgetrf_nn_4x4_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &inv_diag_D[jj]);
-			jj+=4;
-			}
-		else if(jj<n)
-			{
-			kernel_sgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-			jj+=4;
-			}
-		// solve upper 
-		for( ; jj<n-3; jj+=4)
-			{
-			kernel_strsm_nn_ll_one_4x4_lib4(ii, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[ii*bs+ii*sdd]);
-			}
-		if(jj<n)
-			{
-			kernel_strsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[ii*bs+ii*sdd], m-ii, n-jj);
-			}
-		}
-	if(m>ii)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	left_4:
-	jj = 0;
-	// solve lower
-	ie = n<ii ? n : ii; // ie is multiple of 4
-	for( ; jj<ie; jj+=4)
-		{
-		kernel_strsm_nn_ru_inv_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[jj*bs+jj*sdd], &inv_diag_D[jj], m-ii, ie-jj);
-		}
-	// factorize
-	if(jj<n)
-		{
-		kernel_sgetrf_nn_4x4_vs_lib4(jj, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &inv_diag_D[jj], m-ii, n-jj);
-		jj+=4;
-		}
-	// solve upper 
-	for( ; jj<n; jj+=4)
-		{
-		kernel_strsm_nn_ll_one_4x4_vs_lib4(ii, &pD[ii*sdd], &pD[jj*bs], sdd, &pC[jj*bs+ii*sdc], &pD[jj*bs+ii*sdd], &pD[ii*bs+ii*sdd], m-ii, n-jj);
-		}
-	return;
-
-	}
-
-
-
-void sgetrf_nn_lib(int m, int n, float *pC, int sdc, float *pD, int sdd, float *inv_diag_D, int *ipiv)
-	{
-
-	if(m<=0)
-		return;
-	
-	const int bs = 4;
-
-	int ii, jj, i0, i1, j0, ll, p;
-
-	float d1 = 1.0;
-	float dm1 = -1.0;
-
-//	// needs to perform row-exchanges on the yet-to-be-factorized matrix too
-//	if(pC!=pD)
-//		sgecp_lib(m, n, 1.0, 0, pC, sdc, 0, pD, sdd);
-
-	// minimum matrix size
-	p = n<m ? n : m; // XXX
-
-	// main loop
-	// 4 columns at a time
-	jj = 0;
-	for(; jj<p-3; jj+=4) // XXX
-		{
-		// pivot & factorize & solve lower
-		ii = jj;
-		i0 = ii;
-		for( ; ii<m-3; ii+=4)
-			{
-			kernel_sgemm_nn_4x4_lib4(jj, &dm1, &pD[ii*sdd], &pD[jj*bs], sdd, &d1, &pD[jj*bs+ii*sdd], &pD[jj*bs+ii*sdd]);
-			}
-		if(m-ii>0)
-			{
-			kernel_sgemm_nn_4x4_vs_lib4(jj, &dm1, &pD[ii*sdd], &pD[jj*bs], sdd, &d1, &pD[jj*bs+ii*sdd], &pD[jj*bs+ii*sdd], m-ii, 4);
-			}
-		kernel_sgetrf_pivot_4_lib4(m-i0, &pD[jj*bs+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-		ipiv[i0+0] += i0;
-		if(ipiv[i0+0]!=i0+0)
-			{
-			srowsw_lib(jj, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs+(jj+4)*bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs+(jj+4)*bs);
-			}
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			srowsw_lib(jj, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs+(jj+4)*bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs+(jj+4)*bs);
-			}
-		ipiv[i0+2] += i0;
-		if(ipiv[i0+2]!=i0+2)
-			{
-			srowsw_lib(jj, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs+(jj+4)*bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs+(jj+4)*bs);
-			}
-		ipiv[i0+3] += i0;
-		if(ipiv[i0+3]!=i0+3)
-			{
-			srowsw_lib(jj, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs+(jj+4)*bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs+(jj+4)*bs);
-			}
-
-		// solve upper
-		ll = jj+4;
-		for( ; ll<n-3; ll+=4)
-			{
-			kernel_strsm_nn_ll_one_4x4_lib4(i0, &pD[i0*sdd], &pD[ll*bs], sdd, &pD[ll*bs+i0*sdd], &pD[ll*bs+i0*sdd], &pD[i0*bs+i0*sdd]);
-			}
-		if(n-ll>0)
-			{
-			kernel_strsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*bs], sdd, &pD[ll*bs+i0*sdd], &pD[ll*bs+i0*sdd], &pD[i0*bs+i0*sdd], 4, n-ll);
-			}
-		}
-	if(m>=n)
-		{
-		if(n-jj>0)
-			{
-			goto left_n_4;
-			}
-		}
-	else
-		{
-		if(m-jj>0)
-			{
-			goto left_m_4;
-			}
-		}
-
-	// common return if jj==n
-	return;
-
-	// clean up
-
-	left_n_4:
-	// 1-4 columns at a time
-	// pivot & factorize & solve lower
-	ii = jj;
-	i0 = ii;
-	for( ; ii<m; ii+=4)
-		{
-		kernel_sgemm_nn_4x4_vs_lib4(jj, &dm1, &pD[ii*sdd], &pD[jj*bs], sdd, &d1, &pD[jj*bs+ii*sdd], &pD[jj*bs+ii*sdd], m-ii, n-jj);
-		}
-	kernel_sgetrf_pivot_4_vs_lib4(m-i0, n-jj, &pD[jj*bs+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		srowsw_lib(jj, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs);
-		srowsw_lib(n-jj-4, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs+(jj+4)*bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs+(jj+4)*bs);
-		}
-	if(n-jj>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			srowsw_lib(jj, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs+(jj+4)*bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs+(jj+4)*bs);
-			}
-		if(n-jj>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				srowsw_lib(jj, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs);
-				srowsw_lib(n-jj-4, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs+(jj+4)*bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs+(jj+4)*bs);
-				}
-			if(n-jj>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					srowsw_lib(jj, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs);
-					srowsw_lib(n-jj-4, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs+(jj+4)*bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs+(jj+4)*bs);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	if(0) // there is no upper
-		{
-		ll = jj+4;
-		for( ; ll<n; ll+=4)
-			{
-			kernel_strsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*bs], sdd, &pD[ll*bs+i0*sdd], &pD[ll*bs+i0*sdd], &pD[i0*bs+i0*sdd], m-i0, n-ll);
-			}
-		}
-	return;
-
-
-	left_m_4:
-	// 1-4 rows at a time
-	// pivot & factorize & solve lower
-	ii = jj;
-	i0 = ii;
-	kernel_sgemm_nn_4x4_vs_lib4(jj, &dm1, &pD[ii*sdd], &pD[jj*bs], sdd, &d1, &pD[jj*bs+ii*sdd], &pD[jj*bs+ii*sdd], m-ii, n-jj);
-	kernel_sgetrf_pivot_4_vs_lib4(m-i0, n-jj, &pD[jj*bs+i0*sdd], sdd, &inv_diag_D[jj], &ipiv[i0]);
-	ipiv[i0+0] += i0;
-	if(ipiv[i0+0]!=i0+0)
-		{
-		srowsw_lib(jj, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs);
-		srowsw_lib(n-jj-4, pD+(i0+0)/bs*bs*sdd+(i0+0)%bs+(jj+4)*bs, pD+(ipiv[i0+0])/bs*bs*sdd+(ipiv[i0+0])%bs+(jj+4)*bs);
-		}
-	if(m-i0>1)
-		{
-		ipiv[i0+1] += i0;
-		if(ipiv[i0+1]!=i0+1)
-			{
-			srowsw_lib(jj, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs);
-			srowsw_lib(n-jj-4, pD+(i0+1)/bs*bs*sdd+(i0+1)%bs+(jj+4)*bs, pD+(ipiv[i0+1])/bs*bs*sdd+(ipiv[i0+1])%bs+(jj+4)*bs);
-			}
-		if(m-i0>2)
-			{
-			ipiv[i0+2] += i0;
-			if(ipiv[i0+2]!=i0+2)
-				{
-				srowsw_lib(jj, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs);
-				srowsw_lib(n-jj-4, pD+(i0+2)/bs*bs*sdd+(i0+2)%bs+(jj+4)*bs, pD+(ipiv[i0+2])/bs*bs*sdd+(ipiv[i0+2])%bs+(jj+4)*bs);
-				}
-			if(m-i0>3)
-				{
-				ipiv[i0+3] += i0;
-				if(ipiv[i0+3]!=i0+3)
-					{
-					srowsw_lib(jj, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs);
-					srowsw_lib(n-jj-4, pD+(i0+3)/bs*bs*sdd+(i0+3)%bs+(jj+4)*bs, pD+(ipiv[i0+3])/bs*bs*sdd+(ipiv[i0+3])%bs+(jj+4)*bs);
-					}
-				}
-			}
-		}
-
-	// solve upper
-	ll = jj+4;
-	for( ; ll<n; ll+=4)
-		{
-		kernel_strsm_nn_ll_one_4x4_vs_lib4(i0, &pD[i0*sdd], &pD[ll*bs], sdd, &pD[ll*bs+i0*sdd], &pD[ll*bs+i0*sdd], &pD[i0*bs+i0*sdd], m-i0, n-ll);
-		}
-	return;
-
-	}
-
-
-
-/****************************
-* new interface
-****************************/
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-
-
-// dpotrf
-void spotrf_l_libstr(int m, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ci!=0 | di!=0)
-		{
-		printf("\nspotrf_l_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA;
-	if(di==0 && dj==0) // XXX what to do if di and dj are not zero
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	int i, j, l;
-
-	i = 0;
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i; j+=4)
-			{
-			kernel_strsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j]);
-			}
-		kernel_spotrf_nt_l_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j]);
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4: // 1 - 3
-	j = 0;
-	for(; j<i; j+=4)
-		{
-		kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		}
-	kernel_spotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-	return;
-
-	return;
-	}
-
-
-
-// dpotrf
-void spotrf_l_mn_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	if(ci!=0 | di!=0)
-		{
-		printf("\nspotrf_l_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-
-	const int bs = 4;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA;
-	if(di==0 && dj==0) // XXX what to do if di and dj are not zero
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	int i, j, l;
-
-	i = 0;
-	for(; i<m-3; i+=4)
-		{
-		j = 0;
-		for(; j<i && j<n-3; j+=4)
-			{
-			kernel_strsm_nt_rl_inv_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // dpotrf
-				{
-				if(j<n-3)
-					{
-					kernel_spotrf_nt_l_4x4_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j]);
-					}
-				else
-					{
-					kernel_spotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		goto left_4;
-		}
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-	left_4:
-	j = 0;
-	for(; j<i && j<n-3; j+=4)
-		{
-		kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_strsm_nt_rl_inv_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-			}
-		else // dpotrf
-			{
-			kernel_spotrf_nt_l_4x4_vs_lib4(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-			}
-		}
-	return;
-
-	return;
-	}
-
-
-
-// dsyrk dpotrf
-void ssyrk_spotrf_ln_libstr(int m, int n, int k, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nssyrk_spotrf_ln_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-	ssyrk_spotrf_nt_l_lib(m, n, k, pA, sda, pB, sdb, pC, sdc, pD, sdd, dD);
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	return;
-	}
-
-
-
-// dgetrf without pivoting
-void sgetrf_nopivot_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-	if(ci!=0 | di!=0)
-		{
-		printf("\nsgetrf_nopivot_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-	sgetrf_nn_nopivot_lib(m, n, pC, sdc, pD, sdd, dD);
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	return;
-	}
-
-
-
-
-// dgetrf pivoting
-void sgetrf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, int *ipiv)
-	{
-	if(ci!=0 | di!=0)
-		{
-		printf("\nsgetrf_libstr: feature not implemented yet: ci=%d, di=%d\n", ci, di);
-		exit(1);
-		}
-	const int bs = 4;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-	// needs to perform row-exchanges on the yet-to-be-factorized matrix too
-	if(pC!=pD)
-		sgecp_libstr(m, n, sC, ci, cj, sD, di, dj);
-	sgetrf_nn_lib(m, n, pC, sdc, pD, sdd, dD, ipiv);
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	return;
-	}
-
-
-
-int sgeqrf_work_size_libstr(int m, int n)
-	{
-	printf("\nsgeqrf_work_size_libstr: feature not implemented yet\n");
-	exit(1);
-	return 0;
-	}
-
-
-
-void sgeqrf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	printf("\nsgeqrf_libstr: feature not implemented yet\n");
-	exit(1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
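
The largest routine deleted above, sgetrf_nn_lib, is a blocked LU factorization with partial pivoting: it factorizes 4-wide panels with kernel_sgetrf_pivot_4_lib4, applies the row swaps with srowsw_lib, and finishes each panel with triangular solves against the trailing columns. For comparison, here is a plain unblocked, column-major reference LU with partial pivoting; this is an illustrative sketch, not the BLASFEO code path, it uses 0-based pivot indices and omits the zero-pivot check.

/* Reference right-looking LU with partial pivoting: on return A holds
 * the unit-lower and upper factors, ipiv[j] the 0-based pivot row. */
#include <math.h>

void sgetrf_ref(int m, int n, float *A, int lda, int *ipiv)
{
	int p = m < n ? m : n;
	for (int j = 0; j < p; j++) {
		/* pick the largest |A[i][j]| on or below the diagonal */
		int piv = j;
		for (int i = j + 1; i < m; i++)
			if (fabsf(A[i + j * lda]) > fabsf(A[piv + j * lda]))
				piv = i;
		ipiv[j] = piv;
		if (piv != j) /* swap full rows j and piv */
			for (int k = 0; k < n; k++) {
				float t = A[j + k * lda];
				A[j + k * lda] = A[piv + k * lda];
				A[piv + k * lda] = t;
			}
		/* scale the column below the diagonal, update the trailing block */
		float d = A[j + j * lda];
		for (int i = j + 1; i < m; i++)
			A[i + j * lda] /= d;
		for (int k = j + 1; k < n; k++)
			for (int i = j + 1; i < m; i++)
				A[i + k * lda] -= A[i + j * lda] * A[j + k * lda];
	}
}
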
diff --git a/third_party/blasfeo/blas/s_lapack_lib8.c b/third_party/blasfeo/blas/s_lapack_lib8.c
deleted file mode 100644
index 3b5239e..0000000
--- a/third_party/blasfeo/blas/s_lapack_lib8.c
+++ /dev/null
@@ -1,872 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-
-
-
-void spotrf_l_libstr(int m, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0)
-		return;
-
-	if(ci>0 | di>0)
-		{
-		printf("\nspotrf_l_libstr: feature not implemented yet: ci>0, di>0\n");
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int i, j;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<i; j+=8)
-			{
-			kernel_strsm_nt_rl_inv_24x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_strsm_nt_rl_inv_24x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		kernel_spotrf_nt_l_24x4_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-		kernel_spotrf_nt_l_20x4_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-		kernel_spotrf_nt_l_16x4_lib8((j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8]);
-		kernel_spotrf_nt_l_12x4_lib8((j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12]);
-		kernel_spotrf_nt_l_8x8_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16]);
-		}
-	if(m>i)
-		{
-		if(m-i<=4)
-			{
-			goto left_4;
-			}
-		else if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=12)
-			{
-			goto left_12;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<i; j+=8)
-			{
-			kernel_strsm_nt_rl_inv_16x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_strsm_nt_rl_inv_16x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		kernel_spotrf_nt_l_16x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-		kernel_spotrf_nt_l_12x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-		kernel_spotrf_nt_l_8x8_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8]);
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24: // 17 <= m <= 23
-	j = 0;
-	for(; j<i & j<m-7; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, m-(j+0));
-		kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, m-(j+4));
-		}
-	kernel_spotrf_nt_l_24x4_vs_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), m-(j+0));
-	kernel_spotrf_nt_l_20x4_vs_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), m-(j+4));
-	kernel_spotrf_nt_l_16x4_vs_lib8((j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), m-(j+8));
-	kernel_spotrf_nt_l_12x4_vs_lib8((j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12], m-(i+8), m-(j+12));
-	if(j<m-20) // 21 - 23
-		{
-		kernel_spotrf_nt_l_8x8_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), m-(j+16));
-		}
-	else // 17 18 19 20
-		{
-		kernel_spotrf_nt_l_8x4_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), m-(j+16));
-		}
-	return;
-#endif
-
-	left_16: // 9 <= m <= 16
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, m-(j+0));
-		kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, m-(j+4));
-		}
-	kernel_spotrf_nt_l_16x4_vs_lib8(j+0, &pD[(i+0)*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+j*sdc], sdc, &pD[(j+0)*bs+j*sdd], sdd, &dD[j+0], m-(i+0), m-(j+0));
-	kernel_spotrf_nt_l_12x4_vs_lib8(j+4, &pD[(i+0)*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+j*sdc], sdc, &pD[(j+4)*bs+j*sdd], sdd, &dD[j+4], m-(i+0), m-(j+4));
-	if(j<m-12) // 13 - 16
-		{
-		kernel_spotrf_nt_l_8x8_vs_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), m-(j+8));
-		}
-	else // 9 - 12
-		{
-		kernel_spotrf_nt_l_8x4_vs_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), m-(j+8));
-		}
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_12: // 9 <= m <= 12
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		kernel_strsm_nt_rl_inv_4x8_vs_lib8(j, &pD[(i+8)*sdd], &pD[j*sdd], &pC[j*bs+(i+8)*sdc], &pD[j*bs+(i+8)*sdd], &pD[j*bs+j*sdd], &dD[j], m-(i+8), m-j);
-		}
-	kernel_spotrf_nt_l_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-	kernel_strsm_nt_rl_inv_4x8_vs_lib8(j, &pD[(i+8)*sdd], &pD[j*sdd], &pC[j*bs+(i+8)*sdc], &pD[j*bs+(i+8)*sdd], &pD[j*bs+j*sdd], &dD[j], m-(i+8), m-j);
-	if(j<m-8) // 9 - 12
-		{
-		kernel_spotrf_nt_l_8x4_vs_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[(j+8)], m-(i+8), m-(j+8));
-		}
-	return;
-#endif
-
-	left_8: // 1 <= m <= 8
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		}
-	if(j<m-4) // 5 - 8
-		{
-		kernel_spotrf_nt_l_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		}
-	else // 1 - 4
-		{
-		kernel_spotrf_nt_l_8x4_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		}
-	return;
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_4: // 1 <= m <= 4
-	j = 0;
-	for(; j<i; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_4x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-		}
-	kernel_spotrf_nt_l_8x4_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, m-j);
-	return;
-#endif
-
-	}
-
-
-
-void spotrf_l_mn_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(m<=0 | n<=0)
-		return;
-
-	if(ci>0 | di>0)
-		{
-		printf("\nspotrf_l_mn_libstr: feature not implemented yet: ci>0, di>0\n");
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int i, j;
-
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_strsm_nt_rl_inv_24x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_strsm_nt_rl_inv_24x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[(j+4)*bs+(j+4)*sdd], &dD[j+4], m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-23)
-					{
-					kernel_spotrf_nt_l_24x4_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-					kernel_spotrf_nt_l_20x4_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-					kernel_spotrf_nt_l_16x4_lib8((j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8]);
-					kernel_spotrf_nt_l_12x4_lib8((j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12]);
-					kernel_spotrf_nt_l_8x8_lib8((j+16), &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 23
-						{
-						kernel_spotrf_nt_l_24x4_vs_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-						kernel_spotrf_nt_l_20x4_vs_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-						if(j==n-8)
-							return;
-						if(j<n-12) // 13 - 23
-							{
-							kernel_spotrf_nt_l_16x4_vs_lib8((j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-							kernel_spotrf_nt_l_12x4_vs_lib8((j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12], m-(i+8), n-(j+12));
-							if(j==n-16)
-								return;
-							if(j<n-20) // 21 - 23
-								{
-								kernel_spotrf_nt_l_8x8_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-								}
-							else // 17 18 19 20
-								{
-								kernel_spotrf_nt_l_8x4_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-								}
-							}
-						else // 9 10 11 12
-							{
-							kernel_spotrf_nt_l_16x4_vs_lib8(j+8, &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_spotrf_nt_l_24x4_vs_lib8(j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_strsm_nt_rl_inv_16x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_strsm_nt_rl_inv_16x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[(j+4)*bs+(j+4)*sdd], &dD[j+4], m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-15)
-					{
-					kernel_spotrf_nt_l_16x4_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-					kernel_spotrf_nt_l_12x4_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-					kernel_spotrf_nt_l_8x8_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 15
-						{
-						kernel_spotrf_nt_l_16x4_vs_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-						kernel_spotrf_nt_l_12x4_vs_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-						if(j==n-8) // 8
-							return;
-						if(j<n-12) // 13 - 15
-							{
-							kernel_spotrf_nt_l_8x8_vs_lib8(j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-							}
-						else // 9 10 11 12
-							{
-							kernel_spotrf_nt_l_8x4_vs_lib8(j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_spotrf_nt_l_16x4_vs_lib8(j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-		kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_strsm_nt_rl_inv_24x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 23
-				{
-				kernel_spotrf_nt_l_24x4_vs_lib8((j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-				kernel_spotrf_nt_l_20x4_vs_lib8((j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 23
-					{
-					kernel_spotrf_nt_l_16x4_vs_lib8((j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-					kernel_spotrf_nt_l_12x4_vs_lib8((j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12], m-(i+8), n-(j+12));
-					if(j>=n-16)
-						return;
-					if(j<n-20) // 21 - 23
-						{
-						kernel_spotrf_nt_l_8x8_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-						}
-					else // 17 18 19 20
-						{
-						kernel_spotrf_nt_l_8x4_vs_lib8(j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-						}
-					}
-				else // 9 10 11 12
-					{
-					kernel_spotrf_nt_l_16x4_vs_lib8(j+8, &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_spotrf_nt_l_24x4_vs_lib8(j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-#endif
-
-	left_16:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-		kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_strsm_nt_rl_inv_16x4_vs_lib8(j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 15
-				{
-				kernel_spotrf_nt_l_16x4_vs_lib8(j+0, &pD[(i+0)*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+j*sdc], sdc, &pD[(j+0)*bs+j*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-				kernel_spotrf_nt_l_12x4_vs_lib8(j+4, &pD[(i+0)*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+j*sdc], sdc, &pD[(j+4)*bs+j*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 15
-					{
-					kernel_spotrf_nt_l_8x8_vs_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-					}
-				else // 9 - 12
-					{
-					kernel_spotrf_nt_l_8x4_vs_lib8((j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_spotrf_nt_l_16x4_vs_lib8(j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_strsm_nt_rl_inv_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_strsm_nt_rl_inv_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_strsm_nt_rl_inv_8x4_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_spotrf_nt_l_8x8_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_spotrf_nt_l_8x4_vs_lib8(j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			}
-		}
-
-	return;
-
-	}
-
-
-
-void ssyrk_spotrf_ln_libstr(int m, int n, int k, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj)
-	{
-
-	if(ai!=0 | bi!=0 | ci!=0 | di!=0)
-		{
-		printf("\nssyrk_spotrf_ln_libstr: feature not implemented yet: ai=%d, bi=%d, ci=%d, di=%d\n", ai, bi, ci, di);
-		exit(1);
-		}
-
-	const int bs = 8;
-
-	int i, j;
-
-	int sda = sA->cn;
-	int sdb = sB->cn;
-	int sdc = sC->cn;
-	int sdd = sD->cn;
-	float *pA = sA->pA + aj*bs;
-	float *pB = sB->pA + bj*bs;
-	float *pC = sC->pA + cj*bs;
-	float *pD = sD->pA + dj*bs;
-	float *dD = sD->dA; // XXX what to do if di and dj are not zero
-
-//	ssyrk_spotrf_nt_l_lib(m, n, k, pA, sda, pB, sdb, pC, sdc, pD, sdd, dD);
-
-	if(di==0 && dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-
-	i = 0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; i<m-23; i+=24)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_sgemm_strsm_nt_rl_inv_24x4_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_sgemm_strsm_nt_rl_inv_24x4_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[(j+4)*bs+(j+4)*sdd], &dD[j+4], m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-23)
-					{
-					kernel_ssyrk_spotrf_nt_l_24x4_lib8(k, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], (j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-					kernel_ssyrk_spotrf_nt_l_20x4_lib8(k, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], (j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-					kernel_ssyrk_spotrf_nt_l_16x4_lib8(k, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], (j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8]);
-					kernel_ssyrk_spotrf_nt_l_12x4_lib8(k, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], (j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12]);
-					kernel_ssyrk_spotrf_nt_l_8x8_lib8(k, &pA[(i+16)*sda], &pB[(j+16)*sdb], (j+16), &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 23
-						{
-						kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], (j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-						kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], (j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-						if(j==n-8)
-							return;
-						if(j<n-12) // 13 - 23
-							{
-							kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], (j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-							kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], (j+12), &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12], m-(i+8), n-(j+12));
-							if(j==n-16)
-								return;
-							if(j<n-20) // 21 - 23
-								{
-								kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(k, &pA[(i+16)*sda], &pB[(j+16)*sdb], j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-								}
-							else // 17 18 19 20
-								{
-								kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(k, &pA[(i+16)*sda], &pB[(j+16)*sdb], j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-								}
-							}
-						else // 9 10 11 12
-							{
-							kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[j*sdb], j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else if(m-i<=16)
-			{
-			goto left_16;
-			}
-		else
-			{
-			goto left_24;
-			}
-		}
-#else
-	for(; i<m-15; i+=16)
-		{
-		j = 0;
-		for(; j<i & j<n-7; j+=8)
-			{
-			kernel_sgemm_strsm_nt_rl_inv_16x4_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0]);
-			kernel_sgemm_strsm_nt_rl_inv_16x4_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4]);
-			}
-		if(j<n)
-			{
-			if(i<j) // dtrsm
-				{
-				kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-				if(j<n-4) // 5 6 7
-					{
-					kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[(j+4)*bs+(j+4)*sdd], &dD[j+4], m-i, n-(j+4));
-					}
-				}
-			else // dpotrf
-				{
-				if(j<n-15)
-					{
-					kernel_ssyrk_spotrf_nt_l_16x4_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0]);
-					kernel_ssyrk_spotrf_nt_l_12x4_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4]);
-					kernel_ssyrk_spotrf_nt_l_8x8_lib8(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], (j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8]);
-					}
-				else
-					{
-					if(j<n-4) // 5 - 15
-						{
-						kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], (j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-						kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], j+4, &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-						if(j==n-8) // 8
-							return;
-						if(j<n-12) // 13 - 15
-							{
-							kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-							}
-						else // 9 10 11 12
-							{
-							kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-							}
-						}
-					else // 1 2 3 4
-						{
-						kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[j*sdb], j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-						}
-					}
-				}
-			}
-		}
-	if(m>i)
-		{
-		if(m-i<=8)
-			{
-			goto left_8;
-			}
-		else
-			{
-			goto left_16;
-			}
-		}
-#endif
-
-	// common return if i==m
-	return;
-
-	// clean up loops definitions
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-	left_24:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-		kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 23
-				{
-				kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[(j+0)*sdb], (j+0), &pD[(i+0)*sdd], sdd, &pD[(j+0)*sdd], &pC[(j+0)*bs+(j+0)*sdc], sdc, &pD[(j+0)*bs+(j+0)*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-				kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[4+(j+0)*sdb], (j+4), &pD[(i+0)*sdd], sdd, &pD[4+(j+0)*sdd], &pC[(j+4)*bs+(j+0)*sdc], sdc, &pD[(j+4)*bs+(j+0)*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 23
-					{
-					kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], (j+8), &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-					kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[4+(j+8)*sdb], j+12, &pD[(i+8)*sdd], sdd, &pD[4+(j+8)*sdd], &pC[(j+12)*bs+(j+8)*sdc], sdc, &pD[(j+12)*bs+(j+8)*sdd], sdd, &dD[j+12], m-(i+8), n-(j+12));
-					if(j>=n-16)
-						return;
-					if(j<n-20) // 21 - 23
-						{
-						kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(k, &pA[(i+16)*sda], &pB[(j+16)*sdb], j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-						}
-					else // 17 18 19 20
-						{
-						kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(k, &pA[(i+16)*sda], &pB[(j+16)*sdb], j+16, &pD[(i+16)*sdd], &pD[(j+16)*sdd], &pC[(j+16)*bs+(j+16)*sdc], &pD[(j+16)*bs+(j+16)*sdd], &dD[j+16], m-(i+16), n-(j+16));
-						}
-					}
-				else // 9 10 11 12
-					{
-					kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+8)*sda], sda, &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], sdd, &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], sdc, &pD[(j+8)*bs+(j+8)*sdd], sdd, &dD[j+8], m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[j*sdb], j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-#endif
-
-	left_16:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[0+(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-		kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[0+j*sdb], j+0, &pD[i*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+i*sdc], sdc, &pD[(j+0)*bs+i*sdd], sdd, &pD[(j+0)*bs+(j+0)*sdd], &dD[j+0], m-i, n-(j+0));
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(k, &pA[i*sda], sda, &pB[4+j*sdb], j+4, &pD[i*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+i*sdc], sdc, &pD[(j+4)*bs+i*sdd], sdd, &pD[4+(j+4)*bs+(j+0)*sdd], &dD[j+4], m-i, n-(j+4));
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 - 15
-				{
-				kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[0+j*sdb], j+0, &pD[(i+0)*sdd], sdd, &pD[0+j*sdd], &pC[(j+0)*bs+j*sdc], sdc, &pD[(j+0)*bs+j*sdd], sdd, &dD[j+0], m-(i+0), n-(j+0));
-				kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[4+j*sdb], j+4, &pD[(i+0)*sdd], sdd, &pD[4+j*sdd], &pC[(j+4)*bs+j*sdc], sdc, &pD[(j+4)*bs+j*sdd], sdd, &dD[j+4], m-(i+0), n-(j+4));
-				if(j>=n-8)
-					return;
-				if(j<n-12) // 13 - 15
-					{
-					kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], (j+8), &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-					}
-				else // 9 - 12
-					{
-					kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(k, &pA[(i+8)*sda], &pB[(j+8)*sdb], j+8, &pD[(i+8)*sdd], &pD[(j+8)*sdd], &pC[(j+8)*bs+(j+8)*sdc], &pD[(j+8)*bs+(j+8)*sdd], &dD[j+8], m-(i+8), n-(j+8));
-					}
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(k, &pA[(i+0)*sda], sda, &pB[j*sdb], j, &pD[(i+0)*sdd], sdd, &pD[j*sdd], &pC[j*bs+j*sdc], sdc, &pD[j*bs+j*sdd], sdd, &dD[j], m-(i+0), n-j);
-				}
-			}
-		}
-	return;
-
-	left_8:
-	j = 0;
-	for(; j<i & j<n-7; j+=8)
-		{
-		kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-		}
-	if(j<n)
-		{
-		if(j<i) // dtrsm
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+i*sdc], &pD[j*bs+i*sdd], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			}
-		else // dpotrf
-			{
-			if(j<n-4) // 5 6 7
-				{
-				kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			else // 1 2 3 4
-				{
-				kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(k, &pA[i*sda], &pB[j*sdb], j, &pD[i*sdd], &pD[j*sdd], &pC[j*bs+j*sdc], &pD[j*bs+j*sdd], &dD[j], m-i, n-j);
-				}
-			}
-		}
-	return;
-
-	}
-
-
-
-int sgeqrf_work_size_libstr(int m, int n)
-	{
-	printf("\nsgeqrf_work_size_libstr: feature not implemented yet\n");
-	exit(1);
-	return 0;
-	}
-
-
-
-void sgeqrf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	printf("\nsgeqrf_libstr: feature not implemented yet\n");
-	exit(1);
-	return;
-	}
-
-
-
-
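The hunk above removes the single-precision blocked Cholesky path: kernel_sgemm_strsm_* kernels update and triangular-solve the panel to the left of the diagonal, kernel_ssyrk_spotrf_* kernels factor the diagonal block, with 24-, 16- and 8-row variants selected per target and goto-based cleanup (left_8/left_16/left_24) for the leftover rows; the two sgeqrf_* functions are unimplemented stubs that print a message and exit. As a reminder of the underlying operation (this is a plain reference sketch, not the BLASFEO API), an unblocked lower Cholesky factorization A = L*L^T looks like:

	#include <math.h>

	/* Illustrative reference only: overwrite the lower triangle of the
	 * n x n column-major matrix A (leading dimension lda) with L such
	 * that A = L*L^T.  Returns 0 on success, -1 if A is not positive
	 * definite.  The deleted kernels compute the same recurrences in
	 * 8/16/24-row panels. */
	static int potrf_lower_ref(int n, float *A, int lda)
		{
		int i, j, k;
		for(j=0; j<n; j++)
			{
			float d = A[j+j*lda];
			for(k=0; k<j; k++)
				d -= A[j+k*lda]*A[j+k*lda];
			if(d<=0.0f)
				return -1;
			d = sqrtf(d);
			A[j+j*lda] = d;
			for(i=j+1; i<n; i++)
				{
				float s = A[i+j*lda];
				for(k=0; k<j; k++)
					s -= A[i+k*lda]*A[j+k*lda];
				A[i+j*lda] = s/d;
				}
			}
		return 0;
		}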
diff --git a/third_party/blasfeo/blas/x_blas1_lib.c b/third_party/blasfeo/blas/x_blas1_lib.c
deleted file mode 100644
index 5f8fc2e..0000000
--- a/third_party/blasfeo/blas/x_blas1_lib.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(LA_REFERENCE)
-
-
-
-void AXPY_LIBSTR(int m, REAL alpha, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	if(m<=0)
-		return;
-	int ii;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-		z[ii+1] = y[ii+1] + alpha*x[ii+1];
-		z[ii+2] = y[ii+2] + alpha*x[ii+2];
-		z[ii+3] = y[ii+3] + alpha*x[ii+3];
-		}
-	for(; ii<m; ii++)
-		z[ii+0] = y[ii+0] + alpha*x[ii+0];
-	return;
-	}
-
-
-
-// multiply two vectors and compute dot product
-REAL VECMULDOT_LIBSTR(int m, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	if(m<=0)
-		return 0.0;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	int ii;
-	REAL dot = 0.0;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		z[ii+1] = x[ii+1] * y[ii+1];
-		z[ii+2] = x[ii+2] * y[ii+2];
-		z[ii+3] = x[ii+3] * y[ii+3];
-		dot += z[ii+0] + z[ii+1] + z[ii+2] + z[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		dot += z[ii+0];
-		}
-	return dot;
-	}
-
-
-
-// compute dot product of two vectors
-REAL DOT_LIBSTR(int m, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi)
-	{
-	if(m<=0)
-		return 0.0;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	int ii;
-	REAL dot = 0.0;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		dot += x[ii+0] * y[ii+0];
-		dot += x[ii+1] * y[ii+1];
-		dot += x[ii+2] * y[ii+2];
-		dot += x[ii+3] * y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		dot += x[ii+0] * y[ii+0];
-		}
-	return dot;
-	}
-
-
-
-#elif defined(LA_BLAS)
-
-
-
-void AXPY_LIBSTR(int m, REAL alpha, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	if(m<=0)
-		return;
-	int i1 = 1;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	if(y!=z)
-		COPY(&m, y, &i1, z, &i1);
-	AXPY(&m, &alpha, x, &i1, z, &i1);
-	return;
-	}
-
-
-
-// multiply two vectors and compute dot product
-REAL VECMULDOT_LIBSTR(int m, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	if(m<=0)
-		return 0.0;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	int ii;
-	REAL dot = 0.0;
-	ii = 0;
-	for(; ii<m; ii++)
-		{
-		z[ii+0] = x[ii+0] * y[ii+0];
-		dot += z[ii+0];
-		}
-	return dot;
-	}
-
-
-
-// compute dot product of two vectors
-REAL DOT_LIBSTR(int m, struct STRVEC *sx, int xi, struct STRVEC *sy, int yi)
-	{
-	if(m<=0)
-		return 0.0;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	int ii;
-	REAL dot = 0.0;
-	ii = 0;
-	for(; ii<m-3; ii+=4)
-		{
-		dot += x[ii+0] * y[ii+0];
-		dot += x[ii+1] * y[ii+1];
-		dot += x[ii+2] * y[ii+2];
-		dot += x[ii+3] * y[ii+3];
-		}
-	for(; ii<m; ii++)
-		{
-		dot += x[ii+0] * y[ii+0];
-		}
-	return dot;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
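x_blas1_lib.c, deleted above, is a precision-generic level-1 template: REAL, STRVEC, AXPY_LIBSTR, DOT_LIBSTR and friends are macros, and the file is evidently compiled once per precision by the s_/d_ wrappers, with an LA_REFERENCE branch that hand-unrolls the loops by four and an LA_BLAS branch that forwards to COPY/AXPY from an external BLAS. A minimal single-file sketch of that template pattern (names here are hypothetical, and the two roles are folded into one translation unit so it stands alone):

	/* Hypothetical names: in the real layout the generic body lives in a
	 * shared x_*.c file and a per-precision wrapper defines the macros
	 * and #includes it. */
	#define REAL float
	#define AXPY_REF saxpy_ref

	/* generic body: y += alpha*x, unrolled by 4 as in the code above */
	void AXPY_REF(int m, REAL alpha, const REAL *x, REAL *y)
		{
		int ii = 0;
		for(; ii<m-3; ii+=4)
			{
			y[ii+0] += alpha*x[ii+0];
			y[ii+1] += alpha*x[ii+1];
			y[ii+2] += alpha*x[ii+2];
			y[ii+3] += alpha*x[ii+3];
			}
		for(; ii<m; ii++)
			y[ii] += alpha*x[ii];
		}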
diff --git a/third_party/blasfeo/blas/x_blas2_diag_lib.c b/third_party/blasfeo/blas/x_blas2_diag_lib.c
deleted file mode 100644
index e90cbd6..0000000
--- a/third_party/blasfeo/blas/x_blas2_diag_lib.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-void GEMV_DIAG_LIBSTR(int m, REAL alpha, struct STRVEC *sA, int ai, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	if(m<=0)
-		return;
-	int ii;
-	REAL *a = sA->pa + ai;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	if(alpha==1.0 & beta==1.0)
-		{
-		for(ii=0; ii<m; ii++)
-			z[ii] = a[ii]*x[ii] + y[ii];
-		}
-	else
-		{
-		for(ii=0; ii<m; ii++)
-			z[ii] = alpha*a[ii]*x[ii] + beta*y[ii];
-		}
-
-	return;
-
-	}
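The single routine in x_blas2_diag_lib.c, GEMV_DIAG_LIBSTR, is an elementwise matrix-vector product with a diagonal matrix stored as the vector a: z[i] = alpha*a[i]*x[i] + beta*y[i], with a fast path that skips the two scalings when alpha==1.0 and beta==1.0. For example, with a = (1, 2, 3), x = (4, 5, 6), y = (1, 1, 1), alpha = 2 and beta = 1 it produces z = (9, 21, 37).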
diff --git a/third_party/blasfeo/blas/x_blas2_lib.c b/third_party/blasfeo/blas/x_blas2_lib.c
deleted file mode 100644
index 32e1e0a..0000000
--- a/third_party/blasfeo/blas/x_blas2_lib.c
+++ /dev/null
@@ -1,1466 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(LA_REFERENCE)
-
-
-
-void GEMV_N_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL 
-		y_0, y_1, y_2, y_3,
-		x_0, x_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-#if 1 // y reg version
-	ii = 0;
-	for(; ii<m-1; ii+=2)
-		{
-		y_0 = 0.0;
-		y_1 = 0.0;
-		jj = 0;
-		for(; jj<n-1; jj+=2)
-			{
-			y_0 += pA[ii+0+lda*(jj+0)] * x[jj+0] + pA[ii+0+lda*(jj+1)] * x[jj+1];
-			y_1 += pA[ii+1+lda*(jj+0)] * x[jj+0] + pA[ii+1+lda*(jj+1)] * x[jj+1];
-			}
-		if(jj<n)
-			{
-			y_0 += pA[ii+0+lda*jj] * x[jj];
-			y_1 += pA[ii+1+lda*jj] * x[jj];
-			}
-		z[ii+0] = beta * y[ii+0] + alpha * y_0;
-		z[ii+1] = beta * y[ii+1] + alpha * y_1;
-		}
-	for(; ii<m; ii++)
-		{
-		y_0 = 0.0;
-		for(jj=0; jj<n; jj++)
-			{
-			y_0 += pA[ii+lda*jj] * x[jj];
-			}
-		z[ii] = beta * y[ii] + alpha * y_0;
-		}
-#else // x reg version
-	for(ii=0; ii<n; ii++)
-		{
-		z[ii] = beta * y[ii];
-		}
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		x_0 = alpha * x[jj+0];
-		x_1 = alpha * x[jj+1];
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			z[ii+0] += pA[ii+0+lda*(jj+0)] * x_0 + pA[ii+0+lda*(jj+1)] * x_1;
-			z[ii+1] += pA[ii+1+lda*(jj+0)] * x_0 + pA[ii+1+lda*(jj+1)] * x_1;
-			}
-		for(; ii<m; ii++)
-			{
-			z[ii] += pA[ii+lda*(jj+0)] * x_0;
-			z[ii] += pA[ii+lda*(jj+1)] * x_1;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		x_0 = alpha * x[jj+0];
-		for(ii=0; ii<m; ii++)
-			{
-			z[ii] += pA[ii+lda*(jj+0)] * x_0;
-			}
-		}
-#endif
-	return;
-	}
-
-
-
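The LA_REFERENCE GEMV_N_LIBSTR above keeps two variants behind an #if: the enabled "y reg" version walks two rows at a time and accumulates each z[ii] as a dot product in scalar registers, while the disabled "x reg" version walks two columns at a time, scales x once per column, and streams axpy-style updates into z. Both compute z = beta*y + alpha*A*x; the choice only changes the memory-access order (strided row-wise reads of A versus contiguous column-wise reads in this column-major layout).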
-void GEMV_T_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL 
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		y_0 = 0.0;
-		y_1 = 0.0;
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			y_0 += pA[ii+0+lda*(jj+0)] * x[ii+0] + pA[ii+1+lda*(jj+0)] * x[ii+1];
-			y_1 += pA[ii+0+lda*(jj+1)] * x[ii+0] + pA[ii+1+lda*(jj+1)] * x[ii+1];
-			}
-		if(ii<m)
-			{
-			y_0 += pA[ii+lda*(jj+0)] * x[ii];
-			y_1 += pA[ii+lda*(jj+1)] * x[ii];
-			}
-		z[jj+0] = beta * y[jj+0] + alpha * y_0;
-		z[jj+1] = beta * y[jj+1] + alpha * y_1;
-		}
-	for(; jj<n; jj++)
-		{
-		y_0 = 0.0;
-		for(ii=0; ii<m; ii++)
-			{
-			y_0 += pA[ii+lda*(jj+0)] * x[ii];
-			}
-		z[jj+0] = beta * y[jj+0] + alpha * y_0;
-		}
-	return;
-	}
-
-
-
-// TODO optimize !!!!!
-void GEMV_NT_LIBSTR(int m, int n, REAL alpha_n, REAL alpha_t, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx_n, int xi_n, struct STRVEC *sx_t, int xi_t, REAL beta_n, REAL beta_t, struct STRVEC *sy_n, int yi_n, struct STRVEC *sy_t, int yi_t, struct STRVEC *sz_n, int zi_n, struct STRVEC *sz_t, int zi_t)
-	{
-	int ii, jj;
-	REAL
-		a_00,
-		x_n_0,
-		y_t_0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x_n = sx_n->pa + xi_n;
-	REAL *x_t = sx_t->pa + xi_t;
-	REAL *y_n = sy_n->pa + yi_n;
-	REAL *y_t = sy_t->pa + yi_t;
-	REAL *z_n = sz_n->pa + zi_n;
-	REAL *z_t = sz_t->pa + zi_t;
-	for(ii=0; ii<m; ii++)
-		{
-		z_n[ii] = beta_n * y_n[ii];
-		}
-	for(jj=0; jj<n; jj++)
-		{
-		y_t_0 = 0.0;
-		x_n_0 = alpha_n * x_n[jj];
-		for(ii=0; ii<m; ii++)
-			{
-			a_00 = pA[ii+lda*jj];
-			z_n[ii] += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t[ii];
-			}
-		z_t[jj] = beta_t * y_t[jj] + alpha_t * y_t_0;
-		}
-	return;
-	}
-
-
-
-// TODO optimize !!!!!
-void SYMV_L_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL
-		y_0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	for(ii=0; ii<n; ii++)
-		{
-		y_0 = 0.0;
-		jj = 0;
-		for(; jj<=ii; jj++)
-			{
-			y_0 += pA[ii+lda*jj] * x[jj];
-			}
-		for( ; jj<m; jj++)
-			{
-			y_0 += pA[jj+lda*ii] * x[jj];
-			}
-		z[ii] = beta * y[ii] + alpha * y_0;
-		}
-	return;
-	}
-
-
-
-void TRMV_LNN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(m-n>0)
-		{
-		GEMV_N_LIBSTR(m-n, n, 1.0, sA, ai+n, aj, sx, xi, 0.0, sz, zi+n, sz, zi+n);
-		}
-	if(n%2!=0)
-		{
-		ii = n-1;
-		y_0 = x[ii];
-		y_0 *= pA[ii+lda*ii];
-		for(jj=0; jj<ii; jj++)
-			{
-			y_0 += pA[ii+lda*jj] * x[jj];
-			}
-		z[ii] = y_0;
-		n -= 1;
-		}
-	for(ii=n-2; ii>=0; ii-=2)
-		{
-		y_0 = x[ii+0];
-		y_1 = x[ii+1];
-		y_1 *= pA[ii+1+lda*(ii+1)];
-		y_1 += pA[ii+1+lda*(ii+0)] * y_0;
-		y_0 *= pA[ii+0+lda*(ii+0)];
-		jj = 0;
-		for(; jj<ii-1; jj+=2)
-			{
-			y_0 += pA[ii+0+lda*(jj+0)] * x[jj+0] + pA[ii+0+lda*(jj+1)] * x[jj+1];
-			y_1 += pA[ii+1+lda*(jj+0)] * x[jj+0] + pA[ii+1+lda*(jj+1)] * x[jj+1];
-			}
-//	XXX there is no clean up loop !!!!!
-//		for(; jj<ii; jj++)
-//			{
-//			y_0 += pA[ii+0+lda*jj] * x[jj];
-//			y_1 += pA[ii+1+lda*jj] * x[jj];
-//			}
-		z[ii+0] = y_0;
-		z[ii+1] = y_1;
-		}
-	return;
-	}
-
-
-	
-void TRMV_LTN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		y_0 = x[jj+0];
-		y_1 = x[jj+1];
-		y_0 *= pA[jj+0+lda*(jj+0)];
-		y_0 += pA[jj+1+lda*(jj+0)] * y_1;
-		y_1 *= pA[jj+1+lda*(jj+1)];
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			y_0 += pA[ii+0+lda*(jj+0)] * x[ii+0] + pA[ii+1+lda*(jj+0)] * x[ii+1];
-			y_1 += pA[ii+0+lda*(jj+1)] * x[ii+0] + pA[ii+1+lda*(jj+1)] * x[ii+1];
-			}
-		for(; ii<m; ii++)
-			{
-			y_0 += pA[ii+lda*(jj+0)] * x[ii];
-			y_1 += pA[ii+lda*(jj+1)] * x[ii];
-			}
-		z[jj+0] = y_0;
-		z[jj+1] = y_1;
-		}
-	for(; jj<n; jj++)
-		{
-		y_0 = x[jj];
-		y_0 *= pA[jj+lda*jj];
-		for(ii=jj+1; ii<m; ii++)
-			{
-			y_0 += pA[ii+lda*jj] * x[ii];
-			}
-		z[jj] = y_0;
-		}
-	return;
-	}
-
-
-
-void TRMV_UNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL
-		y_0, y_1,
-		x_0, x_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-#if 1 // y reg version
-	jj = 0;
-	for(; jj<m-1; jj+=2)
-		{
-		y_0 = x[jj+0];
-		y_1 = x[jj+1];
-		y_0 = pA[jj+0+lda*(jj+0)] * y_0;
-		y_0 += pA[jj+0+lda*(jj+1)] * y_1;
-		y_1 = pA[jj+1+lda*(jj+1)] * y_1;
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			y_0 += pA[jj+0+lda*(ii+0)] * x[ii+0] + pA[jj+0+lda*(ii+1)] * x[ii+1];
-			y_1 += pA[jj+1+lda*(ii+0)] * x[ii+0] + pA[jj+1+lda*(ii+1)] * x[ii+1];
-			}
-		if(ii<m)
-			{
-			y_0 += pA[jj+0+lda*(ii+0)] * x[ii+0];
-			y_1 += pA[jj+1+lda*(ii+0)] * x[ii+0];
-			}
-		z[jj+0] = y_0;
-		z[jj+1] = y_1;
-		}
-	for(; jj<m; jj++)
-		{
-		y_0 = pA[jj+lda*jj] * x[jj];
-		for(ii=jj+1; ii<m; ii++)
-			{
-			y_0 += pA[jj+lda*ii] * x[ii];
-			}
-		z[jj] = y_0;
-		}
-#else // x reg version
-	if(x != z)
-		{
-		for(ii=0; ii<m; ii++)
-			z[ii] = x[ii];
-		}
-	jj = 0;
-	for(; jj<m-1; jj+=2)
-		{
-		x_0 = z[jj+0];
-		x_1 = z[jj+1];
-		ii = 0;
-		for(; ii<jj-1; ii+=2)
-			{
-			z[ii+0] += pA[ii+0+lda*(jj+0)] * x_0 + pA[ii+0+lda*(jj+1)] * x_1;
-			z[ii+1] += pA[ii+1+lda*(jj+0)] * x_0 + pA[ii+1+lda*(jj+1)] * x_1;
-			}
-//	XXX there is no clean-up loop, since jj+=2 !!!!!
-//		for(; ii<jj; ii++)
-//			{
-//			z[ii+0] += pA[ii+0+lda*(jj+0)] * x_0 + pA[ii+0+lda*(jj+1)] * x_1;
-//			}
-		x_0 *= pA[jj+0+lda*(jj+0)];
-		x_0 += pA[jj+0+lda*(jj+1)] * x_1;
-		x_1 *= pA[jj+1+lda*(jj+1)];
-		z[jj+0] = x_0;
-		z[jj+1] = x_1;
-		}
-	for(; jj<m; jj++)
-		{
-		x_0 = z[jj];
-		for(ii=0; ii<jj; ii++)
-			{
-			z[ii] += pA[ii+lda*jj] * x_0;
-			}
-		x_0 *= pA[jj+lda*jj];
-		z[jj] = x_0;
-		}
-#endif
-	return;
-	}
-
-
-
-void TRMV_UTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	int ii, jj;
-	REAL
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(m%2!=0)
-		{
-		jj = m-1;
-		y_0 = pA[jj+lda*jj] * x[jj];
-		for(ii=0; ii<jj; ii++)
-			{
-			y_0 += pA[ii+lda*jj] * x[ii];
-			}
-		z[jj] = y_0;
-		m -= 1; // XXX
-		}
-	for(jj=m-2; jj>=0; jj-=2)
-		{
-		y_1 = pA[jj+1+lda*(jj+1)] * x[jj+1];
-		y_1 += pA[jj+0+lda*(jj+1)] * x[jj+0];
-		y_0 = pA[jj+0+lda*(jj+0)] * x[jj+0];
-		for(ii=0; ii<jj-1; ii+=2)
-			{
-			y_0 += pA[ii+0+lda*(jj+0)] * x[ii+0] + pA[ii+1+lda*(jj+0)] * x[ii+1];
-			y_1 += pA[ii+0+lda*(jj+1)] * x[ii+0] + pA[ii+1+lda*(jj+1)] * x[ii+1];
-			}
-//	XXX there is no clean-up loop !!!!!
-//		if(ii<jj)
-//			{
-//			y_0 += pA[ii+lda*(jj+0)] * x[ii];
-//			y_1 += pA[ii+lda*(jj+1)] * x[ii];
-//			}
-		z[jj+0] = y_0;
-		z[jj+1] = y_1;
-		}
-	return;
-	}
-
-
-
-void TRSV_LNN_MN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0 | n==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** trsv_lnn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** trsv_lnn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	int ii, jj, j1;
-	REAL
-		y_0, y_1,
-		x_0, x_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *dA = sA->dA;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / pA[ii+lda*ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / pA[ii+lda*ii];
-		sA->use_dA = 0;
-		}
-#if 1 // y reg version
-	ii = 0;
-	for(; ii<n-1; ii+=2)
-		{
-		y_0 = x[ii+0];
-		y_1 = x[ii+1];
-		jj = 0;
-		for(; jj<ii-1; jj+=2)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[jj+0] + pA[ii+0+lda*(jj+1)] * z[jj+1];
-			y_1 -= pA[ii+1+lda*(jj+0)] * z[jj+0] + pA[ii+1+lda*(jj+1)] * z[jj+1];
-			}
-//	XXX there is no clean-up loop !!!!!
-//		if(jj<ii)
-//			{
-//			y_0 -= pA[ii+0+lda*(jj+0)] * z[jj+0];
-//			y_1 -= pA[ii+1+lda*(jj+0)] * z[jj+0];
-//			}
-		y_0 *= dA[ii+0];
-		y_1 -= pA[ii+1+lda*(jj+0)] * y_0;
-		y_1 *= dA[ii+1];
-		z[ii+0] = y_0;
-		z[ii+1] = y_1;
-		}
-	for(; ii<n; ii++)
-		{
-		y_0 = x[ii];
-		for(jj=0; jj<ii; jj++)
-			{
-			y_0 -= pA[ii+lda*jj] * z[jj];
-			}
-		y_0 *= dA[ii];
-		z[ii] = y_0;
-		}
-	for(; ii<m-1; ii+=2)
-		{
-		y_0 = x[ii+0];
-		y_1 = x[ii+1];
-		jj = 0;
-		for(; jj<n-1; jj+=2)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[jj+0] + pA[ii+0+lda*(jj+1)] * z[jj+1];
-			y_1 -= pA[ii+1+lda*(jj+0)] * z[jj+0] + pA[ii+1+lda*(jj+1)] * z[jj+1];
-			}
-		if(jj<n)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[jj+0];
-			y_1 -= pA[ii+1+lda*(jj+0)] * z[jj+0];
-			}
-		z[ii+0] = y_0;
-		z[ii+1] = y_1;
-		}
-	for(; ii<m; ii++)
-		{
-		y_0 = x[ii];
-		for(jj=0; jj<n; jj++)
-			{
-			y_0 -= pA[ii+lda*jj] * z[jj];
-			}
-		z[ii] = y_0;
-		}
-#else // x reg version
-	if(x != z)
-		{
-		for(ii=0; ii<m; ii++)
-			z[ii] = x[ii];
-		}
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		x_0 = dA[jj+0] * z[jj+0];
-		x_1 = z[jj+1] - pA[jj+1+lda*(jj+0)] * x_0;
-		x_1 = dA[jj+1] * x_1;
-		z[jj+0] = x_0;
-		z[jj+1] = x_1;
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			z[ii+0] -= pA[ii+0+lda*(jj+0)] * x_0 + pA[ii+0+lda*(jj+1)] * x_1;
-			z[ii+1] -= pA[ii+1+lda*(jj+0)] * x_0 + pA[ii+1+lda*(jj+1)] * x_1;
-			}
-		for(; ii<m; ii++)
-			{
-			z[ii] -= pA[ii+lda*(jj+0)] * x_0 + pA[ii+lda*(jj+1)] * x_1;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		x_0 = dA[jj] * z[jj];
-		z[jj] = x_0;
-		for(ii=jj+1; ii<m; ii++)
-			{
-			z[ii] -= pA[ii+lda*jj] * x_0;
-			}
-		}
-#endif
-	return;
-	}
-
-
-
-void TRSV_LTN_MN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** trsv_ltn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** trsv_ltn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	int ii, jj;
-	REAL
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *dA = sA->dA;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / pA[ii+lda*ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / pA[ii+lda*ii];
-		sA->use_dA = 0;
-		}
-	if(n%2!=0)
-		{
-		jj = n-1;
-		y_0 = x[jj];
-		for(ii=jj+1; ii<m; ii++)
-			{
-			y_0 -= pA[ii+lda*jj] * z[ii];
-			}
-		y_0 *= dA[jj];
-		z[jj] = y_0;
-		jj -= 2;
-		}
-	else
-		{
-		jj = n-2;
-		}
-	for(; jj>=0; jj-=2)
-		{
-		y_0 = x[jj+0];
-		y_1 = x[jj+1];
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[ii+0] + pA[ii+1+lda*(jj+0)] * z[ii+1];
-			y_1 -= pA[ii+0+lda*(jj+1)] * z[ii+0] + pA[ii+1+lda*(jj+1)] * z[ii+1];
-			}
-		if(ii<m)
-			{
-			y_0 -= pA[ii+lda*(jj+0)] * z[ii];
-			y_1 -= pA[ii+lda*(jj+1)] * z[ii];
-			}
-		y_1 *= dA[jj+1];
-		y_0 -= pA[jj+1+lda*(jj+0)] * y_1;
-		y_0 *= dA[jj+0];
-		z[jj+0] = y_0;
-		z[jj+1] = y_1;
-		}
-	return;
-	}
-
-
-
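TRSV_LNN_MN_LIBSTR and TRSV_LTN_MN_LIBSTR above are the forward and backward substitutions against an m x n lower-triangular panel, processed two rows/columns at a time with a cached reciprocal diagonal dA[] (recomputed unless ai==0, aj==0 and sA->use_dA is already set). Stripped of the unrolling and caching, the forward solve L*z = x they specialize is just the following (illustrative sketch, not the library API):

	/* Solve L*z = x for a lower-triangular, non-unit-diagonal L
	 * (column-major, leading dimension lda).  Plain reference version
	 * of the recurrence unrolled in the deleted TRSV_* routines. */
	static void trsv_lower_ref(int m, const float *L, int lda,
	                           const float *x, float *z)
		{
		int i, j;
		for(i=0; i<m; i++)
			{
			float s = x[i];
			for(j=0; j<i; j++)
				s -= L[i+j*lda]*z[j];
			z[i] = s/L[i+i*lda];
			}
		}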
-void TRSV_LNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_lnn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	int ii, jj, j1;
-	REAL
-		y_0, y_1,
-		x_0, x_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *dA = sA->dA;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / pA[ii+lda*ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / pA[ii+lda*ii];
-		sA->use_dA = 0;
-		}
-	ii = 0;
-	for(; ii<m-1; ii+=2)
-		{
-		y_0 = x[ii+0];
-		y_1 = x[ii+1];
-		jj = 0;
-		for(; jj<ii-1; jj+=2)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[jj+0] + pA[ii+0+lda*(jj+1)] * z[jj+1];
-			y_1 -= pA[ii+1+lda*(jj+0)] * z[jj+0] + pA[ii+1+lda*(jj+1)] * z[jj+1];
-			}
-		y_0 *= dA[ii+0];
-		y_1 -= pA[ii+1+lda*(jj+0)] * y_0;
-		y_1 *= dA[ii+1];
-		z[ii+0] = y_0;
-		z[ii+1] = y_1;
-		}
-	for(; ii<m; ii++)
-		{
-		y_0 = x[ii];
-		for(jj=0; jj<ii; jj++)
-			{
-			y_0 -= pA[ii+lda*jj] * z[jj];
-			}
-		y_0 *= dA[ii];
-		z[ii] = y_0;
-		}
-	return;
-	}
-
-
-
-void TRSV_LNU_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_lnu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** trsv_lnu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void TRSV_LTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_ltn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	int ii, jj;
-	REAL
-		y_0, y_1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *dA = sA->dA;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			for(ii=0; ii<m; ii++)
-				dA[ii] = 1.0 / pA[ii+lda*ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0 / pA[ii+lda*ii];
-		sA->use_dA = 0;
-		}
-	if(m%2!=0)
-		{
-		jj = m-1;
-		y_0 = x[jj];
-		y_0 *= dA[jj];
-		z[jj] = y_0;
-		jj -= 2;
-		}
-	else
-		{
-		jj = m-2;
-		}
-	for(; jj>=0; jj-=2)
-		{
-		y_0 = x[jj+0];
-		y_1 = x[jj+1];
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			y_0 -= pA[ii+0+lda*(jj+0)] * z[ii+0] + pA[ii+1+lda*(jj+0)] * z[ii+1];
-			y_1 -= pA[ii+0+lda*(jj+1)] * z[ii+0] + pA[ii+1+lda*(jj+1)] * z[ii+1];
-			}
-		if(ii<m)
-			{
-			y_0 -= pA[ii+lda*(jj+0)] * z[ii];
-			y_1 -= pA[ii+lda*(jj+1)] * z[ii];
-			}
-		y_1 *= dA[jj+1];
-		y_0 -= pA[jj+1+lda*(jj+0)] * y_1;
-		y_0 *= dA[jj+0];
-		z[jj+0] = y_0;
-		z[jj+1] = y_1;
-		}
-	return;
-	}
-
-
-
-void TRSV_LTU_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_ltu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** trsv_ltu_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void TRSV_UNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_unn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_unn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_unn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_unn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_unn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_unn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_unn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_unn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_unn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** trsv_unn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-void TRSV_UTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_utn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_utn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_utn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_utn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_utn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_utn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_utn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_utn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_utn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	printf("\n***** trsv_utn_libstr : feature not implemented yet *****\n");
-	exit(1);
-	}
-
-
-
-#elif defined(LA_BLAS)
-
-
-
-void GEMV_N_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, y, &i1, z, &i1);
-	GEMV(&cn, &m, &n, &alpha, pA, &lda, x, &i1, &beta, z, &i1);
-	return;
-	}
-
-
-
-void GEMV_T_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	COPY(&n, y, &i1, z, &i1);
-	GEMV(&ct, &m, &n, &alpha, pA, &lda, x, &i1, &beta, z, &i1);
-	return;
-	}
-
-
-
-void GEMV_NT_LIBSTR(int m, int n, REAL alpha_n, REAL alpha_t, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx_n, int xi_n, struct STRVEC *sx_t, int xi_t, REAL beta_n, REAL beta_t, struct STRVEC *sy_n, int yi_n, struct STRVEC *sy_t, int yi_t, struct STRVEC *sz_n, int zi_n, struct STRVEC *sz_t, int zi_t)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x_n = sx_n->pa + xi_n;
-	REAL *x_t = sx_t->pa + xi_t;
-	REAL *y_n = sy_n->pa + yi_n;
-	REAL *y_t = sy_t->pa + yi_t;
-	REAL *z_n = sz_n->pa + zi_n;
-	REAL *z_t = sz_t->pa + zi_t;
-	COPY(&m, y_n, &i1, z_n, &i1);
-	GEMV(&cn, &m, &n, &alpha_n, pA, &lda, x_n, &i1, &beta_n, z_n, &i1);
-	COPY(&n, y_t, &i1, z_t, &i1);
-	GEMV(&ct, &m, &n, &alpha_t, pA, &lda, x_t, &i1, &beta_t, z_t, &i1);
-	return;
-	}
-
-
-
-void SYMV_L_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, REAL beta, struct STRVEC *sy, int yi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *y = sy->pa + yi;
-	REAL *z = sz->pa + zi;
-	int tmp = m-n;
-	COPY(&m, y, &i1, z, &i1);
-	SYMV(&cl, &n, &alpha, pA, &lda, x, &i1, &beta, z, &i1);
-	GEMV(&cn, &tmp, &n, &alpha, pA+n, &lda, x, &i1, &beta, z+n, &i1);
-	GEMV(&ct, &tmp, &n, &alpha, pA+n, &lda, x+n, &i1, &d1, z, &i1);
-	return;
-	}
-
-
-
-void TRMV_LNN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL d0 = 0.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	int tmp = m-n;
-	if(x!=z)
-		COPY(&n, x, &i1, z, &i1);
-	GEMV(&cn, &tmp, &n, &d1, pA+n, &lda, x, &i1, &d0, z+n, &i1);
-	TRMV(&cl, &cn, &cn, &n, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRMV_LTN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	int tmp = m-n;
-	if(x!=z)
-		COPY(&n, x, &i1, z, &i1);
-	TRMV(&cl, &ct, &cn, &n, pA, &lda, z, &i1);
-	GEMV(&ct, &tmp, &n, &d1, pA+n, &lda, x+n, &i1, &d1, z, &i1);
-	return;
-	}
-
-
-
-void TRMV_UNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRMV(&cu, &cn, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRMV_UTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRMV(&cu, &ct, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_LNN_MN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0 | n==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** trsv_lnn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** trsv_lnn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int mmn = m-n;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cl, &cn, &cn, &n, pA, &lda, z, &i1);
-	GEMV(&cn, &mmn, &n, &dm1, pA+n, &lda, z, &i1, &d1, z+n, &i1);
-	return;
-	}
-
-
-
-void TRSV_LTN_MN_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltn_mn_libstr : m<0 : %d<0 *****\n", m);
-	if(n<0) printf("\n****** trsv_ltn_mn_libstr : n<0 : %d<0 *****\n", n);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltn_mn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltn_mn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltn_mn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltn_mn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltn_mn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+n > sA->n) printf("\n***** trsv_ltn_mn_libstr : aj+n > col(A) : %d+%d > %d *****\n", aj, n, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltn_mn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltn_mn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int mmn = m-n;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	GEMV(&ct, &mmn, &n, &dm1, pA+n, &lda, z+n, &i1, &d1, z, &i1);
-	TRSV(&cl, &ct, &cn, &n, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_LNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_lnn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cl, &cn, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_LNU_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_lnu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_lnu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_lnu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_lnu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_lnu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_lnu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_lnu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_lnu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_lnu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cl, &cn, &cu, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_LTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_ltn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cl, &ct, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_LTU_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_ltu_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_ltu_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_ltu_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_ltu_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_ltu_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_ltu_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_ltu_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_ltu_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_ltu_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cl, &ct, &cu, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_UNN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_unn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_unn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_unn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_unn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_unn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_unn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_unn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_unn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_unn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cu, &cn, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-void TRSV_UTN_LIBSTR(int m, struct STRMAT *sA, int ai, int aj, struct STRVEC *sx, int xi, struct STRVEC *sz, int zi)
-	{
-	if(m==0)
-		return;
-#if defined(DIM_CHECK)
-	// non-negative size
-	if(m<0) printf("\n****** trsv_utn_libstr : m<0 : %d<0 *****\n", m);
-	// non-negative offset
-	if(ai<0) printf("\n****** trsv_utn_libstr : ai<0 : %d<0 *****\n", ai);
-	if(aj<0) printf("\n****** trsv_utn_libstr : aj<0 : %d<0 *****\n", aj);
-	if(xi<0) printf("\n****** trsv_utn_libstr : xi<0 : %d<0 *****\n", xi);
-	if(zi<0) printf("\n****** trsv_utn_libstr : zi<0 : %d<0 *****\n", zi);
-	// inside matrix
-	// A: m x k
-	if(ai+m > sA->m) printf("\n***** trsv_utn_libstr : ai+m > row(A) : %d+%d > %d *****\n", ai, m, sA->m);
-	if(aj+m > sA->n) printf("\n***** trsv_utn_libstr : aj+m > col(A) : %d+%d > %d *****\n", aj, m, sA->n);
-	// x: m
-	if(xi+m > sx->m) printf("\n***** trsv_utn_libstr : xi+m > size(x) : %d+%d > %d *****\n", xi, m, sx->m);
-	// z: m
-	if(zi+m > sz->m) printf("\n***** trsv_utn_libstr : zi+m > size(z) : %d+%d > %d *****\n", zi, m, sz->m);
-#endif
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL d1 = 1.0;
-	REAL dm1 = -1.0;
-	int lda = sA->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *x = sx->pa + xi;
-	REAL *z = sz->pa + zi;
-	COPY(&m, x, &i1, z, &i1);
-	TRSV(&cu, &ct, &cn, &m, pA, &lda, z, &i1);
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
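The six trsv_* wrappers above all follow the same pattern: copy x into z, then forward the triangular solve to the BLAS trsv kernel on a column-major submatrix whose leading dimension is the row count of the full strmat; the _mn variants first fold the trailing m-n rows into the right-hand side with a gemv update. A minimal double-precision sketch of that mapping against plain CBLAS follows (the helper name trsv_ltn_mn_ref and the flat arrays are assumptions for illustration, not BLASFEO API):

// Sketch of the lower/transposed/non-unit solve performed by the *_ltn_mn
// wrapper above, written against plain CBLAS. trsv_ltn_mn_ref() and the
// flat column-major arrays (lda = rows of the full matrix) are assumptions
// for illustration only.
#include <cblas.h>
#include <string.h>

// Solve A(0:n,0:n)^T * z(0:n) = x(0:n) - A(n:m,0:n)^T * x(n:m), where A is
// m x n and its leading n x n block is lower triangular with non-unit diagonal.
static void trsv_ltn_mn_ref(int m, int n, const double *A, int lda,
                            const double *x, double *z)
	{
	memcpy(z, x, m * sizeof(double));                        // z = x
	// z(0:n) -= A(n:m,0:n)^T * z(n:m)   (fold in the trailing m-n rows)
	cblas_dgemv(CblasColMajor, CblasTrans, m - n, n, -1.0,
	            A + n, lda, z + n, 1, 1.0, z, 1);
	// solve the leading n x n lower-triangular system with A^T
	cblas_dtrsv(CblasColMajor, CblasLower, CblasTrans, CblasNonUnit,
	            n, A, lda, z, 1);
	}

The plain lnn/lnu/ltn/ltu/unn/utn wrappers skip the gemv step and issue a single trsv with the matching uplo/trans/diag flags.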
diff --git a/third_party/blasfeo/blas/x_blas3_diag_lib.c b/third_party/blasfeo/blas/x_blas3_diag_lib.c
deleted file mode 100644
index d5cce93..0000000
--- a/third_party/blasfeo/blas/x_blas3_diag_lib.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(LA_REFERENCE) | defined(LA_BLAS) 
-
-
-
-// dgemm with A diagonal matrix (stored as strvec)
-void GEMM_L_DIAG_LIBSTR(int m, int n, REAL alpha, struct STRVEC *sA, int ai, struct STRMAT *sB, int bi, int bj, double beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *dA = sA->pa + ai;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL a0, a1;
-	if(beta==0.0)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			a0 = alpha * dA[ii+0];
-			a1 = alpha * dA[ii+1];
-			for(jj=0; jj<n; jj++)
-				{
-				pD[ii+0+ldd*jj] = a0 * pB[ii+0+ldb*jj];
-				pD[ii+1+ldd*jj] = a1 * pB[ii+1+ldb*jj];
-				}
-			}
-		for(; ii<m; ii++)
-			{
-			a0 = alpha * dA[ii];
-			for(jj=0; jj<n; jj++)
-				{
-				pD[ii+0+ldd*jj] = a0 * pB[ii+0+ldb*jj];
-				}
-			}
-		}
-	else
-		{
-		int ldc = sC->m;
-		REAL *pC = sC->pA + ci + cj*ldc;
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			a0 = alpha * dA[ii+0];
-			a1 = alpha * dA[ii+1];
-			for(jj=0; jj<n; jj++)
-				{
-				pD[ii+0+ldd*jj] = a0 * pB[ii+0+ldb*jj] + beta * pC[ii+0+ldc*jj];
-				pD[ii+1+ldd*jj] = a1 * pB[ii+1+ldb*jj] + beta * pC[ii+1+ldc*jj];
-				}
-			}
-		for(; ii<m; ii++)
-			{
-			a0 = alpha * dA[ii];
-			for(jj=0; jj<n; jj++)
-				{
-				pD[ii+0+ldd*jj] = a0 * pB[ii+0+ldb*jj] + beta * pC[ii+0+ldc*jj];
-				}
-			}
-		}
-	return;
-	}
-
-
-
-// dgemm with B diagonal matrix (stored as strvec)
-void GEMM_R_DIAG_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRVEC *sB, int bi, double beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj;
-	int lda = sA->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *dB = sB->pa + bi;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL a0, a1;
-	if(beta==0)
-		{
-		jj = 0;
-		for(; jj<n-1; jj+=2)
-			{
-			a0 = alpha * dB[jj+0];
-			a1 = alpha * dB[jj+1];
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*(jj+0)] = a0 * pA[ii+lda*(jj+0)];
-				pD[ii+ldd*(jj+1)] = a1 * pA[ii+lda*(jj+1)];
-				}
-			}
-		for(; jj<n; jj++)
-			{
-			a0 = alpha * dB[jj+0];
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*(jj+0)] = a0 * pA[ii+lda*(jj+0)];
-				}
-			}
-		}
-	else
-		{
-		int ldc = sC->m;
-		REAL *pC = sC->pA + ci + cj*ldc;
-		jj = 0;
-		for(; jj<n-1; jj+=2)
-			{
-			a0 = alpha * dB[jj+0];
-			a1 = alpha * dB[jj+1];
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*(jj+0)] = a0 * pA[ii+lda*(jj+0)] + beta * pC[ii+ldc*(jj+0)];
-				pD[ii+ldd*(jj+1)] = a1 * pA[ii+lda*(jj+1)] + beta * pC[ii+ldc*(jj+1)];
-				}
-			}
-		for(; jj<n; jj++)
-			{
-			a0 = alpha * dB[jj+0];
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*(jj+0)] = a0 * pA[ii+lda*(jj+0)] + beta * pC[ii+ldc*(jj+0)];
-				}
-			}
-		}
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
-
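Both routines in the deleted x_blas3_diag_lib.c reduce a gemm with a diagonal factor to plain row scaling (diagonal on the left) or column scaling (diagonal on the right) of the dense operand, with a separate beta==0 path that never reads C and a two-way unrolled loop. A compact column-major sketch of the left-diagonal case follows (the function name and flat double arrays are illustrative assumptions; BLASFEO keeps the diagonal in a strvec and the dense operands in strmat structures):

// Minimal column-major sketch of the product implemented above:
// D = alpha*diag(dA)*B + beta*C. gemm_l_diag_ref() and the flat arrays are
// assumptions for illustration only.
static void gemm_l_diag_ref(int m, int n, double alpha, const double *dA,
                            const double *B, int ldb, double beta,
                            const double *C, int ldc, double *D, int ldd)
	{
	for(int j = 0; j < n; j++)
		for(int i = 0; i < m; i++)
			D[i + ldd*j] = alpha * dA[i] * B[i + ldb*j]
			             + beta * C[i + ldc*j];    // row i scaled by dA[i]
	}

The right-diagonal variant is symmetric: the scale factor becomes dB[j] and the unrolling runs over columns instead of rows.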
diff --git a/third_party/blasfeo/blas/x_blas3_lib.c b/third_party/blasfeo/blas/x_blas3_lib.c
deleted file mode 100644
index 29a33c7..0000000
--- a/third_party/blasfeo/blas/x_blas3_lib.c
+++ /dev/null
@@ -1,1531 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(LA_REFERENCE)
-
-
-
-// dgemm nt
-void GEMM_NT_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL 
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			c_01 = 0.0;
-			c_11 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[(jj+0)+ldb*kk];
-				c_10 += pA[(ii+1)+lda*kk] * pB[(jj+0)+ldb*kk];
-				c_01 += pA[(ii+0)+lda*kk] * pB[(jj+1)+ldb*kk];
-				c_11 += pA[(ii+1)+lda*kk] * pB[(jj+1)+ldb*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10 + beta * pC[(ii+1)+ldc*(jj+0)];
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01 + beta * pC[(ii+0)+ldc*(jj+1)];
-			pD[(ii+1)+ldd*(jj+1)] = alpha * c_11 + beta * pC[(ii+1)+ldc*(jj+1)];
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			c_01 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[(jj+0)+ldb*kk];
-				c_01 += pA[(ii+0)+lda*kk] * pB[(jj+1)+ldb*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01 + beta * pC[(ii+0)+ldc*(jj+1)];
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[(jj+0)+ldb*kk];
-				c_10 += pA[(ii+1)+lda*kk] * pB[(jj+0)+ldb*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10 + beta * pC[(ii+1)+ldc*(jj+0)];
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[(jj+0)+ldb*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			}
-		}
-	return;
-	}
-
-
-
-// dgemm nn
-void GEMM_NN_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL 
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0; ;
-			c_10 = 0.0; ;
-			c_01 = 0.0; ;
-			c_11 = 0.0; ;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+0)];
-				c_10 += pA[(ii+1)+lda*kk] * pB[kk+ldb*(jj+0)];
-				c_01 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+1)];
-				c_11 += pA[(ii+1)+lda*kk] * pB[kk+ldb*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10 + beta * pC[(ii+1)+ldc*(jj+0)];
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01 + beta * pC[(ii+0)+ldc*(jj+1)];
-			pD[(ii+1)+ldd*(jj+1)] = alpha * c_11 + beta * pC[(ii+1)+ldc*(jj+1)];
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0; ;
-			c_01 = 0.0; ;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+0)];
-				c_01 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01 + beta * pC[(ii+0)+ldc*(jj+1)];
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0; ;
-			c_10 = 0.0; ;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+0)];
-				c_10 += pA[(ii+1)+lda*kk] * pB[kk+ldb*(jj+0)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10 + beta * pC[(ii+1)+ldc*(jj+0)];
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0; ;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[(ii+0)+lda*kk] * pB[kk+ldb*(jj+0)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00 + beta * pC[(ii+0)+ldc*(jj+0)];
-			}
-		}
-	return;
-	}
-
-
-
-// dtrsm_left_lower_nottransposed_unit
-void TRSM_LLNU_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL
-		d_00, d_01,
-		d_10, d_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda; // triangular
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-#if 1
-	// solve
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = alpha * pB[ii+0+ldb*(jj+0)];
-			d_10 = alpha * pB[ii+1+ldb*(jj+0)];
-			d_01 = alpha * pB[ii+0+ldb*(jj+1)];
-			d_11 = alpha * pB[ii+1+ldb*(jj+1)];
-			kk = 0;
-#if 0
-			for(; kk<ii-1; kk+=2)
-				{
-				d_00 -= pA[ii+0+lda*(kk+0)] * pD[kk+ldd*(jj+0)];
-				d_10 -= pA[ii+1+lda*(kk+0)] * pD[kk+ldd*(jj+0)];
-				d_01 -= pA[ii+0+lda*(kk+0)] * pD[kk+ldd*(jj+1)];
-				d_11 -= pA[ii+1+lda*(kk+0)] * pD[kk+ldd*(jj+1)];
-				d_00 -= pA[ii+0+lda*(kk+1)] * pD[kk+ldd*(jj+0)];
-				d_10 -= pA[ii+1+lda*(kk+1)] * pD[kk+ldd*(jj+0)];
-				d_01 -= pA[ii+0+lda*(kk+1)] * pD[kk+ldd*(jj+1)];
-				d_11 -= pA[ii+1+lda*(kk+1)] * pD[kk+ldd*(jj+1)];
-				}
-			if(kk<ii)
-#else
-			for(; kk<ii; kk++)
-#endif
-				{
-				d_00 -= pA[ii+0+lda*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pA[ii+1+lda*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pA[ii+0+lda*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pA[ii+1+lda*kk] * pD[kk+ldd*(jj+1)];
-				}
-			d_10 -= pA[ii+1+lda*kk] * d_00;
-			d_11 -= pA[ii+1+lda*kk] * d_01;
-			pD[ii+0+ldd*(jj+0)] = d_00;
-			pD[ii+1+ldd*(jj+0)] = d_10;
-			pD[ii+0+ldd*(jj+1)] = d_01;
-			pD[ii+1+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = alpha * pB[ii+ldb*(jj+0)];
-			d_01 = alpha * pB[ii+ldb*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pA[ii+lda*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pA[ii+lda*kk] * pD[kk+ldd*(jj+1)];
-				}
-			pD[ii+ldd*(jj+0)] = d_00;
-			pD[ii+ldd*(jj+1)] = d_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = alpha * pB[ii+0+ldb*jj];
-			d_10 = alpha * pB[ii+1+ldb*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pA[ii+0+lda*kk] * pD[kk+ldd*jj];
-				d_10 -= pA[ii+1+lda*kk] * pD[kk+ldd*jj];
-				}
-			d_10 -= pA[ii+1+lda*kk] * d_00;
-			pD[ii+0+ldd*jj] = d_00;
-			pD[ii+1+ldd*jj] = d_10;
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = alpha * pB[ii+ldb*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pA[ii+lda*kk] * pD[kk+ldd*jj];
-				}
-			pD[ii+ldd*jj] = d_00;
-			}
-		}
-#else
-	// copy
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			for(ii=0; ii<m; ii++)
-				pD[ii+ldd*jj] = alpha * pB[ii+ldb*jj];
-		}
-	for(jj=0; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[ii+ldd*jj];
-			for(kk=ii+1; kk<m; kk++)
-				{
-				pD[kk+ldd*jj] -= pA[kk+lda*ii] * d_00;
-				}
-			}
-		}
-#endif
-	return;
-	}
-
-
-
-// dtrsm_left_upper_nottransposed_notunit
-void TRSM_LUNN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk, id;
-	REAL
-		d_00, d_01,
-		d_10, d_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda; // triangular
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dA = sA->dA;
-	if(!(sA->use_dA==1 & ai==0 & aj==0))
-		{
-		// invert the diagonal of pA
-		for(ii=0; ii<m; ii++)
-			dA[ii] = 1.0/pA[ii+lda*ii];
-		// inverted diagonal is valid for this call only, do not mark it as cached
-		sA->use_dA = 0;
-		}
-#if 1
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			id = m-ii-2;
-			d_00 = alpha * pB[id+0+ldb*(jj+0)];
-			d_10 = alpha * pB[id+1+ldb*(jj+0)];
-			d_01 = alpha * pB[id+0+ldb*(jj+1)];
-			d_11 = alpha * pB[id+1+ldb*(jj+1)];
-			kk = id+2;
-#if 0
-			for(; kk<m-1; kk+=2)
-				{
-				d_00 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_10 -= pA[id+1+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_01 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+1)];
-				d_11 -= pA[id+1+lda*(kk+0)] * pD[kk+0+ldd*(jj+1)];
-				d_00 -= pA[id+0+lda*(kk+1)] * pD[kk+1+ldd*(jj+0)];
-				d_10 -= pA[id+1+lda*(kk+1)] * pD[kk+1+ldd*(jj+0)];
-				d_01 -= pA[id+0+lda*(kk+1)] * pD[kk+1+ldd*(jj+1)];
-				d_11 -= pA[id+1+lda*(kk+1)] * pD[kk+1+ldd*(jj+1)];
-				}
-			if(kk<m)
-#else
-			for(; kk<m; kk++)
-#endif
-				{
-				d_00 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_10 -= pA[id+1+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_01 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+1)];
-				d_11 -= pA[id+1+lda*(kk+0)] * pD[kk+0+ldd*(jj+1)];
-				}
-			d_10 *= dA[id+1];
-			d_11 *= dA[id+1];
-			d_00 -= pA[id+0+lda*(id+1)] * d_10;
-			d_01 -= pA[id+0+lda*(id+1)] * d_11;
-			d_00 *= dA[id+0];
-			d_01 *= dA[id+0];
-			pD[id+0+ldd*(jj+0)] = d_00;
-			pD[id+1+ldd*(jj+0)] = d_10;
-			pD[id+0+ldd*(jj+1)] = d_01;
-			pD[id+1+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<m; ii++)
-			{
-			id = m-ii-1;
-			d_00 = alpha * pB[id+0+ldb*(jj+0)];
-			d_01 = alpha * pB[id+0+ldb*(jj+1)];
-			kk = id+1;
-			for(; kk<m; kk++)
-				{
-				d_00 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_01 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+1)];
-				}
-			d_00 *= dA[id+0];
-			d_01 *= dA[id+0];
-			pD[id+0+ldd*(jj+0)] = d_00;
-			pD[id+0+ldd*(jj+1)] = d_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			id = m-ii-2;
-			d_00 = alpha * pB[id+0+ldb*(jj+0)];
-			d_10 = alpha * pB[id+1+ldb*(jj+0)];
-			kk = id+2;
-			for(; kk<m; kk++)
-				{
-				d_00 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				d_10 -= pA[id+1+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				}
-			d_10 *= dA[id+1];
-			d_00 -= pA[id+0+lda*(id+1)] * d_10;
-			d_00 *= dA[id+0];
-			pD[id+0+ldd*(jj+0)] = d_00;
-			pD[id+1+ldd*(jj+0)] = d_10;
-			}
-		for(; ii<m; ii++)
-			{
-			id = m-ii-1;
-			d_00 = alpha * pB[id+0+ldb*(jj+0)];
-			kk = id+1;
-			for(; kk<m; kk++)
-				{
-				d_00 -= pA[id+0+lda*(kk+0)] * pD[kk+0+ldd*(jj+0)];
-				}
-			d_00 *= dA[id+0];
-			pD[id+0+ldd*(jj+0)] = d_00;
-			}
-		}
-#else
-	// copy
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			for(ii=0; ii<m; ii++)
-				pD[ii+ldd*jj] = alpha * pB[ii+ldb*jj];
-		}
-	// solve
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=m-1; ii>=0; ii--)
-			{
-			d_00 = pD[ii+ldd*jj] * dA[ii];
-			pD[ii+ldd*jj] = d_00;
-			for(kk=0; kk<ii; kk++)
-				{
-				pD[kk+ldd*jj] -= pA[kk+lda*ii] * d_00;
-				}
-			}
-		}
-#endif
-	return;
-	}
-
-
-
-// dtrsm_right_lower_transposed_unit
-void TRSM_RLTU_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL
-		f_10,
-		c_00, c_01,
-		c_10, c_11;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		f_10 = pA[jj+1+lda*(jj+0)];
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = alpha * pB[ii+0+ldb*(jj+0)];
-			c_10 = alpha * pB[ii+1+ldb*(jj+0)];
-			c_01 = alpha * pB[ii+0+ldb*(jj+1)];
-			c_11 = alpha * pB[ii+1+ldb*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pA[jj+0+lda*kk];
-				c_10 -= pD[ii+1+ldd*kk] * pA[jj+0+lda*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pA[jj+1+lda*kk];
-				c_11 -= pD[ii+1+ldd*kk] * pA[jj+1+lda*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			pD[ii+1+ldd*(jj+0)] = c_10;
-			c_01 -= c_00 * f_10;
-			c_11 -= c_10 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01;
-			pD[ii+1+ldd*(jj+1)] = c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = alpha * pB[ii+0+ldb*(jj+0)];
-			c_01 = alpha * pB[ii+0+ldb*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pA[jj+0+lda*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pA[jj+1+lda*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			c_01 -= c_00 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// factorize diagonal
-		for(ii=0; ii<m; ii++)
-			{
-			c_00 = alpha * pB[ii+ldb*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+ldd*kk] * pA[jj+lda*kk];
-				}
-			pD[ii+ldd*jj] = c_00;
-			}
-		}
-	return;
-	}
-
-
-
-// dtrsm_right_lower_transposed_notunit
-void TRSM_RLTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dA = sA->dA;
-	if(ai==0 & aj==0)
-		{
-		if(sA->use_dA!=1)
-			{
-			for(ii=0; ii<n; ii++)
-				dA[ii] = 1.0 / pA[ii+lda*ii];
-			sA->use_dA = 1;
-			}
-		}
-	else
-		{
-		for(ii=0; ii<n; ii++)
-			dA[ii] = 1.0 / pA[ii+lda*ii];
-		sA->use_dA = 0;
-		}
-	REAL
-		f_00_inv, 
-		f_10, f_11_inv,
-		c_00, c_01,
-		c_10, c_11;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		f_00_inv = dA[jj+0];
-		f_10 = pA[jj+1+lda*(jj+0)];
-		f_11_inv = dA[jj+1];
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = alpha * pB[ii+0+ldb*(jj+0)];
-			c_10 = alpha * pB[ii+1+ldb*(jj+0)];
-			c_01 = alpha * pB[ii+0+ldb*(jj+1)];
-			c_11 = alpha * pB[ii+1+ldb*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pA[jj+0+lda*kk];
-				c_10 -= pD[ii+1+ldd*kk] * pA[jj+0+lda*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pA[jj+1+lda*kk];
-				c_11 -= pD[ii+1+ldd*kk] * pA[jj+1+lda*kk];
-				}
-			c_00 *= f_00_inv;
-			c_10 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			pD[ii+1+ldd*(jj+0)] = c_10;
-			c_01 -= c_00 * f_10;
-			c_11 -= c_10 * f_10;
-			c_01 *= f_11_inv;
-			c_11 *= f_11_inv;
-			pD[ii+0+ldd*(jj+1)] = c_01;
-			pD[ii+1+ldd*(jj+1)] = c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = alpha * pB[ii+0+ldb*(jj+0)];
-			c_01 = alpha * pB[ii+0+ldb*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pA[jj+0+lda*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pA[jj+1+lda*kk];
-				}
-			c_00 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			c_01 -= c_00 * f_10;
-			c_01 *= f_11_inv;
-			pD[ii+0+ldd*(jj+1)] = c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// factorize diagonal
-		f_00_inv = dA[jj];
-		for(ii=0; ii<m; ii++)
-			{
-			c_00 = alpha * pB[ii+ldb*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+ldd*kk] * pA[jj+lda*kk];
-				}
-			c_00 *= f_00_inv;
-			pD[ii+ldd*jj] = c_00;
-			}
-		}
-	return;
-	}
-
-
-
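The non-unit solves above amortize the divisions by caching the reciprocals of A's diagonal in sA->dA: the cache is flagged valid (use_dA = 1) only when the operand starts at the matrix origin, otherwise it is recomputed and left unmarked. A small sketch of that policy, assuming a pared-down stand-in for the strmat fields involved (the struct and helper name are hypothetical):

// Sketch of the diagonal-reciprocal cache used by the non-unit solves above.
// struct mat mirrors only the strmat fields involved; names are assumptions.
struct mat { int m, n; double *pA; double *dA; int use_dA; };

// Return 1/diag(A) for the leading n x n block of the submatrix at (ai,aj),
// recomputing it unless the cached full-matrix copy can be reused.
static const double *get_diag_inv(struct mat *sA, int ai, int aj, int n)
	{
	int lda = sA->m;
	const double *pA = sA->pA + ai + aj*lda;
	if(ai == 0 && aj == 0)
		{
		if(sA->use_dA != 1)
			{
			for(int i = 0; i < n; i++)
				sA->dA[i] = 1.0 / pA[i + lda*i];
			sA->use_dA = 1;          // cache is valid for the full matrix
			}
		}
	else
		{
		for(int i = 0; i < n; i++)
			sA->dA[i] = 1.0 / pA[i + lda*i];
		sA->use_dA = 0;              // computed for a submatrix: do not reuse
		}
	return sA->dA;
	}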
-// dtrsm_right_upper_transposed_notunit
-void TRSM_RUTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	int i1 = 1;
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-	printf("\ndtrsm_rutn_libstr: feature not implemented yet\n");
-	exit(1);
-//	if(!(pB==pD))
-//		{
-//		for(jj=0; jj<n; jj++)
-//			COPY(&m, pB+jj*sB->m, &i1, pD+jj*sD->m, &i1);
-//		}
-//	TRSM(&cr, &cu, &ct, &cn, &m, &n, &alpha, pA, &(sA->m), pD, &(sD->m));
-	return;
-	}
-
-
-
-// dtrmm_right_upper_transposed_notunit (A triangular !!!)
-void TRMM_RUTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL 
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			c_01 = 0.0;
-			c_11 = 0.0;
-			kk = jj;
-			c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-			c_10 += pB[(ii+1)+ldb*kk] * pA[(jj+0)+lda*kk];
-			kk++;
-			for(; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-				c_10 += pB[(ii+1)+ldb*kk] * pA[(jj+0)+lda*kk];
-				c_01 += pB[(ii+0)+ldb*kk] * pA[(jj+1)+lda*kk];
-				c_11 += pB[(ii+1)+ldb*kk] * pA[(jj+1)+lda*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10;
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01;
-			pD[(ii+1)+ldd*(jj+1)] = alpha * c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			c_01 = 0.0;
-			kk = jj;
-			c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-			kk++;
-			for(; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-				c_01 += pB[(ii+0)+ldb*kk] * pA[(jj+1)+lda*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			for(kk=jj; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-				c_10 += pB[(ii+1)+ldb*kk] * pA[(jj+0)+lda*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			for(kk=jj; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[(jj+0)+lda*kk];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			}
-		}	
-	return;
-	}
-
-
-
-// dtrmm_right_lower_nottransposed_notunit (A triangular !!!)
-void TRMM_RLNN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL 
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0; ;
-			c_10 = 0.0; ;
-			c_01 = 0.0; ;
-			c_11 = 0.0; ;
-			kk = jj;
-			c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-			c_10 += pB[(ii+1)+ldb*kk] * pA[kk+lda*(jj+0)];
-			kk++;
-			for(; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-				c_10 += pB[(ii+1)+ldb*kk] * pA[kk+lda*(jj+0)];
-				c_01 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+1)];
-				c_11 += pB[(ii+1)+ldb*kk] * pA[kk+lda*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10;
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01;
-			pD[(ii+1)+ldd*(jj+1)] = alpha * c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0; ;
-			c_01 = 0.0; ;
-			kk = jj;
-			c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-			kk++;
-			for(; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-				c_01 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+0)+ldd*(jj+1)] = alpha * c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0; ;
-			c_10 = 0.0; ;
-			for(kk=jj; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-				c_10 += pB[(ii+1)+ldb*kk] * pA[kk+lda*(jj+0)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			pD[(ii+1)+ldd*(jj+0)] = alpha * c_10;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0; ;
-			for(kk=jj; kk<n; kk++)
-				{
-				c_00 += pB[(ii+0)+ldb*kk] * pA[kk+lda*(jj+0)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = alpha * c_00;
-			}
-		}
-	return;
-	}
-
-
-
-// dsyrk_lower_nottransposed (allowing for different factors => use dgemm !!!)
-void SYRK_LN_LIBSTR(int m, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0)
-		return;
-	int ii, jj, kk;
-	int n = m; // TODO optimize for this case !!!!!!!!!
-	REAL
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		// diagonal
-		c_00 = 0.0;
-		c_10 = 0.0;
-		c_11 = 0.0;
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+0+lda*kk] * pB[jj+0+ldb*kk];
-			c_10 += pA[jj+1+lda*kk] * pB[jj+0+ldb*kk];
-			c_11 += pA[jj+1+lda*kk] * pB[jj+1+ldb*kk];
-			}
-		pD[jj+0+ldd*(jj+0)] = beta * pC[jj+0+ldc*(jj+0)] + alpha * c_00;
-		pD[jj+1+ldd*(jj+0)] = beta * pC[jj+1+ldc*(jj+0)] + alpha * c_10;
-		pD[jj+1+ldd*(jj+1)] = beta * pC[jj+1+ldc*(jj+1)] + alpha * c_11;
-		// lower
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			c_01 = 0.0;
-			c_11 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_10 += pA[ii+1+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				c_11 += pA[ii+1+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = beta * pC[ii+0+ldc*(jj+0)] + alpha * c_00;
-			pD[ii+1+ldd*(jj+0)] = beta * pC[ii+1+ldc*(jj+0)] + alpha * c_10;
-			pD[ii+0+ldd*(jj+1)] = beta * pC[ii+0+ldc*(jj+1)] + alpha * c_01;
-			pD[ii+1+ldd*(jj+1)] = beta * pC[ii+1+ldc*(jj+1)] + alpha * c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			c_01 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = beta * pC[ii+0+ldc*(jj+0)] + alpha * c_00;
-			pD[ii+0+ldd*(jj+1)] = beta * pC[ii+0+ldc*(jj+1)] + alpha * c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// diagonal
-		c_00 = 0.0;
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+lda*kk] * pB[jj+ldb*kk];
-			}
-		pD[jj+ldd*jj] = beta * pC[jj+ldc*jj] + alpha * c_00;
-		// lower
-		for(ii=jj+1; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+lda*kk] * pB[jj+ldb*kk];
-				}
-			pD[ii+ldd*jj] = beta * pC[ii+ldc*jj] + alpha * c_00;
-			}
-		}
-	return;
-	}
-
-
-
-// dsyrk_lower_nottransposed (allowing for different factors => use dgemm !!!)
-void SYRK_LN_MN_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		// diagonal
-		c_00 = 0.0;
-		c_10 = 0.0;
-		c_11 = 0.0;
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+0+lda*kk] * pB[jj+0+ldb*kk];
-			c_10 += pA[jj+1+lda*kk] * pB[jj+0+ldb*kk];
-			c_11 += pA[jj+1+lda*kk] * pB[jj+1+ldb*kk];
-			}
-		pD[jj+0+ldd*(jj+0)] = beta * pC[jj+0+ldc*(jj+0)] + alpha * c_00;
-		pD[jj+1+ldd*(jj+0)] = beta * pC[jj+1+ldc*(jj+0)] + alpha * c_10;
-		pD[jj+1+ldd*(jj+1)] = beta * pC[jj+1+ldc*(jj+1)] + alpha * c_11;
-		// lower
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = 0.0;
-			c_10 = 0.0;
-			c_01 = 0.0;
-			c_11 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_10 += pA[ii+1+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				c_11 += pA[ii+1+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = beta * pC[ii+0+ldc*(jj+0)] + alpha * c_00;
-			pD[ii+1+ldd*(jj+0)] = beta * pC[ii+1+ldc*(jj+0)] + alpha * c_10;
-			pD[ii+0+ldd*(jj+1)] = beta * pC[ii+0+ldc*(jj+1)] + alpha * c_01;
-			pD[ii+1+ldd*(jj+1)] = beta * pC[ii+1+ldc*(jj+1)] + alpha * c_11;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			c_01 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			pD[ii+0+ldd*(jj+0)] = beta * pC[ii+0+ldc*(jj+0)] + alpha * c_00;
-			pD[ii+0+ldd*(jj+1)] = beta * pC[ii+0+ldc*(jj+1)] + alpha * c_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// diagonal
-		c_00 = 0.0;
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+lda*kk] * pB[jj+ldb*kk];
-			}
-		pD[jj+ldd*jj] = beta * pC[jj+ldc*jj] + alpha * c_00;
-		// lower
-		for(ii=jj+1; ii<m; ii++)
-			{
-			c_00 = 0.0;
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+lda*kk] * pB[jj+ldb*kk];
-				}
-			pD[ii+ldd*jj] = beta * pC[ii+ldc*jj] + alpha * c_00;
-			}
-		}
-	return;
-	}
-
-
-
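The reference syrk_ln above writes only the lower triangle of the result, handling the 2x2 diagonal block separately so that the upper element c_01 is never formed. Stripped of the unrolling, the update it performs is the following (syrk_ln_ref and the flat column-major arrays are illustrative assumptions):

// Plain column-major sketch of the lower-triangle update performed above:
// D(lower) = beta*C(lower) + alpha*A*B^T, touching only entries with
// row >= column. Names are assumptions for illustration only.
static void syrk_ln_ref(int m, int k, double alpha, const double *A, int lda,
                        const double *B, int ldb, double beta,
                        const double *C, int ldc, double *D, int ldd)
	{
	for(int j = 0; j < m; j++)
		for(int i = j; i < m; i++)        // lower triangle only
			{
			double acc = 0.0;
			for(int kk = 0; kk < k; kk++)
				acc += A[i + lda*kk] * B[j + ldb*kk];
			D[i + ldd*j] = beta * C[i + ldc*j] + alpha * acc;
			}
	}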
-#elif defined(LA_BLAS)
-
-
-
-// dgemm nt
-void GEMM_NT_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cn = 'n';
-	char ct = 't';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long kk = k;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GEMM(&cn, &ct, &mm, &nn, &kk, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GEMM(&cn, &ct, &m, &n, &k, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dgemm nn
-void GEMM_NN_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cn = 'n';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long kk = k;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GEMM(&cn, &cn, &mm, &nn, &kk, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GEMM(&cn, &cn, &m, &n, &k, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrsm_left_lower_nottransposed_unit
-void TRSM_LLNU_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cl, &cl, &cn, &cu, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*sD->m, &i1);
-		}
-	TRSM(&cl, &cl, &cn, &cu, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrsm_left_upper_nottransposed_notunit
-void TRSM_LUNN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cl, &cu, &cn, &cn, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cl, &cu, &cn, &cn, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrsm_right_lower_transposed_unit
-void TRSM_RLTU_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cl, &ct, &cu, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cl, &ct, &cu, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrsm_right_lower_transposed_notunit
-void TRSM_RLTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cl, &ct, &cn, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cl, &ct, &cn, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrsm_right_upper_transposed_notunit
-void TRSM_RUTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cu, &ct, &cn, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRSM(&cr, &cu, &ct, &cn, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrmm_right_upper_transposed_notunit (A triangular !!!)
-void TRMM_RUTN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRMM(&cr, &cu, &ct, &cn, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRMM(&cr, &cu, &ct, &cn, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dtrmm_right_lower_nottransposed_notunit (A triangular !!!)
-void TRMM_RLNN_LIBSTR(int m, int n, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA+ai+aj*sA->m;
-	REAL *pB = sB->pA+bi+bj*sB->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRMM(&cr, &cl, &cn, &cn, &mm, &nn, &alpha, pA, &lda, pD, &ldd);
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldd = sD->m;
-	if(!(pB==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pB+jj*ldb, &i1, pD+jj*ldd, &i1);
-		}
-	TRMM(&cr, &cl, &cn, &cn, &m, &n, &alpha, pA, &lda, pD, &ldd);
-#endif
-	return;
-	}
-
-
-
-// dsyrk_lower_nottransposed (allowing for different factors => use dgemm !!!)
-void SYRK_LN_LIBSTR(int m, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA + ai + aj*sA->m;
-	REAL *pB = sB->pA + bi + bj*sB->m;
-	REAL *pC = sC->pA + ci + cj*sC->m;
-	REAL *pD = sD->pA + di + dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long kk = k;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<m; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &mm, &kk, &alpha, pA, &lda, &beta, pD, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &mm, &mm, &kk, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-		}
-#else
-	int i1 = 1;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<m; jj++)
-			COPY(&m, pC+jj*sC->m, &i1, pD+jj*sD->m, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &m, &k, &alpha, pA, &lda, &beta, pD, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &m, &m, &k, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-		}
-#endif
-	return;
-	}
-
-// dsyrk_lower_nottransposed (allowing for different factors => use dgemm !!!)
-void SYRK_LN_MN_LIBSTR(int m, int n, int k, REAL alpha, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, REAL beta, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL *pA = sA->pA + ai + aj*sA->m;
-	REAL *pB = sB->pA + bi + bj*sB->m;
-	REAL *pC = sC->pA + ci + cj*sC->m;
-	REAL *pD = sD->pA + di + dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long kk = k;
-	long long mmn = mm-nn;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &nn, &kk, &alpha, pA, &lda, &beta, pD, &ldd);
-		GEMM(&cn, &ct, &mmn, &nn, &kk, &alpha, pA+n, &lda, pB, &ldb, &beta, pD+n, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &mm, &nn, &kk, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-		}
-#else
-	int i1 = 1;
-	int mmn = m-n;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(beta==0.0 || pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*sC->m, &i1, pD+jj*sD->m, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &n, &k, &alpha, pA, &lda, &beta, pD, &ldd);
-		GEMM(&cn, &ct, &mmn, &n, &k, &alpha, pA+n, &lda, pB, &ldb, &beta, pD+n, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &m, &n, &k, &alpha, pA, &lda, pB, &ldb, &beta, pD, &ldd);
-		}
-#endif
-	return;
-	}
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
-
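Every routine in the LA_BLAS branch above follows one recipe: take the column-major submatrix at (ai,aj) with leading dimension equal to the row count of the full strmat, copy C (or B) into D column by column when the two buffers differ, and hand the result to the corresponding BLAS level-3 call; the REF_BLAS_BLIS variant only widens the integer arguments to long long. A sketch of that recipe for gemm_nt using CBLAS follows (gemm_nt_submatrix is a hypothetical helper, not a BLASFEO entry point):

// Sketch of the LA_BLAS mapping for gemm_nt: operate on column-major
// submatrices whose leading dimensions are the row counts of the full
// matrices, copying C into D first when beta != 0 and the buffers differ.
// The helper name and argument layout are assumptions for illustration.
#include <cblas.h>
#include <string.h>

static void gemm_nt_submatrix(int m, int n, int k, double alpha,
                              const double *A, int Am, int ai, int aj,
                              const double *B, int Bm, int bi, int bj,
                              double beta,
                              const double *C, int Cm, int ci, int cj,
                              double *D, int Dm, int di, int dj)
	{
	const double *pA = A + ai + aj*Am;
	const double *pB = B + bi + bj*Bm;
	const double *pC = C + ci + cj*Cm;
	double *pD = D + di + dj*Dm;
	if(beta != 0.0 && pC != pD)
		for(int j = 0; j < n; j++)                     // D <- C, column by column
			memcpy(pD + j*Dm, pC + j*Cm, m * sizeof(double));
	// D = alpha * A * B^T + beta * D
	cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, m, n, k,
	            alpha, pA, Am, pB, Bm, beta, pD, Dm);
	}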
diff --git a/third_party/blasfeo/blas/x_lapack_lib.c b/third_party/blasfeo/blas/x_lapack_lib.c
deleted file mode 100644
index 762a8a0..0000000
--- a/third_party/blasfeo/blas/x_lapack_lib.c
+++ /dev/null
@@ -1,2112 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(LA_REFERENCE)
-
-
-
-// dpotrf
-void POTRF_L_LIBSTR(int m, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0)
-		return;
-	int ii, jj, kk;
-	REAL
-		f_00_inv, 
-		f_10, f_11_inv,
-		c_00, c_01,
-		c_10, c_11;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dD = sD->dA;
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	jj = 0;
-	for(; jj<m-1; jj+=2)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+0+ldc*(jj+0)];;
-		c_10 = pC[jj+1+ldc*(jj+0)];;
-		c_11 = pC[jj+1+ldc*(jj+1)];;
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+0+ldd*kk] * pD[jj+0+ldd*kk];
-			c_10 -= pD[jj+1+ldd*kk] * pD[jj+0+ldd*kk];
-			c_11 -= pD[jj+1+ldd*kk] * pD[jj+1+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj+0] = f_00_inv;
-		pD[jj+0+ldd*(jj+0)] = c_00 * f_00_inv;
-		f_10 = c_10 * f_00_inv;
-		pD[jj+1+ldd*(jj+0)] = f_10;
-		c_11 -= f_10 * f_10;
-		if(c_11>0)
-			{
-			f_11_inv = 1.0/sqrt(c_11);
-			}
-		else
-			{
-			f_11_inv = 0.0;
-			}
-		dD[jj+1] = f_11_inv;
-		pD[jj+1+ldd*(jj+1)] = c_11 * f_11_inv;
-		// solve lower
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_10 = pC[ii+1+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			c_11 = pC[ii+1+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_10 -= pD[ii+1+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				c_11 -= pD[ii+1+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			c_10 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			pD[ii+1+ldd*(jj+0)] = c_10;
-			c_01 -= c_00 * f_10;
-			c_11 -= c_10 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			pD[ii+1+ldd*(jj+1)] = c_11 * f_11_inv;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			c_01 -= c_00 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			}
-		}
-	for(; jj<m; jj++)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+ldc*jj];;
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+ldd*kk] * pD[jj+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj] = f_00_inv;
-		pD[jj+ldd*jj] = c_00 * f_00_inv;
-		// solve lower
-//		for(ii=jj+1; ii<m; ii++)
-//			{
-//			c_00 = pC[ii+ldc*jj];
-//			for(kk=0; kk<jj; kk++)
-//				{
-//				c_00 -= pD[ii+ldd*kk] * pD[jj+ldd*kk];
-//				}
-//			pD[ii+ldd*jj] = c_00 * f_00_inv;
-//			}
-		}
-	return;
-	}
-
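
The reference POTRF_L routine above is an unblocked, left-looking lower Cholesky on column-major storage, with a zero fallback when a diagonal pivot is not positive. Below is a minimal standalone sketch of the same update/scale pattern, without the 2x2 unrolling or the strmat container; the helper name and the 3x3 test matrix are invented for illustration.

/* Illustrative sketch only: unblocked lower Cholesky on a plain column-major
 * array, mirroring the update pattern and the non-positive-pivot guard of the
 * reference POTRF_L routine above. Compile with -lm. */
#include <math.h>
#include <stdio.h>

static void potrf_l_ref(int m, double *A, int lda)
	{
	int ii, jj, kk;
	for(jj=0; jj<m; jj++)
		{
		double c = A[jj+lda*jj];
		for(kk=0; kk<jj; kk++)
			c -= A[jj+lda*kk] * A[jj+lda*kk];
		double f_inv = c>0.0 ? 1.0/sqrt(c) : 0.0; // same guard as the code above
		A[jj+lda*jj] = c * f_inv;                 // sqrt(c), or 0 if c<=0
		for(ii=jj+1; ii<m; ii++)
			{
			double cij = A[ii+lda*jj];
			for(kk=0; kk<jj; kk++)
				cij -= A[ii+lda*kk] * A[jj+lda*kk];
			A[ii+lda*jj] = cij * f_inv;
			}
		}
	}

int main()
	{
	double A[9] = {4, 2, 2,  2, 5, 3,  2, 3, 6}; // column-major SPD matrix
	potrf_l_ref(3, A, 3);
	printf("diag(L) = %f %f %f\n", A[0], A[4], A[8]); // expected 2 2 2
	return 0;
	}
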
-
-
-// dpotrf
-void POTRF_L_MN_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	REAL
-		f_00_inv, 
-		f_10, f_11_inv,
-		c_00, c_01,
-		c_10, c_11;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dD = sD->dA;
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+0+ldc*(jj+0)];
-		c_10 = pC[jj+1+ldc*(jj+0)];
-		c_11 = pC[jj+1+ldc*(jj+1)];
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+0+ldd*kk] * pD[jj+0+ldd*kk];
-			c_10 -= pD[jj+1+ldd*kk] * pD[jj+0+ldd*kk];
-			c_11 -= pD[jj+1+ldd*kk] * pD[jj+1+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj+0] = f_00_inv;
-		pD[jj+0+ldd*(jj+0)] = c_00 * f_00_inv;
-		f_10 = c_10 * f_00_inv;
-		pD[jj+1+ldd*(jj+0)] = f_10;
-		c_11 -= f_10 * f_10;
-		if(c_11>0)
-			{
-			f_11_inv = 1.0/sqrt(c_11);
-			}
-		else
-			{
-			f_11_inv = 0.0;
-			}
-		dD[jj+1] = f_11_inv;
-		pD[jj+1+ldd*(jj+1)] = c_11 * f_11_inv;
-		// solve lower
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_10 = pC[ii+1+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			c_11 = pC[ii+1+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_10 -= pD[ii+1+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				c_11 -= pD[ii+1+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			c_10 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			pD[ii+1+ldd*(jj+0)] = c_10;
-			c_01 -= c_00 * f_10;
-			c_11 -= c_10 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			pD[ii+1+ldd*(jj+1)] = c_11 * f_11_inv;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			c_01 -= c_00 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+ldc*jj];
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+ldd*kk] * pD[jj+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj] = f_00_inv;
-		pD[jj+ldd*jj] = c_00 * f_00_inv;
-		// solve lower
-		for(ii=jj+1; ii<m; ii++)
-			{
-			c_00 = pC[ii+ldc*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+ldd*kk] * pD[jj+ldd*kk];
-				}
-			pD[ii+ldd*jj] = c_00 * f_00_inv;
-			}
-		}
-	return;
-	}
-
-
-
-// dsyrk dpotrf
-void SYRK_POTRF_LN_LIBSTR(int m, int n, int k, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int ii, jj, kk;
-	REAL
-		f_00_inv, 
-		f_10, f_11_inv,
-		c_00, c_01,
-		c_10, c_11;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA + ai + aj*lda;
-	REAL *pB = sB->pA + bi + bj*ldb;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dD = sD->dA;
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+0+ldc*(jj+0)];
-		c_10 = pC[jj+1+ldc*(jj+0)];
-		c_11 = pC[jj+1+ldc*(jj+1)];
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+0+lda*kk] * pB[jj+0+ldb*kk];
-			c_10 += pA[jj+1+lda*kk] * pB[jj+0+ldb*kk];
-			c_11 += pA[jj+1+lda*kk] * pB[jj+1+ldb*kk];
-			}
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+0+ldd*kk] * pD[jj+0+ldd*kk];
-			c_10 -= pD[jj+1+ldd*kk] * pD[jj+0+ldd*kk];
-			c_11 -= pD[jj+1+ldd*kk] * pD[jj+1+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj+0] = f_00_inv;
-		pD[jj+0+ldd*(jj+0)] = c_00 * f_00_inv;
-		f_10 = c_10 * f_00_inv;
-		pD[jj+1+ldd*(jj+0)] = f_10;
-		c_11 -= f_10 * f_10;
-		if(c_11>0)
-			{
-			f_11_inv = 1.0/sqrt(c_11);
-			}
-		else
-			{
-			f_11_inv = 0.0;
-			}
-		dD[jj+1] = f_11_inv;
-		pD[jj+1+ldd*(jj+1)] = c_11 * f_11_inv;
-		// solve lower
-		ii = jj+2;
-		for(; ii<m-1; ii+=2)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_10 = pC[ii+1+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			c_11 = pC[ii+1+ldc*(jj+1)];
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_10 += pA[ii+1+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				c_11 += pA[ii+1+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_10 -= pD[ii+1+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				c_11 -= pD[ii+1+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			c_10 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			pD[ii+1+ldd*(jj+0)] = c_10;
-			c_01 -= c_00 * f_10;
-			c_11 -= c_10 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			pD[ii+1+ldd*(jj+1)] = c_11 * f_11_inv;
-			}
-		for(; ii<m; ii++)
-			{
-			c_00 = pC[ii+0+ldc*(jj+0)];
-			c_01 = pC[ii+0+ldc*(jj+1)];
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+0+lda*kk] * pB[jj+0+ldb*kk];
-				c_01 += pA[ii+0+lda*kk] * pB[jj+1+ldb*kk];
-				}
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+0+ldd*kk] * pD[jj+0+ldd*kk];
-				c_01 -= pD[ii+0+ldd*kk] * pD[jj+1+ldd*kk];
-				}
-			c_00 *= f_00_inv;
-			pD[ii+0+ldd*(jj+0)] = c_00;
-			c_01 -= c_00 * f_10;
-			pD[ii+0+ldd*(jj+1)] = c_01 * f_11_inv;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// factorize diagonal
-		c_00 = pC[jj+ldc*jj];
-		for(kk=0; kk<k; kk++)
-			{
-			c_00 += pA[jj+lda*kk] * pB[jj+ldb*kk];
-			}
-		for(kk=0; kk<jj; kk++)
-			{
-			c_00 -= pD[jj+ldd*kk] * pD[jj+ldd*kk];
-			}
-		if(c_00>0)
-			{
-			f_00_inv = 1.0/sqrt(c_00);
-			}
-		else
-			{
-			f_00_inv = 0.0;
-			}
-		dD[jj] = f_00_inv;
-		pD[jj+ldd*jj] = c_00 * f_00_inv;
-		// solve lower
-		for(ii=jj+1; ii<m; ii++)
-			{
-			c_00 = pC[ii+ldc*jj];
-			for(kk=0; kk<k; kk++)
-				{
-				c_00 += pA[ii+lda*kk] * pB[jj+ldb*kk];
-				}
-			for(kk=0; kk<jj; kk++)
-				{
-				c_00 -= pD[ii+ldd*kk] * pD[jj+ldd*kk];
-				}
-			pD[ii+ldd*jj] = c_00 * f_00_inv;
-			}
-		}
-	return;
-	}
-
-
-
-// dgetrf without pivoting
-void GETF2_NOPIVOT(int m, int n, REAL *A, int lda, REAL *dA)
-	{
-	int ii, jj, kk, itmp0, itmp1;
-	int iimax = m<n ? m : n;
-	int i1 = 1;
-	REAL dtmp;
-	REAL dm1 = -1.0;
-
-	for(ii=0; ii<iimax; ii++)
-		{
-		itmp0 = m-ii-1;
-		dtmp = 1.0/A[ii+lda*ii];
-		dA[ii] = dtmp;
-		for(jj=0; jj<itmp0; jj++)
-			{
-			A[ii+1+jj+lda*ii] *= dtmp;
-			}
-		itmp1 = n-ii-1;
-		for(jj=0; jj<itmp1; jj++)
-			{
-			for(kk=0; kk<itmp0; kk++)
-				{
-				A[(ii+1+kk)+lda*(ii+1+jj)] -= A[(ii+1+kk)+lda*ii] * A[ii+lda*(ii+1+jj)];
-				}
-			}
-		}
-	return;
-	}
-
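
GETF2_NOPIVOT above is a right-looking unpivoted LU: at each step the pivot column is scaled by the reciprocal of the pivot and a rank-1 update is applied to the trailing submatrix. A hedged standalone sketch of that pattern on a plain column-major array follows (the helper name and the 3x3 test matrix are invented; the reciprocal-of-pivot store into dA is omitted, and all pivots must be nonzero).

/* Illustrative sketch only: unpivoted right-looking LU, the same
 * scale-pivot-column + rank-1-update pattern as GETF2_NOPIVOT above. */
#include <stdio.h>

static void getf2_nopivot_ref(int m, int n, double *A, int lda)
	{
	int ii, jj, kk;
	int kmax = m<n ? m : n;
	for(kk=0; kk<kmax; kk++)
		{
		double pivinv = 1.0/A[kk+lda*kk];
		for(ii=kk+1; ii<m; ii++)
			A[ii+lda*kk] *= pivinv;                      // column of L below the pivot
		for(jj=kk+1; jj<n; jj++)
			for(ii=kk+1; ii<m; ii++)
				A[ii+lda*jj] -= A[ii+lda*kk] * A[kk+lda*jj]; // trailing update
		}
	}

int main()
	{
	double A[9] = {2, 4, 6,  1, 5, 9,  1, 3, 10}; // column-major, A = L*U
	getf2_nopivot_ref(3, 3, A, 3);
	printf("diag(U) = %f %f %f\n", A[0], A[4], A[8]); // expected 2 3 5
	return 0;
	}
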
-
-
-// dgetrf without pivoting
-void GETRF_NOPIVOT_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	int ii, jj, kk;
-//	int i1 = 1;
-//	REAL d1 = 1.0;
-	REAL
-		d_00_inv, d_11_inv,
-		d_00, d_01,
-		d_10, d_11;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pC = sC->pA + ci + cj*ldc;
-	REAL *pD = sD->pA + di + dj*ldd;
-	REAL *dD = sD->dA;
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-#if 1
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		// upper
-		ii = 0;
-		for(; ii<jj-1; ii+=2)
-			{
-			// correct upper
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_10 = pC[(ii+1)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			d_11 = pC[(ii+1)+ldc*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve upper
-			d_10 -= pD[(ii+1)+ldd*kk] * d_00;
-			d_11 -= pD[(ii+1)+ldd*kk] * d_01;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<jj; ii++)
-			{
-			// correct upper
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve upper
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			}
-		// diagonal
-		ii = jj;
-		if(ii<m-1)
-			{
-			// correct diagonal
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_10 = pC[(ii+1)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			d_11 = pC[(ii+1)+ldc*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// factorize diagonal
-			d_00_inv = 1.0/d_00;
-			d_10 *= d_00_inv;
-			d_11 -= d_10 * d_01;
-			d_11_inv = 1.0/d_11;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			dD[ii+0] = d_00_inv;
-			dD[ii+1] = d_11_inv;
-			ii += 2;
-			}
-		else if(ii<m)
-			{
-			// correct diagonal
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// factorize diagonal
-			d_00_inv = 1.0/d_00;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			dD[ii+0] = d_00_inv;
-			ii += 1;
-			}
-		// lower
-		for(; ii<m-1; ii+=2)
-			{
-			// correct lower
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_10 = pC[(ii+1)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			d_11 = pC[(ii+1)+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve lower
-			d_00 *= d_00_inv;
-			d_10 *= d_00_inv;
-			d_01 -= d_00 * pD[kk+ldd*(jj+1)];
-			d_11 -= d_10 * pD[kk+ldd*(jj+1)];
-			d_01 *= d_11_inv;
-			d_11 *= d_11_inv;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<m; ii++)
-			{
-			// correct lower
-			d_00 = pC[(ii+0)+ldc*(jj+0)];
-			d_01 = pC[(ii+0)+ldc*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve lower
-			d_00 *= d_00_inv;
-			d_01 -= d_00 * pD[kk+ldd*(jj+1)];
-			d_01 *= d_11_inv;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		// upper
-		ii = 0;
-		for(; ii<jj-1; ii+=2)
-			{
-			// correct upper
-			d_00 = pC[(ii+0)+ldc*jj];
-			d_10 = pC[(ii+1)+ldc*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve upper
-			d_10 -= pD[(ii+1)+ldd*kk] * d_00;
-			pD[(ii+0)+ldd*jj] = d_00;
-			pD[(ii+1)+ldd*jj] = d_10;
-			}
-		for(; ii<jj; ii++)
-			{
-			// correct upper
-			d_00 = pC[(ii+0)+ldc*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve upper
-			pD[(ii+0)+ldd*jj] = d_00;
-			}
-		// diagonal
-		ii = jj;
-		if(ii<m-1)
-			{
-			// correct diagonal
-			d_00 = pC[(ii+0)+ldc*jj];
-			d_10 = pC[(ii+1)+ldc*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// factorize diagonal
-			d_00_inv = 1.0/d_00;
-			d_10 *= d_00_inv;
-			pD[(ii+0)+ldd*jj] = d_00;
-			pD[(ii+1)+ldd*jj] = d_10;
-			dD[ii+0] = d_00_inv;
-			ii += 2;
-			}
-		else if(ii<m)
-			{
-			// correct diagonal
-			d_00 = pC[(ii+0)+ldc*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// factorize diagonal
-			d_00_inv = 1.0/d_00;
-			pD[(ii+0)+ldd*jj] = d_00;
-			dD[ii+0] = d_00_inv;
-			ii += 1;
-			}
-		// lower
-		for(; ii<m-1; ii+=2)
-			{
-			// correct lower
-			d_00 = pC[(ii+0)+ldc*jj];
-			d_10 = pC[(ii+1)+ldc*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve lower
-			d_00 *= d_00_inv;
-			d_10 *= d_00_inv;
-			pD[(ii+0)+ldd*jj] = d_00;
-			pD[(ii+1)+ldd*jj] = d_10;
-			}
-		for(; ii<m; ii++)
-			{
-			// correct lower
-			d_00 = pC[(ii+0)+ldc*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve lower
-			d_00 *= d_00_inv;
-			pD[(ii+0)+ldd*jj] = d_00;
-			}
-		}
-#else
-	if(pC!=pD)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*jj] = pC[ii+ldc*jj];
-				}
-			}
-		}
-	GETF2_NOPIVOT(m, n, pD, ldd, dD);
-#endif
-	return;
-	}
-
-
-
-// dgetrf pivoting
-void GETRF_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj, int *ipiv)
-	{
-	int ii, i0, jj, kk, ip, itmp0, itmp1;
-	REAL dtmp, dmax;
-	REAL
-		d_00_inv, d_11_inv,
-		d_00, d_01,
-		d_10, d_11;
-	int i1 = 1;
-	REAL d1 = 1.0;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	REAL *pC = sC->pA+ci+cj*ldc;
-	REAL *pD = sD->pA+di+dj*ldd;
-	REAL *dD = sD->dA;
-	if(di==0 & dj==0)
-		sD->use_dA = 1;
-	else
-		sD->use_dA = 0;
-	// copy if needed
-	if(pC!=pD)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*jj] = pC[ii+ldc*jj];
-				}
-			}
-		}
-	// factorize
-#if 1
-	jj = 0;
-	for(; jj<n-1; jj+=2)
-		{
-		ii = 0;
-		for(; ii<jj-1; ii+=2)
-			{
-			// correct upper
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_10 = pD[(ii+1)+ldd*(jj+0)];
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			d_11 = pD[(ii+1)+ldd*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve upper
-			d_10 -= pD[(ii+1)+ldd*kk] * d_00;
-			d_11 -= pD[(ii+1)+ldd*kk] * d_01;
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<jj; ii++)
-			{
-			// correct upper
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			// solve upper
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			}
-		// correct diagonal and lower and look for pivot
-		// correct
-		ii = jj;
-		i0 = ii;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_10 = pD[(ii+1)+ldd*(jj+0)];
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			d_11 = pD[(ii+1)+ldd*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				d_11 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+0)];
-				d_01 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*(jj+1)];
-				}
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			}
-		// look for pivot & solve
-		// left column
-		ii = i0;
-		dmax = 0;
-		ip = ii;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = pD[(ii+0)+ldd*jj];
-			d_10 = pD[(ii+1)+ldd*jj];
-			dtmp = d_00>0.0 ? d_00 : -d_00;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			dtmp = d_10>0.0 ? d_10 : -d_10;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+1;
-				}
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[(ii+0)+ldd*jj];
-			dtmp = d_00>0.0 ? d_00 : -d_00;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			}
-		// row swap
-		ii = i0;
-		ipiv[ii] = ip;
-		if(ip!=ii)
-			{
-			for(kk=0; kk<n; kk++)
-				{
-				dtmp = pD[ii+ldd*kk];
-				pD[ii+ldd*kk] = pD[ip+ldd*kk];
-				pD[ip+ldd*kk] = dtmp;
-				}
-			}
-		// factorize diagonal
-		d_00 = pD[(ii+0)+ldd*(jj+0)];
-		d_00_inv = 1.0/d_00;
-		pD[(ii+0)+ldd*(jj+0)] = d_00;
-		dD[ii] = d_00_inv;
-		ii += 1;
-		// solve & compute next pivot
-		dmax = 0;
-		ip = ii;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_10 = pD[(ii+1)+ldd*(jj+0)];
-			d_00 *= d_00_inv;
-			d_10 *= d_00_inv;
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			d_11 = pD[(ii+1)+ldd*(jj+1)];
-			d_01 -= d_00 * pD[jj+ldd*(jj+1)];
-			d_11 -= d_10 * pD[jj+ldd*(jj+1)];
-			dtmp = d_01>0.0 ? d_01 : -d_01;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			dtmp = d_11>0.0 ? d_11 : -d_11;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+1;
-				}
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+1)+ldd*(jj+0)] = d_10;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			pD[(ii+1)+ldd*(jj+1)] = d_11;
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[(ii+0)+ldd*(jj+0)];
-			d_00 *= d_00_inv;
-			d_01 = pD[(ii+0)+ldd*(jj+1)];
-			d_01 -= d_00 * pD[jj+ldd*(jj+1)];
-			dtmp = d_01>0.0 ? d_01 : -d_01;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			pD[(ii+0)+ldd*(jj+0)] = d_00;
-			pD[(ii+0)+ldd*(jj+1)] = d_01;
-			}
-		// row swap
-		ii = i0+1;
-		ipiv[ii] = ip;
-		if(ip!=ii)
-			{
-			for(kk=0; kk<n; kk++)
-				{
-				dtmp = pD[ii+ldd*kk];
-				pD[ii+ldd*kk] = pD[ip+ldd*kk];
-				pD[ip+ldd*kk] = dtmp;
-				}
-			}
-		// factorize diagonal
-		d_00 = pD[ii+ldd*(jj+1)];
-		d_00_inv = 1.0/d_00;
-		pD[ii+ldd*(jj+1)] = d_00;
-		dD[ii] = d_00_inv;
-		ii += 1;
-		// solve lower
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[ii+ldd*(jj+1)];
-			d_00 *= d_00_inv;
-			pD[ii+ldd*(jj+1)] = d_00;
-			}
-		}
-	for(; jj<n; jj++)
-		{
-		ii = 0;
-		for(; ii<jj-1; ii+=2)
-			{
-			// correct upper
-			d_00 = pD[(ii+0)+ldd*jj];
-			d_10 = pD[(ii+1)+ldd*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve upper
-			d_10 -= pD[(ii+1)+ldd*kk] * d_00;
-			pD[(ii+0)+ldd*jj] = d_00;
-			pD[(ii+1)+ldd*jj] = d_10;
-			}
-		for(; ii<jj; ii++)
-			{
-			// correct upper
-			d_00 = pD[ii+ldd*jj];
-			for(kk=0; kk<ii; kk++)
-				{
-				d_00 -= pD[ii+ldd*kk] * pD[kk+ldd*jj];
-				}
-			// solve upper
-			pD[ii+ldd*jj] = d_00;
-			}
-		i0 = ii;
-		ii = jj;
-		// correct diagonal and lower and look for pivot
-		dmax = 0;
-		ip = ii;
-		for(; ii<m-1; ii+=2)
-			{
-			d_00 = pD[(ii+0)+ldd*jj];
-			d_10 = pD[(ii+1)+ldd*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				d_10 -= pD[(ii+1)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			dtmp = d_00>0.0 ? d_00 : -d_00;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			dtmp = d_10>0.0 ? d_10 : -d_10;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+1;
-				}
-			pD[(ii+0)+ldd*jj] = d_00;
-			pD[(ii+1)+ldd*jj] = d_10;
-			}
-		for(; ii<m; ii++)
-			{
-			d_00 = pD[(ii+0)+ldd*jj];
-			for(kk=0; kk<jj; kk++)
-				{
-				d_00 -= pD[(ii+0)+ldd*kk] * pD[kk+ldd*jj];
-				}
-			dtmp = d_00>0.0 ? d_00 : -d_00;
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+0;
-				}
-			pD[(ii+0)+ldd*jj] = d_00;
-			}
-		// row swap
-		ii = i0;
-		ipiv[ii] = ip;
-		if(ip!=ii)
-			{
-			for(kk=0; kk<n; kk++)
-				{
-				dtmp = pD[ii+ldd*kk];
-				pD[ii+ldd*kk] = pD[ip+ldd*kk];
-				pD[ip+ldd*kk] = dtmp;
-				}
-			}
-		// factorize diagonal
-		d_00 = pD[ii+ldd*jj];
-		d_00_inv = 1.0/d_00;
-		pD[ii+ldd*jj] = d_00;
-		dD[ii] = d_00_inv;
-		ii += 1;
-		for(; ii<m; ii++)
-			{
-			// correct lower
-			d_00 = pD[ii+ldd*jj];
-			// solve lower
-			d_00 *= d_00_inv;
-			pD[ii+ldd*jj] = d_00;
-			}
-		}
-#else
-	int iimax = m<n ? m : n;
-	for(ii=0; ii<iimax; ii++)
-		{
-		dmax = (pD[ii+ldd*ii]>0 ? pD[ii+ldd*ii] : -pD[ii+ldd*ii]);
-		ip = ii;
-		for(jj=1; jj<m-ii; jj++)
-			{
-			dtmp = pD[ii+jj+ldd*ii]>0 ? pD[ii+jj+ldd*ii] : -pD[ii+jj+ldd*ii];
-			if(dtmp>dmax)
-				{
-				dmax = dtmp;
-				ip = ii+jj;
-				}
-			}
-		ipiv[ii] = ip;
-		if(ip!=ii)
-			{
-			for(jj=0; jj<n; jj++)
-				{
-				dtmp = pD[ii+jj*ldd];
-				pD[ii+jj*ldd] = pD[ip+jj*ldd];
-				pD[ip+jj*ldd] = dtmp;
-				}
-			}
-		itmp0 = m-ii-1;
-		dtmp = 1.0/pD[ii+ldd*ii];
-		dD[ii] = dtmp;
-		for(jj=0; jj<itmp0; jj++)
-			{
-			pD[ii+1+jj+ldd*ii] *= dtmp;
-			}
-		itmp1 = n-ii-1;
-		for(jj=0; jj<itmp1; jj++)
-			{
-			for(kk=0; kk<itmp0; kk++)
-				{
-				pD[(ii+1+kk)+ldd*(ii+1+jj)] -= pD[(ii+1+kk)+ldd*ii] * pD[ii+ldd*(ii+1+jj)];
-				}
-			}
-		}
-#endif
-	return;	
-	}
-
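
The pivoted GETRF above stores the unit-lower L (below the diagonal) and U (on and above it) in the same array, and records a 0-based row-swap index per factored column in ipiv. A hedged sketch of how such a packed LU is typically consumed to solve A*x = b follows; it is not BLASFEO API, and the helper name and test data are invented (the LU and b below come from A = [2 1 1; 4 5 3; 6 9 10] with x = (1,1,1), which happens to need no row swaps).

/* Hedged usage sketch: apply the recorded row swaps to b, then forward- and
 * back-substitute with the packed LU produced by a routine like the one above. */
#include <stdio.h>

static void lu_solve(int n, const double *LU, int ldd, const int *ipiv, double *b)
	{
	int ii, kk;
	double tmp;
	for(ii=0; ii<n; ii++)          // apply the row permutation P to b
		{
		tmp = b[ii];
		b[ii] = b[ipiv[ii]];
		b[ipiv[ii]] = tmp;
		}
	for(ii=1; ii<n; ii++)          // forward solve with unit-lower L
		for(kk=0; kk<ii; kk++)
			b[ii] -= LU[ii+ldd*kk] * b[kk];
	for(ii=n-1; ii>=0; ii--)       // backward solve with upper U
		{
		for(kk=ii+1; kk<n; kk++)
			b[ii] -= LU[ii+ldd*kk] * b[kk];
		b[ii] /= LU[ii+ldd*ii];
		}
	}

int main()
	{
	double LU[9] = {2, 2, 3,  1, 3, 2,  1, 1, 5}; // packed L\U, column-major
	int ipiv[3] = {0, 1, 2};
	double b[3] = {4, 12, 25};
	lu_solve(3, LU, 3, ipiv, b);
	printf("x = %f %f %f\n", b[0], b[1], b[2]);   // expected 1 1 1
	return 0;
	}
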
-
-
-int GEQRF_WORK_SIZE_LIBSTR(int m, int n)
-	{
-	return 0;
-	}
-
-
-
-void GEQRF_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRMAT *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	int lda = sA->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA+ai+aj*lda;
-	REAL *pD = sD->pA+di+dj*ldd; // matrix of QR
-	REAL *dD = sD->dA+di; // vectors of tau
-	REAL alpha, beta, tmp, w0, w1;
-	REAL *pC00, *pC01, *pC11, *pv0, *pv1;
-	REAL pW[4] = {0.0, 0.0, 0.0, 0.0};
-	int ldw = 2;
-	REAL pT[4] = {0.0, 0.0, 0.0, 0.0};
-	int ldb = 2;
-	int imax, jmax, kmax;
-	// copy if needed
-	if(pA!=pD)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*jj] = pA[ii+lda*jj];
-				}
-			}
-		}
-	imax = m<n ? m : n;
-	ii = 0;
-#if 1
-	for(; ii<imax-1; ii+=2)
-		{
-		// first column
-		pC00 = &pD[ii+ldd*ii];
-		beta = 0.0;
-		for(jj=1; jj<m-ii; jj++)
-			{
-			tmp = pC00[jj+ldd*0];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			// tau0
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			// tau0
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			// compute v0
-			pC00[0+ldd*0] = beta;
-			for(jj=1; jj<m-ii; jj++)
-				{
-				pC00[jj+ldd*0] *= tmp;
-				}
-			}
-		// gemv_t & ger
-		pC01 = &pC00[0+ldd*1];
-		pv0 = &pC00[0+ldd*0];
-		kmax = m-ii;
-		w0 = pC01[0+ldd*0]; // pv0[0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			w0 += pC01[kk+ldd*0] * pv0[kk];
-			}
-		w0 = - dD[ii] * w0;
-		pC01[0+ldd*0] += w0; // pv0[0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			pC01[kk+ldd*0] += w0 * pv0[kk];
-			}
-		// second column
-		pC11 = &pD[(ii+1)+ldd*(ii+1)];
-		beta = 0.0;
-		for(jj=1; jj<m-(ii+1); jj++)
-			{
-			tmp = pC11[jj+ldd*0];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			// tau1
-			dD[(ii+1)] = 0.0;
-			}
-		else
-			{
-			alpha = pC11[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			// tau1
-			dD[(ii+1)] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			// compute v1
-			pC11[0+ldd*0] = beta;
-			for(jj=1; jj<m-(ii+1); jj++)
-				pC11[jj+ldd*0] *= tmp;
-			}
-		// compute lower triangular T containing tau for matrix update
-		pv0 = &pC00[0+ldd*0];
-		pv1 = &pC00[0+ldd*1];
-		kmax = m-ii;
-		tmp = pv0[1];
-		for(kk=2; kk<kmax; kk++)
-			tmp += pv0[kk]*pv1[kk];
-		pT[0+ldb*0] = dD[ii+0];
-		pT[1+ldb*0] = - dD[ii+1] * tmp * dD[ii+0];
-		pT[1+ldb*1] = dD[ii+1];
-		jmax = n-ii-2;
-		jj = 0;
-		for(; jj<jmax-1; jj+=2)
-			{
-			// compute W^T = C^T * V
-			pW[0+ldw*0] = pC00[0+ldd*(jj+0+2)] + pC00[1+ldd*(jj+0+2)] * pv0[1];
-			pW[1+ldw*0] = pC00[0+ldd*(jj+1+2)] + pC00[1+ldd*(jj+1+2)] * pv0[1];
-			pW[0+ldw*1] =                        pC00[1+ldd*(jj+0+2)];
-			pW[1+ldw*1] =                        pC00[1+ldd*(jj+1+2)];
-			kk = 2;
-			for(; kk<kmax; kk++)
-				{
-				tmp = pC00[kk+ldd*(jj+0+2)];
-				pW[0+ldw*0] += tmp * pv0[kk];
-				pW[0+ldw*1] += tmp * pv1[kk];
-				tmp = pC00[kk+ldd*(jj+1+2)];
-				pW[1+ldw*0] += tmp * pv0[kk];
-				pW[1+ldw*1] += tmp * pv1[kk];
-				}
-			// compute W^T *= T
-			pW[0+ldw*1] = pT[1+ldb*0]*pW[0+ldw*0] + pT[1+ldb*1]*pW[0+ldw*1];
-			pW[1+ldw*1] = pT[1+ldb*0]*pW[1+ldw*0] + pT[1+ldb*1]*pW[1+ldw*1];
-			pW[0+ldw*0] = pT[0+ldb*0]*pW[0+ldw*0];
-			pW[1+ldw*0] = pT[0+ldb*0]*pW[1+ldw*0];
-			// compute C -= V * W^T
-			pC00[0+ldd*(jj+0+2)] -= pW[0+ldw*0];
-			pC00[0+ldd*(jj+1+2)] -= pW[1+ldw*0];
-			pC00[1+ldd*(jj+0+2)] -= pv0[1]*pW[0+ldw*0] + pW[0+ldw*1];
-			pC00[1+ldd*(jj+1+2)] -= pv0[1]*pW[1+ldw*0] + pW[1+ldw*1];
-			kk = 2;
-			for(; kk<kmax-1; kk+=2)
-				{
-				pC00[kk+0+ldd*(jj+0+2)] -= pv0[kk+0]*pW[0+ldw*0] + pv1[kk+0]*pW[0+ldw*1];
-				pC00[kk+1+ldd*(jj+0+2)] -= pv0[kk+1]*pW[0+ldw*0] + pv1[kk+1]*pW[0+ldw*1];
-				pC00[kk+0+ldd*(jj+1+2)] -= pv0[kk+0]*pW[1+ldw*0] + pv1[kk+0]*pW[1+ldw*1];
-				pC00[kk+1+ldd*(jj+1+2)] -= pv0[kk+1]*pW[1+ldw*0] + pv1[kk+1]*pW[1+ldw*1];
-				}
-			for(; kk<kmax; kk++)
-				{
-				pC00[kk+ldd*(jj+0+2)] -= pv0[kk]*pW[0+ldw*0] + pv1[kk]*pW[0+ldw*1];
-				pC00[kk+ldd*(jj+1+2)] -= pv0[kk]*pW[1+ldw*0] + pv1[kk]*pW[1+ldw*1];
-				}
-			}
-		for(; jj<jmax; jj++)
-			{
-			// compute W = T * V^T * C
-			pW[0+ldw*0] = pC00[0+ldd*(jj+0+2)] + pC00[1+ldd*(jj+0+2)] * pv0[1];
-			pW[0+ldw*1] =                        pC00[1+ldd*(jj+0+2)];
-			for(kk=2; kk<kmax; kk++)
-				{
-				tmp = pC00[kk+ldd*(jj+0+2)];
-				pW[0+ldw*0] += tmp * pv0[kk];
-				pW[0+ldw*1] += tmp * pv1[kk];
-				}
-			pW[0+ldw*1] = pT[1+ldb*0]*pW[0+ldw*0] + pT[1+ldb*1]*pW[0+ldw*1];
-			pW[0+ldw*0] = pT[0+ldb*0]*pW[0+ldw*0];
-			// compute C -= V * W^T
-			pC00[0+ldd*(jj+0+2)] -= pW[0+ldw*0];
-			pC00[1+ldd*(jj+0+2)] -= pv0[1]*pW[0+ldw*0] + pW[0+ldw*1];
-			for(kk=2; kk<kmax; kk++)
-				{
-				pC00[kk+ldd*(jj+0+2)] -= pv0[kk]*pW[0+ldw*0] + pv1[kk]*pW[0+ldw*1];
-				}
-			}
-		}
-#endif
-	for(; ii<imax; ii++)
-		{
-		pC00 = &pD[ii+ldd*ii];
-		beta = 0.0;
-		for(jj=1; jj<m-ii; jj++)
-			{
-			tmp = pC00[jj+ldd*0];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			for(jj=1; jj<m-ii; jj++)
-				pC00[jj+ldd*0] *= tmp;
-			pC00[0+ldd*0] = beta;
-			}
-		if(ii<n)
-			{
-			// gemv_t & ger
-			pC01 = &pC00[0+ldd*1];
-			pv0 = &pC00[0+ldd*0];
-			jmax = n-ii-1;
-			kmax = m-ii;
-			jj = 0;
-			for(; jj<jmax-1; jj+=2)
-				{
-				w0 = pC01[0+ldd*(jj+0)]; // pv0[0] = 1.0
-				w1 = pC01[0+ldd*(jj+1)]; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					w0 += pC01[kk+ldd*(jj+0)] * pv0[kk];
-					w1 += pC01[kk+ldd*(jj+1)] * pv0[kk];
-					}
-				w0 = - dD[ii] * w0;
-				w1 = - dD[ii] * w1;
-				pC01[0+ldd*(jj+0)] += w0; // pv0[0] = 1.0
-				pC01[0+ldd*(jj+1)] += w1; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC01[kk+ldd*(jj+0)] += w0 * pv0[kk];
-					pC01[kk+ldd*(jj+1)] += w1 * pv0[kk];
-					}
-				}
-			for(; jj<jmax; jj++)
-				{
-				w0 = pC01[0+ldd*jj]; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					w0 += pC01[kk+ldd*jj] * pv0[kk];
-					}
-				w0 = - dD[ii] * w0;
-				pC01[0+ldd*jj] += w0; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC01[kk+ldd*jj] += w0 * pv0[kk];
-					}
-				}
-			}
-		}
-	return;
-	}
-
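
The QR code above builds each Householder reflector with beta = -sign(alpha)*||x||, tau = (beta-alpha)/beta, and the reflector tail scaled by 1/(alpha-beta), so that applying I - tau*v*v^T annihilates the column below the diagonal. A small self-contained check of those formulas follows; the 4-vector is invented test data.

/* Check of the Householder formulas used in the QR/LQ routines above:
 * (I - tau*v*v^T)*x should equal (beta, 0, ..., 0)^T. Compile with -lm. */
#include <math.h>
#include <stdio.h>

int main()
	{
	double x[4] = {3.0, 1.0, 5.0, 1.0};
	double v[4];
	int m = 4, jj;
	double beta = 0.0;
	for(jj=1; jj<m; jj++)
		beta += x[jj]*x[jj];
	double alpha = x[0];
	beta = sqrt(beta + alpha*alpha);
	if(alpha>0)
		beta = -beta;
	double tau = (beta-alpha)/beta;
	double scal = 1.0/(alpha-beta);
	v[0] = 1.0;
	for(jj=1; jj<m; jj++)
		v[jj] = x[jj]*scal;
	double w = 0.0;                 // apply H = I - tau*v*v^T to x
	for(jj=0; jj<m; jj++)
		w += v[jj]*x[jj];
	for(jj=0; jj<m; jj++)
		x[jj] -= tau*w*v[jj];
	printf("beta = %f, H*x = %f %f %f %f\n", beta, x[0], x[1], x[2], x[3]);
	// expected: beta = -6 and H*x = (-6, 0, 0, 0)
	return 0;
	}
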
-
-
-int GELQF_WORK_SIZE_LIBSTR(int m, int n)
-	{
-	return 0;
-	}
-
-
-
-void GELQF_LIBSTR(int m, int n, struct STRMAT *sA, int ai, int aj, struct STRMAT *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk;
-	int lda = sA->m;
-	int ldd = sD->m;
-	REAL *pA = sA->pA+ai+aj*lda;
-	REAL *pD = sD->pA+di+dj*ldd; // matrix of QR
-	REAL *dD = sD->dA+di; // vectors of tau
-	REAL alpha, beta, tmp, w0, w1;
-	REAL *pC00, *pC10, *pC11, *pv0, *pv1;
-	REAL pW[4] = {0.0, 0.0, 0.0, 0.0};
-	int ldw = 2;
-	REAL pT[4] = {0.0, 0.0, 0.0, 0.0};
-	int ldb = 2;
-	int imax, jmax, kmax;
-	// copy if needed
-	if(pA!=pD)
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			for(ii=0; ii<m; ii++)
-				{
-				pD[ii+ldd*jj] = pA[ii+lda*jj];
-				}
-			}
-		}
-	imax = m<n ? m : n;
-	ii = 0;
-#if 1
-	for(; ii<imax-1; ii+=2)
-		{
-		// first column
-		pC00 = &pD[ii+ldd*ii];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ldd*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			// tau0
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			// tau0
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			// compute v0
-			pC00[0+ldd*0] = beta;
-			for(jj=1; jj<n-ii; jj++)
-				{
-				pC00[0+ldd*jj] *= tmp;
-				}
-			}
-		// gemv_t & ger
-		pC10 = &pC00[1+ldd*0];
-		pv0 = &pC00[0+ldd*0];
-		kmax = n-ii;
-		w0 = pC10[0+ldd*0]; // pv0[0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			w0 += pC10[0+ldd*kk] * pv0[0+ldd*kk];
-			}
-		w0 = - dD[ii] * w0;
-		pC10[0+ldd*0] += w0; // pv0[0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			pC10[0+ldd*kk] += w0 * pv0[0+ldd*kk];
-			}
-		// second row
-		pC11 = &pD[(ii+1)+ldd*(ii+1)];
-		beta = 0.0;
-		for(jj=1; jj<n-(ii+1); jj++)
-			{
-			tmp = pC11[0+ldd*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			// tau1
-			dD[(ii+1)] = 0.0;
-			}
-		else
-			{
-			alpha = pC11[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			// tau1
-			dD[(ii+1)] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			// compute v1
-			pC11[0+ldd*0] = beta;
-			for(jj=1; jj<n-(ii+1); jj++)
-				pC11[0+ldd*jj] *= tmp;
-			}
-		// compute lower triangular T containing tau for matrix update
-		pv0 = &pC00[0+ldd*0];
-		pv1 = &pC00[1+ldd*0];
-		kmax = n-ii;
-		tmp = pv0[0+ldd*1];
-		for(kk=2; kk<kmax; kk++)
-			tmp += pv0[0+ldd*kk]*pv1[0+ldd*kk];
-		pT[0+ldb*0] = dD[ii+0];
-		pT[1+ldb*0] = - dD[ii+1] * tmp * dD[ii+0];
-		pT[1+ldb*1] = dD[ii+1];
-		// downgrade
-		jmax = m-ii-2;
-		jj = 0;
-		for(; jj<jmax-1; jj+=2)
-			{
-			// compute W^T = C^T * V
-			pW[0+ldw*0] = pC00[jj+0+2+ldd*0] + pC00[jj+0+2+ldd*1] * pv0[0+ldd*1];
-			pW[1+ldw*0] = pC00[jj+1+2+ldd*0] + pC00[jj+1+2+ldd*1] * pv0[0+ldd*1];
-			pW[0+ldw*1] =                      pC00[jj+0+2+ldd*1];
-			pW[1+ldw*1] =                      pC00[jj+1+2+ldd*1];
-			kk = 2;
-			for(; kk<kmax; kk++)
-				{
-				tmp = pC00[jj+0+2+ldd*kk];
-				pW[0+ldw*0] += tmp * pv0[0+ldd*kk];
-				pW[0+ldw*1] += tmp * pv1[0+ldd*kk];
-				tmp = pC00[jj+1+2+ldd*kk];
-				pW[1+ldw*0] += tmp * pv0[0+ldd*kk];
-				pW[1+ldw*1] += tmp * pv1[0+ldd*kk];
-				}
-			// compute W^T *= T
-			pW[0+ldw*1] = pT[1+ldb*0]*pW[0+ldw*0] + pT[1+ldb*1]*pW[0+ldw*1];
-			pW[1+ldw*1] = pT[1+ldb*0]*pW[1+ldw*0] + pT[1+ldb*1]*pW[1+ldw*1];
-			pW[0+ldw*0] = pT[0+ldb*0]*pW[0+ldw*0];
-			pW[1+ldw*0] = pT[0+ldb*0]*pW[1+ldw*0];
-			// compute C -= V * W^T
-			pC00[jj+0+2+ldd*0] -= pW[0+ldw*0];
-			pC00[jj+1+2+ldd*0] -= pW[1+ldw*0];
-			pC00[jj+0+2+ldd*1] -= pv0[0+ldd*1]*pW[0+ldw*0] + pW[0+ldw*1];
-			pC00[jj+1+2+ldd*1] -= pv0[0+ldd*1]*pW[1+ldw*0] + pW[1+ldw*1];
-			kk = 2;
-			for(; kk<kmax-1; kk+=2)
-				{
-				pC00[jj+0+2+ldd*(kk+0)] -= pv0[0+ldd*(kk+0)]*pW[0+ldw*0] + pv1[0+ldd*(kk+0)]*pW[0+ldw*1];
-				pC00[jj+0+2+ldd*(kk+1)] -= pv0[0+ldd*(kk+1)]*pW[0+ldw*0] + pv1[0+ldd*(kk+1)]*pW[0+ldw*1];
-				pC00[jj+1+2+ldd*(kk+0)] -= pv0[0+ldd*(kk+0)]*pW[1+ldw*0] + pv1[0+ldd*(kk+0)]*pW[1+ldw*1];
-				pC00[jj+1+2+ldd*(kk+1)] -= pv0[0+ldd*(kk+1)]*pW[1+ldw*0] + pv1[0+ldd*(kk+1)]*pW[1+ldw*1];
-				}
-			for(; kk<kmax; kk++)
-				{
-				pC00[jj+0+2+ldd*kk] -= pv0[0+ldd*kk]*pW[0+ldw*0] + pv1[0+ldd*kk]*pW[0+ldw*1];
-				pC00[jj+1+2+ldd*kk] -= pv0[0+ldd*kk]*pW[1+ldw*0] + pv1[0+ldd*kk]*pW[1+ldw*1];
-				}
-			}
-		for(; jj<jmax; jj++)
-			{
-			// compute W = T * V^T * C
-			pW[0+ldw*0] = pC00[jj+0+2+ldd*0] + pC00[jj+0+2+ldd*1] * pv0[0+ldd*1];
-			pW[0+ldw*1] =                      pC00[jj+0+2+ldd*1];
-			for(kk=2; kk<kmax; kk++)
-				{
-				tmp = pC00[jj+0+2+ldd*kk];
-				pW[0+ldw*0] += tmp * pv0[0+ldd*kk];
-				pW[0+ldw*1] += tmp * pv1[0+ldd*kk];
-				}
-			pW[0+ldw*1] = pT[1+ldb*0]*pW[0+ldw*0] + pT[1+ldb*1]*pW[0+ldw*1];
-			pW[0+ldw*0] = pT[0+ldb*0]*pW[0+ldw*0];
-			// compute C -= V * W^T
-			pC00[jj+0+2+ldd*0] -= pW[0+ldw*0];
-			pC00[jj+0+2+ldd*1] -= pv0[0+ldd*1]*pW[0+ldw*0] + pW[0+ldw*1];
-			for(kk=2; kk<kmax; kk++)
-				{
-				pC00[jj+0+2+ldd*kk] -= pv0[0+ldd*kk]*pW[0+ldw*0] + pv1[0+ldd*kk]*pW[0+ldw*1];
-				}
-			}
-		}
-#endif
-	for(; ii<imax; ii++)
-		{
-		pC00 = &pD[ii+ldd*ii];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ldd*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0+ldd*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			for(jj=1; jj<n-ii; jj++)
-				pC00[0+ldd*jj] *= tmp;
-			pC00[0+ldd*0] = beta;
-			}
-		if(ii<n)
-			{
-			// gemv_t & ger
-			pC10 = &pC00[1+ldd*0];
-			pv0 = &pC00[0+ldd*0];
-			jmax = m-ii-1;
-			kmax = n-ii;
-			jj = 0;
-			for(; jj<jmax-1; jj+=2)
-				{
-				w0 = pC10[jj+0+ldd*0]; // pv0[0] = 1.0
-				w1 = pC10[jj+1+ldd*0]; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					w0 += pC10[jj+0+ldd*kk] * pv0[0+ldd*kk];
-					w1 += pC10[jj+1+ldd*kk] * pv0[0+ldd*kk];
-					}
-				w0 = - dD[ii] * w0;
-				w1 = - dD[ii] * w1;
-				pC10[jj+0+ldd*0] += w0; // pv0[0] = 1.0
-				pC10[jj+1+ldd*0] += w1; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC10[jj+0+ldd*kk] += w0 * pv0[0+ldd*kk];
-					pC10[jj+1+ldd*kk] += w1 * pv0[0+ldd*kk];
-					}
-				}
-			for(; jj<jmax; jj++)
-				{
-				w0 = pC10[jj+ldd*0]; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					w0 += pC10[jj+ldd*kk] * pv0[0+ldd*kk];
-					}
-				w0 = - dD[ii] * w0;
-				pC10[jj+ldd*0] += w0; // pv0[0] = 1.0
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC10[jj+ldd*kk] += w0 * pv0[0+ldd*kk];
-					}
-				}
-			}
-		}
-	return;
-	}
-
-
-
-#elif defined(LA_BLAS)
-
-
-
-// dpotrf
-void POTRF_L_LIBSTR(int m, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0)
-		return;
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	REAL d1 = 1.0;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long info;
-	long long tmp;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<m; jj++)
-			{
-			tmp = m-jj;
-			COPY(&tmp, pC+jj*ldc+jj, &i1, pD+jj*ldd+jj, &i1);
-			}
-		}
-	POTRF(&cl, &mm, pD, &ldd, &info);
-#else
-	int i1 = 1;
-	int info;
-	int tmp;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<m; jj++)
-			{
-			tmp = m-jj;
-			COPY(&tmp, pC+jj*ldc+jj, &i1, pD+jj*ldd+jj, &i1);
-			}
-		}
-	POTRF(&cl, &m, pD, &ldd, &info);
-#endif
-	return;
-	}
-
-
-
-// dpotrf
-void POTRF_L_MN_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	REAL d1 = 1.0;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long mmn = mm-nn;
-	long long info;
-	long long tmp;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			tmp = m-jj;
-			COPY(&tmp, pC+jj*ldc+jj, &i1, pD+jj*ldd+jj, &i1);
-			}
-		}
-	POTRF(&cl, &nn, pD, &ldd, &info);
-	TRSM(&cr, &cl, &ct, &cn, &mmn, &nn, &d1, pD, &ldd, pD+n, &ldd);
-#else
-	int i1 = 1;
-	int mmn = m-n;
-	int info;
-	int tmp;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			{
-			tmp = m-jj;
-			COPY(&tmp, pC+jj*ldc+jj, &i1, pD+jj*ldd+jj, &i1);
-			}
-		}
-	POTRF(&cl, &n, pD, &ldd, &info);
-	TRSM(&cr, &cl, &ct, &cn, &mmn, &n, &d1, pD, &ldd, pD+n, &ldd);
-#endif
-	return;
-	}
-
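
In the LA_BLAS branch, the m>n case above factorizes the top n-by-n block with POTRF and then solves the trailing (m-n)-by-n block against L11^T with a right-side TRSM. Below is a hedged standalone sketch of the same two-call pattern written against the plain Fortran LAPACK/BLAS symbols; it assumes an LP64 build exposing dpotrf_/dtrsm_ (link with -llapack -lblas), and the 3-by-2 test data is invented.

/* Hedged sketch: Cholesky of the top block, then TRSM for the trailing rows,
 * the same sequence as POTRF_L_MN above. */
#include <stdio.h>

void dpotrf_(const char *uplo, const int *n, double *a, const int *lda, int *info);
void dtrsm_(const char *side, const char *uplo, const char *transa, const char *diag,
            const int *m, const int *n, const double *alpha,
            const double *a, const int *lda, double *b, const int *ldb);

int main()
	{
	int m = 3, n = 2, lda = 3, mmn = m-n, info = 0;
	double d1 = 1.0;
	double A[6] = {4, 2, 2,  2, 5, 3}; // column-major 3x2 block [A11; A21]
	dpotrf_("L", &n, A, &lda, &info);                                // A11 = L11*L11^T
	dtrsm_("R", "L", "T", "N", &mmn, &n, &d1, A, &lda, A+n, &lda);   // A21 <- A21*inv(L11^T)
	printf("info = %d, L = [%.1f  .; %.1f %.1f; %.1f %.1f]\n",
		info, A[0], A[1], A[4], A[2], A[5]);                     // 2; 1 2; 1 1
	return 0;
	}
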
-
-
-// dsyrk dpotrf
-void SYRK_POTRF_LN_LIBSTR(int m, int n, int k, struct STRMAT *sA, int ai, int aj, struct STRMAT *sB, int bi, int bj, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	char cl = 'l';
-	char cn = 'n';
-	char cr = 'r';
-	char ct = 't';
-	char cu = 'u';
-	REAL d1 = 1.0;
-	REAL *pA = sA->pA + ai + aj*sA->m;
-	REAL *pB = sB->pA + bi + bj*sB->m;
-	REAL *pC = sC->pA + ci + cj*sC->m;
-	REAL *pD = sD->pA + di + dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long kk = k;
-	long long mmn = mm-nn;
-	long long info;
-	long long lda = sA->m;
-	long long ldb = sB->m;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &nn, &kk, &d1, pA, &lda, &d1, pD, &ldd);
-		GEMM(&cn, &ct, &mmn, &nn, &kk, &d1, pA+n, &lda, pB, &ldb, &d1, pD+n, &ldd);
-		POTRF(&cl, &nn, pD, &ldd, &info);
-		TRSM(&cr, &cl, &ct, &cn, &mmn, &nn, &d1, pD, &ldd, pD+n, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &mm, &nn, &kk, &d1, pA, &lda, pB, &ldb, &d1, pD, &ldd);
-		POTRF(&cl, &nn, pD, &ldd, &info);
-		TRSM(&cr, &cl, &ct, &cn, &mmn, &nn, &d1, pD, &ldd, pD+n, &ldd);
-		}
-#else
-	int i1 = 1;
-	int mmn = m-n;
-	int info;
-	int lda = sA->m;
-	int ldb = sB->m;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	if(pA==pB)
-		{
-		SYRK(&cl, &cn, &n, &k, &d1, pA, &lda, &d1, pD, &ldd);
-		GEMM(&cn, &ct, &mmn, &n, &k, &d1, pA+n, &lda, pB, &ldb, &d1, pD+n, &ldd);
-		POTRF(&cl, &n, pD, &ldd, &info);
-		TRSM(&cr, &cl, &ct, &cn, &mmn, &n, &d1, pD, &ldd, pD+n, &ldd);
-		}
-	else
-		{
-		GEMM(&cn, &ct, &m, &n, &k, &d1, pA, &lda, pB, &ldb, &d1, pD, &ldd);
-		POTRF(&cl, &n, pD, &ldd, &info);
-		TRSM(&cr, &cl, &ct, &cn, &mmn, &n, &d1, pD, &ldd, pD+n, &ldd);
-		}
-#endif
-	return;
-	}
-
-
-
-// dgetrf without pivoting
-#if defined(REF_BLAS_BLIS)
-static void GETF2_NOPIVOT(long long m, long long n, REAL *A, long long lda)
-	{
-	if(m<=0 | n<=0)
-		return;
-	long long i, j;
-	long long jmax = m<n ? m : n;
-	REAL dtmp;
-	REAL dm1 = -1.0;
-	long long itmp0, itmp1;
-	long long i1 = 1;
-	for(j=0; j<jmax; j++)
-		{
-		itmp0 = m-j-1;
-		dtmp = 1.0/A[j+lda*j];
-		SCAL(&itmp0, &dtmp, &A[(j+1)+lda*j], &i1);
-		itmp1 = n-j-1;
-		GER(&itmp0, &itmp1, &dm1, &A[(j+1)+lda*j], &i1, &A[j+lda*(j+1)], &lda, &A[(j+1)+lda*(j+1)], &lda);
-		}
-	return;
-	}
-#else
-static void GETF2_NOPIVOT(int m, int n, REAL *A, int lda)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int i, j;
-	int jmax = m<n ? m : n;
-	REAL dtmp;
-	REAL dm1 = -1.0;
-	int itmp0, itmp1;
-	int i1 = 1;
-	for(j=0; j<jmax; j++)
-		{
-		itmp0 = m-j-1;
-		dtmp = 1.0/A[j+lda*j];
-		SCAL(&itmp0, &dtmp, &A[(j+1)+lda*j], &i1);
-		itmp1 = n-j-1;
-		GER(&itmp0, &itmp1, &dm1, &A[(j+1)+lda*j], &i1, &A[j+lda*(j+1)], &lda, &A[(j+1)+lda*(j+1)], &lda);
-		}
-	return;
-	}
-#endif
-
-
-
-// dgetrf without pivoting
-void GETRF_NOPIVOT_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj)
-	{
-	// TODO with custom level 2 LAPACK + level 3 BLAS
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	REAL d1 = 1.0;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long mm = m;
-	long long nn = n;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GETF2_NOPIVOT(mm, nn, pD, ldd);
-#else
-	int i1 = 1;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GETF2_NOPIVOT(m, n, pD, ldd);
-#endif
-	return;
-	}
-
-
-
-// dgetrf pivoting
-void GETRF_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj, int *ipiv)
-	{
-	// TODO with custom level 2 LAPACK + level 3 BLAS
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	int tmp = m<n ? m : n;
-	REAL d1 = 1.0;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long info;
-	long long mm = m;
-	long long nn = n;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GETRF(&mm, &nn, pD, &ldd, (long long *) ipiv, &info);
-	for(jj=0; jj<tmp; jj++)
-		ipiv[jj] -= 1;
-#else
-	int i1 = 1;
-	int info;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-	GETRF(&m, &n, pD, &ldd, ipiv, &info);
-	for(jj=0; jj<tmp; jj++)
-		ipiv[jj] -= 1;
-#endif
-	return;
-	}
-
-
-
-int GEQRF_WORK_SIZE_LIBSTR(int m, int n)
-	{
-	REAL dwork;
-	REAL *pD, *dD;
-#if defined(REF_BLAS_BLIS)
-	long long mm = m;
-	long long nn = n;
-	long long lwork = -1;
-	long long info;
-	long long ldd = mm;
-	GEQRF(&mm, &nn, pD, &ldd, dD, &dwork, &lwork, &info);
-#else
-	int lwork = -1;
-	int info;
-	int ldd = m;
-	GEQRF(&m, &n, pD, &ldd, dD, &dwork, &lwork, &info);
-#endif
-	int size = dwork;
-	return size*sizeof(REAL);
-	}
-
-
-
-void GEQRF_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-	REAL *dD = sD->dA+di;
-	REAL *dwork = (REAL *) work;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long info = -1;
-	long long mm = m;
-	long long nn = n;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-//	GEQR2(&mm, &nn, pD, &ldd, dD, dwork, &info);
-	long long lwork = -1;
-	GEQRF(&mm, &nn, pD, &ldd, dD, dwork, &lwork, &info);
-	lwork = dwork[0];
-	GEQRF(&mm, &nn, pD, &ldd, dD, dwork, &lwork, &info);
-#else
-	int i1 = 1;
-	int info = -1;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-//	GEQR2(&m, &n, pD, &ldd, dD, dwork, &info);
-	int lwork = -1;
-	GEQRF(&m, &n, pD, &ldd, dD, dwork, &lwork, &info);
-	lwork = dwork[0];
-	GEQRF(&m, &n, pD, &ldd, dD, dwork, &lwork, &info);
-#endif
-	return;
-	}
-
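
The BLAS-branch GEQRF above uses LAPACK's standard two-pass idiom: a first call with lwork = -1 only reports the optimal workspace size, and a second call does the factorization. A hedged standalone sketch of that idiom against the plain Fortran symbol dgeqrf_ follows (assuming an LP64 LAPACK; link with -llapack; the matrix data is invented).

/* Hedged sketch of the LAPACK workspace-query idiom used above. */
#include <stdio.h>
#include <stdlib.h>

void dgeqrf_(const int *m, const int *n, double *a, const int *lda,
             double *tau, double *work, const int *lwork, int *info);

int main()
	{
	int ii, m = 4, n = 3, lda = 4, lwork = -1, info = 0;
	double A[12], tau[3], work_query = 0.0;
	for(ii=0; ii<12; ii++) A[ii] = (double)(ii%5 + 1);          // column-major data
	dgeqrf_(&m, &n, A, &lda, tau, &work_query, &lwork, &info);  // workspace query
	lwork = (int) work_query;
	double *work = malloc(lwork*sizeof(double));
	dgeqrf_(&m, &n, A, &lda, tau, work, &lwork, &info);         // factorization
	printf("info = %d, diag(R) = %f %f %f\n", info, A[0], A[5], A[10]);
	free(work);
	return 0;
	}
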
-
-
-int GELQF_WORK_SIZE_LIBSTR(int m, int n)
-	{
-	REAL dwork;
-	REAL *pD, *dD;
-#if defined(REF_BLAS_BLIS)
-	long long mm = m;
-	long long nn = n;
-	long long lwork = -1;
-	long long info;
-	long long ldd = mm;
-	GELQF(&mm, &nn, pD, &ldd, dD, &dwork, &lwork, &info);
-#else
-	int lwork = -1;
-	int info;
-	int ldd = m;
-	GELQF(&m, &n, pD, &ldd, dD, &dwork, &lwork, &info);
-#endif
-	int size = dwork;
-	return size*sizeof(REAL);
-	}
-
-
-
-void GELQF_LIBSTR(int m, int n, struct STRMAT *sC, int ci, int cj, struct STRMAT *sD, int di, int dj, void *work)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int jj;
-	REAL *pC = sC->pA+ci+cj*sC->m;
-	REAL *pD = sD->pA+di+dj*sD->m;
-	REAL *dD = sD->dA+di;
-	REAL *dwork = (REAL *) work;
-#if defined(REF_BLAS_BLIS)
-	long long i1 = 1;
-	long long info = -1;
-	long long mm = m;
-	long long nn = n;
-	long long ldc = sC->m;
-	long long ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&mm, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-//	GEQR2(&mm, &nn, pD, &ldd, dD, dwork, &info);
-	long long lwork = -1;
-	GELQF(&mm, &nn, pD, &ldd, dD, dwork, &lwork, &info);
-	lwork = dwork[0];
-	GELQF(&mm, &nn, pD, &ldd, dD, dwork, &lwork, &info);
-#else
-	int i1 = 1;
-	int info = -1;
-	int ldc = sC->m;
-	int ldd = sD->m;
-	if(!(pC==pD))
-		{
-		for(jj=0; jj<n; jj++)
-			COPY(&m, pC+jj*ldc, &i1, pD+jj*ldd, &i1);
-		}
-//	GEQR2(&m, &n, pD, &ldd, dD, dwork, &info);
-	int lwork = -1;
-	GELQF(&m, &n, pD, &ldd, dD, dwork, &lwork, &info);
-	lwork = dwork[0];
-	GELQF(&m, &n, pD, &ldd, dD, dwork, &lwork, &info);
-#endif
-	return;
-	}
-
-
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-
-
-
diff --git a/third_party/blasfeo/blasfeo_target.h.in b/third_party/blasfeo/blasfeo_target.h.in
deleted file mode 100644
index a98ac81..0000000
--- a/third_party/blasfeo/blasfeo_target.h.in
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef TARGET_@TARGET@
-#define TARGET_@TARGET@
-#endif
-
-#ifndef LA_@LA@
-#define LA_@LA@
-#endif
-
-#ifndef EXT_DEP
-#cmakedefine EXT_DEP @EXT_DEP@
-#endif
diff --git a/third_party/blasfeo/doc/guide.pdf b/third_party/blasfeo/doc/guide.pdf
deleted file mode 100644
index 9f81df3..0000000
--- a/third_party/blasfeo/doc/guide.pdf
+++ /dev/null
Binary files differ
diff --git a/third_party/blasfeo/doc/guide.tex b/third_party/blasfeo/doc/guide.tex
deleted file mode 100644
index 626eaa4..0000000
--- a/third_party/blasfeo/doc/guide.tex
+++ /dev/null
@@ -1,149 +0,0 @@
-\documentclass[a4paper]{report}
-
-\usepackage[margin=3.0cm]{geometry}
-\usepackage{amsmath}
-\usepackage[pdftex]{graphicx}
-%\usepackage{graphics}
-\usepackage{subfig}
-
-
-
-\title{BLASFEO reference guide}
-\author{Gianluca Frison}
-
-
-
-\begin{document}
-
-\maketitle
-\tableofcontents
-
-
-
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\chapter{Introduction}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-BLASFEO - BLAS For Embedded Optimization.
-
-
-
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\chapter{Matrix data type}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-The fundamental data type in BLASFEO is a C struct defining a matrix, called {\tt strmat}.
-Depending on the chosen linear algebra library, the struct is defined differently.
-
-
-
-\section{{\tt strmat} definition}
-
-
-\subsection{BLASFEO}
-
-\begin{verbatim}
-struct d_strmat 
-	{
-	int bs;
-	int m;
-	int n;
-	int pm;
-	int cn;
-	double *pA;
-	double *dA;
-	int use_dA;
-	int memory_size;
-	};
-\end{verbatim}
-where the struct members are
-\begin{description}
-\item[bs] height of the panel
-\item[m] number of rows
-\item[n] number of columns
-\item[pm] number of rows of the matrix as allocated in memory, used for memory alignment
-\item[cn] number of columns of the matrix as allocated in memory, used for memory alignment
-\item[pA] pointer to a pm$\times$cn array of doubles, the first element is aligned to cache line size
-\item[dA] pointer to a min(m,n) array of doubles, used e.g. to store the inverse of the diagonal of the matrix
-\item[use\_dA] flag to tell if dA contains useful information
-\item[memory\_size] size of the memory (in bytes) needed for pA and dA
-\end{description}
-
-
-\subsection{BLAS}
-
-\begin{verbatim}
-struct d_strmat 
-	{
-	int m; // rows
-	int n; // cols
-	double *pA; // pointer to a m*n array of doubles
-	int memory_size; // size of needed memory
-	};
-\end{verbatim}
-\begin{description}
-\item[m] number of rows
-\item[n] number of columns
-\item[pA] pointer to a m$\times$n array of doubles
-\item[memory\_size] size of the memory (in bytes) needed for pA
-\end{description}
-
-
-
-\section{{\tt strmat} management}
-
-\begin{verbatim}
-void d_allocate_strmat(int m, int n, struct d_strmat *sA);
-\end{verbatim}
-
-\begin{verbatim}
-void d_free_strmat(struct d_strmat *sA);
-\end{verbatim}
-
-\begin{verbatim}
-int d_size_strmat(int m, int n);
-\end{verbatim}
-
-\begin{verbatim}
-void d_create_strmat(int m, int n, struct d_strmat *sA, void *memory);
-\end{verbatim}
-
-
-
-\section{{\tt strmat} conversion}
-
-\begin{verbatim}
-void d_cvt_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, 
-     int ai, int aj);
-\end{verbatim}
-
-\begin{verbatim}
-void d_cvt_tran_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, 
-     int ai, int aj);
-\end{verbatim}
-
-\begin{verbatim}
-void d_cvt_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, 
-     double *A, int lda);
-\end{verbatim}
-
-\begin{verbatim}
-void d_cvt_tran_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, 
-     double *A, int lda);
-\end{verbatim}
-
-
-
-\section{{\tt strmat} print}
-
-\begin{verbatim}
-void d_print_strmat(int m, int n, struct d_strmat *sA, int ai, int aj);
-\end{verbatim}
-
-
-
-\end{document}
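
The management and conversion routines listed in the guide above are the same ones exercised by the deleted example programs. A minimal hedged usage sketch follows; the include paths and the 4x4 data are assumptions, and real code should use an aligned allocator (e.g. v_zeros_align, as the examples do) rather than plain malloc.

/* Hedged usage sketch for the strmat API documented in the guide above. */
#include <stdlib.h>
#include "blasfeo_common.h"
#include "blasfeo_d_aux.h"
#include "blasfeo_d_aux_ext_dep.h"

int main()
	{
	int ii, n = 4;
	double A[16];
	for(ii=0; ii<16; ii++) A[ii] = ii;        // column-major input matrix
	struct d_strmat sA;
	void *mem = malloc(d_size_strmat(n, n));  // query the size, then create in place
	d_create_strmat(n, n, &sA, mem);
	d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);  // column-major -> strmat
	d_print_strmat(n, n, &sA, 0, 0);
	free(mem);
	return 0;
	}
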
diff --git a/third_party/blasfeo/examples/Makefile b/third_party/blasfeo/examples/Makefile
deleted file mode 100644
index 7204cba..0000000
--- a/third_party/blasfeo/examples/Makefile
+++ /dev/null
@@ -1,69 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../Makefile.rule
-
-ifeq ($(REF_BLAS), 0)
-LIBS = -lm 
-endif
-ifeq ($(REF_BLAS), OPENBLAS)
-LIBS = /opt/openblas/lib/libopenblas.a -pthread -lm
-endif
-ifeq ($(REF_BLAS), BLIS)
-LIBS = -lblis -lm -fopenmp
-endif
-ifeq ($(REF_BLAS), NETLIB)
-LIBS = /opt/netlib/liblapack.a /opt/netlib/libblas.a -lgfortran -lm
-endif
-ifeq ($(REF_BLAS), MKL)
-LIBS = -Wl,--start-group /opt/intel/mkl/lib/intel64/libmkl_gf_lp64.a /opt/intel/mkl/lib/intel64/libmkl_core.a /opt/intel/mkl/lib/intel64/libmkl_sequential.a -Wl,--end-group -ldl -lpthread -lm
-endif
-
-ifneq ($(NUM_THREAD), 1)
-LIBS += -pthread 
-endif
-
-#OBJS_TEST = example_d_lu_factorization.o
-#OBJS_TEST = example_s_lu_factorization.o
-OBJS_TEST = tools.o example_d_riccati_recursion.o
-#OBJS_TEST = tools.o example_s_riccati_recursion.o
-
-all: clean obj run
-
-obj: $(OBJS_TEST)
-	cp ../libblasfeo.a .
-	$(CC) -o test.out $(OBJS_TEST) -L. libblasfeo.a $(LIBS) #-pg
-
-run:
-	./test.out
-
-clean:
-	rm -f *.o
-	rm -f test.out
-	rm -f libblasfeo.a
-
diff --git a/third_party/blasfeo/examples/example_d_lu_factorization.c b/third_party/blasfeo/examples/example_d_lu_factorization.c
deleted file mode 100644
index 62b3413..0000000
--- a/third_party/blasfeo/examples/example_d_lu_factorization.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_v_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-
-int main()
-	{
-
-	printf("\nExample of LU factorization and backsolve\n\n");
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by BLASFEO\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	int ii;
-
-	int n = 16;
-
-	//
-	// matrices in column-major format
-	//
-
-	double *A; d_zeros(&A, n, n);
-	for(ii=0; ii<n*n; ii++) A[ii] = ii;
-//	d_print_mat(n, n, A, n);
-
-	// spd matrix
-	double *B; d_zeros(&B, n, n);
-	for(ii=0; ii<n; ii++) B[ii*(n+1)] = 1.0;
-//	d_print_mat(n, n, B, n);
-
-	// identity
-	double *I; d_zeros(&I, n, n);
-	for(ii=0; ii<n; ii++) I[ii*(n+1)] = 1.0;
-//	d_print_mat(n, n, B, n);
-
-	// result matrix
-	double *D; d_zeros(&D, n, n);
-//	d_print_mat(n, n, D, n);
-
-	// permutation indices
-	int *ipiv; int_zeros(&ipiv, n, 1);
-
-	//
-	// matrices in matrix struct format
-	//
-
-	// work space enough for 5 matrix structs for size n times n
-	int size_strmat = 5*d_size_strmat(n, n);
-	void *memory_strmat; v_zeros_align(&memory_strmat, size_strmat);
-	char *ptr_memory_strmat = (char *) memory_strmat;
-
-	struct d_strmat sA;
-//	d_allocate_strmat(n, n, &sA);
-	d_create_strmat(n, n, &sA, ptr_memory_strmat);
-	ptr_memory_strmat += sA.memory_size;
-	// convert from column major matrix to strmat
-	d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-	printf("\nA = \n");
-	d_print_strmat(n, n, &sA, 0, 0);
-
-	struct d_strmat sB;
-//	d_allocate_strmat(n, n, &sB);
-	d_create_strmat(n, n, &sB, ptr_memory_strmat);
-	ptr_memory_strmat += sB.memory_size;
-	// convert from column major matrix to strmat
-	d_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-	printf("\nB = \n");
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	struct d_strmat sI;
-//	d_allocate_strmat(n, n, &sI);
-	d_create_strmat(n, n, &sI, ptr_memory_strmat);
-	ptr_memory_strmat += sI.memory_size;
-	// conversion from column-major to strmat happens later, in the backsolve section below
-
-	struct d_strmat sD;
-//	d_allocate_strmat(n, n, &sD);
-	d_create_strmat(n, n, &sD, ptr_memory_strmat);
-	ptr_memory_strmat += sD.memory_size;
-
-	struct d_strmat sLU;
-//	d_allocate_strmat(n, n, &sD);
-	d_create_strmat(n, n, &sLU, ptr_memory_strmat);
-	ptr_memory_strmat += sLU.memory_size;
-
-	dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-	printf("\nB+A*A' = \n");
-	d_print_strmat(n, n, &sD, 0, 0);
-
-//	dgetrf_nopivot_libstr(n, n, &sD, 0, 0, &sD, 0, 0);
-	dgetrf_libstr(n, n, &sD, 0, 0, &sLU, 0, 0, ipiv);
-	printf("\nLU = \n");
-	d_print_strmat(n, n, &sLU, 0, 0);
-	printf("\nipiv = \n");
-	int_print_mat(1, n, ipiv, 1);
-
-#if 0 // solve P L U X = P B
-	d_cvt_mat2strmat(n, n, I, n, &sI, 0, 0);
-	printf("\nI = \n");
-	d_print_strmat(n, n, &sI, 0, 0);
-
-	drowpe_libstr(n, ipiv, &sI);
-	printf("\nperm(I) = \n");
-	d_print_strmat(n, n, &sI, 0, 0);
-
-	dtrsm_llnu_libstr(n, n, 1.0, &sLU, 0, 0, &sI, 0, 0, &sD, 0, 0);
-	printf("\nperm(inv(L)) = \n");
-	d_print_strmat(n, n, &sD, 0, 0);
-	dtrsm_lunn_libstr(n, n, 1.0, &sLU, 0, 0, &sD, 0, 0, &sD, 0, 0);
-	printf("\ninv(A) = \n");
-	d_print_strmat(n, n, &sD, 0, 0);
-
-	// convert from strmat to column major matrix
-	d_cvt_strmat2mat(n, n, &sD, 0, 0, D, n);
-#else // solve X^T (P L U)^T = B^T P^T
-	d_cvt_tran_mat2strmat(n, n, I, n, &sI, 0, 0);
-	printf("\nI' = \n");
-	d_print_strmat(n, n, &sI, 0, 0);
-
-	dcolpe_libstr(n, ipiv, &sB);
-	printf("\nperm(I') = \n");
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	dtrsm_rltu_libstr(n, n, 1.0, &sLU, 0, 0, &sB, 0, 0, &sD, 0, 0);
-	printf("\nperm(inv(L')) = \n");
-	d_print_strmat(n, n, &sD, 0, 0);
-	dtrsm_rutn_libstr(n, n, 1.0, &sLU, 0, 0, &sD, 0, 0, &sD, 0, 0);
-	printf("\ninv(A') = \n");
-	d_print_strmat(n, n, &sD, 0, 0);
-
-	// convert from strmat to column major matrix
-	d_cvt_tran_strmat2mat(n, n, &sD, 0, 0, D, n);
-#endif
-
-	// print matrix in column-major format
-	printf("\ninv(A) = \n");
-	d_print_mat(n, n, D, n);
-
-
-
-	//
-	// free memory
-	//
-
-	d_free(A);
-	d_free(B);
-	d_free(D);
-	d_free(I);
-	int_free(ipiv);
-//	d_free_strmat(&sA);
-//	d_free_strmat(&sB);
-//	d_free_strmat(&sD);
-//	d_free_strmat(&sI);
-	v_free_align(memory_strmat);
-
-	return 0;
-	
-	}
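
The example above drives the whole flow through BLASFEO's strmat API: convert column-major data, factorize with row pivoting, then solve with two triangular backsolves. A minimal stand-alone sketch of the same factorize-then-backsolve flow in plain C on column-major arrays (not BLASFEO code; the helper names and matrix values are assumed for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// hypothetical helper: LU factorization with partial pivoting, column-major;
// A is overwritten by its L and U factors, ipiv records the row swaps
static void lu_factorize(int n, double *A, int *ipiv)
	{
	int ii, jj, kk;
	for(kk=0; kk<n; kk++)
		{
		// pick the largest entry in column kk as pivot
		int pp = kk;
		for(ii=kk+1; ii<n; ii++)
			if(fabs(A[ii+kk*n]) > fabs(A[pp+kk*n])) pp = ii;
		ipiv[kk] = pp;
		if(pp!=kk)
			for(jj=0; jj<n; jj++)
				{
				double tmp = A[kk+jj*n]; A[kk+jj*n] = A[pp+jj*n]; A[pp+jj*n] = tmp;
				}
		// eliminate below the pivot
		for(ii=kk+1; ii<n; ii++)
			{
			A[ii+kk*n] /= A[kk+kk*n];
			for(jj=kk+1; jj<n; jj++)
				A[ii+jj*n] -= A[ii+kk*n]*A[kk+jj*n];
			}
		}
	}

// hypothetical helper: solve A x = b using the factors and pivots above,
// overwriting b with the solution
static void lu_solve(int n, const double *A, const int *ipiv, double *b)
	{
	int ii, jj, kk;
	// apply the recorded row swaps to the right-hand side
	for(kk=0; kk<n; kk++)
		{
		double tmp = b[kk]; b[kk] = b[ipiv[kk]]; b[ipiv[kk]] = tmp;
		}
	// forward substitution with the unit lower triangular factor
	for(ii=1; ii<n; ii++)
		for(jj=0; jj<ii; jj++)
			b[ii] -= A[ii+jj*n]*b[jj];
	// backward substitution with the upper triangular factor
	for(ii=n-1; ii>=0; ii--)
		{
		for(jj=ii+1; jj<n; jj++)
			b[ii] -= A[ii+jj*n]*b[jj];
		b[ii] /= A[ii+ii*n];
		}
	}

int main()
	{
	int n = 3;
	// column-major A and a right-hand side chosen so that x = [1 2 3]'
	double A[9] = {4, 2, 1,  3, 5, 2,  1, 1, 6};
	double b[3] = {13, 15, 23};
	int ipiv[3];
	lu_factorize(n, A, ipiv);
	lu_solve(n, A, ipiv, b);
	printf("x = %f %f %f (expected 1 2 3)\n", b[0], b[1], b[2]);
	return 0;
	}
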
diff --git a/third_party/blasfeo/examples/example_d_riccati_recursion.c b/third_party/blasfeo/examples/example_d_riccati_recursion.c
deleted file mode 100644
index 1618ce9..0000000
--- a/third_party/blasfeo/examples/example_d_riccati_recursion.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "tools.h"
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-
-
-static void d_back_ric_sv_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strvec *hsux, struct d_strvec *hspi, struct d_strmat *hswork_mat, struct d_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// factorization and backward substitution
-
-	// last stage
-	dpotrf_l_libstr(nx[N]+1, nx[N], &hsRSQrq[N], 0, 0, &hsL[N], 0, 0);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		dtrmm_rlnn_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn], nu[N-nn], &hsBAbt[N-nn-1], 0, 0, &hswork_mat[0], 0, 0);
-		dgead_libstr(1, nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn]+nx[N-nn], nu[N-nn], &hswork_mat[0], nu[N-nn-1]+nx[N-nn-1], 0);
-#if 1
-		dsyrk_dpotrf_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#else
-		dsyrk_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-		dpotrf_l_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#endif
-		}
-	
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	drowex_libstr(nu[nn]+nx[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-	dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	drowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-	dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	drowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	dtrmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-	daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-	dtrmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		drowex_libstr(nu[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-		dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		drowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-		dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		drowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		dtrmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-		daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		dtrmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-static void d_back_ric_trf_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hswork_mat)
-	{
-
-	int nn;
-
-	// factorization
-
-	// last stage
-	dpotrf_l_libstr(nx[N], nx[N], &hsRSQrq[N], 0, 0, &hsL[N], 0, 0);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		dtrmm_rlnn_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn], nu[N-nn], &hsBAbt[N-nn-1], 0, 0, &hswork_mat[0], 0, 0);
-#if 1
-		dsyrk_dpotrf_ln_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], nx[N-nn], &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#else
-		dsyrk_ln_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-		dpotrf_l_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#endif
-		}
-	
-	return;
-
-	}
-
-
-
-static void d_back_ric_trs_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strvec *hsb, struct d_strvec *hsrq, struct d_strmat *hsL, struct d_strvec *hsPb, struct d_strvec *hsux, struct d_strvec *hspi, struct d_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// backward substitution
-
-	// last stage
-	dveccp_libstr(nu[N]+nx[N], 1.0, &hsrq[N], 0, &hsux[N], 0);
-
-	// middle stages
-	for(nn=0; nn<N-1; nn++)
-		{
-		// compute Pb
-		dtrmv_ltn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		dtrmv_lnn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		dveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-		dveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-		daxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-		dgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		dtrsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		}
-
-	// first stage
-	nn = N-1;
-	dtrmv_ltn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	dtrmv_lnn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	dveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-	dveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-	daxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-	dgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-	dtrsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	dveccp_libstr(nu[nn]+nx[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-	dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	dtrmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-	dtrmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-	daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		dveccp_libstr(nu[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-		dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		dtrmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-		dtrmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-		daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-/************************************************ 
-Mass-spring system: nx/2 masses connected to each other with springs (in a row), with the first and the last mass also connected to walls. nu (<=nx) controls act on the first nu masses. The system is sampled with sampling time Ts.
-************************************************/
-static void d_mass_spring_system(double Ts, int nx, int nu, int N, double *A, double *B, double *b, double *x0)
-	{
-
-	int nx2 = nx*nx;
-
-	int info = 0;
-
-	int pp = nx/2; // number of masses
-	
-/************************************************
-* build the continuous time system 
-************************************************/
-	
-	double *T; d_zeros(&T, pp, pp);
-	int ii;
-	for(ii=0; ii<pp; ii++) T[ii*(pp+1)] = -2;
-	for(ii=0; ii<pp-1; ii++) T[ii*(pp+1)+1] = 1;
-	for(ii=1; ii<pp; ii++) T[ii*(pp+1)-1] = 1;
-
-	double *Z; d_zeros(&Z, pp, pp);
-	double *I; d_zeros(&I, pp, pp); for(ii=0; ii<pp; ii++) I[ii*(pp+1)]=1.0; // = eye(pp);
-	double *Ac; d_zeros(&Ac, nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac, nx);
-	dmcopy(pp, pp, T, pp, Ac+pp, nx);
-	dmcopy(pp, pp, I, pp, Ac+pp*nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac+pp*(nx+1), nx); 
-	free(T);
-	free(Z);
-	free(I);
-	
-	d_zeros(&I, nu, nu); for(ii=0; ii<nu; ii++) I[ii*(nu+1)]=1.0; //I = eye(nu);
-	double *Bc; d_zeros(&Bc, nx, nu);
-	dmcopy(nu, nu, I, nu, Bc+pp, nx);
-	free(I);
-	
-/************************************************
-* compute the discrete time system 
-************************************************/
-
-	double *bb; d_zeros(&bb, nx, 1);
-	dmcopy(nx, 1, bb, nx, b, nx);
-		
-	dmcopy(nx, nx, Ac, nx, A, nx);
-	dscal_3l(nx2, Ts, A);
-	expm(nx, A);
-	
-	d_zeros(&T, nx, nx);
-	d_zeros(&I, nx, nx); for(ii=0; ii<nx; ii++) I[ii*(nx+1)]=1.0; //I = eye(nx);
-	dmcopy(nx, nx, A, nx, T, nx);
-	daxpy_3l(nx2, -1.0, I, T);
-	dgemm_nn_3l(nx, nu, nx, T, nx, Bc, nx, B, nx);
-	free(T);
-	free(I);
-	
-	int *ipiv = (int *) malloc(nx*sizeof(int));
-	dgesv_3l(nx, nu, Ac, nx, ipiv, B, nx, &info);
-	free(ipiv);
-
-	free(Ac);
-	free(Bc);
-	free(bb);
-	
-			
-/************************************************
-* initial state 
-************************************************/
-	
-	if(nx==4)
-		{
-		x0[0] = 5;
-		x0[1] = 10;
-		x0[2] = 15;
-		x0[3] = 20;
-		}
-	else
-		{
-		int jj;
-		for(jj=0; jj<nx; jj++)
-			x0[jj] = 1;
-		}
-
-	}
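
d_mass_spring_system builds the continuous-time model and then discretizes it exactly: A becomes expm(Ac*Ts) and B becomes Ac^{-1}(expm(Ac*Ts) - I)Bc via the final linear solve. A minimal sketch of that zero-order-hold step for a scalar system, with assumed coefficients and compared against forward Euler (stand-alone, not part of the deleted file):

#include <stdio.h>
#include <math.h>

int main()
	{
	// assumed scalar continuous-time dynamics dx/dt = ac*x + bc*u
	double ac = -2.0;
	double bc = 1.0;
	double Ts = 0.5; // sampling time, as in the example above

	// exact zero-order-hold discretization: ad = exp(ac*Ts),
	// bd = (ad - 1)/ac * bc -- the scalar analogue of expm() followed by
	// the linear solve in d_mass_spring_system
	double ad = exp(ac*Ts);
	double bd = (ad - 1.0)/ac*bc;

	// forward-Euler approximation, for comparison
	double ad_euler = 1.0 + ac*Ts;
	double bd_euler = bc*Ts;

	printf("exact: ad = %f  bd = %f\n", ad, bd);
	printf("euler: ad = %f  bd = %f\n", ad_euler, bd_euler);
	return 0;
	}
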
-
-
-
-int main()
-	{
-
-	printf("\nExample of LU factorization and backsolve\n\n");
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by BLASFEO\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	// loop index
-	int ii;
-
-/************************************************
-* problem size
-************************************************/	
-
-	// problem size
-	int N = 4;
-	int nx_ = 4;
-	int nu_ = 1;
-
-	// stage-wise variable sizes
-	int nx[N+1];
-	nx[0] = 0;
-	for(ii=1; ii<=N; ii++)
-		nx[ii] = nx_;
-	nx[N] = nx_;
-
-	int nu[N+1];
-	for(ii=0; ii<N; ii++)
-		nu[ii] = nu_;
-	nu[N] = 0;
-
-/************************************************
-* dynamical system
-************************************************/	
-
-	double *A; d_zeros(&A, nx_, nx_); // states update matrix
-
-	double *B; d_zeros(&B, nx_, nu_); // inputs matrix
-
-	double *b; d_zeros(&b, nx_, 1); // states offset
-	double *x0; d_zeros(&x0, nx_, 1); // initial state
-
-	double Ts = 0.5; // sampling time
-	d_mass_spring_system(Ts, nx_, nu_, N, A, B, b, x0);
-	
-	for(ii=0; ii<nx_; ii++)
-		b[ii] = 0.1;
-	
-	for(ii=0; ii<nx_; ii++)
-		x0[ii] = 0;
-	x0[0] = 2.5;
-	x0[1] = 2.5;
-
-	d_print_mat(nx_, nx_, A, nx_);
-	d_print_mat(nx_, nu_, B, nx_);
-	d_print_mat(1, nx_, b, 1);
-	d_print_mat(1, nx_, x0, 1);
-
-/************************************************
-* cost function
-************************************************/	
-
-	double *R; d_zeros(&R, nu_, nu_);
-	for(ii=0; ii<nu_; ii++) R[ii*(nu_+1)] = 2.0;
-
-	double *S; d_zeros(&S, nu_, nx_);
-
-	double *Q; d_zeros(&Q, nx_, nx_);
-	for(ii=0; ii<nx_; ii++) Q[ii*(nx_+1)] = 1.0;
-
-	double *r; d_zeros(&r, nu_, 1);
-	for(ii=0; ii<nu_; ii++) r[ii] = 0.2;
-
-	double *q; d_zeros(&q, nx_, 1);
-	for(ii=0; ii<nx_; ii++) q[ii] = 0.1;
-
-	d_print_mat(nu_, nu_, R, nu_);
-	d_print_mat(nu_, nx_, S, nu_);
-	d_print_mat(nx_, nx_, Q, nx_);
-	d_print_mat(1, nu_, r, 1);
-	d_print_mat(1, nx_, q, 1);
-
-/************************************************
-* matrices as strmat
-************************************************/	
-
-	struct d_strmat sA;
-	d_allocate_strmat(nx_, nx_, &sA);
-	d_cvt_mat2strmat(nx_, nx_, A, nx_, &sA, 0, 0);
-	struct d_strvec sb;
-	d_allocate_strvec(nx_, &sb);
-	d_cvt_vec2strvec(nx_, b, &sb, 0);
-	struct d_strvec sx0;
-	d_allocate_strvec(nx_, &sx0);
-	d_cvt_vec2strvec(nx_, x0, &sx0, 0);
-	struct d_strvec sb0;
-	d_allocate_strvec(nx_, &sb0);
-	double *b0; d_zeros(&b0, nx_, 1); // states offset
-	dgemv_n_libstr(nx_, nx_, 1.0, &sA, 0, 0, &sx0, 0, 1.0, &sb, 0, &sb0, 0);
-	d_print_tran_strvec(nx_, &sb0, 0);
-
-	struct d_strmat sBbt0;
-	d_allocate_strmat(nu_+nx_+1, nx_, &sBbt0);
-	d_cvt_tran_mat2strmat(nx_, nx_, B, nx_, &sBbt0, 0, 0);
-	drowin_libstr(nx_, 1.0, &sb0, 0, &sBbt0, nu_, 0);
-	d_print_strmat(nu_+1, nx_, &sBbt0, 0, 0);
-
-	struct d_strmat sBAbt1;
-	d_allocate_strmat(nu_+nx_+1, nx_, &sBAbt1);
-	d_cvt_tran_mat2strmat(nx_, nu_, B, nx_, &sBAbt1, 0, 0);
-	d_cvt_tran_mat2strmat(nx_, nx_, A, nx_, &sBAbt1, nu_, 0);
-	d_cvt_tran_mat2strmat(nx_, 1, b, nx_, &sBAbt1, nu_+nx_, 0);
-	d_print_strmat(nu_+nx_+1, nx_, &sBAbt1, 0, 0);
-
-	struct d_strvec sr0; // XXX no need to update r0 since S=0
-	d_allocate_strvec(nu_, &sr0);
-	d_cvt_vec2strvec(nu_, r, &sr0, 0);
-
-	struct d_strmat sRr0;
-	d_allocate_strmat(nu_+1, nu_, &sRr0);
-	d_cvt_mat2strmat(nu_, nu_, R, nu_, &sRr0, 0, 0);
-	drowin_libstr(nu_, 1.0, &sr0, 0, &sRr0, nu_, 0);
-	d_print_strmat(nu_+1, nu_, &sRr0, 0, 0);
-
-	struct d_strvec srq1;
-	d_allocate_strvec(nu_+nx_, &srq1);
-	d_cvt_vec2strvec(nu_, r, &srq1, 0);
-	d_cvt_vec2strvec(nx_, q, &srq1, nu_);
-
-	struct d_strmat sRSQrq1;
-	d_allocate_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1);
-	d_cvt_mat2strmat(nu_, nu_, R, nu_, &sRSQrq1, 0, 0);
-	d_cvt_tran_mat2strmat(nu_, nx_, S, nu_, &sRSQrq1, nu_, 0);
-	d_cvt_mat2strmat(nx_, nx_, Q, nx_, &sRSQrq1, nu_, nu_);
-	drowin_libstr(nu_+nx_, 1.0, &srq1, 0, &sRSQrq1, nu_+nx_, 0);
-	d_print_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1, 0, 0);
-
-	struct d_strvec sqN;
-	d_allocate_strvec(nx_, &sqN);
-	d_cvt_vec2strvec(nx_, q, &sqN, 0);
-
-	struct d_strmat sQqN;
-	d_allocate_strmat(nx_+1, nx_, &sQqN);
-	d_cvt_mat2strmat(nx_, nx_, Q, nx_, &sQqN, 0, 0);
-	drowin_libstr(nx_, 1.0, &sqN, 0, &sQqN, nx_, 0);
-	d_print_strmat(nx_+1, nx_, &sQqN, 0, 0);
-
-/************************************************
-* array of matrices
-************************************************/	
-	
-	struct d_strmat hsBAbt[N];
-	struct d_strvec hsb[N];
-	struct d_strmat hsRSQrq[N+1];
-	struct d_strvec hsrq[N+1];
-	struct d_strmat hsL[N+1];
-	struct d_strvec hsPb[N];
-	struct d_strvec hsux[N+1];
-	struct d_strvec hspi[N];
-	struct d_strmat hswork_mat[1];
-	struct d_strvec hswork_vec[1];
-
-	hsBAbt[0] = sBbt0;
-	hsb[0] = sb0;
-	hsRSQrq[0] = sRr0;
-	hsrq[0] = sr0;
-	d_allocate_strmat(nu_+1, nu_, &hsL[0]);
-	d_allocate_strvec(nx_, &hsPb[0]);
-	d_allocate_strvec(nx_+nu_+1, &hsux[0]);
-	d_allocate_strvec(nx_, &hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		hsBAbt[ii] = sBAbt1;
-		hsb[ii] = sb;
-		hsRSQrq[ii] = sRSQrq1;
-		hsrq[ii] = srq1;
-		d_allocate_strmat(nu_+nx_+1, nu_+nx_, &hsL[ii]);
-		d_allocate_strvec(nx_, &hsPb[ii]);
-		d_allocate_strvec(nx_+nu_+1, &hsux[ii]);
-		d_allocate_strvec(nx_, &hspi[ii]);
-		}
-	hsRSQrq[N] = sQqN;
-	hsrq[N] = sqN;
-	d_allocate_strmat(nx_+1, nx_, &hsL[N]);
-	d_allocate_strvec(nx_+nu_+1, &hsux[N]);
-	d_allocate_strmat(nu_+nx_+1, nx_, &hswork_mat[0]);
-	d_allocate_strvec(nx_, &hswork_vec[0]);
-
-//	for(ii=0; ii<N; ii++)
-//		d_print_strmat(nu[ii]+nx[ii]+1, nx[ii+1], &hsBAbt[ii], 0, 0);
-//	return 0;
-
-/************************************************
-* call Riccati solver
-************************************************/	
-	
-	// timing 
-	struct timeval tv0, tv1, tv2, tv3;
-	int nrep = 1000;
-	int rep;
-
-	gettimeofday(&tv0, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		d_back_ric_sv_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hsux, hspi, hswork_mat, hswork_vec);
-		}
-
-	gettimeofday(&tv1, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		d_back_ric_trf_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hswork_mat);
-		}
-
-	gettimeofday(&tv2, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		d_back_ric_trs_libstr(N, nx, nu, hsBAbt, hsb, hsrq, hsL, hsPb, hsux, hspi, hswork_vec);
-		}
-
-	gettimeofday(&tv3, NULL); // time
-
-	float time_sv  = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-	float time_trf = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-	float time_trs = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-	// print sol
-	printf("\nux = \n\n");
-	for(ii=0; ii<=N; ii++)
-		d_print_tran_strvec(nu[ii]+nx[ii], &hsux[ii], 0);
-
-	printf("\npi = \n\n");
-	for(ii=0; ii<N; ii++)
-		d_print_tran_strvec(nx[ii+1], &hspi[ii], 0);
-
-//	printf("\nL = \n\n");
-//	for(ii=0; ii<=N; ii++)
-//		d_print_strmat(nu[ii]+nx[ii]+1, nu[ii]+nx[ii], &hsL[ii], 0, 0);
-
-	printf("\ntime sv\t\ttime trf\t\ttime trs\n");
-	printf("\n%e\t%e\t%e\n", time_sv, time_trf, time_trs);
-	printf("\n");
-
-/************************************************
-* free memory
-************************************************/	
-
-	d_free(A);
-	d_free(B);
-	d_free(b);
-	d_free(x0);
-	d_free(R);
-	d_free(S);
-	d_free(Q);
-	d_free(r);
-	d_free(q);
-	d_free(b0);
-	d_free_strmat(&sA);
-	d_free_strvec(&sb);
-	d_free_strmat(&sBbt0);
-	d_free_strvec(&sb0);
-	d_free_strmat(&sBAbt1);
-	d_free_strmat(&sRr0);
-	d_free_strvec(&sr0);
-	d_free_strmat(&sRSQrq1);
-	d_free_strvec(&srq1);
-	d_free_strmat(&sQqN);
-	d_free_strvec(&sqN);
-	d_free_strmat(&hsL[0]);
-	d_free_strvec(&hsPb[0]);
-	d_free_strvec(&hsux[0]);
-	d_free_strvec(&hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		d_free_strmat(&hsL[ii]);
-		d_free_strvec(&hsPb[ii]);
-		d_free_strvec(&hsux[ii]);
-		d_free_strvec(&hspi[ii]);
-		}
-	d_free_strmat(&hsL[N]);
-	d_free_strvec(&hsux[N]);
-	d_free_strmat(&hswork_mat[0]);
-	d_free_strvec(&hswork_vec[0]);
-
-
-/************************************************
-* return
-************************************************/	
-
-	return 0;
-
-	}
-
-
-
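The two static routines above implement the backward Riccati factorization and the forward substitution with BLASFEO kernels. A minimal stand-alone sketch of the underlying recursion for a scalar system (assumed dynamics a, b and weights q, r; not the library's implementation), so the backward/forward structure can be followed with plain arithmetic:

#include <stdio.h>

int main()
	{
	int kk;
	int N = 4;               // horizon, as in the example above
	double a = 0.9, b = 0.5; // assumed scalar dynamics x+ = a*x + b*u
	double q = 1.0, r = 2.0; // scalar weights, matching the diagonals of Q and R above

	double P[N+1]; // cost-to-go
	double K[N];   // feedback gains

	// backward sweep (the role of the factorization above)
	P[N] = q;
	for(kk=N-1; kk>=0; kk--)
		{
		K[kk] = b*P[kk+1]*a/(r + b*P[kk+1]*b);
		P[kk] = q + a*P[kk+1]*a - a*P[kk+1]*b*K[kk];
		}

	// forward sweep (the role of the substitution above): closed loop u = -K*x
	double x = 2.5;
	for(kk=0; kk<N; kk++)
		{
		double u = -K[kk]*x;
		printf("k=%d  x=%f  u=%f  P=%f\n", kk, x, u, P[kk]);
		x = a*x + b*u;
		}
	printf("k=%d  x=%f  P=%f\n", N, x, P[N]);
	return 0;
	}
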
diff --git a/third_party/blasfeo/examples/example_s_lu_factorization.c b/third_party/blasfeo/examples/example_s_lu_factorization.c
deleted file mode 100644
index e298604..0000000
--- a/third_party/blasfeo/examples/example_s_lu_factorization.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_v_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_blas.h"
-
-
-int main()
-	{
-
-	printf("\nExample of LU factorization and backsolve\n\n");
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by BLASFEO\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	int ii;
-
-	int n = 16;
-
-	//
-	// matrices in column-major format
-	//
-
-	float *A; s_zeros(&A, n, n);
-	for(ii=0; ii<n*n; ii++) A[ii] = ii;
-//	s_print_mat(n, n, A, n);
-
-	// spd matrix
-	float *B; s_zeros(&B, n, n);
-	for(ii=0; ii<n; ii++) B[ii*(n+1)] = 1.0;
-//	s_print_mat(n, n, B, n);
-
-	// identity
-	float *I; s_zeros(&I, n, n);
-	for(ii=0; ii<n; ii++) I[ii*(n+1)] = 1.0;
-//	s_print_mat(n, n, I, n);
-
-	// result matrix
-	float *D; s_zeros(&D, n, n);
-//	s_print_mat(n, n, D, n);
-
-	// permutation indices
-	int *ipiv; int_zeros(&ipiv, n, 1);
-
-	//
-	// matrices in matrix struct format
-	//
-
-	// work space large enough for 5 matrix structs of size n times n
-	int size_strmat = 5*s_size_strmat(n, n);
-	void *memory_strmat; v_zeros_align(&memory_strmat, size_strmat);
-	char *ptr_memory_strmat = (char *) memory_strmat;
-
-	struct s_strmat sA;
-//	s_allocate_strmat(n, n, &sA);
-	s_create_strmat(n, n, &sA, ptr_memory_strmat);
-	ptr_memory_strmat += sA.memory_size;
-	// convert from column major matrix to strmat
-	s_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-	printf("\nA = \n");
-	s_print_strmat(n, n, &sA, 0, 0);
-
-	struct s_strmat sB;
-//	s_allocate_strmat(n, n, &sB);
-	s_create_strmat(n, n, &sB, ptr_memory_strmat);
-	ptr_memory_strmat += sB.memory_size;
-	// convert from column major matrix to strmat
-	s_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-	printf("\nB = \n");
-	s_print_strmat(n, n, &sB, 0, 0);
-
-	struct s_strmat sI;
-//	s_allocate_strmat(n, n, &sI);
-	s_create_strmat(n, n, &sI, ptr_memory_strmat);
-	ptr_memory_strmat += sI.memory_size;
-	// conversion from column-major to strmat happens later, in the backsolve section below
-
-	struct s_strmat sD;
-//	s_allocate_strmat(n, n, &sD);
-	s_create_strmat(n, n, &sD, ptr_memory_strmat);
-	ptr_memory_strmat += sD.memory_size;
-
-	struct s_strmat sLU;
-//	s_allocate_strmat(n, n, &sD);
-	s_create_strmat(n, n, &sLU, ptr_memory_strmat);
-	ptr_memory_strmat += sLU.memory_size;
-
-	sgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-	printf("\nB+A*A' = \n");
-	s_print_strmat(n, n, &sD, 0, 0);
-
-//	sgetrf_nopivot_libstr(n, n, &sD, 0, 0, &sD, 0, 0);
-	sgetrf_libstr(n, n, &sD, 0, 0, &sLU, 0, 0, ipiv);
-	printf("\nLU = \n");
-	s_print_strmat(n, n, &sLU, 0, 0);
-	printf("\nipiv = \n");
-	int_print_mat(1, n, ipiv, 1);
-
-#if 0 // solve P L U X = P B
-	s_cvt_mat2strmat(n, n, I, n, &sI, 0, 0);
-	printf("\nI = \n");
-	s_print_strmat(n, n, &sI, 0, 0);
-
-	srowpe_libstr(n, ipiv, &sI);
-	printf("\nperm(I) = \n");
-	s_print_strmat(n, n, &sI, 0, 0);
-
-	strsm_llnu_libstr(n, n, 1.0, &sLU, 0, 0, &sI, 0, 0, &sD, 0, 0);
-	printf("\nperm(inv(L)) = \n");
-	s_print_strmat(n, n, &sD, 0, 0);
-	strsm_lunn_libstr(n, n, 1.0, &sLU, 0, 0, &sD, 0, 0, &sD, 0, 0);
-	printf("\ninv(A) = \n");
-	s_print_strmat(n, n, &sD, 0, 0);
-
-	// convert from strmat to column major matrix
-	s_cvt_strmat2mat(n, n, &sD, 0, 0, D, n);
-#else // solve X^T (P L U)^T = B^T P^T
-	s_cvt_tran_mat2strmat(n, n, I, n, &sI, 0, 0);
-	printf("\nI' = \n");
-	s_print_strmat(n, n, &sI, 0, 0);
-
-	scolpe_libstr(n, ipiv, &sB);
-	printf("\nperm(I') = \n");
-	s_print_strmat(n, n, &sB, 0, 0);
-
-	strsm_rltu_libstr(n, n, 1.0, &sLU, 0, 0, &sB, 0, 0, &sD, 0, 0);
-	printf("\nperm(inv(L')) = \n");
-	s_print_strmat(n, n, &sD, 0, 0);
-	strsm_rutn_libstr(n, n, 1.0, &sLU, 0, 0, &sD, 0, 0, &sD, 0, 0);
-	printf("\ninv(A') = \n");
-	s_print_strmat(n, n, &sD, 0, 0);
-
-	// convert from strmat to column major matrix
-	s_cvt_tran_strmat2mat(n, n, &sD, 0, 0, D, n);
-#endif
-
-	// print matrix in column-major format
-	printf("\ninv(A) = \n");
-	s_print_mat(n, n, D, n);
-
-
-
-	//
-	// free memory
-	//
-
-	s_free(A);
-	s_free(B);
-	s_free(D);
-	s_free(I);
-	int_free(ipiv);
-//	s_free_strmat(&sA);
-//	s_free_strmat(&sB);
-//	s_free_strmat(&sD);
-//	s_free_strmat(&sI);
-	v_free_align(memory_strmat);
-
-	return 0;
-	
-	}
-
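The single-precision example mirrors the double-precision one, with every d_ call replaced by its s_ counterpart. One practical consequence of the float/double split is accumulation error; a small stand-alone comparison (values assumed for illustration, not part of BLASFEO):

#include <stdio.h>

int main()
	{
	int ii;
	int n = 16;
	float sum_s = 0.0f;
	double sum_d = 0.0;

	// accumulate the same data (A[ii] = ii, scaled) in both precisions
	for(ii=0; ii<n*n; ii++)
		{
		sum_s += (float) ii*0.1f;
		sum_d += (double) ii*0.1;
		}

	printf("float  sum = %.10f\n", sum_s);
	printf("double sum = %.10f\n", sum_d);
	printf("difference = %e\n", sum_d - (double) sum_s);
	return 0;
	}
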
diff --git a/third_party/blasfeo/examples/example_s_riccati_recursion.c b/third_party/blasfeo/examples/example_s_riccati_recursion.c
deleted file mode 100644
index 03b9fc6..0000000
--- a/third_party/blasfeo/examples/example_s_riccati_recursion.c
+++ /dev/null
@@ -1,605 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "tools.h"
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_blas.h"
-
-
-
-static void s_back_ric_sv_libstr(int N, int *nx, int *nu, struct s_strmat *hsBAbt, struct s_strmat *hsRSQrq, struct s_strmat *hsL, struct s_strvec *hsux, struct s_strvec *hspi, struct s_strmat *hswork_mat, struct s_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// factorization and backward substitution
-
-	// last stage
-	spotrf_l_libstr(nx[N]+1, nx[N], &hsRSQrq[N], 0, 0, &hsL[N], 0, 0);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		strmm_rlnn_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn], nu[N-nn], &hsBAbt[N-nn-1], 0, 0, &hswork_mat[0], 0, 0);
-		sgead_libstr(1, nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn]+nx[N-nn], nu[N-nn], &hswork_mat[0], nu[N-nn-1]+nx[N-nn-1], 0);
-#if 1
-		ssyrk_spotrf_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#else
-		ssyrk_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-		spotrf_l_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#endif
-		}
-	
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	srowex_libstr(nu[nn]+nx[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-	strsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	srowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-	sgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-	sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	srowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	strmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-	saxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-	strmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		srowex_libstr(nu[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-		strsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		srowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-		sgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-		sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		srowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		strmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-		saxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		strmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hspi[nn], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-static void s_back_ric_trf_libstr(int N, int *nx, int *nu, struct s_strmat *hsBAbt, struct s_strmat *hsRSQrq, struct s_strmat *hsL, struct s_strmat *hswork_mat)
-	{
-
-	int nn;
-
-	// factorization
-
-	// last stage
-	spotrf_l_libstr(nx[N], nx[N], &hsRSQrq[N], 0, 0, &hsL[N], 0, 0);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		strmm_rlnn_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn], nu[N-nn], &hsBAbt[N-nn-1], 0, 0, &hswork_mat[0], 0, 0);
-#if 1
-		ssyrk_spotrf_ln_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], nx[N-nn], &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#else
-		ssyrk_ln_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-		spotrf_l_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#endif
-		}
-	
-	return;
-
-	}
-
-
-
-static void s_back_ric_trs_libstr(int N, int *nx, int *nu, struct s_strmat *hsBAbt, struct s_strvec *hsb, struct s_strvec *hsrq, struct s_strmat *hsL, struct s_strvec *hsPb, struct s_strvec *hsux, struct s_strvec *hspi, struct s_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// backward substitution
-
-	// last stage
-	sveccp_libstr(nu[N]+nx[N], 1.0, &hsrq[N], 0, &hsux[N], 0);
-
-	// middle stages
-	for(nn=0; nn<N-1; nn++)
-		{
-		// compute Pb
-		strmv_ltn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		strmv_lnn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		sveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-		sveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-		saxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-		sgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		strsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		}
-
-	// first stage
-	nn = N-1;
-	strmv_ltn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	strmv_lnn_libstr(nx[N-nn], nx[N-nn], &hsL[N-nn], nu[N-nn], nu[N-nn], &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	sveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-	sveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-	saxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-	sgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-	strsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	sveccp_libstr(nu[nn]+nx[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-	strsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	sgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-	sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	strmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-	strmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-	saxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		sveccp_libstr(nu[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-		strsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		sgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-		sveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		strmv_ltn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-		strmv_lnn_libstr(nx[nn+1], nx[nn+1], &hsL[nn+1], nu[nn+1], nu[nn+1], &hswork_vec[0], 0, &hswork_vec[0], 0);
-		saxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-/************************************************ 
-Mass-spring system: nx/2 masses connected to each other with springs (in a row), with the first and the last mass also connected to walls. nu (<=nx) controls act on the first nu masses. The system is sampled with sampling time Ts.
-************************************************/
-static void d_mass_spring_system(double Ts, int nx, int nu, int N, double *A, double *B, double *b, double *x0)
-	{
-
-	int nx2 = nx*nx;
-
-	int info = 0;
-
-	int pp = nx/2; // number of masses
-	
-/************************************************
-* build the continuous time system 
-************************************************/
-	
-	double *T; d_zeros(&T, pp, pp);
-	int ii;
-	for(ii=0; ii<pp; ii++) T[ii*(pp+1)] = -2;
-	for(ii=0; ii<pp-1; ii++) T[ii*(pp+1)+1] = 1;
-	for(ii=1; ii<pp; ii++) T[ii*(pp+1)-1] = 1;
-
-	double *Z; d_zeros(&Z, pp, pp);
-	double *I; d_zeros(&I, pp, pp); for(ii=0; ii<pp; ii++) I[ii*(pp+1)]=1.0; // = eye(pp);
-	double *Ac; d_zeros(&Ac, nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac, nx);
-	dmcopy(pp, pp, T, pp, Ac+pp, nx);
-	dmcopy(pp, pp, I, pp, Ac+pp*nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac+pp*(nx+1), nx); 
-	free(T);
-	free(Z);
-	free(I);
-	
-	d_zeros(&I, nu, nu); for(ii=0; ii<nu; ii++) I[ii*(nu+1)]=1.0; //I = eye(nu);
-	double *Bc; d_zeros(&Bc, nx, nu);
-	dmcopy(nu, nu, I, nu, Bc+pp, nx);
-	free(I);
-	
-/************************************************
-* compute the discrete time system 
-************************************************/
-
-	double *bb; d_zeros(&bb, nx, 1);
-	dmcopy(nx, 1, bb, nx, b, nx);
-		
-	dmcopy(nx, nx, Ac, nx, A, nx);
-	dscal_3l(nx2, Ts, A);
-	expm(nx, A);
-	
-	d_zeros(&T, nx, nx);
-	d_zeros(&I, nx, nx); for(ii=0; ii<nx; ii++) I[ii*(nx+1)]=1.0; //I = eye(nx);
-	dmcopy(nx, nx, A, nx, T, nx);
-	daxpy_3l(nx2, -1.0, I, T);
-	dgemm_nn_3l(nx, nu, nx, T, nx, Bc, nx, B, nx);
-	free(T);
-	free(I);
-	
-	int *ipiv = (int *) malloc(nx*sizeof(int));
-	dgesv_3l(nx, nu, Ac, nx, ipiv, B, nx, &info);
-	free(ipiv);
-
-	free(Ac);
-	free(Bc);
-	free(bb);
-	
-			
-/************************************************
-* initial state 
-************************************************/
-	
-	if(nx==4)
-		{
-		x0[0] = 5;
-		x0[1] = 10;
-		x0[2] = 15;
-		x0[3] = 20;
-		}
-	else
-		{
-		int jj;
-		for(jj=0; jj<nx; jj++)
-			x0[jj] = 1;
-		}
-
-	}
-
-
-
-int main()
-	{
-
-	printf("\nExample of LU factorization and backsolve\n\n");
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by BLASFEO\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	// loop index
-	int ii;
-
-/************************************************
-* problem size
-************************************************/	
-
-	// problem size
-	int N = 4;
-	int nx_ = 4;
-	int nu_ = 1;
-
-	// stage-wise variable sizes
-	int nx[N+1];
-	nx[0] = 0;
-	for(ii=1; ii<=N; ii++)
-		nx[ii] = nx_;
-	nx[N] = nx_;
-
-	int nu[N+1];
-	for(ii=0; ii<N; ii++)
-		nu[ii] = nu_;
-	nu[N] = 0;
-
-/************************************************
-* dynamical system
-************************************************/	
-
-	double *Ad; d_zeros(&Ad, nx_, nx_); // states update matrix
-
-	double *Bd; d_zeros(&Bd, nx_, nu_); // inputs matrix
-
-	double *bd; d_zeros(&bd, nx_, 1); // states offset
-	double *x0d; d_zeros(&x0d, nx_, 1); // initial state
-
-	double Ts = 0.5; // sampling time
-	d_mass_spring_system(Ts, nx_, nu_, N, Ad, Bd, bd, x0d);
-
-	float *A; s_zeros(&A, nx_, nx_); for(ii=0; ii<nx_*nx_; ii++) A[ii] = (float) Ad[ii];
-	float *B; s_zeros(&B, nx_, nu_); for(ii=0; ii<nx_*nu_; ii++) B[ii] = (float) Bd[ii];
-	float *b; s_zeros(&b, nx_, 1); for(ii=0; ii<nx_; ii++) b[ii] = (float) bd[ii];
-	float *x0; s_zeros(&x0, nx_, 1); for(ii=0; ii<nx_; ii++) x0[ii] = (float) x0d[ii];
-	
-	for(ii=0; ii<nx_; ii++)
-		b[ii] = 0.1;
-	
-	for(ii=0; ii<nx_; ii++)
-		x0[ii] = 0;
-	x0[0] = 2.5;
-	x0[1] = 2.5;
-
-	s_print_mat(nx_, nx_, A, nx_);
-	s_print_mat(nx_, nu_, B, nx_);
-	s_print_mat(1, nx_, b, 1);
-	s_print_mat(1, nx_, x0, 1);
-
-/************************************************
-* cost function
-************************************************/	
-
-	float *R; s_zeros(&R, nu_, nu_);
-	for(ii=0; ii<nu_; ii++) R[ii*(nu_+1)] = 2.0;
-
-	float *S; s_zeros(&S, nu_, nx_);
-
-	float *Q; s_zeros(&Q, nx_, nx_);
-	for(ii=0; ii<nx_; ii++) Q[ii*(nx_+1)] = 1.0;
-
-	float *r; s_zeros(&r, nu_, 1);
-	for(ii=0; ii<nu_; ii++) r[ii] = 0.2;
-
-	float *q; s_zeros(&q, nx_, 1);
-	for(ii=0; ii<nx_; ii++) q[ii] = 0.1;
-
-	s_print_mat(nu_, nu_, R, nu_);
-	s_print_mat(nu_, nx_, S, nu_);
-	s_print_mat(nx_, nx_, Q, nx_);
-	s_print_mat(1, nu_, r, 1);
-	s_print_mat(1, nx_, q, 1);
-
-/************************************************
-* matrices as strmat
-************************************************/	
-
-	struct s_strmat sA;
-	s_allocate_strmat(nx_, nx_, &sA);
-	s_cvt_mat2strmat(nx_, nx_, A, nx_, &sA, 0, 0);
-	struct s_strvec sb;
-	s_allocate_strvec(nx_, &sb);
-	s_cvt_vec2strvec(nx_, b, &sb, 0);
-	struct s_strvec sx0;
-	s_allocate_strvec(nx_, &sx0);
-	s_cvt_vec2strvec(nx_, x0, &sx0, 0);
-	struct s_strvec sb0;
-	s_allocate_strvec(nx_, &sb0);
-	float *b0; s_zeros(&b0, nx_, 1); // states offset
-	sgemv_n_libstr(nx_, nx_, 1.0, &sA, 0, 0, &sx0, 0, 1.0, &sb, 0, &sb0, 0);
-	s_print_tran_strvec(nx_, &sb0, 0);
-
-	struct s_strmat sBbt0;
-	s_allocate_strmat(nu_+nx_+1, nx_, &sBbt0);
-	s_cvt_tran_mat2strmat(nx_, nx_, B, nx_, &sBbt0, 0, 0);
-	srowin_libstr(nx_, 1.0, &sb0, 0, &sBbt0, nu_, 0);
-	s_print_strmat(nu_+1, nx_, &sBbt0, 0, 0);
-
-	struct s_strmat sBAbt1;
-	s_allocate_strmat(nu_+nx_+1, nx_, &sBAbt1);
-	s_cvt_tran_mat2strmat(nx_, nu_, B, nx_, &sBAbt1, 0, 0);
-	s_cvt_tran_mat2strmat(nx_, nx_, A, nx_, &sBAbt1, nu_, 0);
-	s_cvt_tran_mat2strmat(nx_, 1, b, nx_, &sBAbt1, nu_+nx_, 0);
-	s_print_strmat(nu_+nx_+1, nx_, &sBAbt1, 0, 0);
-
-	struct s_strvec sr0; // XXX no need to update r0 since S=0
-	s_allocate_strvec(nu_, &sr0);
-	s_cvt_vec2strvec(nu_, r, &sr0, 0);
-
-	struct s_strmat sRr0;
-	s_allocate_strmat(nu_+1, nu_, &sRr0);
-	s_cvt_mat2strmat(nu_, nu_, R, nu_, &sRr0, 0, 0);
-	srowin_libstr(nu_, 1.0, &sr0, 0, &sRr0, nu_, 0);
-	s_print_strmat(nu_+1, nu_, &sRr0, 0, 0);
-
-	struct s_strvec srq1;
-	s_allocate_strvec(nu_+nx_, &srq1);
-	s_cvt_vec2strvec(nu_, r, &srq1, 0);
-	s_cvt_vec2strvec(nx_, q, &srq1, nu_);
-
-	struct s_strmat sRSQrq1;
-	s_allocate_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1);
-	s_cvt_mat2strmat(nu_, nu_, R, nu_, &sRSQrq1, 0, 0);
-	s_cvt_tran_mat2strmat(nu_, nx_, S, nu_, &sRSQrq1, nu_, 0);
-	s_cvt_mat2strmat(nx_, nx_, Q, nx_, &sRSQrq1, nu_, nu_);
-	srowin_libstr(nu_+nx_, 1.0, &srq1, 0, &sRSQrq1, nu_+nx_, 0);
-	s_print_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1, 0, 0);
-
-	struct s_strvec sqN;
-	s_allocate_strvec(nx_, &sqN);
-	s_cvt_vec2strvec(nx_, q, &sqN, 0);
-
-	struct s_strmat sQqN;
-	s_allocate_strmat(nx_+1, nx_, &sQqN);
-	s_cvt_mat2strmat(nx_, nx_, Q, nx_, &sQqN, 0, 0);
-	srowin_libstr(nx_, 1.0, &sqN, 0, &sQqN, nx_, 0);
-	s_print_strmat(nx_+1, nx_, &sQqN, 0, 0);
-
-/************************************************
-* array of matrices
-************************************************/	
-	
-	struct s_strmat hsBAbt[N];
-	struct s_strvec hsb[N];
-	struct s_strmat hsRSQrq[N+1];
-	struct s_strvec hsrq[N+1];
-	struct s_strmat hsL[N+1];
-	struct s_strvec hsPb[N];
-	struct s_strvec hsux[N+1];
-	struct s_strvec hspi[N];
-	struct s_strmat hswork_mat[1];
-	struct s_strvec hswork_vec[1];
-
-	hsBAbt[0] = sBbt0;
-	hsb[0] = sb0;
-	hsRSQrq[0] = sRr0;
-	hsrq[0] = sr0;
-	s_allocate_strmat(nu_+1, nu_, &hsL[0]);
-	s_allocate_strvec(nx_, &hsPb[0]);
-	s_allocate_strvec(nx_+nu_+1, &hsux[0]);
-	s_allocate_strvec(nx_, &hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		hsBAbt[ii] = sBAbt1;
-		hsb[ii] = sb;
-		hsRSQrq[ii] = sRSQrq1;
-		hsrq[ii] = srq1;
-		s_allocate_strmat(nu_+nx_+1, nu_+nx_, &hsL[ii]);
-		s_allocate_strvec(nx_, &hsPb[ii]);
-		s_allocate_strvec(nx_+nu_+1, &hsux[ii]);
-		s_allocate_strvec(nx_, &hspi[ii]);
-		}
-	hsRSQrq[N] = sQqN;
-	hsrq[N] = sqN;
-	s_allocate_strmat(nx_+1, nx_, &hsL[N]);
-	s_allocate_strvec(nx_+nu_+1, &hsux[N]);
-	s_allocate_strmat(nu_+nx_+1, nx_, &hswork_mat[0]);
-	s_allocate_strvec(nx_, &hswork_vec[0]);
-
-//	for(ii=0; ii<N; ii++)
-//		s_print_strmat(nu[ii]+nx[ii]+1, nx[ii+1], &hsBAbt[ii], 0, 0);
-//	return 0;
-
-/************************************************
-* call Riccati solver
-************************************************/	
-	
-	// timing 
-	struct timeval tv0, tv1, tv2, tv3;
-	int nrep = 1000;
-	int rep;
-
-	gettimeofday(&tv0, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		s_back_ric_sv_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hsux, hspi, hswork_mat, hswork_vec);
-		}
-
-	gettimeofday(&tv1, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		s_back_ric_trf_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hswork_mat);
-		}
-
-	gettimeofday(&tv2, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		s_back_ric_trs_libstr(N, nx, nu, hsBAbt, hsb, hsrq, hsL, hsPb, hsux, hspi, hswork_vec);
-		}
-
-	gettimeofday(&tv3, NULL); // time
-
-	float time_sv  = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-	float time_trf = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-	float time_trs = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-	// print sol
-	printf("\nux = \n\n");
-	for(ii=0; ii<=N; ii++)
-		s_print_tran_strvec(nu[ii]+nx[ii], &hsux[ii], 0);
-
-	printf("\npi = \n\n");
-	for(ii=0; ii<N; ii++)
-		s_print_tran_strvec(nx[ii+1], &hspi[ii], 0);
-
-//	printf("\nL = \n\n");
-//	for(ii=0; ii<=N; ii++)
-//		s_print_strmat(nu[ii]+nx[ii]+1, nu[ii]+nx[ii], &hsL[ii], 0, 0);
-
-	printf("\ntime sv\t\ttime trf\t\ttime trs\n");
-	printf("\n%e\t%e\t%e\n", time_sv, time_trf, time_trs);
-	printf("\n");
-
-/************************************************
-* free memory
-************************************************/	
-
-	d_free(Ad);
-	d_free(Bd);
-	d_free(bd);
-	d_free(x0d);
-	s_free(A);
-	s_free(B);
-	s_free(b);
-	s_free(x0);
-	s_free(R);
-	s_free(S);
-	s_free(Q);
-	s_free(r);
-	s_free(q);
-	s_free(b0);
-	s_free_strmat(&sA);
-	s_free_strvec(&sb);
-	s_free_strmat(&sBbt0);
-	s_free_strvec(&sb0);
-	s_free_strmat(&sBAbt1);
-	s_free_strmat(&sRr0);
-	s_free_strvec(&sr0);
-	s_free_strmat(&sRSQrq1);
-	s_free_strvec(&srq1);
-	s_free_strmat(&sQqN);
-	s_free_strvec(&sqN);
-	s_free_strmat(&hsL[0]);
-	s_free_strvec(&hsPb[0]);
-	s_free_strvec(&hsux[0]);
-	s_free_strvec(&hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		s_free_strmat(&hsL[ii]);
-		s_free_strvec(&hsPb[ii]);
-		s_free_strvec(&hsux[ii]);
-		s_free_strvec(&hspi[ii]);
-		}
-	s_free_strmat(&hsL[N]);
-	s_free_strvec(&hsux[N]);
-	s_free_strmat(&hswork_mat[0]);
-	s_free_strvec(&hswork_vec[0]);
-
-
-/************************************************
-* return
-************************************************/	
-
-	return 0;
-
-	}
-
-
-
-
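Both Riccati examples time each routine with gettimeofday and average over nrep repetitions, summing the seconds delta and microseconds delta separately. A stand-alone sketch of that timing pattern with a placeholder workload (workload and repetition count are assumed, not taken from the deleted files):

#include <stdio.h>
#include <sys/time.h>

// placeholder workload standing in for one solver call (assumed)
static void work_under_test(void)
	{
	volatile double acc = 0.0;
	int ii;
	for(ii=0; ii<100000; ii++)
		acc = acc + ii*1e-6;
	}

int main()
	{
	int rep;
	int nrep = 1000; // repetition count, as in the examples above
	struct timeval tv0, tv1;

	gettimeofday(&tv0, NULL); // time

	for(rep=0; rep<nrep; rep++)
		work_under_test();

	gettimeofday(&tv1, NULL); // time

	// average seconds per call: seconds and microseconds deltas are
	// averaged separately and summed, as in the examples above
	double time_per_call = (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)
		+ (tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);

	printf("average time per call: %e s\n", time_per_call);
	return 0;
	}
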
diff --git a/third_party/blasfeo/examples/example_tree_riccati_recursion.c b/third_party/blasfeo/examples/example_tree_riccati_recursion.c
deleted file mode 100644
index b61d2d3..0000000
--- a/third_party/blasfeo/examples/example_tree_riccati_recursion.c
+++ /dev/null
@@ -1,638 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "tools.h"
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-
-
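-// Riccati factorize-and-solve in one pass: a backward sweep that Cholesky-factors
-// the cost-to-go matrices (dpotrf_l on stage N, then dsyrk_dpotrf per stage),
-// followed by a forward substitution that recovers the inputs/states ux and the
-// multipliers pi.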
-void d_back_ric_sv_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hsLxt, struct d_strvec *hsux, struct d_strvec *hspi, struct d_strmat *hswork_mat, struct d_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// factorization and backward substitution
-
-	// last stage
-	dpotrf_l_libstr(nx[N]+1, nx[N], &hsRSQrq[N], 0, 0, &hsL[N], 0, 0);
-	dtrtr_l_libstr(nx[N], &hsL[N], 0, 0, &hsLxt[N], 0, 0);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		dtrmm_rutn_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hsLxt[N-nn], 0, 0, 0.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0);
-		dgead_libstr(1, nx[N-nn], 1.0, &hsL[N-nn], nu[N-nn]+nx[N-nn], nu[N-nn], &hswork_mat[0], nu[N-nn-1]+nx[N-nn-1], 0);
-#if 1
-		dsyrk_dpotrf_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#else
-		dsyrk_ln_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-		dpotrf_l_libstr(nu[N-nn-1]+nx[N-nn-1]+1, nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsL[N-nn-1], 0, 0);
-#endif
-		dtrtr_l_libstr(nx[N-nn-1], &hsL[N-nn-1], nu[N-nn-1], nu[N-nn-1], &hsLxt[N-nn-1], 0, 0);
-		}
-	
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	drowex_libstr(nu[nn]+nx[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-	dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	drowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-	dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	drowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	dtrmv_unn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hspi[nn], 0, &hspi[nn], 0);
-	daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-	dtrmv_utn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hspi[nn], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		drowex_libstr(nu[nn], -1.0, &hsL[nn], nu[nn]+nx[nn], 0, &hsux[nn], 0);
-		dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		drowex_libstr(nx[nn+1], 1.0, &hsBAbt[nn], nu[nn]+nx[nn], 0, &hsux[nn+1], nu[nn+1]);
-		dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsux[nn+1], nu[nn+1], &hsux[nn+1], nu[nn+1]);
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		drowex_libstr(nx[nn+1], 1.0, &hsL[nn+1], nu[nn+1]+nx[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		dtrmv_unn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hspi[nn], 0, &hspi[nn], 0);
-		daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		dtrmv_utn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hspi[nn], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-void d_back_ric_trf_funnel1_libstr(int md, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hsLxt_old, struct d_strmat *hsLxt_new, struct d_strmat *hswork_mat)
-	{
-
-	int ii;
-
-	ii = 0;
-	dtrmm_rutn_libstr(nu[0]+nx[0], nx[1], 1.0, &hsBAbt[ii], 0, 0, &hsLxt_old[ii], 0, 0, 0.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0);
-	dsyrk_ln_libstr(nu[0]+nx[0], nu[0]+nx[0], nx[1], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[0], 0, 0, &hsL[0], 0, 0);
-	for(ii=1; ii<md; ii++)
-		{
-		dtrmm_rutn_libstr(nu[0]+nx[0], nx[1], 1.0, &hsBAbt[ii], 0, 0, &hsLxt_old[ii], 0, 0, 0.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0);
-		dsyrk_ln_libstr(nu[0]+nx[0], nu[0]+nx[0], nx[1], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsL[0], 0, 0, &hsL[0], 0, 0);
-		}
-
-	dpotrf_l_libstr(nu[0]+nx[0], nu[0]+nx[0], &hsL[0], 0, 0, &hsL[0], 0, 0);
-	dtrtr_l_libstr(nx[0], &hsL[0], nu[0], nu[0], &hsLxt_new[0], 0, 0);
-
-	return;
-
-	}
-
-
-
-void d_back_ric_trf_step1_libstr(int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hsLxt, struct d_strmat *hswork_mat)
-	{
-
-	dtrmm_rutn_libstr(nu[0]+nx[0], nx[1], 1.0, &hsBAbt[0], 0, 0, &hsLxt[1], 0, 0, 0.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0);
-	dsyrk_ln_libstr(nu[0]+nx[0], nu[0]+nx[0], nx[1], 1.0, &hswork_mat[0], 0, 0, &hswork_mat[0], 0, 0, 1.0, &hsRSQrq[0], 0, 0, &hsL[0], 0, 0);
-	dpotrf_l_libstr(nu[0]+nx[0], nu[0]+nx[0], &hsL[0], 0, 0, &hsL[0], 0, 0);
-	dtrtr_l_libstr(nx[0], &hsL[0], nu[0], nu[0], &hsLxt[0], 0, 0);
-
-	return;
-
-	}
-
-
-
-void d_back_ric_trf_stepN_libstr(int *nx, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hsLxt)
-	{
-
-	dpotrf_l_libstr(nx[0], nx[0], &hsRSQrq[0], 0, 0, &hsL[0], 0, 0);
-	dtrtr_l_libstr(nx[0], &hsL[0], 0, 0, &hsLxt[0], 0, 0);
-
-	return;
-
-	}
-
-
-
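-// Factorization-only backward sweep: stage N is handled by d_back_ric_trf_stepN_libstr
-// (plain Cholesky of RSQrq[N]), then d_back_ric_trf_step1_libstr is applied to each
-// earlier stage.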
-void d_back_ric_trf_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strmat *hsRSQrq, struct d_strmat *hsL, struct d_strmat *hsLxt, struct d_strmat *hswork_mat)
-	{
-
-	int nn;
-
-	// factorization
-
-	// last stage
-	d_back_ric_trf_stepN_libstr(&nx[N], &hsRSQrq[N], &hsL[N], &hsLxt[N]);
-
-	// middle stages
-	for(nn=0; nn<N; nn++)
-		{
-		d_back_ric_trf_step1_libstr(&nx[N-nn-1], &nu[N-nn-1], &hsBAbt[N-nn-1], &hsRSQrq[N-nn-1], &hsL[N-nn-1], &hsLxt[N-nn-1], hswork_mat);
-		}
-	
-	return;
-
-	}
-
-
-
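-// Solve-only routine: reuses the factors L/Lxt computed by d_back_ric_trf_libstr,
-// running the backward substitution (with Pb = Lxt' * Lxt * b per stage) and then
-// the forward substitution for ux and pi.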
-void d_back_ric_trs_libstr(int N, int *nx, int *nu, struct d_strmat *hsBAbt, struct d_strvec *hsb, struct d_strvec *hsrq, struct d_strmat *hsL, struct d_strmat *hsLxt, struct d_strvec *hsPb, struct d_strvec *hsux, struct d_strvec *hspi, struct d_strvec *hswork_vec)
-	{
-
-	int nn;
-
-	// backward substitution
-
-	// last stage
-	dveccp_libstr(nu[N]+nx[N], 1.0, &hsrq[N], 0, &hsux[N], 0);
-
-	// middle stages
-	for(nn=0; nn<N-1; nn++)
-		{
-		// compute Pb
-		dtrmv_unn_libstr(nx[N-nn], &hsLxt[N-nn], 0, 0, &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		dtrmv_utn_libstr(nx[N-nn], &hsLxt[N-nn], 0, 0, &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-		dveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-		dveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-		daxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-		dgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		dtrsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-		}
-
-	// first stage
-	nn = N-1;
-	dtrmv_unn_libstr(nx[N-nn], &hsLxt[N-nn], 0, 0, &hsb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	dtrmv_utn_libstr(nx[N-nn], &hsLxt[N-nn], 0, 0, &hsPb[N-nn-1], 0, &hsPb[N-nn-1], 0);
-	dveccp_libstr(nu[N-nn-1]+nx[N-nn-1], 1.0, &hsrq[N-nn-1], 0, &hsux[N-nn-1], 0);
-	dveccp_libstr(nx[N-nn], 1.0, &hsPb[N-nn-1], 0, &hswork_vec[0], 0);
-	daxpy_libstr(nx[N-nn], 1.0, &hsux[N-nn], nu[N-nn], &hswork_vec[0], 0);
-	dgemv_n_libstr(nu[N-nn-1]+nx[N-nn-1], nx[N-nn], 1.0, &hsBAbt[N-nn-1], 0, 0, &hswork_vec[0], 0, 1.0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-	dtrsv_lnn_libstr(nu[N-nn-1]+nx[N-nn-1], nu[N-nn-1]+nx[N-nn-1], &hsL[N-nn-1], 0, 0, &hsux[N-nn-1], 0, &hsux[N-nn-1], 0);
-
-	// forward substitution
-
-	// first stage
-	nn = 0;
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-	dveccp_libstr(nu[nn]+nx[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-	dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn]+nx[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-	dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-	dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-	dtrmv_unn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hswork_vec[0], 0, &hswork_vec[0], 0);
-	dtrmv_utn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hswork_vec[0], 0, &hswork_vec[0], 0);
-	daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-
-	// middle stages
-	for(nn=1; nn<N; nn++)
-		{
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hspi[nn], 0);
-		dveccp_libstr(nu[nn], -1.0, &hsux[nn], 0, &hsux[nn], 0);
-		dtrsv_ltn_libstr(nu[nn]+nx[nn], nu[nn], &hsL[nn], 0, 0, &hsux[nn], 0, &hsux[nn], 0);
-		dgemv_t_libstr(nu[nn]+nx[nn], nx[nn+1], 1.0, &hsBAbt[nn], 0, 0, &hsux[nn], 0, 1.0, &hsb[nn], 0, &hsux[nn+1], nu[nn+1]);
-		dveccp_libstr(nx[nn+1], 1.0, &hsux[nn+1], nu[nn+1], &hswork_vec[0], 0);
-		dtrmv_unn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hswork_vec[0], 0, &hswork_vec[0], 0);
-		dtrmv_utn_libstr(nx[nn+1], &hsLxt[nn+1], 0, 0, &hswork_vec[0], 0, &hswork_vec[0], 0);
-		daxpy_libstr(nx[nn+1], 1.0, &hswork_vec[0], 0, &hspi[nn], 0);
-		}
-
-	return;
-
-	}
-
-
-
-/************************************************ 
-Mass-spring system: nx/2 masses connected to each other with springs (in a row), and the first and last ones to walls. nu (<=nx) controls act on the first nu masses. The system is sampled with sampling time Ts.
-************************************************/
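-/************************************************
-* As built below: with p = nx/2 masses the continuous-time dynamics are
-*   dx/dt = Ac*x + Bc*u,  Ac = [0 I; T 0],  Bc = [0; I_nu],
-* with T the tridiagonal stiffness matrix; the discrete-time pair is then
-* A = expm(Ts*Ac) and B = Ac^{-1}*(A - I)*Bc (zero-order hold).
-************************************************/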
-void mass_spring_system(double Ts, int nx, int nu, int N, double *A, double *B, double *b, double *x0)
-	{
-
-	int nx2 = nx*nx;
-
-	int info = 0;
-
-	int pp = nx/2; // number of masses
-	
-/************************************************
-* build the continuous time system 
-************************************************/
-	
-	double *T; d_zeros(&T, pp, pp);
-	int ii;
-	for(ii=0; ii<pp; ii++) T[ii*(pp+1)] = -2;
-	for(ii=0; ii<pp-1; ii++) T[ii*(pp+1)+1] = 1;
-	for(ii=1; ii<pp; ii++) T[ii*(pp+1)-1] = 1;
-
-	double *Z; d_zeros(&Z, pp, pp);
-	double *I; d_zeros(&I, pp, pp); for(ii=0; ii<pp; ii++) I[ii*(pp+1)]=1.0; // = eye(pp);
-	double *Ac; d_zeros(&Ac, nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac, nx);
-	dmcopy(pp, pp, T, pp, Ac+pp, nx);
-	dmcopy(pp, pp, I, pp, Ac+pp*nx, nx);
-	dmcopy(pp, pp, Z, pp, Ac+pp*(nx+1), nx); 
-	free(T);
-	free(Z);
-	free(I);
-	
-	d_zeros(&I, nu, nu); for(ii=0; ii<nu; ii++) I[ii*(nu+1)]=1.0; //I = eye(nu);
-	double *Bc; d_zeros(&Bc, nx, nu);
-	dmcopy(nu, nu, I, nu, Bc+pp, nx);
-	free(I);
-	
-/************************************************
-* compute the discrete time system 
-************************************************/
-
-	double *bb; d_zeros(&bb, nx, 1);
-	dmcopy(nx, 1, bb, nx, b, nx);
-		
-	dmcopy(nx, nx, Ac, nx, A, nx);
-	dscal_3l(nx2, Ts, A);
-	expm(nx, A);
-	
-	d_zeros(&T, nx, nx);
-	d_zeros(&I, nx, nx); for(ii=0; ii<nx; ii++) I[ii*(nx+1)]=1.0; //I = eye(nx);
-	dmcopy(nx, nx, A, nx, T, nx);
-	daxpy_3l(nx2, -1.0, I, T);
-	dgemm_nn_3l(nx, nu, nx, T, nx, Bc, nx, B, nx);
-	free(T);
-	free(I);
-	
-	int *ipiv = (int *) malloc(nx*sizeof(int));
-	dgesv_3l(nx, nu, Ac, nx, ipiv, B, nx, &info);
-	free(ipiv);
-
-	free(Ac);
-	free(Bc);
-	free(bb);
-	
-			
-/************************************************
-* initial state 
-************************************************/
-	
-	if(nx==4)
-		{
-		x0[0] = 5;
-		x0[1] = 10;
-		x0[2] = 15;
-		x0[3] = 20;
-		}
-	else
-		{
-		int jj;
-		for(jj=0; jj<nx; jj++)
-			x0[jj] = 1;
-		}
-
-	}
-
-
-
-int main()
-	{
-
-	printf("\nExample of Riccati factorization and backsolve\n\n");
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by BLASFEO\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	// loop index
-	int ii;
-
-/************************************************
-* problem size
-************************************************/	
-
-	// problem size
-	int N = 4;
-	int nx_ = 8;
-	int nu_ = 3;
-
-	// stage-wise variant size
-	int nx[N+1];
-	nx[0] = 0;
-	for(ii=1; ii<=N; ii++)
-		nx[ii] = nx_;
-	nx[N] = nx_;
-
-	int nu[N+1];
-	for(ii=0; ii<N; ii++)
-		nu[ii] = nu_;
-	nu[N] = 0;
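-
-	// nx[0] = 0: the known initial state x0 is eliminated from stage 0; it enters
-	// only through the first-stage offset b0 built further below.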
-
-/************************************************
-* dynamical system
-************************************************/	
-
-	double *A; d_zeros(&A, nx_, nx_); // states update matrix
-
-	double *B; d_zeros(&B, nx_, nu_); // inputs matrix
-
-	double *b; d_zeros(&b, nx_, 1); // states offset
-	double *x0; d_zeros_align(&x0, nx_, 1); // initial state
-
-	double Ts = 0.5; // sampling time
-	mass_spring_system(Ts, nx_, nu_, N, A, B, b, x0);
-	
-	for(ii=0; ii<nx_; ii++)
-		b[ii] = 0.1;
-	
-	for(ii=0; ii<nx_; ii++)
-		x0[ii] = 0;
-	x0[0] = 2.5;
-	x0[1] = 2.5;
-
-	d_print_mat(nx_, nx_, A, nx_);
-	d_print_mat(nx_, nu_, B, nx_);
-	d_print_mat(1, nx_, b, 1);
-	d_print_mat(1, nx_, x0, 1);
-
-/************************************************
-* cost function
-************************************************/	
-
-	double *R; d_zeros(&R, nu_, nu_);
-	for(ii=0; ii<nu_; ii++) R[ii*(nu_+1)] = 2.0;
-
-	double *S; d_zeros(&S, nu_, nx_);
-
-	double *Q; d_zeros(&Q, nx_, nx_);
-	for(ii=0; ii<nx_; ii++) Q[ii*(nx_+1)] = 1.0;
-
-	double *r; d_zeros(&r, nu_, 1);
-	for(ii=0; ii<nu_; ii++) r[ii] = 0.2;
-
-	double *q; d_zeros(&q, nx_, 1);
-	for(ii=0; ii<nx_; ii++) q[ii] = 0.1;
-
-	d_print_mat(nu_, nu_, R, nu_);
-	d_print_mat(nu_, nx_, S, nu_);
-	d_print_mat(nx_, nx_, Q, nx_);
-	d_print_mat(1, nu_, r, 1);
-	d_print_mat(1, nx_, q, 1);
-
-/************************************************
-* matrices as strmat
-************************************************/	
-
-	struct d_strmat sA;
-	d_allocate_strmat(nx_, nx_, &sA);
-	d_cvt_mat2strmat(nx_, nx_, A, nx_, &sA, 0, 0);
-	struct d_strvec sb;
-	d_allocate_strvec(nx_, &sb);
-	d_cvt_vec2strvec(nx_, b, &sb, 0);
-	struct d_strvec sx0;
-	d_allocate_strvec(nx_, &sx0);
-	d_cvt_vec2strvec(nx_, x0, &sx0, 0);
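-	// first-stage offset with x0 eliminated: b0 = A*x0 + b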
-	struct d_strvec sb0;
-	d_allocate_strvec(nx_, &sb0);
-	double *b0; d_zeros(&b0, nx_, 1); // states offset
-	dgemv_n_libstr(nx_, nx_, 1.0, &sA, 0, 0, &sx0, 0, 1.0, &sb, 0, &sb0, 0);
-	d_print_tran_strvec(nx_, &sb0, 0);
-
-	struct d_strmat sBbt0;
-	d_allocate_strmat(nu_+nx_+1, nx_, &sBbt0);
-	d_cvt_tran_mat2strmat(nx_, nx_, B, nx_, &sBbt0, 0, 0);
-	drowin_libstr(nx_, 1.0, &sb0, 0, &sBbt0, nu_, 0);
-	d_print_strmat(nu_+1, nx_, &sBbt0, 0, 0);
-
-	struct d_strmat sBAbt1;
-	d_allocate_strmat(nu_+nx_+1, nx_, &sBAbt1);
-	d_cvt_tran_mat2strmat(nx_, nu_, B, nx_, &sBAbt1, 0, 0);
-	d_cvt_tran_mat2strmat(nx_, nx_, A, nx_, &sBAbt1, nu_, 0);
-	d_cvt_tran_mat2strmat(nx_, 1, b, nx_, &sBAbt1, nu_+nx_, 0);
-	d_print_strmat(nu_+nx_+1, nx_, &sBAbt1, 0, 0);
-
-	struct d_strvec sr0; // XXX no need to update r0 since S=0
-	d_allocate_strvec(nu_, &sr0);
-	d_cvt_vec2strvec(nu_, r, &sr0, 0);
-
-	struct d_strmat sRr0;
-	d_allocate_strmat(nu_+1, nu_, &sRr0);
-	d_cvt_mat2strmat(nu_, nu_, R, nu_, &sRr0, 0, 0);
-	drowin_libstr(nu_, 1.0, &sr0, 0, &sRr0, nu_, 0);
-	d_print_strmat(nu_+1, nu_, &sRr0, 0, 0);
-
-	struct d_strvec srq1;
-	d_allocate_strvec(nu_+nx_, &srq1);
-	d_cvt_vec2strvec(nu_, r, &srq1, 0);
-	d_cvt_vec2strvec(nx_, q, &srq1, nu_);
-
-	struct d_strmat sRSQrq1;
-	d_allocate_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1);
-	d_cvt_mat2strmat(nu_, nu_, R, nu_, &sRSQrq1, 0, 0);
-	d_cvt_tran_mat2strmat(nu_, nx_, S, nu_, &sRSQrq1, nu_, 0);
-	d_cvt_mat2strmat(nx_, nx_, Q, nx_, &sRSQrq1, nu_, nu_);
-	drowin_libstr(nu_+nx_, 1.0, &srq1, 0, &sRSQrq1, nu_+nx_, 0);
-	d_print_strmat(nu_+nx_+1, nu_+nx_, &sRSQrq1, 0, 0);
-
-	struct d_strvec sqN;
-	d_allocate_strvec(nx_, &sqN);
-	d_cvt_vec2strvec(nx_, q, &sqN, 0);
-
-	struct d_strmat sQqN;
-	d_allocate_strmat(nx_+1, nx_, &sQqN);
-	d_cvt_mat2strmat(nx_, nx_, Q, nx_, &sQqN, 0, 0);
-	drowin_libstr(nx_, 1.0, &sqN, 0, &sQqN, nx_, 0);
-	d_print_strmat(nx_+1, nx_, &sQqN, 0, 0);
-
-/************************************************
-* array of matrices
-************************************************/	
-	
-	struct d_strmat hsBAbt[N];
-	struct d_strvec hsb[N];
-	struct d_strmat hsRSQrq[N+1];
-	struct d_strvec hsrq[N+1];
-	struct d_strmat hsL[N+1];
-	struct d_strmat hsLxt[N+1];
-	struct d_strvec hsPb[N];
-	struct d_strvec hsux[N+1];
-	struct d_strvec hspi[N];
-	struct d_strmat hswork_mat[1];
-	struct d_strvec hswork_vec[1];
-
-	hsBAbt[0] = sBbt0;
-	hsb[0] = sb0;
-	hsRSQrq[0] = sRr0;
-	hsrq[0] = sr0;
-	d_allocate_strmat(nu_+1, nu_, &hsL[0]);
-//	d_allocate_strmat(nu_+1, nu_, &hsLxt[0]);
-	d_allocate_strvec(nx_, &hsPb[0]);
-	d_allocate_strvec(nx_+nu_+1, &hsux[0]);
-	d_allocate_strvec(nx_, &hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		hsBAbt[ii] = sBAbt1;
-		hsb[ii] = sb;
-		hsRSQrq[ii] = sRSQrq1;
-		hsrq[ii] = srq1;
-		d_allocate_strmat(nu_+nx_+1, nu_+nx_, &hsL[ii]);
-		d_allocate_strmat(nx_, nu_+nx_, &hsLxt[ii]);
-		d_allocate_strvec(nx_, &hsPb[ii]);
-		d_allocate_strvec(nx_+nu_+1, &hsux[ii]);
-		d_allocate_strvec(nx_, &hspi[ii]);
-		}
-	hsRSQrq[N] = sQqN;
-	hsrq[N] = sqN;
-	d_allocate_strmat(nx_+1, nx_, &hsL[N]);
-	d_allocate_strmat(nx_, nx_, &hsLxt[N]);
-	d_allocate_strvec(nx_+nu_+1, &hsux[N]);
-	d_allocate_strmat(nu_+nx_+1, nx_, &hswork_mat[0]);
-	d_allocate_strvec(nx_, &hswork_vec[0]);
-
-//	for(ii=0; ii<N; ii++)
-//		d_print_strmat(nu[ii]+nx[ii]+1, nx[ii+1], &hsBAbt[ii], 0, 0);
-//	return 0;
-
-/************************************************
-* call Riccati solver
-************************************************/	
-	
-	// timing 
-	struct timeval tv0, tv1, tv2, tv3;
-	int nrep = 1000;
-	int rep;
-
-	gettimeofday(&tv0, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-//		d_back_ric_sv_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hsLxt, hsux, hspi, hswork_mat, hswork_vec);
-		}
-
-	gettimeofday(&tv1, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		d_back_ric_trf_libstr(N, nx, nu, hsBAbt, hsRSQrq, hsL, hsLxt, hswork_mat);
-		}
-
-	gettimeofday(&tv2, NULL); // time
-
-	for(rep=0; rep<nrep; rep++)
-		{
-		d_back_ric_trs_libstr(N, nx, nu, hsBAbt, hsb, hsrq, hsL, hsLxt, hsPb, hsux, hspi, hswork_vec);
-		}
-
-	gettimeofday(&tv3, NULL); // time
-
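-	// average wall-clock time per call, in seconds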
-	float time_sv  = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-	float time_trf = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-	float time_trs = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-	// print sol
-	printf("\nux = \n\n");
-	for(ii=0; ii<=N; ii++)
-		d_print_tran_strvec(nu[ii]+nx[ii], &hsux[ii], 0);
-
-	printf("\npi = \n\n");
-	for(ii=0; ii<N; ii++)
-		d_print_tran_strvec(nx[ii+1], &hspi[ii], 0);
-
-	printf("\ntime sv\t\ttime trf\t\ttime trs\n");
-	printf("\n%e\t%e\t%e\n", time_sv, time_trf, time_trs);
-	printf("\n");
-
-/************************************************
-* free memory
-************************************************/	
-
-	d_free(A);
-	d_free(B);
-	d_free(b);
-	d_free_align(x0);
-	d_free(R);
-	d_free(S);
-	d_free(Q);
-	d_free(r);
-	d_free(q);
-	d_free(b0);
-	d_free_strmat(&sA);
-	d_free_strvec(&sb);
-	d_free_strmat(&sBbt0);
-	d_free_strvec(&sb0);
-	d_free_strmat(&sBAbt1);
-	d_free_strmat(&sRr0);
-	d_free_strvec(&sr0);
-	d_free_strmat(&sRSQrq1);
-	d_free_strvec(&srq1);
-	d_free_strmat(&sQqN);
-	d_free_strvec(&sqN);
-	d_free_strmat(&hsL[0]);
-//	d_free_strmat(&hsLxt[0]);
-	d_free_strvec(&hsPb[0]);
-	d_free_strvec(&hsux[0]);
-	d_free_strvec(&hspi[0]);
-	for(ii=1; ii<N; ii++)
-		{
-		d_free_strmat(&hsL[ii]);
-		d_free_strmat(&hsLxt[ii]);
-		d_free_strvec(&hsPb[ii]);
-		d_free_strvec(&hsux[ii]);
-		d_free_strvec(&hspi[ii]);
-		}
-	d_free_strmat(&hsL[N]);
-	d_free_strmat(&hsLxt[N]);
-	d_free_strvec(&hsux[N]);
-	d_free_strmat(&hswork_mat[0]);
-	d_free_strvec(&hswork_vec[0]);
-
-
-/************************************************
-* return
-************************************************/	
-
-	return 0;
-
-	}
-
-
-
diff --git a/third_party/blasfeo/examples/tools.c b/third_party/blasfeo/examples/tools.c
deleted file mode 100644
index 51d9e95..0000000
--- a/third_party/blasfeo/examples/tools.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of HPMPC.                                                                     *
-*                                                                                                 *
-* HPMPC -- Library for High-Performance implementation of solvers for MPC.                        *
-* Copyright (C) 2014-2015 by Technical University of Denmark. All rights reserved.                *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-
-//#include "../include/aux_d.h"
-
-//void dgemm_(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *A, int *lda, double *B, int *ldb, double *beta, double *C, int *ldc);
-//void dgesv_(int *n, int *nrhs, double *A, int *lda, int *ipiv, double *B, int *ldb, int *info);
-//void dcopy_(int *n, double *dx, int *incx, double *dy, int *incy);
-//void daxpy_(int *n, double *da, double *dx, int *incx, double *dy, int *incy);
-//void dscal_(int *n, double *da, double *dx, int *incx);
-
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-
-
-
-/************************************************
- matrix-matrix multiplication
-************************************************/
-void dgemm_nn_3l(int m, int n, int k, double *A, int lda , double *B, int ldb, double *C, int ldc)
-	{
-	
-	int ii, jj, kk;
-	
-	for(jj=0; jj<n; jj++)
-		{
-		for(ii=0; ii<m; ii++)
-			{
-			C[ii+ldc*jj] = 0;
-			for(kk=0; kk<k; kk++)
-				{
-				C[ii+ldc*jj] += A[ii+lda*kk] * B[kk+ldb*jj];
-				}
-			}
-		}
-	
-	return;
-	
-	}
-
-
-void daxpy_3l(int n, double da, double *dx, double *dy)
-	{
-	int i;
-	for(i=0; i<n; i++)
-		{
-		dy[i] += da*dx[i];
-		}
-	}
-
-
-
-void dscal_3l(int n, double da, double *dx)
-	{
-	int i;
-	for(i=0; i<n; i++)
-		{
-		dx[i] *= da;
-		}
-	}
-
-
-
-/************************************************
- Routine that copies a matrix 
-************************************************/
-void dmcopy(int row, int col, double *A, int lda, double *B, int ldb)
-	{
-	int i, j;
-	for(j=0; j<col; j++)
-		{
-		for(i=0; i<row; i++)
-			{
-			B[i+j*ldb] = A[i+j*lda];
-			}
-		}
-	}
-
-
-
-int idamax_3l(int n, double *x)
-	{
-	
-	if(n<=0)
-		return 0;
-	if(n==1)
-		return 0;	
-
-	double dabs;
-	double dmax = (x[0]>0 ? x[0] : -x[0]);
-	int idmax = 0;
-	int jj;
-	for(jj=1; jj<n; jj++)
-		{
-		dabs = (x[jj]>0 ? x[jj] : -x[jj]);
-		if(dabs>dmax)
-			{
-			dmax = dabs;
-			idmax = jj;
-			}
-		}
-	
-	return idmax;
-
-	}
-
-
-
-void dswap_3l(int n, double *x, int incx, double *y, int incy)
-	{
-	
-	if(n<=0)
-		return;
-	
-	double temp;
-	int jj;
-	for(jj=0; jj<n; jj++)
-		{
-		temp = x[0];
-		x[0] = y[0];
-		y[0] = temp;
-		x += incx;
-		y += incy;
-		}
-	
-	}
-
-
-
-void dger_3l(int m, int n, double alpha, double *x, int incx, double *y, int incy, double *A, int lda)
-	{
-	
-	if(m==0 || n==0 || alpha==0.0)
-		return;
-	
-	int i, j;
-	double *px, *py, temp;
-	
-	py = y;
-	for(j=0; j<n; j++)
-		{
-		temp = alpha * py[0];
-		px = x;
-		for(i=0; i<m; i++)
-			{
-			A[i+lda*j] += px[0] * temp;
-			px += incx;
-			}
-		py += incy;
-		}
-	
-	return;
-	
-	}
-
-
-
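-// unblocked LU factorization with partial (row) pivoting, in the style of LAPACK dgetf2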
-void dgetf2_3l(int m, int n, double *A, int lda, int *ipiv, int *info)
-	{
-	
-	if(m<=0 || n<=0)
-		return;
-	
-	int i, j, jp;
-	
-	double Ajj;
-	
-	int size_min = ( m<n ? m : n );
-	
-	for(j=0; j<size_min; j++)
-		// find the pivot and test for singularity
-		{
-		jp = j + idamax_3l(m-j, &A[j+lda*j]);
-		ipiv[j] = jp;
-		if( A[jp+lda*j]!=0)
-			{
-			// apply the interchange to columns 0:n-1
-			if(jp!=j)
-				{
-				dswap_3l(n, &A[j], lda, &A[jp], lda);
-				}
-			// compute elements j+1:m-1 of j-th column
-			if(j<m-1)
-				{
-				Ajj = A[j+lda*j];
-				if( ( Ajj>0 ? Ajj : -Ajj ) >= 2.22e-16 )
-					{
-					dscal_3l(m-j-1, 1.0/Ajj, &A[j+1+lda*j]);
-					}
-				else
-					{
-					for(i=j+1; i<m; i++)
-						{
-						A[i+lda*j] /= Ajj;
-						}
-					}
-				}
-			}
-		else if(*info==0)
-			{
-			*info = j+1;
-			}
-		
-		if( j < size_min )
-			{
-			// update trailing submatrix
-			dger_3l(m-j-1, n-j-1, -1.0, &A[j+1+lda*j], 1, &A[j+lda*(j+1)], lda, &A[j+1+lda*(j+1)], lda);
-			}
-		
-		}
-
-	return;	
-	
-	}
-
-
-
-void dlaswp_3l(int n, double *A, int lda, int k1, int k2, int *ipiv)
-	{
-	
-	int i, j, k, ix, ix0, i1, i2, n32, ip;
-	double temp;
-
-	ix0 = k1;
-	i1 = k1;
-	i2 = k2;
-	
-	n32 = (n/32)*32;
-	if(n32!=0)
-		{
-		for(j=0; j<n32; j+=32)
-			{
-			ix = ix0;
-			for(i=i1; i<i2; i++)
-				{
-				ip = ipiv[ix];
-				if(ip!=i)
-					{
-					for(k=j; k<j+32; k++)
-						{
-						temp = A[i+lda*k];
-						A[i+lda*k] = A[ip+lda*k];
-						A[ip+lda*k] = temp;
-						}
-					}
-				ix++;
-				}
-			}
-		}
-	if(n32!=n)
-		{
-		ix = ix0;
-		for(i=i1; i<i2; i++)
-			{
-			ip = ipiv[ix];
-			if(ip!=i)
-				{
-				for(k=n32; k<n; k++)
-					{
-					temp = A[i+lda*k];
-					A[i+lda*k] = A[ip+lda*k];
-					A[ip+lda*k] = temp;
-					}
-				}
-			ix++;
-			}
-		}
-
-	return;
-	
-	}
-
-
-
-// left lower no-transp unit
-void dtrsm_l_l_n_u_3l(int m, int n, double *A, int lda, double *B, int ldb)
-	{
-	
-	if(m==0 || n==0)
-		return;
-	
-	int i, j, k;
-	
-	for(j=0; j<n; j++)
-		{
-		for(k=0; k<m; k++)
-			{
-			for(i=k+1; i<m; i++)
-				{
-				B[i+ldb*j] -= B[k+ldb*j] * A[i+lda*k];
-				}
-			}
-		}
-	
-	return;
-	
-	}
-
-
-
-// left upper no-transp non-unit
-void dtrsm_l_u_n_n_3l(int m, int n, double *A, int lda, double *B, int ldb)
-	{
-	
-	if(m==0 || n==0)
-		return;
-	
-	int i, j, k;
-	
-	for(j=0; j<n; j++)
-		{
-		for(k=m-1; k>=0; k--)
-			{
-			B[k+ldb*j] /= A[k+lda*k];
-			for(i=0; i<k; i++)
-				{
-				B[i+ldb*j] -= B[k+ldb*j] * A[i+lda*k];
-				}
-			}
-		}
-
-	return;
-	
-	}
-
-
-
-void dgetrs_3l(int n, int nrhs, double *A, int lda, int *ipiv, double *B, int ldb, int *info)
-	{
-	
-	if(n==0 || nrhs==0)
-		return;
-	
-	// solve A * X = B
-
-	// apply row interchanges to the rhs
-	dlaswp_3l(nrhs, B, ldb, 0, n, ipiv);
-
-	// solve L*X = B, overwriting B with X
-	dtrsm_l_l_n_u_3l(n, nrhs, A, lda, B, ldb);
-
-	// solve U*X = B, overwriting B with X
-	dtrsm_l_u_n_n_3l(n, nrhs, A, lda, B, ldb);
-
-	return;
-	  	
-	}
-
-
-
-void dgesv_3l(int n, int nrhs, double *A, int lda, int *ipiv, double *B, int ldb, int *info)
-	{
-	
-	// compute the LU factorization of A
-	dgetf2_3l(n, n, A, lda, ipiv, info);
-	
-	if(*info==0)
-		{
-		// solve the system A*X = B, overwriting B with X
-		dgetrs_3l(n, nrhs, A, lda, ipiv, B, ldb, info);
-		}
-
-	return;
-	
-	}
-
-
-
-/* one norm of a matrix */
-double onenorm(int row, int col, double *ptrA)
-	{
-	double max, temp;
-	int i, j;
-	max = 0;
-	for(j=0; j<col; j++)
-		{
-		temp = fabs(*(ptrA+j*row));
-		for(i=1; i<row; i++)
-			{
-			temp += fabs(*(ptrA+j*row+i));
-			}
-		// keep the largest column sum
-		if(temp>max) max = temp;
-		}
-	return max;
-	}
-
-
-
-/* computes the Pade approximation of degree m of the matrix A */
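-/* A is overwritten with r_m(A) = (V - U)^{-1} * (V + U), where U and V collect the odd and even terms of the degree-m Pade numerator */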
-void padeapprox(int m, int row, double *A)
-	{
-	int ii;
-	int row2 = row*row;
-/*	int i1 = 1;*/
-/*	double d0 = 0;*/
-/*	double d1 = 1;*/
-/*	double dm1 = -1;*/
-	
-	double *U = (double *) malloc(row*row*sizeof(double)); // d_zeros(&U, row, row); 
-	double *V = (double *) malloc(row*row*sizeof(double)); // d_zeros(&V, row, row);
-	
-	if(m==3)
-		{
-		double c[] = {120, 60, 12, 1};
-		double *A0 = (double *) malloc(row*row*sizeof(double)); // d_eye(&A0, row);
-		for(ii=0; ii<row*row; ii++)
-			A0[ii] = 0.0;
-		for(ii=0; ii<row; ii++)
-			A0[ii*(row+1)] = 1.0;
-		double *A2 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *temp = malloc(row*row*sizeof(double)); // d_zeros(&temp, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 0;
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, A2, &row);
-		dgemm_nn_3l(row, row, row, A, row, A, row, A2, row);
-//		dscal_(&row2, &d0, temp, &i1);
-		dscal_3l(row2, 0, temp);
-//		daxpy_(&row2, &c[3], A2, &i1, temp, &i1);
-		daxpy_3l(row2, c[3], A2, temp);
-//		daxpy_(&row2, &c[1], A0, &i1, temp, &i1);
-		daxpy_3l(row2, c[1], A0, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, temp, &row, &beta, U, &row);
-		dgemm_nn_3l(row, row, row, A, row, temp, row, U, row);
-//		dscal_(&row2, &d0, V, &i1);
-		dscal_3l(row2, 0, V);
-//		daxpy_(&row2, &c[2], A2, &i1, V, &i1);
-		daxpy_3l(row2, c[2], A2, V);
-//		daxpy_(&row2, &c[0], A0, &i1, V, &i1);
-		daxpy_3l(row2, c[0], A0, V);
-		free(A0);
-		free(A2);
-		free(temp);
-		}
-	else if(m==5)
-		{
-		double c[] = {30240, 15120, 3360, 420, 30, 1};
-		double *A0 = (double *) malloc(row*row*sizeof(double)); // d_eye(&A0, row);
-		for(ii=0; ii<row*row; ii++)
-			A0[ii] = 0.0;
-		for(ii=0; ii<row; ii++)
-			A0[ii*(row+1)] = 1.0;
-		double *A2 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A4 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *temp = malloc(row*row*sizeof(double)); // d_zeros(&temp, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 0;
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, A2, &row);
-		dgemm_nn_3l(row, row, row, A, row, A, row, A2, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A2, &row, A2, &row, &beta, A4, &row);
-		dgemm_nn_3l(row, row, row, A2, row, A2, row, A4, row);
-		dmcopy(row, row, A4, row, V, row);
-		dmcopy(row, row, A4, row, temp, row);
-//		daxpy_(&row2, &c[3], A2, &i1, temp, &i1);
-		daxpy_3l(row2, c[3], A2, temp);
-//		daxpy_(&row2, &c[1], A0, &i1, temp, &i1);
-		daxpy_3l(row2, c[1], A0, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, temp, &row, &beta, U, &row);
-		dgemm_nn_3l(row, row, row, A, row, temp, row, U, row);
-//		dscal_(&row2, &c[4], V, &i1);
-		dscal_3l(row2, c[4], V);
-//		daxpy_(&row2, &c[2], A2, &i1, V, &i1);
-		daxpy_3l(row2, c[2], A2, V);
-//		daxpy_(&row2, &c[0], A0, &i1, V, &i1);
-		daxpy_3l(row2, c[0], A0, V);
-		free(A0);
-		free(A2);
-		free(A4);
-		free(temp);
-		}
-	else if(m==7)
-		{
-		double c[] = {17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1};
-		double *A0 = (double *) malloc(row*row*sizeof(double)); // d_eye(&A0, row);
-		for(ii=0; ii<row*row; ii++)
-			A0[ii] = 0.0;
-		for(ii=0; ii<row; ii++)
-			A0[ii*(row+1)] = 1.0;
-		double *A2 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A4 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A6 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *temp = malloc(row*row*sizeof(double)); // d_zeros(&temp, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 1;
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, A2, &row);
-		dgemm_nn_3l(row, row, row, A, row, A, row, A2, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A2, &row, A2, &row, &beta, A4, &row);
-		dgemm_nn_3l(row, row, row, A2, row, A2, row, A4, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A4, &row, A2, &row, &beta, A6, &row);
-		dgemm_nn_3l(row, row, row, A4, row, A2, row, A6, row);
-//		dscal_(&row2, &d0, temp, &i1);
-		dscal_3l(row2, 0, temp);
-//		daxpy_(&row2, &c[3], A2, &i1, temp, &i1);
-		daxpy_3l(row2, c[3], A2, temp);
-//		daxpy_(&row2, &c[1], A0, &i1, temp, &i1);
-		daxpy_3l(row2, c[1], A0, temp);
-//		daxpy_(&row2, &c[5], A4, &i1, temp, &i1);
-		daxpy_3l(row2, c[5], A4, temp);
-//		daxpy_(&row2, &c[7], A6, &i1, temp, &i1);
-		daxpy_3l(row2, c[7], A6, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, temp, &row, &beta, U, &row);
-		dgemm_nn_3l(row, row, row, A, row, temp, row, U, row);
-//		dscal_(&row2, &d0, V, &i1);
-		dscal_3l(row2, 0, V);
-//		daxpy_(&row2, &c[2], A2, &i1, V, &i1);
-		daxpy_3l(row2, c[2], A2, V);
-//		daxpy_(&row2, &c[0], A0, &i1, V, &i1);
-		daxpy_3l(row2, c[0], A0, V);
-//		daxpy_(&row2, &c[4], A4, &i1, V, &i1);
-		daxpy_3l(row2, c[4], A4, V);
-//		daxpy_(&row2, &c[6], A6, &i1, V, &i1);
-		daxpy_3l(row2, c[6], A6, V);
-		free(A0);
-		free(A2);
-		free(A4);
-		free(A6);
-		free(temp);
-		}
-	else if(m==9)
-		{
-		double c[] = {17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1};		
-		double *A0 = (double *) malloc(row*row*sizeof(double)); // d_eye(&A0, row);
-		for(ii=0; ii<row*row; ii++)
-			A0[ii] = 0.0;
-		for(ii=0; ii<row; ii++)
-			A0[ii*(row+1)] = 1.0;
-		double *A2 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A4 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A6 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A8 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *temp = malloc(row*row*sizeof(double)); // d_zeros(&temp, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 0;
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, A2, &row);
-		dgemm_nn_3l(row, row, row, A, row, A, row, A2, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A2, &row, A2, &row, &beta, A4, &row);
-		dgemm_nn_3l(row, row, row, A2, row, A2, row, A4, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A4, &row, A2, &row, &beta, A6, &row);
-		dgemm_nn_3l(row, row, row, A4, row, A2, row, A6, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A6, &row, A2, &row, &beta, A8, &row);
-		dgemm_nn_3l(row, row, row, A6, row, A2, row, A8, row);
-		dmcopy(row, row, A8, row, V, row);
-		dmcopy(row, row, A8, row, temp, row);
-//		daxpy_(&row2, &c[3], A2, &i1, temp, &i1);
-		daxpy_3l(row2, c[3], A2, temp);
-//		daxpy_(&row2, &c[1], A0, &i1, temp, &i1);
-		daxpy_3l(row2, c[1], A0, temp);
-//		daxpy_(&row2, &c[5], A4, &i1, temp, &i1);
-		daxpy_3l(row2, c[5], A4, temp);
-//		daxpy_(&row2, &c[7], A6, &i1, temp, &i1);
-		daxpy_3l(row2, c[7], A6, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, temp, &row, &beta, U, &row);
-		dgemm_nn_3l(row, row, row, A, row, temp, row, U, row);
-//		dscal_(&row2, &c[8], V, &i1);
-		dscal_3l(row2, c[8], V);
-//		daxpy_(&row2, &c[2], A2, &i1, V, &i1);
-		daxpy_3l(row2, c[2], A2, V);
-//		daxpy_(&row2, &c[0], A0, &i1, V, &i1);
-		daxpy_3l(row2, c[0], A0, V);
-//		daxpy_(&row2, &c[4], A4, &i1, V, &i1);
-		daxpy_3l(row2, c[4], A4, V);
-//		daxpy_(&row2, &c[6], A6, &i1, V, &i1);
-		daxpy_3l(row2, c[6], A6, V);
-		free(A0);
-		free(A2);
-		free(A4);
-		free(A6);
-		free(A8);
-		free(temp);
-		}
-	else if(m==13) // tested
-		{
-		double c[] = {64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1};
-		double *A0 = (double *) malloc(row*row*sizeof(double)); // d_eye(&A0, row);
-		for(ii=0; ii<row*row; ii++)
-			A0[ii] = 0.0;
-		for(ii=0; ii<row; ii++)
-			A0[ii*(row+1)] = 1.0;
-		double *A2 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A4 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *A6 = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-		double *temp = malloc(row*row*sizeof(double)); // d_zeros(&temp, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 0;
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, A2, &row);
-		dgemm_nn_3l(row, row, row, A, row, A, row, A2, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A2, &row, A2, &row, &beta, A4, &row);
-		dgemm_nn_3l(row, row, row, A2, row, A2, row, A4, row);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A4, &row, A2, &row, &beta, A6, &row);
-		dgemm_nn_3l(row, row, row, A4, row, A2, row, A6, row);
-		dmcopy(row, row, A2, row, U, row);
-//		dscal_(&row2, &c[9], U, &i1);
-		dscal_3l(row2, c[9], U);
-//		daxpy_(&row2, &c[11], A4, &i1, U, &i1);
-		daxpy_3l(row2, c[11], A4, U);
-//		daxpy_(&row2, &c[13], A6, &i1, U, &i1);
-		daxpy_3l(row2, c[13], A6, U);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A6, &row, U, &row, &beta, temp, &row);
-		dgemm_nn_3l(row, row, row, A6, row, U, row, temp, row);
-//		daxpy_(&row2, &c[7], A6, &i1, temp, &i1);
-		daxpy_3l(row2, c[7], A6, temp);
-//		daxpy_(&row2, &c[5], A4, &i1, temp, &i1);
-		daxpy_3l(row2, c[5], A4, temp);
-//		daxpy_(&row2, &c[3], A2, &i1, temp, &i1);
-		daxpy_3l(row2, c[3], A2, temp);
-//		daxpy_(&row2, &c[1], A0, &i1, temp, &i1);
-		daxpy_3l(row2, c[1], A0, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, temp, &row, &beta, U, &row);
-		dgemm_nn_3l(row, row, row, A, row, temp, row, U, row);
-		dmcopy(row, row, A2, row, temp, row);
-//		dscal_(&row2, &c[8], V, &i1);
-		dscal_3l(row2, c[8], V);
-//		daxpy_(&row2, &c[12], A6, &i1, temp, &i1);
-		daxpy_3l(row2, c[12], A6, temp);
-//		daxpy_(&row2, &c[10], A4, &i1, temp, &i1);
-		daxpy_3l(row2, c[10], A4, temp);
-//		dgemm_(&ta, &ta, &row, &row, &row, &alpha, A6, &row, temp, &row, &beta, V, &row);
-		dgemm_nn_3l(row, row, row, A6, row, temp, row, V, row);
-//		daxpy_(&row2, &c[6], A6, &i1, V, &i1);
-		daxpy_3l(row2, c[6], A6, V);
-//		daxpy_(&row2, &c[4], A4, &i1, V, &i1);
-		daxpy_3l(row2, c[4], A4, V);
-//		daxpy_(&row2, &c[2], A2, &i1, V, &i1);
-		daxpy_3l(row2, c[2], A2, V);
-//		daxpy_(&row2, &c[0], A0, &i1, V, &i1);
-		daxpy_3l(row2, c[0], A0, V);
-		free(A0);
-		free(A2);
-		free(A4);
-		free(A6);
-		free(temp);
-		}
-	else
-		{
-		printf("%s\n", "Wrong Pade approximation degree");
-		exit(1);
-		}
-	double *D = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-//	dcopy_(&row2, V, &i1, A, &i1);
-	dmcopy(row, row, V, row, A, row);
-//	daxpy_(&row2, &d1, U, &i1, A, &i1);
-	daxpy_3l(row2, 1.0, U, A);
-//	dcopy_(&row2, V, &i1, D, &i1);
-	dmcopy(row, row, V, row, D, row);
-//	daxpy_(&row2, &dm1, U, &i1, D, &i1);
-	daxpy_3l(row2, -1.0, U, D);
-	int *ipiv = (int *) malloc(row*sizeof(int));
-	int info = 0;
-//	dgesv_(&row, &row, D, &row, ipiv, A, &row, &info);
-	dgesv_3l(row, row, D, row, ipiv, A, row, &info);
-	free(ipiv);
-	free(D);
-	free(U);
-	free(V);
-	}	
-
-
-
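-// matrix exponential by scaling and squaring: the Pade degree is chosen from the
-// one-norm of A; if the norm exceeds theta[4], A is scaled by 2^-s, the degree-13
-// Pade approximant is applied, and the result is squared s times.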
-void expm(int row, double *A)
-	{
-	
-	int i;
-	
-	int m_vals[] = {3, 5, 7, 9, 13};
-	double theta[] = {0.01495585217958292, 0.2539398330063230, 0.9504178996162932, 2.097847961257068, 5.371920351148152};
-	int lentheta = 5;
-	
-	double normA = onenorm(row, row, A);
-
-	if(normA<=theta[4])
-		{
-		for(i=0; i<lentheta; i++)
-			{
-			if(normA<=theta[i])
-				{
-				padeapprox(m_vals[i], row, A);
-				break;
-				}
-			}
-		}
-	else
-		{
-		int s;
-		double t = frexp(normA/(theta[4]), &s);
-		s = s - (t==0.5);
-		t = pow(2,-s);
-		int row2 = row*row;
-/*		int i1 = 1;*/
-//		dscal_(&row2, &t, A, &i1);
-		dscal_3l(row2, t, A);
-		padeapprox(m_vals[4], row, A);
-		double *temp = (double *) malloc(row*row*sizeof(double)); // d_zeros(&A2, row, row);
-//		char ta = 'n'; double alpha = 1; double beta = 0;
-		for(i=0; i<s; i++)
-			{
-//			dgemm_(&ta, &ta, &row, &row, &row, &alpha, A, &row, A, &row, &beta, temp, &row);
-			dgemm_nn_3l(row, row, row, A, row, A, row, temp, row);
-			dmcopy(row, row, temp, row, A, row);
-			}
-		free(temp);
-		}
-	}
-
-
diff --git a/third_party/blasfeo/examples/tools.h b/third_party/blasfeo/examples/tools.h
deleted file mode 100644
index b017301..0000000
--- a/third_party/blasfeo/examples/tools.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of HPMPC.                                                                     *
-*                                                                                                 *
-* HPMPC -- Library for High-Performance implementation of solvers for MPC.                        *
-* Copyright (C) 2014-2015 by Technical University of Denmark. All rights reserved.                *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                                                                                                 *
-**************************************************************************************************/
-
-void dgemm_nn_3l(int m, int n, int k, double *A, int lda , double *B, int ldb, double *C, int ldc);
-void daxpy_3l(int n, double da, double *dx, double *dy);
-void dscal_3l(int n, double da, double *dx);
-
-/* copies a matrix into another matrix */
-void dmcopy(int row, int col, double *ptrA, int lda, double *ptrB, int ldb);
-
-/* solution of a system of linear equations */
-void dgesv_3l(int n, int nrhs, double *A, int lda, int *ipiv, double *B, int ldb, int *info);
-
-/* matrix exponential */
-void expm(int row, double *A);
diff --git a/third_party/blasfeo/include/blasfeo_block_size.h b/third_party/blasfeo/include/blasfeo_block_size.h
deleted file mode 100644
index 9b74139..0000000
--- a/third_party/blasfeo/include/blasfeo_block_size.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifndef BLASFEO_BLOCK_SIZE
-#define BLASFEO_BLOCK_SIZE
-
-
-
-#if defined( TARGET_X64_INTEL_HASWELL )
-
-#define D_PS 4
-#define S_PS 8
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#elif defined( TARGET_X64_INTEL_SANDY_BRIDGE )
-
-#define D_PS 4
-#define S_PS 8
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#elif defined( TARGET_X64_INTEL_CORE )
-
-#define D_PS 4
-#define S_PS 4
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#elif defined( TARGET_X64_AMD_BULLDOZER )
-
-#define D_PS 4
-#define S_PS 4
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#elif defined( TARGET_ARMV8A_ARM_CORTEX_A57 )
-
-#define D_PS 4
-#define S_PS 4
-#define D_NC 4
-#define S_NC 4
-
-#elif defined( TARGET_ARMV7A_ARM_CORTEX_A15 )
-
-#define D_PS 4
-#define S_PS 4
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#elif defined( TARGET_GENERIC )
-
-#define D_PS 4
-#define S_PS 4
-#define D_NC 4 // 2 // until the smaller kernel is 4x4
-#define S_NC 4 //2
-
-#else
-#error "Unknown architecture"
-#endif
-
-
-#endif  // BLASFEO_BLOCK_SIZE
diff --git a/third_party/blasfeo/include/blasfeo_common.h b/third_party/blasfeo/include/blasfeo_common.h
deleted file mode 100644
index 3f95c91..0000000
--- a/third_party/blasfeo/include/blasfeo_common.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-#ifndef BLASFEO_COMMON
-#define BLASFEO_COMMON
-
-
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-#include "blasfeo_block_size.h"
-
-// matrix structure
-struct d_strmat
-	{
-	int m; // rows
-	int n; // cols
-	int pm; // packed number of rows
-	int cn; // packed number of cols
-	double *pA; // pointer to a pm*cn array of doubles, the first is aligned to cache line size
-	double *dA; // pointer to a min(m,n) (or max???) array of doubles
-	int use_dA; // flag to tell if dA can be used
-	int memory_size; // size of needed memory
-	};
-
-struct s_strmat
-	{
-	int m; // rows
-	int n; // cols
-	int pm; // packed number of rows
-	int cn; // packed number of cols
-	float *pA; // pointer to a pm*cn array of floats, the first is aligned to cache line size
-	float *dA; // pointer to a min(m,n) (or max???) array of floats
-	int use_dA; // flag to tell if dA can be used
-	int memory_size; // size of needed memory
-	};
-
-// vector structure
-struct d_strvec
-	{
-	int m; // size
-	int pm; // packed size
-	double *pa; // pointer to a pm array of doubles, the first is aligned to cache line size
-	int memory_size; // size of needed memory
-	};
-
-struct s_strvec
-	{
-	int m; // size
-	int pm; // packed size
-	float *pa; // pointer to a pm array of floats, the first is aligned to cache line size
-	int memory_size; // size of needed memory
-	};
-
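-// panel-major element access: rows are grouped in panels of D_PS (resp. S_PS) rows,
-// each panel stored column by column with leading dimension cn, so element (ai,aj)
-// lives in panel ai/PS at column offset aj*PS plus row ai%PS within the panel.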
-#define DMATEL_LIBSTR(sA,ai,aj) ((sA)->pA[((ai)-((ai)&(D_PS-1)))*(sA)->cn+(aj)*D_PS+((ai)&(D_PS-1))])
-#define SMATEL_LIBSTR(sA,ai,aj) ((sA)->pA[((ai)-((ai)&(S_PS-1)))*(sA)->cn+(aj)*S_PS+((ai)&(S_PS-1))])
-#define DVECEL_LIBSTR(sa,ai) ((sa)->pa[ai])
-#define SVECEL_LIBSTR(sa,ai) ((sa)->pa[ai])
-
-#elif defined(LA_BLAS) | defined(LA_REFERENCE)
-
-// matrix structure
-struct d_strmat
-	{
-	int m; // rows
-	int n; // cols
-	double *pA; // pointer to a m*n array of doubles
-	double *dA; // pointer to a min(m,n) (or max???) array of doubles
-	int use_dA; // flag to tell if dA can be used
-	int memory_size; // size of needed memory
-	};
-
-struct s_strmat
-	{
-	int m; // rows
-	int n; // cols
-	float *pA; // pointer to a m*n array of floats
-	float *dA; // pointer to a min(m,n) (or max???) array of floats
-	int use_dA; // flag to tell if dA can be used
-	int memory_size; // size of needed memory
-	};
-
-// vector structure
-struct d_strvec
-	{
-	int m; // size
-	double *pa; // pointer to a m array of doubles, the first is aligned to cache line size
-	int memory_size; // size of needed memory
-	};
-
-struct s_strvec
-	{
-	int m; // size
-	float *pa; // pointer to a m array of floats, the first is aligned to cache line size
-	int memory_size; // size of needed memory
-	};
-
-#define DMATEL_LIBSTR(sA,ai,aj) ((sA)->pA[(ai)+(aj)*(sA)->m])
-#define SMATEL_LIBSTR(sA,ai,aj) ((sA)->pA[(ai)+(aj)*(sA)->m])
-#define DVECEL_LIBSTR(sa,ai) ((sa)->pa[ai])
-#define SVECEL_LIBSTR(sa,ai) ((sa)->pa[ai])
-
-#else
-
-#error : wrong LA choice
-
-#endif
-
-#endif  // BLASFEO_COMMON
-
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/third_party/blasfeo/include/blasfeo_d_aux.h b/third_party/blasfeo/include/blasfeo_d_aux.h
deleted file mode 100644
index c4f71ee..0000000
--- a/third_party/blasfeo/include/blasfeo_d_aux.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/************************************************
-* d_aux_lib.c
-************************************************/
-
-// returns the memory size (in bytes) needed for a strmat
-int d_size_strmat(int m, int n);
-// returns the memory size (in bytes) needed for the diagonal of a strmat
-int d_size_diag_strmat(int m, int n);
-// returns the memory size (in bytes) needed for a strvec
-int d_size_strvec(int m);
-// create a strmat for a matrix of size m*n by using memory passed by a pointer (pointer is not updated)
-void d_create_strmat(int m, int n, struct d_strmat *sA, void *memory);
-// create a strvec for a vector of size m by using memory passed by a pointer (pointer is not updated)
-void d_create_strvec(int m, struct d_strvec *sA, void *memory);
-void d_cvt_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj);
-void d_cvt_vec2strvec(int m, double *a, struct d_strvec *sa, int ai);
-void d_cvt_tran_mat2strmat(int m, int n, double *A, int lda, struct d_strmat *sA, int ai, int aj);
-void d_cvt_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda);
-void d_cvt_strvec2vec(int m, struct d_strvec *sa, int ai, double *a);
-void d_cvt_tran_strmat2mat(int m, int n, struct d_strmat *sA, int ai, int aj, double *A, int lda);
-void d_cast_mat2strmat(double *A, struct d_strmat *sA);
-void d_cast_diag_mat2strmat(double *dA, struct d_strmat *sA);
-void d_cast_vec2vecmat(double *a, struct d_strvec *sa);
-void dgein1_libstr(double a, struct d_strmat *sA, int ai, int aj);
-double dgeex1_libstr(struct d_strmat *sA, int ai, int aj);
-void dvecin1_libstr(double a, struct d_strvec *sx, int xi);
-double dvecex1_libstr(struct d_strvec *sx, int xi);
-// A <= alpha
-void dgese_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj);
-// a <= alpha
-void dvecse_libstr(int m, double alpha, struct d_strvec *sx, int xi);
-void dgecp_lib(int m, int n, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb);
-void dgecp_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dgesc_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj);
-void dveccp_libstr(int m, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci);
-void dvecsc_libstr(int m, double alpha, struct d_strvec *sa, int ai);
-void dtrcp_l_lib(int m, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb);
-void dtrcp_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dgead_lib(int m, int n, double alpha, int offsetA, double *A, int sda, int offsetB, double *B, int sdb);
-void dgead_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dvecad_libstr(int m, double alpha, struct d_strvec *sa, int ai, struct d_strvec *sc, int ci);
-void dgetr_lib(int m, int n, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc);
-void dgetr_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dtrtr_l_lib(int m, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc);
-void dtrtr_l_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dtrtr_u_lib(int m, double alpha, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc);
-void dtrtr_u_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void ddiareg_lib(int kmax, double reg, int offset, double *pD, int sdd);
-void ddiare_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj);
-void ddiain_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj);
-void ddiain_sqrt_lib(int kmax, double *x, int offset, double *pD, int sdd);
-void ddiaex_lib(int kmax, double alpha, int offset, double *pD, int sdd, double *x);
-void ddiaad_lib(int kmax, double alpha, double *x, int offset, double *pD, int sdd);
-void ddiain_libsp(int kmax, int *idx, double alpha, double *x, double *pD, int sdd);
-void ddiain_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj);
-void ddiaex_libsp(int kmax, int *idx, double alpha, double *pD, int sdd, double *x);
-void ddiaex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi);
-void ddiaex_sp_libstr(int kmax, double alpha, int *idx, struct d_strmat *sD, int di, int dj, struct d_strvec *sx, int xi);
-void ddiaad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj);
-void ddiaad_libsp(int kmax, int *idx, double alpha, double *x, double *pD, int sdd);
-void ddiaad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj);
-void ddiaadin_libsp(int kmax, int *idx, double alpha, double *x, double *y, double *pD, int sdd);
-void ddiaadin_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, int *idx, struct d_strmat *sD, int di, int dj);
-void drowin_lib(int kmax, double alpha, double *x, double *pD);
-void drowin_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj);
-void drowex_lib(int kmax, double alpha, double *pD, double *x);
-void drowex_libstr(int kmax, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi);
-void drowad_lib(int kmax, double alpha, double *x, double *pD);
-void drowad_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj);
-void drowin_libsp(int kmax, double alpha, int *idx, double *x, double *pD);
-void drowad_libsp(int kmax, int *idx, double alpha, double *x, double *pD);
-void drowad_sp_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strmat *sD, int di, int dj);
-void drowadin_libsp(int kmax, int *idx, double alpha, double *x, double *y, double *pD);
-void drowsw_lib(int kmax, double *pA, double *pC);
-void drowsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void drowpe_libstr(int kmax, int *ipiv, struct d_strmat *sA);
-void dcolex_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi);
-void dcolin_lib(int kmax, double *x, int offset, double *pD, int sdd);
-void dcolin_libstr(int kmax, struct d_strvec *sx, int xi, struct d_strmat *sA, int ai, int aj);
-void dcolad_lib(int kmax, double alpha, double *x, int offset, double *pD, int sdd);
-void dcolin_libsp(int kmax, int *idx, double *x, double *pD, int sdd);
-void dcolad_libsp(int kmax, double alpha, int *idx, double *x, double *pD, int sdd);
-void dcolsw_lib(int kmax, int offsetA, double *pA, int sda, int offsetC, double *pC, int sdc);
-void dcolsw_libstr(int kmax, struct d_strmat *sA, int ai, int aj, struct d_strmat *sC, int ci, int cj);
-void dcolpe_libstr(int kmax, int *ipiv, struct d_strmat *sA);
-void dvecin_libsp(int kmax, int *idx, double *x, double *y);
-void dvecad_libsp(int kmax, int *idx, double alpha, double *x, double *y);
-void dvecad_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi);
-void dvecin_sp_libstr(int m, double alpha, struct d_strvec *sx, int xi, int *idx, struct d_strvec *sz, int zi);
-void dvecex_sp_libstr(int m, double alpha, int *idx, struct d_strvec *sx, int x, struct d_strvec *sz, int zi);
-void dveccl_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi);
-void dveccl_mask_libstr(int m, struct d_strvec *sxm, int xim, struct d_strvec *sx, int xi, struct d_strvec *sxp, int xip, struct d_strvec *sz, int zi, struct d_strvec *sm, int mi);
-void dvecze_libstr(int m, struct d_strvec *sm, int mi, struct d_strvec *sv, int vi, struct d_strvec *se, int ei);
-void dvecnrm_inf_libstr(int m, struct d_strvec *sx, int xi, double *ptr_norm);
-
-
-
-#ifdef __cplusplus
-}
-#endif
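
The sizing/creation pair above (d_size_strmat, d_create_strmat) places a strmat in caller-owned memory, and the conversion routines move data between plain column-major arrays and the packed strmat format. A hedged sketch using only functions declared in this header (malloc/free chosen here for brevity):

#include <stdlib.h>

// Round-trip a column-major m x n matrix A (leading dimension lda) through a
// strmat backed by caller-provided memory, writing the result into B (ldb).
void example_strmat_roundtrip(int m, int n, double *A, int lda, double *B, int ldb)
	{
	void *mem = malloc(d_size_strmat(m, n));
	struct d_strmat sA;
	d_create_strmat(m, n, &sA, mem);           // 'mem' itself is not advanced
	d_cvt_mat2strmat(m, n, A, lda, &sA, 0, 0); // column-major -> strmat
	d_cvt_strmat2mat(m, n, &sA, 0, 0, B, ldb); // strmat -> column-major
	free(mem);
	}
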
diff --git a/third_party/blasfeo/include/blasfeo_d_aux_ext_dep.h b/third_party/blasfeo/include/blasfeo_d_aux_ext_dep.h
deleted file mode 100644
index 7b0222b..0000000
--- a/third_party/blasfeo/include/blasfeo_d_aux_ext_dep.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(EXT_DEP)
-
-
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/************************************************
-* d_aux_extern_depend_lib.c
-************************************************/
-
-/* column-major matrices */
-
-// dynamically allocate row*col doubles, set *pA to point to the new memory, and zero-initialize it
-void d_zeros(double **pA, int row, int col);
-// dynamically allocate row*col doubles aligned to a 64-byte boundary, set *pA to point to the new memory, and zero-initialize it
-void d_zeros_align(double **pA, int row, int col);
-// dynamically allocate size bytes aligned to a 64-byte boundary, set *pA to point to the new memory, and zero-initialize it
-void d_zeros_align_bytes(double **pA, int size);
-// free the memory allocated by d_zeros
-void d_free(double *pA);
-// free the memory allocated by d_zeros_align or d_zeros_align_bytes
-void d_free_align(double *pA);
-// print a column-major matrix
-void d_print_mat(int m, int n, double *A, int lda);
-// print the transpose of a column-major matrix
-void d_print_tran_mat(int row, int col, double *A, int lda);
-// print to file a column-major matrix
-void d_print_to_file_mat(FILE *file, int row, int col, double *A, int lda);
-// print to file the transpose of a column-major matrix
-void d_print_tran_to_file_mat(FILE *file, int row, int col, double *A, int lda);
-// print in exponential notation a column-major matrix
-void d_print_e_mat(int m, int n, double *A, int lda);
-// print in exponential notation the transpose of a column-major matrix
-void d_print_e_tran_mat(int row, int col, double *A, int lda);
-
-/* strmat and strvec */
-
-#ifdef BLASFEO_COMMON
-// create a strmat for a matrix of size m*n by dynamically allocating memory
-void d_allocate_strmat(int m, int n, struct d_strmat *sA);
-// create a strvec for a vector of size m by dynamically allocating memory
-void d_allocate_strvec(int m, struct d_strvec *sa);
-// free the memory allocated by d_allocate_strmat
-void d_free_strmat(struct d_strmat *sA);
-// free the memory allocated by d_allocate_strvec
-void d_free_strvec(struct d_strvec *sa);
-// print a strmat
-void d_print_strmat(int m, int n, struct d_strmat *sA, int ai, int aj);
-// print in exponential notation a strmat
-void d_print_e_strmat(int m, int n, struct d_strmat *sA, int ai, int aj);
-// print to file a strmat
-void d_print_to_file_strmat(FILE *file, int m, int n, struct d_strmat *sA, int ai, int aj);
-// print a strvec
-void d_print_strvec(int m, struct d_strvec *sa, int ai);
-// print in exponential notation a strvec
-void d_print_e_strvec(int m, struct d_strvec *sa, int ai);
-// print to file a strvec
-void d_print_to_file_strvec(FILE *file, int m, struct d_strvec *sa, int ai);
-// print the transpose of a strvec
-void d_print_tran_strvec(int m, struct d_strvec *sa, int ai);
-// print in exponential notation the transpose of a strvec
-void d_print_e_tran_strvec(int m, struct d_strvec *sa, int ai);
-// print to file the transpose of a strvec
-void d_print_tran_to_file_strvec(FILE *file, int m, struct d_strvec *sa, int ai);
-#endif
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-
-#endif // EXT_DEP
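
A short usage sketch of the EXT_DEP helpers above, combining them with the conversion routine from blasfeo_d_aux.h (assumes both EXT_DEP and BLASFEO_COMMON are defined):

void example_ext_dep(void)
	{
	double *A;
	d_zeros(&A, 4, 4);        // zero-initialized 4x4 column-major matrix
	for(int i=0; i<4; i++)
		A[i+4*i] = 1.0;       // identity
	d_print_mat(4, 4, A, 4);

	struct d_strmat sA;
	d_allocate_strmat(4, 4, &sA);
	d_cvt_mat2strmat(4, 4, A, 4, &sA, 0, 0); // declared in blasfeo_d_aux.h
	d_print_strmat(4, 4, &sA, 0, 0);

	d_free_strmat(&sA);
	d_free(A);
	}
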
diff --git a/third_party/blasfeo/include/blasfeo_d_blas.h b/third_party/blasfeo/include/blasfeo_d_blas.h
deleted file mode 100644
index a473322..0000000
--- a/third_party/blasfeo/include/blasfeo_d_blas.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-//
-// level 1 BLAS
-//
-
-// y = y + alpha*x
-void daxpy_libstr(int kmax, double alpha, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-// z = x .* y, return sum(z) = x^T * y
-double dvecmuldot_libstr(int m, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-// return x^T * y
-double ddot_libstr(int m, struct d_strvec *sx, int xi, struct d_strvec *sy, int yi);
-
-
-
-//
-// level 2 BLAS
-//
-
-// dense
-
-// z <= beta * y + alpha * A * x
-void dgemv_n_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-// z <= beta * y + alpha * A' * x
-void dgemv_t_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(n)
-void dtrsv_lnn_mn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(n)
-void dtrsv_ltn_mn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) lower, not_transposed, not_unit
-void dtrsv_lnn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) lower, not_transposed, unit
-void dtrsv_lnu_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) lower, transposed, not_unit
-void dtrsv_ltn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) lower, transposed, unit
-void dtrsv_ltu_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) upper, not_transposed, not_unit
-void dtrsv_unn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) upper, transposed, not_unit
-void dtrsv_utn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// z <= A * x ; A upper triangular
-void dtrmv_unn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// z <= A' * x ; A upper triangular
-void dtrmv_utn_libstr(int m, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// z <= A * x ; A lower triangular
-void dtrmv_lnn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// z <= A' * x ; A lower triangular
-void dtrmv_ltn_libstr(int m, int n, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, struct d_strvec *sz, int zi);
-// z_n <= beta_n * y_n + alpha_n * A  * x_n
-// z_t <= beta_t * y_t + alpha_t * A' * x_t
-void dgemv_nt_libstr(int m, int n, double alpha_n, double alpha_t, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx_n, int xi_n, struct d_strvec *sx_t, int xi_t, double beta_n, double beta_t, struct d_strvec *sy_n, int yi_n, struct d_strvec *sy_t, int yi_t, struct d_strvec *sz_n, int zi_n, struct d_strvec *sz_t, int zi_t);
-// z <= beta * y + alpha * A * x, where A is symmetric and only the lower triangular part of A is accessed
-void dsymv_l_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-
-// diagonal
-
-// z <= beta * y + alpha * A * x, A diagonal
-void dgemv_diag_libstr(int m, double alpha, struct d_strvec *sA, int ai, struct d_strvec *sx, int xi, double beta, struct d_strvec *sy, int yi, struct d_strvec *sz, int zi);
-
-
-
-//
-// level 3 BLAS
-//
-
-// dense
-
-// D <= beta * C + alpha * A * B^T
-void dgemm_nt_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= beta * C + alpha * A * B
-void dgemm_nn_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= beta * C + alpha * A * B^T ; C, D lower triangular
-void dsyrk_ln_libstr(int m, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-void dsyrk_ln_mn_libstr(int m, int n, int k, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * B * A^T ; B upper triangular
-void dtrmm_rutn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * B * A ; A lower triangular
-void dtrmm_rlnn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A lower triangular employing explicit inverse of diagonal
-void dtrsm_rltn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A lower triangular with unit diagonal
-void dtrsm_rltu_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A upper triangular employing explicit inverse of diagonal
-void dtrsm_rutn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * A^{-1} * B , with A lower triangular with unit diagonal
-void dtrsm_llnu_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * A^{-1} * B , with A upper triangular employing explicit inverse of diagonal
-void dtrsm_lunn_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sD, int di, int dj);
-
-// diagonal
-
-// D <= alpha * A * B + beta * C, with A diagonal (stored as strvec)
-void dgemm_diag_left_ib(int m, int n, double alpha, double *dA, double *pB, int sdb, double beta, double *pC, int sdc, double *pD, int sdd);
-void dgemm_l_diag_libstr(int m, int n, double alpha, struct d_strvec *sA, int ai, struct d_strmat *sB, int bi, int bj, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= alpha * A * B + beta * C, with B diagonal (stored as strvec)
-void dgemm_r_diag_libstr(int m, int n, double alpha, struct d_strmat *sA, int ai, int aj, struct d_strvec *sB, int bi, double beta, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-
-
-
-//
-// LAPACK
-//
-
-// D <= chol( C ) ; C, D lower triangular
-void dpotrf_l_libstr(int m, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-void dpotrf_l_mn_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= chol( C + A * B' ) ; C, D lower triangular
-void dsyrk_dpotrf_ln_libstr(int m, int n, int k, struct d_strmat *sA, int ai, int aj, struct d_strmat *sB, int bi, int bj, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= lu( C ) ; no pivoting
-void dgetrf_nopivot_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj);
-// D <= lu( C ) ; pivoting
-void dgetrf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, int *ipiv);
-// D <= qr( C )
-void dgeqrf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, void *work);
-int dgeqrf_work_size_libstr(int m, int n); // in bytes
-// D <= lq( C )
-void dgelqf_libstr(int m, int n, struct d_strmat *sC, int ci, int cj, struct d_strmat *sD, int di, int dj, void *work);
-int dgelqf_work_size_libstr(int m, int n); // in bytes
-
-
-
-#ifdef __cplusplus
-}
-#endif
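
To show how the strmat-level routines above compose, a hedged sketch of forming M = A*A^T, factorizing it, and doing one triangular solve; all calls are declared in this header, and the strmat/strvec arguments are assumed to be already created with compatible sizes:

// sA is m x k, sM and sL are m x m, sx and sy have length m.
void example_chol_solve(int m, int k,
		struct d_strmat *sA, struct d_strmat *sM, struct d_strmat *sL,
		struct d_strvec *sx, struct d_strvec *sy)
	{
	// M <= 1.0 * A * A^T + 0.0 * M  (lower triangle only)
	dsyrk_ln_libstr(m, k, 1.0, sA, 0, 0, sA, 0, 0, 0.0, sM, 0, 0, sM, 0, 0);
	// L <= chol( M ); M, L lower triangular
	dpotrf_l_libstr(m, sM, 0, 0, sL, 0, 0);
	// y <= inv( L ) * x  (lower, not transposed, not unit)
	dtrsv_lnn_libstr(m, sL, 0, 0, sx, 0, sy, 0);
	}
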
diff --git a/third_party/blasfeo/include/blasfeo_d_kernel.h b/third_party/blasfeo/include/blasfeo_d_kernel.h
deleted file mode 100644
index 6f045af..0000000
--- a/third_party/blasfeo/include/blasfeo_d_kernel.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// level 2 BLAS
-// 12
-void kernel_dgemv_n_12_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-void kernel_dgemv_t_12_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-// 8
-void kernel_dgemv_n_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-void kernel_dgemv_t_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-void kernel_dtrmv_un_8_lib4(int k, double *A, int sda, double *x, double *z);
-// 4
-void kernel_dgemv_n_4_lib4(int k, double *alpha, double *A, double *x, double *beta, double *y, double *z);
-void kernel_dgemv_n_4_vs_lib4(int k, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k1);
-void kernel_dgemv_n_4_gen_lib4(int kmax, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k0, int k1);
-void kernel_dgemv_t_4_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-void kernel_dgemv_t_4_vs_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z, int k1);
-void kernel_dgemv_t_4_gen_lib4(int k, double *alpha, int offA, double *A, int sda, double *x, double *beta, double *C, double *D, int km);
-void kernel_dtrsv_ln_inv_4_lib4(int k, double *A, double *inv_diag_A, double *x, double *y, double *z);
-void kernel_dtrsv_ln_inv_4_vs_lib4(int k, double *A, double *inv_diag_A, double *x, double *y, double *z, int km, int kn);
-void kernel_dtrsv_lt_inv_4_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-void kernel_dtrsv_lt_inv_3_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-void kernel_dtrsv_lt_inv_2_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-void kernel_dtrsv_lt_inv_1_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-void kernel_dtrmv_un_4_lib4(int k, double *A, double *x, double *z);
-void kernel_dtrmv_ut_4_lib4(int k, double *A, int sda, double *x, double *z);
-void kernel_dtrmv_ut_4_vs_lib4(int k, double *A, int sda, double *x, double *z, int km);
-void kernel_dgemv_nt_6_lib4(int kmax, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t);
-void kernel_dgemv_nt_4_lib4(int kmax, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t);
-void kernel_dgemv_nt_4_vs_lib4(int kmax, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t, int km);
-void kernel_dsymv_l_4_lib4(int kmax, double *alpha, double *A, int sda, double *x, double *z);
-void kernel_dsymv_l_4_gen_lib4(int kmax, double *alpha, int offA, double *A, int sda, double *x, double *z, int km);
-
-
-
-// level 3 BLAS
-// 12x4
-void kernel_dgemm_nt_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dgemm_nt_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dgemm_nn_12x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dgemm_nn_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dsyrk_nt_l_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dsyrk_nt_l_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dtrmm_nt_ru_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dtrmm_nt_ru_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dtrmm_nn_rl_12x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd);
-void kernel_dtrmm_nn_rl_12x4_vs_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd, int km, int kn);
-void kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nt_rl_inv_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_one_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E);
-void kernel_dtrsm_nt_rl_one_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, int km, int kn);
-void kernel_dtrsm_nt_ru_inv_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_ru_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ru_inv_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nn_ru_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ll_one_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde);
-void kernel_dtrsm_nn_ll_one_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, int km, int kn);
-void kernel_dtrsm_nn_lu_inv_12x4_lib4(int kmax, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-void kernel_dtrsm_nn_lu_inv_12x4_vs_lib4(int kmax, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-// 4x12
-void kernel_dgemm_nt_4x12_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D); //
-void kernel_dgemm_nt_4x12_vs_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D, int km, int kn); //
-void kernel_dgemm_nn_4x12_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D); //
-void kernel_dtrsm_nt_rl_inv_4x12_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sed, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_inv_4x12_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sed, double *inv_diag_E, int km, int kn);
-// 8x8
-void kernel_dgemm_nt_8x8l_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd); // computes [A00 *; A10 A11]
-void kernel_dgemm_nt_8x8u_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd); // computes [A00 *; A10 A11]
-void kernel_dgemm_nt_8x8l_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); // computes [A00 *; A10 A11]
-void kernel_dgemm_nt_8x8u_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); // computes [A00 *; A10 A11]
-void kernel_dsyrk_nt_l_8x8_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd); // computes [L00 *; A10 L11]
-void kernel_dsyrk_nt_l_8x8_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); // computes [L00 *; A10 L11]
-void kernel_dtrsm_nt_rl_inv_8x8l_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sed, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sed, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nt_rl_inv_8x8u_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sed, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sed, double *inv_diag_E, int km, int kn);
-// 8x4
-void kernel_dgemm_nt_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dgemm_nt_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dgemm_nt_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int k0, int k1);
-void kernel_dgemm_nn_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dgemm_nn_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1); //
-void kernel_dsyrk_nt_l_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dsyrk_nt_l_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dsyrk_nt_l_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int k0, int k1);
-void kernel_dtrmm_nt_ru_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd); //
-void kernel_dtrmm_nt_ru_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn); //
-void kernel_dtrmm_nn_rl_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd);
-void kernel_dtrmm_nn_rl_8x4_vs_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd, int km, int kn);
-void kernel_dtrmm_nn_rl_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nt_rl_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_one_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E);
-void kernel_dtrsm_nt_rl_one_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, int km, int kn);
-void kernel_dtrsm_nt_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dtrsm_nn_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ll_one_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde);
-void kernel_dtrsm_nn_ll_one_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, int km, int kn);
-void kernel_dtrsm_nn_lu_inv_8x4_lib4(int kmax, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-void kernel_dtrsm_nn_lu_inv_8x4_vs_lib4(int kmax, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-// 4x8
-void kernel_dgemm_nt_4x8_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D); //
-void kernel_dgemm_nt_4x8_vs_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D, int km, int kn); //
-void kernel_dgemm_nn_4x8_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D); //
-void kernel_dtrsm_nt_rl_inv_4x8_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sed, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_inv_4x8_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sed, double *inv_diag_E, int km, int kn);
-// 4x4
-void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D); //
-void kernel_dgemm_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn); //
-void kernel_dgemm_nt_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_dgemm_nn_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D); //
-void kernel_dgemm_nn_4x4_gen_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1); //
-void kernel_dsyrk_nt_l_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D); //
-void kernel_dsyrk_nt_l_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn); //
-void kernel_dsyrk_nt_l_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D); //
-void kernel_dtrmm_nt_ru_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn); //
-void kernel_dtrmm_nn_rl_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *D);
-void kernel_dtrmm_nn_rl_4x4_gen_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nt_rl_one_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E);
-void kernel_dtrsm_nt_rl_one_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, int km, int kn);
-void kernel_dtrsm_nt_ru_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-void kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ru_inv_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-void kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dtrsm_nn_ll_one_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E);
-void kernel_dtrsm_nn_ll_one_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int km, int kn);
-void kernel_dtrsm_nn_lu_inv_4x4_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-void kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-// diag
-void kernel_dgemm_diag_right_4_a0_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *D, int sdd);
-void kernel_dgemm_diag_right_4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-void kernel_dgemm_diag_right_3_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-void kernel_dgemm_diag_right_2_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-void kernel_dgemm_diag_right_1_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-void kernel_dgemm_diag_left_4_a0_lib4(int kmax, double *alpha, double *A, double *B, double *D);
-void kernel_dgemm_diag_left_4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-void kernel_dgemm_diag_left_3_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-void kernel_dgemm_diag_left_2_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-void kernel_dgemm_diag_left_1_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-// low rank update
-void kernel_dger4_sub_12r_lib4(int k, double *A, int sda, double *B, double *C, int sdc);
-void kernel_dger4_sub_12r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km);
-void kernel_dger4_sub_8r_lib4(int k, double *A, int sda, double *B, double *C, int sdc);
-void kernel_dger4_sub_8r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km);
-void kernel_dger4_sub_4r_lib4(int n, double *A, double *B, double *C);
-void kernel_dger4_sub_4r_vs_lib4(int n, double *A, double *B, double *C, int km);
-
-
-
-// LAPACK
-// 12x4
-void kernel_dpotrf_nt_l_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dpotrf_nt_l_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_nn_l_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dgetrf_nn_m_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dgetrf_nn_r_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dgetrf_nn_l_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_nn_m_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_nn_r_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-// 8x8
-void kernel_dpotrf_nt_l_8x8_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dpotrf_nt_l_8x8_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-// 8x4
-void kernel_dpotrf_nt_l_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dpotrf_nt_l_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_nn_l_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dgetrf_nn_r_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dgetrf_nn_l_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_nn_r_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-// 4x4
-void kernel_dpotrf_nt_l_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D);
-void kernel_dpotrf_nt_l_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn);
-#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-void kernel_dlauum_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D); //
-void kernel_dlauum_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn); //
-#endif
-void kernel_dgetrf_nn_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D);
-void kernel_dgetrf_nn_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D, int km, int kn);
-void kernel_dgetrf_pivot_4_lib4(int m, double *pA, int sda, double *inv_diag_A, int* ipiv);
-void kernel_dgetrf_pivot_4_vs_lib4(int m, int n, double *pA, int sda, double *inv_diag_A, int* ipiv);
-void kernel_dgeqrf_4_lib4(int m, double *pD, int sdd, double *dD);
-void kernel_dgeqrf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD);
-void kernel_dlarf_4_lib4(int m, int n, double *pV, int sdv, double *tau, double *pC, int sdc); // rank-4 reflector
-void kernel_dlarf_t_4_lib4(int m, int n, double *pD, int sdd, double *pVt, double *dD, double *pC0, int sdc, double *pW);
-void kernel_dgelqf_4_lib4(int n, double *pD, double *dD);
-void kernel_dgelqf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD);
-void kernel_dlarft_4_lib4(int kmax, double *pD, double *dD, double *pT);
-void kernel_dgelqf_dlarft12_12_lib4(int n, double *pD, int sdd, double *dD, double *pT);
-void kernel_dgelqf_dlarft4_12_lib4(int n, double *pD, int sdd, double *dD, double *pT);
-void kernel_dgelqf_dlarft4_8_lib4(int n, double *pD, int sdd, double *dD, double *pT);
-void kernel_dgelqf_dlarft4_4_lib4(int n, double *pD, double *dD, double *pT);
-void kernel_dlarfb12_r_4_lib4(int kmax, double *pV, int sdd, double *pT, double *pD, double *pK, int km);
-void kernel_dlarfb4_r_12_lib4(int kmax, double *pV, double *pT, double *pD, int sdd);
-void kernel_dlarfb4_r_8_lib4(int kmax, double *pV, double *pT, double *pD, int sdd);
-void kernel_dlarfb4_r_4_lib4(int kmax, double *pV, double *pT, double *pD);
-void kernel_dlarfb4_r_1_lib4(int kmax, double *pV, double *pT, double *pD);
-
-
-
-// merged routines
-// 12x4
-void kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dsyrk_dpotrf_nt_l_12x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-// 4x12
-void kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4(int kp, double *Ap, double *Bp, int sdbp, int km_, double *Am, double *Bm, int sdbm, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-// 8x8
-void kernel_dsyrk_dpotrf_nt_l_8x8_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km_, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km_, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-void kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdb, int km_, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-void kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdb, int km_, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-// 8x4
-void kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-void kernel_dsyrk_dpotrf_nt_l_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-void kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km_, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-// 4x8
-void kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4(int kp, double *Ap, double *Bp, int sdbp, int km_, double *Am, double *Bm, int sdbm, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-// 4x4
-void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn);
-void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-
-
-// auxiliary routines
-void kernel_dgecp_8_0_lib4(int tri, int kmax, double alpha, double *A0, int sda,  double *B0, int sdb);
-void kernel_dgecp_8_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgecp_8_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgecp_8_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgecp_4_0_lib4(int tri, int kmax, double alpha, double *A, double *B);
-void kernel_dgecp_4_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_4_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_4_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_3_0_lib4(int tri, int kmax, double alpha, double *A, double *B);
-void kernel_dgecp_3_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_3_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_2_0_lib4(int tri, int kmax, double alpha, double *A, double *B);
-void kernel_dgecp_2_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgecp_1_0_lib4(int tri, int kmax, double alpha, double *A, double *B);
-void kernel_dgead_8_0_lib4(int kmax, double alpha, double *A0, int sda,  double *B0, int sdb);
-void kernel_dgead_8_1_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgead_8_2_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgead_8_3_lib4(int kmax, double alpha, double *A0, int sda, double *B0, int sdb);
-void kernel_dgead_4_0_lib4(int kmax, double alpha, double *A, double *B);
-void kernel_dgead_4_1_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_4_2_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_4_3_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_3_0_lib4(int kmax, double alpha, double *A, double *B);
-void kernel_dgead_3_2_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_3_3_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_2_0_lib4(int kmax, double alpha, double *A, double *B);
-void kernel_dgead_2_3_lib4(int kmax, double alpha, double *A0, int sda, double *B);
-void kernel_dgead_1_0_lib4(int kmax, double alpha, double *A, double *B);
-void kernel_dgeset_4_lib4(int kmax, double alpha, double *A);
-void kernel_dtrset_4_lib4(int kmax, double alpha, double *A);
-void kernel_dgetr_8_lib4(int tri, int kmax, int kna, double alpha, double *A, int sda, double *C, int sdc);
-void kernel_dgetr_4_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc);
-void kernel_dgetr_3_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc);
-void kernel_dgetr_2_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc);
-void kernel_dgetr_1_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc);
-void kernel_dgetr_4_0_lib4(int m, double *A, int sda, double *B);
-
-
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/third_party/blasfeo/include/blasfeo_i_aux_ext_dep.h b/third_party/blasfeo/include/blasfeo_i_aux_ext_dep.h
deleted file mode 100644
index 5f47088..0000000
--- a/third_party/blasfeo/include/blasfeo_i_aux_ext_dep.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(EXT_DEP)
-
-
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// i_aux_extern_depend_lib
-void int_zeros(int **pA, int row, int col);
-void int_zeros_align(int **pA, int row, int col);
-void int_free(int *pA);
-void int_free_align(int *pA);
-void int_print_mat(int row, int col, int *A, int lda);
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-
-#endif // EXT_DEP
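
A minimal sketch of how the integer helpers declared in blasfeo_i_aux_ext_dep.h above are typically used, assuming a build with EXT_DEP defined (illustrative only, not part of the removed source):

    #include <blasfeo_i_aux_ext_dep.h>

    int main(void) {
        int *ipiv;
        // allocate a zero-initialized 4x1 integer array and point ipiv at it
        int_zeros(&ipiv, 4, 1);
        ipiv[0] = 3;
        // print it as a 4x1 column-major matrix with leading dimension 4
        int_print_mat(4, 1, ipiv, 4);
        int_free(ipiv);
        return 0;
    }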
diff --git a/third_party/blasfeo/include/blasfeo_m_aux.h b/third_party/blasfeo/include/blasfeo_m_aux.h
deleted file mode 100644
index bbaac28..0000000
--- a/third_party/blasfeo/include/blasfeo_m_aux.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-void m_cvt_d2s_strvec(int m, struct d_strvec *vd, int vdi, struct s_strvec *vs, int vsi);
-void m_cvt_s2d_strvec(int m, struct s_strvec *vs, int vsi, struct d_strvec *vd, int vdi);
-void m_cvt_d2s_strmat(int m, int n, struct d_strmat *Md, int mid, int nid, struct s_strmat *Ms, int mis, int nis);
-void m_cvt_s2d_strmat(int m, int n, struct s_strmat *Ms, int mis, int nis, struct d_strmat *Md, int mid, int nid);
-
-
-#ifdef __cplusplus
-}
-#endif
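
A rough sketch of the mixed-precision conversion routines declared in blasfeo_m_aux.h above. It assumes struct d_strvec / struct s_strvec come from blasfeo_common.h and that the single-precision allocation helpers from blasfeo_s_aux_ext_dep.h are available (neither is part of this hunk):

    #include <blasfeo_common.h>        // assumed: defines d_strvec / s_strvec
    #include <blasfeo_s_aux_ext_dep.h> // s_allocate_strvec / s_free_strvec
    #include <blasfeo_m_aux.h>

    // Demote a double-precision strvec of length m to single precision.
    void demote_vector(int m, struct d_strvec *vd) {
        struct s_strvec vs;
        s_allocate_strvec(m, &vs);          // single-precision destination
        m_cvt_d2s_strvec(m, vd, 0, &vs, 0); // copy/convert double -> float
        // ... use vs ...
        s_free_strvec(&vs);
    }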
diff --git a/third_party/blasfeo/include/blasfeo_s_aux.h b/third_party/blasfeo/include/blasfeo_s_aux.h
deleted file mode 100644
index d93509f..0000000
--- a/third_party/blasfeo/include/blasfeo_s_aux.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/************************************************
-* s_aux_lib.c
-************************************************/
-
-// returns the memory size (in bytes) needed for a strmat
-int s_size_strmat(int m, int n);
-// returns the memory size (in bytes) needed for the diagonal of a strmat
-int s_size_diag_strmat(int m, int n);
-// returns the memory size (in bytes) needed for a strvec
-int s_size_strvec(int m);
-// create a strmat for a matrix of size m*n by using memory passed by a pointer (pointer is not updated)
-void s_create_strmat(int m, int n, struct s_strmat *sA, void *memory);
-// create a strvec for a vector of size m by using memory passed by a pointer (pointer is not updated)
-void s_create_strvec(int m, struct s_strvec *sA, void *memory);
-void s_cvt_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj);
-void s_cvt_vec2strvec(int m, float *a, struct s_strvec *sa, int ai);
-void s_cvt_tran_mat2strmat(int m, int n, float *A, int lda, struct s_strmat *sA, int ai, int aj);
-void s_cvt_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda);
-void s_cvt_strvec2vec(int m, struct s_strvec *sa, int ai, float *a);
-void s_cvt_tran_strmat2mat(int m, int n, struct s_strmat *sA, int ai, int aj, float *A, int lda);
-void s_cast_mat2strmat(float *A, struct s_strmat *sA);
-void s_cast_diag_mat2strmat(float *dA, struct s_strmat *sA);
-void s_cast_vec2vecmat(float *a, struct s_strvec *sa);
-void sgein1_libstr(float a, struct s_strmat *sA, int ai, int aj);
-float sgeex1_libstr(struct s_strmat *sA, int ai, int aj);
-void svecin1_libstr(float a, struct s_strvec *sx, int xi);
-float svecex1_libstr(struct s_strvec *sx, int xi);
-// A <= alpha
-void sgese_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj);
-// a <= alpha
-void svecse_libstr(int m, float alpha, struct s_strvec *sx, int xi);
-void sgecp_lib(int m, int n, float alpha, int offsetA, float *A, int sda, int offsetB, float *B, int sdb);
-void sgecp_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void sgesc_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj);
-void sveccp_libstr(int m, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci);
-void svecsc_libstr(int m, float alpha, struct s_strvec *sa, int ai);
-void strcp_l_lib(int m, float alpha, int offsetA, float *A, int sda, int offsetB, float *B, int sdb);
-void strcp_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void sgead_lib(int m, int n, float alpha, int offsetA, float *A, int sda, int offsetB, float *B, int sdb);
-void sgead_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void svecad_libstr(int m, float alpha, struct s_strvec *sa, int ai, struct s_strvec *sc, int ci);
-void sgetr_lib(int m, int n, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc);
-void sgetr_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void strtr_l_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc);
-void strtr_l_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void strtr_u_lib(int m, float alpha, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc);
-void strtr_u_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void sdiareg_lib(int kmax, float reg, int offset, float *pD, int sdd);
-void sdiaex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi);
-void sdiain_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj);
-void sdiain_sqrt_lib(int kmax, float *x, int offset, float *pD, int sdd);
-void sdiaex_lib(int kmax, float alpha, int offset, float *pD, int sdd, float *x);
-void sdiaad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd);
-void sdiain_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd);
-void sdiain_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj);
-void sdiaex_libsp(int kmax, int *idx, float alpha, float *pD, int sdd, float *x);
-void sdiaex_sp_libstr(int kmax, float alpha, int *idx, struct s_strmat *sD, int di, int dj, struct s_strvec *sx, int xi);
-void sdiaad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj);
-void sdiaad_libsp(int kmax, int *idx, float alpha, float *x, float *pD, int sdd);
-void sdiaad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj);
-void sdiaadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD, int sdd);
-void sdiaadin_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, int *idx, struct s_strmat *sD, int di, int dj);
-void srowin_lib(int kmax, float alpha, float *x, float *pD);
-void srowin_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj);
-void srowex_lib(int kmax, float alpha, float *pD, float *x);
-void srowex_libstr(int kmax, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi);
-void srowad_lib(int kmax, float alpha, float *x, float *pD);
-void srowad_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj);
-void srowin_libsp(int kmax, float alpha, int *idx, float *x, float *pD);
-void srowad_libsp(int kmax, int *idx, float alpha, float *x, float *pD);
-void srowad_sp_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strmat *sD, int di, int dj);
-void srowadin_libsp(int kmax, int *idx, float alpha, float *x, float *y, float *pD);
-void srowsw_lib(int kmax, float *pA, float *pC);
-void srowsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void srowpe_libstr(int kmax, int *ipiv, struct s_strmat *sA);
-void scolin_lib(int kmax, float *x, int offset, float *pD, int sdd);
-void scolin_libstr(int kmax, struct s_strvec *sx, int xi, struct s_strmat *sA, int ai, int aj);
-void scolad_lib(int kmax, float alpha, float *x, int offset, float *pD, int sdd);
-void scolin_libsp(int kmax, int *idx, float *x, float *pD, int sdd);
-void scolad_libsp(int kmax, float alpha, int *idx, float *x, float *pD, int sdd);
-void scolsw_lib(int kmax, int offsetA, float *pA, int sda, int offsetC, float *pC, int sdc);
-void scolsw_libstr(int kmax, struct s_strmat *sA, int ai, int aj, struct s_strmat *sC, int ci, int cj);
-void scolpe_libstr(int kmax, int *ipiv, struct s_strmat *sA);
-void svecin_libsp(int kmax, int *idx, float *x, float *y);
-void svecad_libsp(int kmax, int *idx, float alpha, float *x, float *y);
-void svecad_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi);
-void svecin_sp_libstr(int m, float alpha, struct s_strvec *sx, int xi, int *idx, struct s_strvec *sz, int zi);
-void svecex_sp_libstr(int m, float alpha, int *idx, struct s_strvec *sx, int x, struct s_strvec *sz, int zi);
-void sveccl_libstr(int m, struct s_strvec *sxm, int xim, struct s_strvec *sx, int xi, struct s_strvec *sxp, int xip, struct s_strvec *sz, int zi);
-void sveccl_mask_libstr(int m, struct s_strvec *sxm, int xim, struct s_strvec *sx, int xi, struct s_strvec *sxp, int xip, struct s_strvec *sz, int zi, struct s_strvec *sm, int mi);
-void svecze_libstr(int m, struct s_strvec *sm, int mi, struct s_strvec *sv, int vi, struct s_strvec *se, int ei);
-void svecnrm_inf_libstr(int m, struct s_strvec *sx, int xi, float *ptr_norm);
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
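
As a usage sketch for the strmat creation routines declared in blasfeo_s_aux.h above (struct s_strmat itself is assumed to come from blasfeo_common.h, which is not part of this hunk):

    #include <stdlib.h>
    #include <blasfeo_common.h> // assumed: defines struct s_strmat
    #include <blasfeo_s_aux.h>

    // Wrap a column-major 4x4 float matrix A (lda = 4) in a strmat backed by
    // caller-provided memory, per the prototypes above.
    void wrap_matrix(float *A) {
        struct s_strmat sA;
        void *mem = malloc(s_size_strmat(4, 4)); // bytes needed for a 4x4 strmat
        s_create_strmat(4, 4, &sA, mem);         // map the strmat onto that memory
        s_cvt_mat2strmat(4, 4, A, 4, &sA, 0, 0); // copy A into the strmat at (0,0)
        sgesc_libstr(4, 4, 2.0f, &sA, 0, 0);     // example operation: A <= 2*A
        free(mem);
    }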
diff --git a/third_party/blasfeo/include/blasfeo_s_aux_ext_dep.h b/third_party/blasfeo/include/blasfeo_s_aux_ext_dep.h
deleted file mode 100644
index 2b9f9d4..0000000
--- a/third_party/blasfeo/include/blasfeo_s_aux_ext_dep.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(EXT_DEP)
-
-
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/************************************************
-* s_aux_extern_depend_lib.c
-************************************************/
-
-/* column-major matrices */
-
-// dynamically allocate row*col floats of memory, zero it, and point *pA at it
-void s_zeros(float **pA, int row, int col);
-// dynamically allocate row*col floats of memory aligned to a 64-byte boundary, zero it, and point *pA at it
-void s_zeros_align(float **pA, int row, int col);
-// dynamically allocate size bytes of memory aligned to a 64-byte boundary, zero it, and point *pA at it
-void s_zeros_align_bytes(float **pA, int size);
-// free the memory allocated by s_zeros
-void s_free(float *pA);
-// free the memory allocated by s_zeros_align or s_zeros_align_bytes
-void s_free_align(float *pA);
-// print a column-major matrix
-void s_print_mat(int m, int n, float *A, int lda);
-// print the transposed of a column-major matrix
-void s_print_tran_mat(int row, int col, float *A, int lda);
-// print to file a column-major matrix
-void s_print_to_file_mat(FILE *file, int row, int col, float *A, int lda);
-// print to file the transposed of a column-major matrix
-void s_print_tran_to_file_mat(FILE *file, int row, int col, float *A, int lda);
-// print in exponential notation a column-major matrix
-void s_print_e_mat(int m, int n, float *A, int lda);
-// print in exponential notation the transposed of a column-major matrix
-void s_print_e_tran_mat(int row, int col, float *A, int lda);
-
-/* strmat and strvec */
-
-#ifdef BLASFEO_COMMON
-// create a strmat for a matrix of size m*n by dynamically allocating memory
-void s_allocate_strmat(int m, int n, struct s_strmat *sA);
-// create a strvec for a vector of size m by dynamically allocating memory
-void s_allocate_strvec(int m, struct s_strvec *sa);
-// free the memory allocated by s_allocate_strmat
-void s_free_strmat(struct s_strmat *sA);
-// free the memory allocated by s_allocate_strvec
-void s_free_strvec(struct s_strvec *sa);
-// print a strmat
-void s_print_strmat(int m, int n, struct s_strmat *sA, int ai, int aj);
-// print in exponential notation a strmat
-void s_print_e_strmat(int m, int n, struct s_strmat *sA, int ai, int aj);
-// print to file a strmat
-void s_print_to_file_strmat(FILE *file, int m, int n, struct s_strmat *sA, int ai, int aj);
-// print a strvec
-void s_print_strvec(int m, struct s_strvec *sa, int ai);
-// print in exponential notation a strvec
-void s_print_e_strvec(int m, struct s_strvec *sa, int ai);
-// print to file a strvec
-void s_print_to_file_strvec(FILE *file, int m, struct s_strvec *sa, int ai);
-// print the transposed of a strvec
-void s_print_tran_strvec(int m, struct s_strvec *sa, int ai);
-// print in exponential notation the transposed of a strvec
-void s_print_e_tran_strvec(int m, struct s_strvec *sa, int ai);
-// print to file the transposed of a strvec
-void s_print_tran_to_file_strvec(FILE *file, int m, struct s_strvec *sa, int ai);
-#endif
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-
-#endif // EXT_DEP
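
A short sketch of the dynamic-allocation and print helpers declared in blasfeo_s_aux_ext_dep.h above, assuming an EXT_DEP build with BLASFEO_COMMON defined and struct s_strmat provided by blasfeo_common.h:

    #include <blasfeo_common.h>        // assumed: defines struct s_strmat
    #include <blasfeo_s_aux.h>         // sgese_libstr
    #include <blasfeo_s_aux_ext_dep.h> // s_allocate_strmat / s_print_strmat / s_free_strmat

    void print_zero_matrix(void) {
        struct s_strmat sA;
        s_allocate_strmat(3, 3, &sA);        // dynamically allocate a 3x3 strmat
        sgese_libstr(3, 3, 0.0f, &sA, 0, 0); // A <= 0
        s_print_strmat(3, 3, &sA, 0, 0);     // print it
        s_free_strmat(&sA);                  // release the memory
    }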
diff --git a/third_party/blasfeo/include/blasfeo_s_blas.h b/third_party/blasfeo/include/blasfeo_s_blas.h
deleted file mode 100644
index a0170a5..0000000
--- a/third_party/blasfeo/include/blasfeo_s_blas.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-//
-// level 1 BLAS
-//
-
-// y = y + alpha*x
-void saxpy_libstr(int kmax, float alpha, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-// z = x .* y, return sum(z) = x^T * y
-float svecmuldot_libstr(int m, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-// return x^T * y
-float sdot_libstr(int m, struct s_strvec *sx, int xi, struct s_strvec *sy, int yi);
-
-
-
-//
-// level 2 BLAS
-//
-
-// dense
-
-// z <= beta * y + alpha * A * x
-void sgemv_n_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-// z <= beta * y + alpha * A' * x
-void sgemv_t_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(n)
-void strsv_lnn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(n)
-void strsv_ltn_mn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) lower, not_transposed, not_unit
-void strsv_lnn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) lower, not_transposed, unit
-void strsv_lnu_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) lower, transposed, not_unit
-void strsv_ltn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) lower, transposed, unit
-void strsv_ltu_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A ) * x, A (m)x(m) upper, not_transposed, not_unit
-void strsv_unn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// y <= inv( A' ) * x, A (m)x(m) upper, transposed, not_unit
-void strsv_utn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// z <= A * x ; A upper triangular
-void strmv_unn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// z <= A' * x ; A upper triangular
-void strmv_utn_libstr(int m, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// z <= A * x ; A lower triangular
-void strmv_lnn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// z <= A' * x ; A lower triangular
-void strmv_ltn_libstr(int m, int n, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, struct s_strvec *sz, int zi);
-// z_n <= beta_n * y_n + alpha_n * A  * x_n
-// z_t <= beta_t * y_t + alpha_t * A' * x_t
-void sgemv_nt_libstr(int m, int n, float alpha_n, float alpha_t, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx_n, int xi_n, struct s_strvec *sx_t, int xi_t, float beta_n, float beta_t, struct s_strvec *sy_n, int yi_n, struct s_strvec *sy_t, int yi_t, struct s_strvec *sz_n, int zi_n, struct s_strvec *sz_t, int zi_t);
-// z <= beta * y + alpha * A * x, where A is symmetric and only the lower triangular part of A is accessed
-void ssymv_l_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-
-// diagonal
-
-// z <= beta * y + alpha * A * x, A diagonal
-void sgemv_diag_libstr(int m, float alpha, struct s_strvec *sA, int ai, struct s_strvec *sx, int xi, float beta, struct s_strvec *sy, int yi, struct s_strvec *sz, int zi);
-
-
-
-//
-// level 3 BLAS
-//
-
-// dense
-
-// D <= beta * C + alpha * A * B^T
-void sgemm_nt_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= beta * C + alpha * A * B
-void sgemm_nn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= beta * C + alpha * A * B^T ; C, D lower triangular
-void ssyrk_ln_libstr(int m, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-void ssyrk_ln_mn_libstr(int m, int n, int k, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * B * A^T ; B upper triangular
-void strmm_rutn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * B * A ; A lower triangular
-void strmm_rlnn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A lower triangular employing explicit inverse of diagonal
-void strsm_rltn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A lower triangular with unit diagonal
-void strsm_rltu_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * B * A^{-T} , with A upper triangular employing explicit inverse of diagonal
-void strsm_rutn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * A^{-1} * B , with A lower triangular with unit diagonal
-void strsm_llnu_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * A^{-1} * B , with A upper triangular employing explicit inverse of diagonal
-void strsm_lunn_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sD, int di, int dj);
-
-// diagonal
-
-// D <= alpha * A * B + beta * C, with A diagonal (stored as strvec)
-void sgemm_diag_left_ib(int m, int n, float alpha, float *dA, float *pB, int sdb, float beta, float *pC, int sdc, float *pD, int sdd);
-void sgemm_l_diag_libstr(int m, int n, float alpha, struct s_strvec *sA, int ai, struct s_strmat *sB, int bi, int bj, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= alpha * A * B + beta * C, with B diagonal (stored as strvec)
-void sgemm_r_diag_libstr(int m, int n, float alpha, struct s_strmat *sA, int ai, int aj, struct s_strvec *sB, int bi, float beta, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-
-
-
-//
-// LAPACK
-//
-
-// D <= chol( C ) ; C, D lower triangular
-void spotrf_l_libstr(int m, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-void spotrf_l_mn_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= chol( C + A * B' ) ; C, D lower triangular
-void ssyrk_spotrf_ln_libstr(int m, int n, int k, struct s_strmat *sA, int ai, int aj, struct s_strmat *sB, int bi, int bj, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= lu( C ) ; no pivoting
-void sgetrf_nopivot_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj);
-// D <= lu( C ) ; pivoting
-void sgetrf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, int *ipiv);
-// D <= qr( C )
-void sgeqrf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, void *work);
-int sgeqrf_work_size_libstr(int m, int n); // in bytes
-// D <= lq( C )
-void sgelqf_libstr(int m, int n, struct s_strmat *sC, int ci, int cj, struct s_strmat *sD, int di, int dj, void *work);
-int sgelqf_work_size_libstr(int m, int n); // in bytes
-
-
-
-
-#ifdef __cplusplus
-}
-#endif
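
A sketch of calling the level 3 routine sgemm_nt_libstr declared in blasfeo_s_blas.h above (D <= beta * C + alpha * A * B^T), using the allocation helpers from the ext_dep header and assuming blasfeo_common.h supplies struct s_strmat:

    #include <blasfeo_common.h>        // assumed: defines struct s_strmat
    #include <blasfeo_s_aux_ext_dep.h> // s_allocate_strmat / s_free_strmat
    #include <blasfeo_s_blas.h>

    // D <= 1.0 * A * B^T for an m x n result with shared dimension k.
    void small_gemm(int m, int n, int k) {
        struct s_strmat sA, sB, sC, sD;
        s_allocate_strmat(m, k, &sA);
        s_allocate_strmat(n, k, &sB); // B is n x k, used transposed
        s_allocate_strmat(m, n, &sC);
        s_allocate_strmat(m, n, &sD);
        // ... fill sA, sB, sC via s_cvt_mat2strmat or the *_libstr setters ...
        sgemm_nt_libstr(m, n, k, 1.0f, &sA, 0, 0, &sB, 0, 0,
                        0.0f, &sC, 0, 0, &sD, 0, 0);
        s_free_strmat(&sA); s_free_strmat(&sB);
        s_free_strmat(&sC); s_free_strmat(&sD);
    }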
diff --git a/third_party/blasfeo/include/blasfeo_s_kernel.h b/third_party/blasfeo/include/blasfeo_s_kernel.h
deleted file mode 100644
index c0dc2b0..0000000
--- a/third_party/blasfeo/include/blasfeo_s_kernel.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-//
-// lib8
-//
-
-// 24x4
-void kernel_sgemm_nt_24x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_nt_24x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_sgemm_nt_24x4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_sgemm_nn_24x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_nn_24x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_sgemm_nn_24x4_gen_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_ssyrk_nt_l_24x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_ssyrk_nt_l_24x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_ssyrk_nt_l_20x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_ssyrk_nt_l_20x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_spotrf_nt_l_24x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_spotrf_nt_l_24x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_spotrf_nt_l_20x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_spotrf_nt_l_20x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_strsm_nt_rl_inv_24x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_24x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-void kernel_sgemm_strsm_nt_rl_inv_24x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-void kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_20x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_24x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_strmm_nn_rl_24x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd);
-void kernel_strmm_nn_rl_24x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd, int km, int kn);
-
-// 16x4
-void kernel_sgemm_nt_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_nt_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_sgemm_nt_16x4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_sgemm_nn_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_nn_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_sgemm_nn_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_ssyrk_nt_l_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_ssyrk_nt_l_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_ssyrk_nt_l_12x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_ssyrk_nt_l_12x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-void kernel_spotrf_nt_l_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_spotrf_nt_l_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_spotrf_nt_l_12x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_spotrf_nt_l_12x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_strsm_nt_rl_inv_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-void kernel_sgemm_strsm_nt_rl_inv_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-void kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_12x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km_, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-void kernel_strmm_nn_rl_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd);
-void kernel_strmm_nn_rl_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd, int km, int kn);
-void kernel_strmm_nn_rl_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-// 8x8
-void kernel_sgemm_nt_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_nt_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-void kernel_sgemm_nt_8x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_sgemm_nn_8x8_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-void kernel_sgemm_nn_8x8_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-void kernel_sgemm_nn_8x8_gen_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_ssyrk_nt_l_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_ssyrk_nt_l_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-void kernel_spotrf_nt_l_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-void kernel_spotrf_nt_l_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_strsm_nt_rl_inv_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_sgemm_strsm_nt_rl_inv_8x8_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_8x8_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-// 8x4
-void kernel_sgemm_nt_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_nt_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-void kernel_sgemm_nt_8x4_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_sgemm_nn_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-void kernel_sgemm_nn_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-void kernel_sgemm_nn_8x4_gen_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-//void kernel_ssyrk_nt_l_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_ssyrk_nt_l_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-void kernel_spotrf_nt_l_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-void kernel_spotrf_nt_l_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_strsm_nt_rl_inv_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_sgemm_strsm_nt_rl_inv_8x4_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_8x4_lib8(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-void kernel_strmm_nn_rl_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D);
-void kernel_strmm_nn_rl_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D, int km, int kn);
-void kernel_strmm_nn_rl_8x4_gen_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-// 4x8
-void kernel_sgemm_nt_4x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_nt_4x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-void kernel_sgemm_nt_4x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_strsm_nt_rl_inv_4x8_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_4x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-// 8
-void kernel_sgemv_n_8_lib8(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z);
-void kernel_sgemv_n_8_vs_lib8(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k1);
-void kernel_sgemv_n_8_gen_lib8(int kmax, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k0, int k1);
-void kernel_sgemv_t_8_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z);
-void kernel_sgemv_t_8_vs_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1);
-void kernel_sgemv_t_8_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *C, float *D, int km);
-void kernel_sgemv_t_4_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z);
-void kernel_sgemv_t_4_vs_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1);
-void kernel_sgemv_t_4_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *C, float *D, int km);
-void kernel_strsv_ln_inv_8_lib8(int k, float *A, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_ln_inv_8_vs_lib8(int k, float *A, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-void kernel_strsv_lt_inv_8_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_lt_inv_8_vs_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-void kernel_sgemv_nt_4_lib8(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t);
-void kernel_sgemv_nt_4_vs_lib8(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t, int km);
-void kernel_ssymv_l_4l_lib8(int kmax, float *alpha, float *A, int sda, float *x, float *z);
-void kernel_ssymv_l_4r_lib8(int kmax, float *alpha, float *A, int sda, float *x, float *z);
-void kernel_ssymv_l_4l_gen_lib8(int kmax, float *alpha, int offA, float *A, int sda, float *x, float *z, int km);
-void kernel_ssymv_l_4r_gen_lib8(int kmax, float *alpha, int offA, float *A, int sda, float *x, float *z, int km);
-
-// aux
-void kernel_sgecp_8_0_lib8(int m, float *A, float *B);
-void kernel_sgecp_8_0_gen_lib8(int m, float *A, float *B, int m1);
-void kernel_sgecp_8_1_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_1_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_2_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_2_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_3_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_3_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_4_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_4_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_5_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_5_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_6_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_6_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgecp_8_7_lib8(int m, float *A, int sda, float *B);
-void kernel_sgecp_8_7_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgesc_8_lib8(int m, float *alpha, float *A);
-void kernel_sgesc_8_gen_lib8(int m, float *alpha, float *A, int m1);
-void kernel_sgetr_8_0_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_0_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_1_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_1_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_2_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_2_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_3_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_3_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_4_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_4_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_5_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_5_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_6_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_6_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgetr_8_7_lib8(int m, float *A, int sda, float *B);
-void kernel_sgetr_8_7_gen_lib8(int m, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_0_lib8(int m, float *alpha, float *A, float *B);
-void kernel_sgead_8_0_gen_lib8(int m, float *alpha, float *A, float *B, int m1);
-void kernel_sgead_8_1_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_1_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_2_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_2_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_3_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_3_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_4_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_4_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_5_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_5_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_6_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_6_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-void kernel_sgead_8_7_lib8(int m, float *alpha, float *A, int sda, float *B);
-void kernel_sgead_8_7_gen_lib8(int m, float *alpha, float *A, int sda, float *B, int m1);
-
-
-//
-// lib4
-//
-
-
-
-// level 2 BLAS
-// 4
-void kernel_sgemv_n_4_lib4(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z);
-void kernel_sgemv_n_4_vs_lib4(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k1);
-void kernel_sgemv_n_4_gen_lib4(int kmax, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k0, int k1);
-void kernel_sgemv_t_4_lib4(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z);
-void kernel_sgemv_t_4_vs_lib4(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1);
-void kernel_sgemv_t_4_gen_lib4(int k, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *C, float *D, int km);
-void kernel_strsv_ln_inv_4_lib4(int k, float *A, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_ln_inv_4_vs_lib4(int k, float *A, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-void kernel_strsv_lt_inv_4_lib4(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_lt_inv_3_lib4(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_lt_inv_2_lib4(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strsv_lt_inv_1_lib4(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-void kernel_strmv_un_4_lib4(int k, float *A, float *x, float *z);
-void kernel_strmv_ut_4_lib4(int k, float *A, int sda, float *x, float *z);
-void kernel_strmv_ut_4_vs_lib4(int k, float *A, int sda, float *x, float *z, int km);
-void kernel_sgemv_nt_6_lib4(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t);
-void kernel_sgemv_nt_4_lib4(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t);
-void kernel_sgemv_nt_4_vs_lib4(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t, int km);
-void kernel_ssymv_l_4_lib4(int kmax, float *alpha, float *A, int sda, float *x_n, float *z_n);
-void kernel_ssymv_l_4_gen_lib4(int kmax, float *alpha, int offA, float *A, int sda, float *x_n, float *z_n, int km);
-
-
-
-// level 3 BLAS
-// 12x4
-void kernel_sgemm_nt_12x4_lib4(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd); //
-// 8x8
-void kernel_sgemm_nt_8x8_lib4(int k, float *alpha, float *A, int sda, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd); //
-// 8x4
-void kernel_sgemm_nt_8x4_lib4(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd); //
-// 4x4
-void kernel_sgemm_nt_4x4_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D); //
-void kernel_sgemm_nt_4x4_vs_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn); //
-void kernel_sgemm_nt_4x4_gen_lib4(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int k0, int k1);
-void kernel_sgemm_nn_4x4_lib4(int k, float *alpha, float *A, float *B, int sdb, float *beta, float *C, float *D); //
-void kernel_sgemm_nn_4x4_vs_lib4(int k, float *alpha, float *A, float *B, int sdb, float *beta, float *C, float *D, int km, int kn); //
-void kernel_ssyrk_nt_l_4x4_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D); //
-void kernel_ssyrk_nt_l_4x4_vs_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn); //
-void kernel_strmm_nt_ru_4x4_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D); //
-void kernel_strmm_nt_ru_4x4_vs_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn); //
-void kernel_strmm_nn_rl_4x4_lib4(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D);
-void kernel_strmm_nn_rl_4x4_gen_lib4(int k, float *alpha, float *A, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-void kernel_strsm_nt_rl_inv_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nt_rl_inv_4x4_vs_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_strsm_nt_rl_one_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E);
-void kernel_strsm_nt_rl_one_4x4_vs_lib4(int k, float *A, float *B, float *C, float *D, float *E, int km, int kn);
-void kernel_strsm_nt_ru_inv_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nt_ru_inv_4x4_vs_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_strsm_nn_ru_inv_4x4_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nn_ru_inv_4x4_vs_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_strsm_nn_ll_one_4x4_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *E);
-void kernel_strsm_nn_ll_one_4x4_vs_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *E, int km, int kn);
-void kernel_strsm_nn_lu_inv_4x4_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_strsm_nn_lu_inv_4x4_vs_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-// diag
-void kernel_sgemm_diag_right_4_a0_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *D, int sdd);
-void kernel_sgemm_diag_right_4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_diag_right_3_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_diag_right_2_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_diag_right_1_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-void kernel_sgemm_diag_left_4_a0_lib4(int kmax, float *alpha, float *A, float *B, float *D);
-void kernel_sgemm_diag_left_4_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_diag_left_3_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_diag_left_2_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-void kernel_sgemm_diag_left_1_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-
-
-// LAPACK
-// 4x4
-void kernel_spotrf_nt_l_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-void kernel_spotrf_nt_l_4x4_vs_lib4(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_sgetrf_nn_4x4_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *inv_diag_D);
-void kernel_sgetrf_nn_4x4_vs_lib4(int k, float *A, float *B, int sdb, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_sgetrf_pivot_4_lib4(int m, float *pA, int sda, float *inv_diag_A, int* ipiv);
-void kernel_sgetrf_pivot_4_vs_lib4(int m, int n, float *pA, int sda, float *inv_diag_A, int* ipiv);
-
-
-
-// merged routines
-// 4x4
-void kernel_sgemm_strsm_nt_rl_inv_4x4_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-void kernel_sgemm_strsm_nt_rl_inv_4x4_vs_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_4x4_vs_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-void kernel_ssyrk_spotrf_nt_l_4x4_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-
-
-// auxiliary routines
-void kernel_sgesc_4_lib4(int kmax, float *alpha, float *A);
-void kernel_sgesc_3_lib4(int kmax, float *alpha, float *A);
-void kernel_sgesc_2_lib4(int kmax, float *alpha, float *A);
-void kernel_sgesc_1_lib4(int kmax, float *alpha, float *A);
-void kernel_sgecp_4_0_lib4(int kmax, float *A, float *B);
-void kernel_sgecp_4_1_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_4_2_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_4_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_3_0_lib4(int kmax, float *A, float *B);
-void kernel_sgecp_3_2_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_3_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_2_0_lib4(int kmax, float *A, float *B);
-void kernel_sgecp_2_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_sgecp_1_0_lib4(int kmax, float *A, float *B);
-void kernel_strcp_l_4_0_lib4(int kmax, float *A, float *B);
-void kernel_strcp_l_4_1_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_4_2_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_4_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_3_0_lib4(int kmax, float *A, float *B);
-void kernel_strcp_l_3_2_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_3_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_2_0_lib4(int kmax, float *A, float *B);
-void kernel_strcp_l_2_3_lib4(int kmax, float *A0, int sda, float *B);
-void kernel_strcp_l_1_0_lib4(int kmax, float *A, float *B);
-void kernel_sgead_4_0_lib4(int kmax, float *alpha, float *A, float *B);
-void kernel_sgead_4_1_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_4_2_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_4_3_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_3_0_lib4(int kmax, float *alpha, float *A, float *B);
-void kernel_sgead_3_2_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_3_3_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_2_0_lib4(int kmax, float *alpha, float *A, float *B);
-void kernel_sgead_2_3_lib4(int kmax, float *alpha, float *A0, int sda, float *B);
-void kernel_sgead_1_0_lib4(int kmax, float *alpha, float *A, float *B);
-// TODO
-void kernel_sgeset_4_lib4(int kmax, float alpha, float *A);
-void kernel_strset_4_lib4(int kmax, float alpha, float *A);
-void kernel_sgetr_4_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc);
-void kernel_sgetr_3_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc);
-void kernel_sgetr_2_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc);
-void kernel_sgetr_1_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc);
-
-
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/third_party/blasfeo/include/blasfeo_target.h b/third_party/blasfeo/include/blasfeo_target.h
deleted file mode 100644
index f0022ea..0000000
--- a/third_party/blasfeo/include/blasfeo_target.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef TARGET_X64_INTEL_HASWELL
-#define TARGET_X64_INTEL_HASWELL
-#endif
-#ifndef LA_HIGH_PERFORMANCE
-#define LA_HIGH_PERFORMANCE
-#endif
-#ifndef EXT_DEP
-#define EXT_DEP
-#endif
diff --git a/third_party/blasfeo/include/blasfeo_v_aux_ext_dep.h b/third_party/blasfeo/include/blasfeo_v_aux_ext_dep.h
deleted file mode 100644
index 2555fab..0000000
--- a/third_party/blasfeo/include/blasfeo_v_aux_ext_dep.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(EXT_DEP)
-
-
-
-#include <stdio.h>
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-/************************************************
-* d_aux_extern_depend_lib.c
-************************************************/
-
-void v_zeros(void **ptrA, int size);
-// dynamically allocate size bytes of memory aligned to 64-byte boundaries and set accordingly a pointer to void; set allocated memory to zero
-void v_zeros_align(void **ptrA, int size);
-// free the memory allocated by v_zeros
-void v_free(void *ptrA);
-// free the memory allocated by v_zeros_align
-void v_free_align(void *ptrA);
-// dynamically allocate size bytes of memory and set accordingly a pointer to char; set allocated memory to zero
-void c_zeros(char **ptrA, int size);
-// dynamically allocate size bytes of memory aligned to 64-byte boundaries and set accordingly a pointer to char; set allocated memory to zero
-void c_zeros_align(char **ptrA, int size);
-// free the memory allocated by c_zeros
-void c_free(char *ptrA);
-// free the memory allocated by c_zeros_align
-void c_free_align(char *ptrA);
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-
-#endif // EXT_DEP
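
The deleted header above documents the external-dependency allocation helpers: the plain variants return zero-initialized buffers, and the _align variants return zero-initialized buffers on 64-byte boundaries, released with the matching free routine. A minimal sketch of that contract, assuming a POSIX host; this illustrates the documented behavior and is not BLASFEO's actual implementation:

    #include <stdlib.h>
    #include <string.h>

    // Zero-initialized allocation through an out-pointer.
    static void sketch_v_zeros(void **ptrA, int size)
    {
        *ptrA = calloc(1, size);  // zeroed, default alignment
    }

    // 64-byte-aligned, zero-initialized allocation (POSIX posix_memalign).
    static void sketch_v_zeros_align(void **ptrA, int size)
    {
        if (posix_memalign(ptrA, 64, size) != 0)
            *ptrA = NULL;
        else
            memset(*ptrA, 0, size);
    }

    // Matching releases; free() also releases posix_memalign() memory.
    static void sketch_v_free(void *ptrA)       { free(ptrA); }
    static void sketch_v_free_align(void *ptrA) { free(ptrA); }
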
diff --git a/third_party/blasfeo/kernel/Makefile b/third_party/blasfeo/kernel/Makefile
deleted file mode 100644
index 60e1f31..0000000
--- a/third_party/blasfeo/kernel/Makefile
+++ /dev/null
@@ -1,75 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../Makefile.rule
-
-obj:
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-	( cd avx2; $(MAKE) obj)
-	( cd avx; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-	( cd avx; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-	( cd sse3; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-	( cd fma; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-	( cd armv8a; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-	( cd armv7a; $(MAKE) obj)
-	( cd c99; $(MAKE) obj)
-endif
-
-ifeq ($(TARGET), GENERIC)
-	( cd c99; $(MAKE) obj)
-endif
-
-clean:
-	make -C avx2 clean
-	make -C avx clean
-	make -C sse3 clean
-	make -C fma clean
-	make -C armv8a clean
-	make -C armv7a clean
-	make -C c99 clean
-
diff --git a/third_party/blasfeo/kernel/armv7a/Makefile b/third_party/blasfeo/kernel/armv7a/Makefile
deleted file mode 100644
index 4cb59a7..0000000
--- a/third_party/blasfeo/kernel/armv7a/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-OBJS += kernel_dgemm_4x4_lib4.o
-OBJS += kernel_sgemm_12x4_lib4.o kernel_sgemm_8x4_lib4.o kernel_sgemm_4x4_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
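
The file deleted next, kernel_dgemm_4x4_lib4.S, hand-codes the Cortex-A15 inner loops whose interface is spelled out in its comments (loop count k in r4, packed A panel in r5, packed B panel in r6), using 4-way unrolling, software prefetch (pld), and fused multiply-accumulates (fmacd/fnmacd) into the d0-d15 accumulator registers. As a rough functional sketch only, the "add nt" accumulation that loop performs is shown below in plain C, assuming the lib4 panel layout; the real kernel interleaves loads, prefetches, and FMAs to keep the in-order pipeline busy.

    // Hypothetical C equivalent of the inner "dgemm add nt 4x4" loop:
    // acc[i][j] += A(i,l) * B(j,l) over packed 4-wide panels.
    static void sketch_inner_dgemm_add_nt_4x4_lib4(int k, const double *A,
                                                   const double *B,
                                                   double acc[4][4])
    {
        for (int l = 0; l < k; l++)          // the assembly unrolls this by 4
            for (int j = 0; j < 4; j++)      // one B value reused across a column
                for (int i = 0; i < 4; i++)  // one fmacd per accumulator register
                    acc[i][j] += A[4 * l + i] * B[4 * l + j];
    }

The "sub" variant negates the accumulation (fnmacd), and the dsyrk_l variants update only the lower-triangular accumulators, which is why their unrolled bodies skip d4, d8, d9, d12, d13, and d14.
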
diff --git a/third_party/blasfeo/kernel/armv7a/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/armv7a/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index 86aee4f..0000000
--- a/third_party/blasfeo/kernel/armv7a/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,3223 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, %function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A even
-	fldd	d16, [r5, #0]
-	fldd	d17, [r5, #8]
-	fldd	d18, [r5, #16]
-	fldd	d19, [r5, #24]
-
-	// preload B even
-	fldd	d20, [r6, #0]
-	fldd	d21, [r6, #8]
-	fldd	d22, [r6, #16]
-	fldd	d23, [r6, #24]
-
-	// preload A odd
-	fldd	d24, [r5, #32]
-	fldd	d25, [r5, #40]
-	fldd	d26, [r5, #48]
-	fldd	d27, [r5, #56]
-
-	// preload B odd
-	fldd	d28, [r6, #32]
-	fldd	d29, [r6, #40]
-	fldd	d30, [r6, #48]
-	fldd	d31, [r6, #56]
-
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, #64]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// unroll 0
-	fmacd	d0, d16, d20
-	pld		[r5, #128] // prefetch
-	fmacd	d1, d17, d20
-	pld		[r6, #128] // prefetch
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	pld		[r6, #192] // prefetch
-	fmacd	d1, d17, d20
-	add		r6, r6, #128
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #0] // B
-
-	fmacd	d4, d16, d21
-	pld		[r5, #192] // prefetch
-	fmacd	d5, d17, d21
-	add		r5, r5, #128
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #8] // B
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #16] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #0] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #8] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #16] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #24] // A
-	fldd	d23, [r6, #24] // B
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #32] // B
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #40] // B
-
-	fmacd	d8, d24, d30
-	sub		r4, r4, #4
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #48] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #32] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #40] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #48] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #56] // A
-	fldd	d31, [r6, #56] // B
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fmacd	d12, d16, d23
-	fmacd	d13, d17, d23
-	fmacd	d14, d18, d23
-	fmacd	d15, d19, d23
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-
-	fmacd	d12, d24, d31
-	fmacd	d13, d25, d31
-	fmacd	d14, d26, d31
-	fmacd	d15, d27, d31
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fldd	d21, [r6, #8] // B
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fldd	d22, [r6, #16] // B
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fldd	d23, [r6, #24] // B
-	fmacd	d12, d16, d23
-	fmacd	d13, d17, d23
-	fmacd	d14, d18, d23
-	fmacd	d15, d19, d23
-
-	add		r5, r5, #32
-	add		r6, r6, #32
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-// r7   <- 4*sdb*sizeof(double)
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x4_lib4, %function
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-	pld		[r6, #64]
-
-	// preload A even
-	fldd	d16, [r5, #0]
-	fldd	d17, [r5, #8]
-	fldd	d18, [r5, #16]
-	fldd	d19, [r5, #24]
-
-	// preload B even
-	fldd	d20, [r6, #0]
-	fldd	d21, [r6, #32]
-	fldd	d22, [r6, #64]
-	fldd	d23, [r6, #96]
-
-	// preload A odd
-	fldd	d24, [r5, #32]
-	fldd	d25, [r5, #40]
-	fldd	d26, [r5, #48]
-	fldd	d27, [r5, #56]
-
-	// preload B odd
-	fldd	d28, [r6, #8]
-	fldd	d29, [r6, #40]
-	fldd	d30, [r6, #72]
-	fldd	d31, [r6, #104]
-
-	// prefetch
-	pld		[r5, #64]
-
-	// B next
-	add		r9, r7, r6
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// unroll 0
-	fmacd	d0, d16, d20
-	pld		[r5, #128] // prefetch
-	fmacd	d1, d17, d20
-	pld		[r9, #0]
-	fmacd	d2, d18, d20
-	pld		[r9, #64]
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #16] // B
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #48] // B
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #112] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #24] // B
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #56] // B
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #88] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	pld		[r5, #192] // prefetch
-	fmacd	d1, d17, d20
-	mov		r6, r9
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #0] // B
-
-	fmacd	d4, d16, d21
-	add		r5, r5, #128
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #32] // B
-
-	fmacd	d8, d16, d22
-	add		r9, r9, r7
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #64] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #0] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #8] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #16] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #24] // A
-	fldd	d23, [r6, #96] // B
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #8] // B
-
-	fmacd	d4, d24, d29
-	sub		r4, r4, #4
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #40] // B
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #72] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #32] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #40] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #48] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #56] // A
-	fldd	d31, [r6, #104] // B
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #16] // B
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #48] // B
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #112] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #24] // B
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #56] // B
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #88] // B
-
-	fmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	add		r5, r5, #128
-	mov		r6, r9
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fmacd	d12, d16, d23
-	fmacd	d13, d17, d23
-	fmacd	d14, d18, d23
-	fmacd	d15, d19, d23
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-
-	fmacd	d4, d24, d29
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-
-	fmacd	d8, d24, d30
-	fmacd	d9, d25, d30
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-
-	fmacd	d12, d24, d31
-	fmacd	d13, d25, d31
-	fmacd	d14, d26, d31
-	fmacd	d15, d27, d31
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fldd	d21, [r6, #32] // B
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fldd	d22, [r6, #64] // B
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fldd	d23, [r6, #96] // B
-	fmacd	d12, d16, d23
-	fmacd	d13, d17, d23
-	fmacd	d14, d18, d23
-	fmacd	d15, d19, d23
-
-	add		r5, r5, #32
-	add		r6, r6, #8
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x4_lib4, .-inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DSYRK_L_ADD_NT_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dsyrk_l_add_nt_4x4_lib4, %function
-inner_kernel_dsyrk_l_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dsyrk_l_add_nt_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A even
-	fldd	d16, [r5, #0]
-	fldd	d17, [r5, #8]
-	fldd	d18, [r5, #16]
-	fldd	d19, [r5, #24]
-
-	// preload B even
-	fldd	d20, [r6, #0]
-	fldd	d21, [r6, #8]
-	fldd	d22, [r6, #16]
-	fldd	d23, [r6, #24]
-
-	// preload A odd
-	fldd	d24, [r5, #32]
-	fldd	d25, [r5, #40]
-	fldd	d26, [r5, #48]
-	fldd	d27, [r5, #56]
-
-	// preload B odd
-	fldd	d28, [r6, #32]
-	fldd	d29, [r6, #40]
-	fldd	d30, [r6, #48]
-	fldd	d31, [r6, #56]
-
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, #64]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #128]
-	pld		[r6, #128]
-
-	// unroll 0
-	fmacd	d0, d16, d20
-	fldd	d16, [r5, #64] // A
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fmacd	d5, d17, d21
-	fldd	d17, [r5, #72] // A
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fmacd	d10, d18, d22
-	fldd	d18, [r5, #80] // A
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fldd	d24, [r5, #96] // A
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fmacd	d5, d25, d29
-	fldd	d25, [r5, #104] // A
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fmacd	d10, d26, d30
-	fldd	d26, [r5, #112] // A
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	// prefetch
-	pld		[r5, #192]
-	pld		[r6, #192]
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	fldd	d16, [r5, #0] // A
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #0] // B
-
-	fmacd	d5, d17, d21
-	fldd	d17, [r5, #8] // A
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #8] // B
-
-	fmacd	d10, d18, d22
-	fldd	d18, [r5, #16] // A
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #16] // B
-
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #24] // A
-	fldd	d23, [r6, #24] // B
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fldd	d24, [r5, #32] // A
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #32] // B
-
-	fmacd	d5, d25, d29
-	fldd	d25, [r5, #40] // A
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #40] // B
-
-	fmacd	d10, d26, d30
-	fldd	d26, [r5, #48] // A
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #48] // B
-
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #56] // A
-	fldd	d31, [r6, #56] // B
-
-	sub		r4, r4, #4
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	fmacd	d0, d16, d20
-	fldd	d16, [r5, #64] // A
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fmacd	d5, d17, d21
-	fldd	d17, [r5, #72] // A
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fmacd	d10, d18, d22
-	fldd	d18, [r5, #80] // A
-	fmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fmacd	d0, d24, d28
-	fldd	d24, [r5, #96] // A
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fmacd	d5, d25, d29
-	fldd	d25, [r5, #104] // A
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fmacd	d10, d26, d30
-	fldd	d26, [r5, #112] // A
-	fmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fmacd	d15, d19, d23
-
-	// unroll 3
-	fmacd	d0, d24, d28
-	fmacd	d1, d25, d28
-	fmacd	d2, d26, d28
-	fmacd	d3, d27, d28
-
-	fmacd	d5, d25, d29
-	fmacd	d6, d26, d29
-	fmacd	d7, d27, d29
-
-	fmacd	d10, d26, d30
-	fmacd	d11, d27, d30
-
-	fmacd	d15, d27, d31
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fldd	d21, [r6, #8] // B
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fldd	d22, [r6, #16] // B
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fldd	d23, [r6, #24] // B
-	fmacd	d15, d19, d23
-
-	add		r5, r5, #32
-	add		r6, r6, #32
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dsyrk_l_add_nt_4x4_lib4, .-inner_kernel_dsyrk_l_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_4x4_lib4, %function
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A even
-	fldd	d16, [r5, #0]
-	fldd	d17, [r5, #8]
-	fldd	d18, [r5, #16]
-	fldd	d19, [r5, #24]
-
-	// preload B even
-	fldd	d20, [r6, #0]
-	fldd	d21, [r6, #8]
-	fldd	d22, [r6, #16]
-	fldd	d23, [r6, #24]
-
-	// preload A odd
-	fldd	d24, [r5, #32]
-	fldd	d25, [r5, #40]
-	fldd	d26, [r5, #48]
-	fldd	d27, [r5, #56]
-
-	// preload B odd
-	fldd	d28, [r6, #32]
-	fldd	d29, [r6, #40]
-	fldd	d30, [r6, #48]
-	fldd	d31, [r6, #56]
-
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, #64]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #128]
-	pld		[r6, #128]
-
-	// unroll 0
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fnmacd	d4, d16, d21
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fnmacd	d8, d16, d22
-	fnmacd	d9, d17, d22
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fnmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fnmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fnmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fnmacd	d0, d24, d28
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fnmacd	d4, d24, d29
-	fnmacd	d5, d25, d29
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fnmacd	d8, d24, d30
-	fnmacd	d9, d25, d30
-	fnmacd	d10, d26, d30
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fnmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fnmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fnmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	// prefetch
-	pld		[r5, #192]
-	pld		[r6, #192]
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #0] // B
-
-	fnmacd	d4, d16, d21
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #8] // B
-
-	fnmacd	d8, d16, d22
-	fnmacd	d9, d17, d22
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #16] // B
-
-	fnmacd	d12, d16, d23
-	fldd	d16, [r5, #0] // A
-	fnmacd	d13, d17, d23
-	fldd	d17, [r5, #8] // A
-	fnmacd	d14, d18, d23
-	fldd	d18, [r5, #16] // A
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #24] // A
-	fldd	d23, [r6, #24] // B
-
-	// unroll 3
-	fnmacd	d0, d24, d28
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #32] // B
-
-	fnmacd	d4, d24, d29
-	fnmacd	d5, d25, d29
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #40] // B
-
-	fnmacd	d8, d24, d30
-	fnmacd	d9, d25, d30
-	fnmacd	d10, d26, d30
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #48] // B
-
-	fnmacd	d12, d24, d31
-	fldd	d24, [r5, #32] // A
-	fnmacd	d13, d25, d31
-	fldd	d25, [r5, #40] // A
-	fnmacd	d14, d26, d31
-	fldd	d26, [r5, #48] // A
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #56] // A
-	fldd	d31, [r6, #56] // B
-
-	sub		r4, r4, #4
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fnmacd	d4, d16, d21
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fnmacd	d8, d16, d22
-	fnmacd	d9, d17, d22
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fnmacd	d12, d16, d23
-	fldd	d16, [r5, #64] // A
-	fnmacd	d13, d17, d23
-	fldd	d17, [r5, #72] // A
-	fnmacd	d14, d18, d23
-	fldd	d18, [r5, #80] // A
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fnmacd	d0, d24, d28
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fnmacd	d4, d24, d29
-	fnmacd	d5, d25, d29
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fnmacd	d8, d24, d30
-	fnmacd	d9, d25, d30
-	fnmacd	d10, d26, d30
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fnmacd	d12, d24, d31
-	fldd	d24, [r5, #96] // A
-	fnmacd	d13, d25, d31
-	fldd	d25, [r5, #104] // A
-	fnmacd	d14, d26, d31
-	fldd	d26, [r5, #112] // A
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-
-	fnmacd	d4, d16, d21
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-
-	fnmacd	d8, d16, d22
-	fnmacd	d9, d17, d22
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-
-	fnmacd	d12, d16, d23
-	fnmacd	d13, d17, d23
-	fnmacd	d14, d18, d23
-	fnmacd	d15, d19, d23
-
-	// unroll 3
-	fnmacd	d0, d24, d28
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-
-	fnmacd	d4, d24, d29
-	fnmacd	d5, d25, d29
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-
-	fnmacd	d8, d24, d30
-	fnmacd	d9, d25, d30
-	fnmacd	d10, d26, d30
-	fnmacd	d11, d27, d30
-
-	fnmacd	d12, d24, d31
-	fnmacd	d13, d25, d31
-	fnmacd	d14, d26, d31
-	fnmacd	d15, d27, d31
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-
-	fldd	d21, [r6, #8] // B
-	fnmacd	d4, d16, d21
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-
-	fldd	d22, [r6, #16] // B
-	fnmacd	d8, d16, d22
-	fnmacd	d9, d17, d22
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-
-	fldd	d23, [r6, #24] // B
-	fnmacd	d12, d16, d23
-	fnmacd	d13, d17, d23
-	fnmacd	d14, d18, d23
-	fnmacd	d15, d19, d23
-
-	add		r5, r5, #32
-	add		r6, r6, #32
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_4x4_lib4, .-inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DSYRK_L_SUB_NT_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dsyrk_l_sub_nt_4x4_lib4, %function
-inner_kernel_dsyrk_l_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dsyrk_l_sub_nt_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A even
-	fldd	d16, [r5, #0]
-	fldd	d17, [r5, #8]
-	fldd	d18, [r5, #16]
-	fldd	d19, [r5, #24]
-
-	// preload B even
-	fldd	d20, [r6, #0]
-	fldd	d21, [r6, #8]
-	fldd	d22, [r6, #16]
-	fldd	d23, [r6, #24]
-
-	// preload A odd
-	fldd	d24, [r5, #32]
-	fldd	d25, [r5, #40]
-	fldd	d26, [r5, #48]
-	fldd	d27, [r5, #56]
-
-	// preload B odd
-	fldd	d28, [r6, #32]
-	fldd	d29, [r6, #40]
-	fldd	d30, [r6, #48]
-	fldd	d31, [r6, #56]
-
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, #64]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #128]
-	pld		[r6, #128]
-
-	// unroll 0
-	fnmacd	d0, d16, d20
-	fldd	d16, [r5, #64] // A
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fnmacd	d5, d17, d21
-	fldd	d17, [r5, #72] // A
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fnmacd	d10, d18, d22
-	fldd	d18, [r5, #80] // A
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fnmacd	d0, d24, d28
-	fldd	d24, [r5, #96] // A
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fnmacd	d5, d25, d29
-	fldd	d25, [r5, #104] // A
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fnmacd	d10, d26, d30
-	fldd	d26, [r5, #112] // A
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	// prefetch
-	pld		[r5, #192]
-	pld		[r6, #192]
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fnmacd	d0, d16, d20
-	fldd	d16, [r5, #0] // A
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #0] // B
-
-	fnmacd	d5, d17, d21
-	fldd	d17, [r5, #8] // A
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #8] // B
-
-	fnmacd	d10, d18, d22
-	fldd	d18, [r5, #16] // A
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #16] // B
-
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #24] // A
-	fldd	d23, [r6, #24] // B
-
-	// unroll 3
-	fnmacd	d0, d24, d28
-	fldd	d24, [r5, #32] // A
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #32] // B
-
-	fnmacd	d5, d25, d29
-	fldd	d25, [r5, #40] // A
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #40] // B
-
-	fnmacd	d10, d26, d30
-	fldd	d26, [r5, #48] // A
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #48] // B
-
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #56] // A
-	fldd	d31, [r6, #56] // B
-
-	sub		r4, r4, #4
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	fnmacd	d0, d16, d20
-	fldd	d16, [r5, #64] // A
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-	fldd	d20, [r6, #64] // B
-
-	fnmacd	d5, d17, d21
-	fldd	d17, [r5, #72] // A
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-	fldd	d21, [r6, #72] // B
-
-	fnmacd	d10, d18, d22
-	fldd	d18, [r5, #80] // A
-	fnmacd	d11, d19, d22
-	fldd	d22, [r6, #80] // B
-
-	fnmacd	d15, d19, d23
-	fldd	d19, [r5, #88] // A
-	fldd	d23, [r6, #88] // B
-
-	// unroll 1
-	fnmacd	d0, d24, d28
-	fldd	d24, [r5, #96] // A
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-	fldd	d28, [r6, #96] // B
-
-	fnmacd	d5, d25, d29
-	fldd	d25, [r5, #104] // A
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-	fldd	d29, [r6, #104] // B
-
-	fnmacd	d10, d26, d30
-	fldd	d26, [r5, #112] // A
-	fnmacd	d11, d27, d30
-	fldd	d30, [r6, #112] // B
-
-	fnmacd	d15, d27, d31
-	fldd	d27, [r5, #120] // A
-	fldd	d31, [r6, #120] // B
-
-	add		r5, r5, #128
-	add		r6, r6, #128
-
-	// unroll 2
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-
-	fnmacd	d15, d19, d23
-
-	// unroll 3
-	fnmacd	d0, d24, d28
-	fnmacd	d1, d25, d28
-	fnmacd	d2, d26, d28
-	fnmacd	d3, d27, d28
-
-	fnmacd	d5, d25, d29
-	fnmacd	d6, d26, d29
-	fnmacd	d7, d27, d29
-
-	fnmacd	d10, d26, d30
-	fnmacd	d11, d27, d30
-
-	fnmacd	d15, d27, d31
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fnmacd	d0, d16, d20
-	fnmacd	d1, d17, d20
-	fnmacd	d2, d18, d20
-	fnmacd	d3, d19, d20
-
-	fldd	d21, [r6, #8] // B
-	fnmacd	d5, d17, d21
-	fnmacd	d6, d18, d21
-	fnmacd	d7, d19, d21
-
-	fldd	d22, [r6, #16] // B
-	fnmacd	d10, d18, d22
-	fnmacd	d11, d19, d22
-
-	fldd	d23, [r6, #24] // B
-	fnmacd	d15, d19, d23
-
-	add		r5, r5, #32
-	add		r6, r6, #32
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dsyrk_l_sub_nt_4x4_lib4, .-inner_kernel_dsyrk_l_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-// r7   <- bs*sdb*sizeof(double)
-// r8   <- offsetB
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x4_lib4, %function
-inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-
-	cmp		r8, #0
-	ble		2f // return
-
-	cmp		r4, #0
-	ble		2f // return
-
-	rsb		r9, r8, #4 // 4-offsetB
-	cmp		r9, r4
-//	ble		0f
-//	mov		r9, r4 // kend=min(k,4-offsetB(
-//0:
-	movgt	r9, r4 // kend=min(k,4-offsetB(
-	
-//	lsl		r10, r8, #3 // offsetB*sizeof(double)
-	add		r6, r6, r8, LSL #3 // B + offsetB*sizeof(double)
-
-1:
-	fldd	d16, [r5, #0] // A
-	fldd	d17, [r5, #8] // A
-	fldd	d18, [r5, #16] // A
-	fldd	d19, [r5, #24] // A
-
-	fldd	d20, [r6, #0] // B
-	fmacd	d0, d16, d20
-	fmacd	d1, d17, d20
-	fmacd	d2, d18, d20
-	fmacd	d3, d19, d20
-
-	fldd	d21, [r6, #32] // B
-	fmacd	d4, d16, d21
-	fmacd	d5, d17, d21
-	fmacd	d6, d18, d21
-	fmacd	d7, d19, d21
-
-	fldd	d22, [r6, #64] // B
-	fmacd	d8, d16, d22
-	fmacd	d9, d17, d22
-	fmacd	d10, d18, d22
-	fmacd	d11, d19, d22
-
-	fldd	d23, [r6, #96] // B
-	fmacd	d12, d16, d23
-	fmacd	d13, d17, d23
-	fmacd	d14, d18, d23
-	fmacd	d15, d19, d23
-
-	sub		r4, r4, #1
-	sub		r9, r9, #1
-	add		r5, r5, #32
-	add		r6, r6, #8
-
-	cmp		r9, #0
-	bgt		1b
-
-	cmp		r4, #0
-	ble		2f // return
-
-	add		r6, r6, r7
-	sub		r6, r6, #32
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x4_lib4, .-inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-	
-
-
-
-
-// subroutine
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r4   <- E
-// r5   <- inv_diag_E
-//
-// output arguments:
-// r4   <- E
-// r5   <- inv_diag_E
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_lib4, %function
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_MAC)
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#endif
-#endif
-	
-	// first column
-	fldd		d16, [r5, #0] // E_inv[0]
-	fmuld		d0, d0, d16
-	fmuld		d1, d1, d16
-	fmuld		d2, d2, d16
-	fmuld		d3, d3, d16
-
-	// second column
-	fldd		d16, [r4, #8] // E[1+4*0]
-	fnmacd		d4, d0, d16
-	fnmacd		d5, d1, d16
-	fnmacd		d6, d2, d16
-	fnmacd		d7, d3, d16
-	fldd		d16, [r5, #8] // E_inv[1]
-	fmuld		d4, d4, d16
-	fmuld		d5, d5, d16
-	fmuld		d6, d6, d16
-	fmuld		d7, d7, d16
-
-	// third column
-	fldd		d16, [r4, #16] // E[2+4*0]
-	fnmacd		d8, d0, d16
-	fnmacd		d9, d1, d16
-	fnmacd		d10, d2, d16
-	fnmacd		d11, d3, d16
-	fldd		d16, [r4, #48] // E[2+4*1]
-	fnmacd		d8, d4, d16
-	fnmacd		d9, d5, d16
-	fnmacd		d10, d6, d16
-	fnmacd		d11, d7, d16
-	fldd		d16, [r5, #16] // E_inv[2]
-	fmuld		d8, d8, d16
-	fmuld		d9, d9, d16
-	fmuld		d10, d10, d16
-	fmuld		d11, d11, d16
-
-	// fourth column
-	fldd		d16, [r4, #24] // E[3+4*0]
-	fnmacd		d12, d0, d16
-	fnmacd		d13, d1, d16
-	fnmacd		d14, d2, d16
-	fnmacd		d15, d3, d16
-	fldd		d16, [r4, #56] // E[3+4*1]
-	fnmacd		d12, d4, d16
-	fnmacd		d13, d5, d16
-	fnmacd		d14, d6, d16
-	fnmacd		d15, d7, d16
-	fldd		d16, [r4, #88] // E[3+4*2]
-	fnmacd		d12, d8, d16
-	fnmacd		d13, d9, d16
-	fnmacd		d14, d10, d16
-	fnmacd		d15, d11, d16
-	fldd		d16, [r5, #24] // E_inv[3]
-	fmuld		d12, d12, d16
-	fmuld		d13, d13, d16
-	fmuld		d14, d14, d16
-	fmuld		d15, d15, d16
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// cholesky factorization 
-//
-// input arguments:
-// r4   <- inv_diag_D
-//
-// output arguments:
-// r4   <- inv_diag_D
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_lib4, %function
-inner_edge_dpotrf_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_lib4:
-#endif
-#endif
-	
-	fconstd		d16, #112 // 1.0
-	fldd		d17, .LC01 // 0.0
-
-	// first column
-	fcmped		d0, d17
-	fmstat
-	ble			1f
-	fsqrtd		d0, d0
-	fdivd		d18, d16, d0
-	fstd		d18, [r4, #0]
-2:
-	fmuld		d1, d1, d18
-	fmuld		d2, d2, d18
-	fmuld		d3, d3, d18
-
-	// second column
-	fnmacd		d5, d1, d1
-	fnmacd		d6, d1, d2
-	fnmacd		d7, d1, d3
-	fcmped		d5, d17
-	fmstat
-	ble			3f
-	fsqrtd		d5, d5
-	fdivd		d18, d16, d5
-	fstd		d18, [r4, #8]
-4:
-	fmuld		d6, d6, d18
-	fmuld		d7, d7, d18
-
-	// third column
-	fnmacd		d10, d2, d2
-	fnmacd		d11, d2, d3
-	fnmacd		d10, d6, d6
-	fnmacd		d11, d6, d7
-	fcmped		d10, d17
-	fmstat
-	ble			5f
-	fsqrtd		d10, d10
-	fdivd		d18, d16, d10
-	fstd		d18, [r4, #16]
-6:
-	fmuld		d11, d11, d18
-
-	// fourth column
-	fnmacd		d15, d3, d3
-	fnmacd		d15, d7, d7
-	fnmacd		d15, d11, d11
-	fcmped		d15, d17
-	fmstat
-	ble			7f
-	fsqrtd		d15, d15
-	fdivd		d18, d16, d15
-	fstd		d18, [r4, #24]
-
-	b			0f
-
-1:
-	fldd		d0, .LC01
-	b			2b
-
-3:
-	fldd		d5, .LC01
-	b			4b
-
-5:
-	fldd		d10, .LC01
-	b			6b
-
-7:
-	fldd		d15, .LC01
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_lib4, .-inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-	.align 3
-.LC01: // { 0 }
-	.word 0
-	.word 0
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- alpha
-// r5   <- beta
-// r6   <- C
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, %function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#endif
-#endif
-
-	fldd	d16, [r4, #0] // alpha
-
-	fmuld	d0, d0, d16
-	fldd	d18, [r5, #0] // beta
-	fmuld	d1, d1, d16
-	fldd	d17, .LC01 // 0.0
-	fmuld	d2, d2, d16
-	fmuld	d3, d3, d16
-
-	fmuld	d4, d4, d16
-	fmuld	d5, d5, d16
-	fmuld	d6, d6, d16
-	fmuld	d7, d7, d16
-
-	fmuld	d8, d8, d16
-	fcmped	d18, d17
-	fmuld	d9, d9, d16
-	fmuld	d10, d10, d16
-	fmuld	d11, d11, d16
-
-	fmuld	d12, d12, d16
-	fmstat
-	fmuld	d13, d13, d16
-	fmuld	d14, d14, d16
-	fmuld	d15, d15, d16
-
-	beq		0f // end
-
-	fldd	d17, [r6, #0] // C
-	fmacd	d0, d18, d17
-	fldd	d17, [r6, #8] // C
-	fmacd	d1, d18, d17
-	fldd	d17, [r6, #16] // C
-	fmacd	d2, d18, d17
-	fldd	d17, [r6, #24] // C
-	fmacd	d3, d18, d17
-
-	fldd	d17, [r6, #32] // C
-	fmacd	d4, d18, d17
-	fldd	d17, [r6, #40] // C
-	fmacd	d5, d18, d17
-	fldd	d17, [r6, #48] // C
-	fmacd	d6, d18, d17
-	fldd	d17, [r6, #56] // C
-	fmacd	d7, d18, d17
-
-	fldd	d17, [r6, #64] // C
-	fmacd	d8, d18, d17
-	fldd	d17, [r6, #72] // C
-	fmacd	d9, d18, d17
-	fldd	d17, [r6, #80] // C
-	fmacd	d10, d18, d17
-	fldd	d17, [r6, #88] // C
-	fmacd	d11, d18, d17
-
-	fldd	d17, [r6, #96] // C
-	fmacd	d12, d18, d17
-	fldd	d17, [r6, #104] // C
-	fmacd	d13, d18, d17
-	fldd	d17, [r6, #112] // C
-	fmacd	d14, d18, d17
-	fldd	d17, [r6, #120] // C
-	fmacd	d15, d18, d17
-
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- C
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_11_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_4x4_lib4, %function
-inner_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_4x4_lib4:
-#endif
-#endif
-
-	fldd	d17, [r4, #0] // C
-	faddd	d0, d0, d17
-	fldd	d17, [r4, #8] // C
-	faddd	d1, d1, d17
-	fldd	d17, [r4, #16] // C
-	faddd	d2, d2, d17
-	fldd	d17, [r4, #24] // C
-	faddd	d3, d3, d17
-
-	fldd	d17, [r4, #32] // C
-	faddd	d4, d4, d17
-	fldd	d17, [r4, #40] // C
-	faddd	d5, d5, d17
-	fldd	d17, [r4, #48] // C
-	faddd	d6, d6, d17
-	fldd	d17, [r4, #56] // C
-	faddd	d7, d7, d17
-
-	fldd	d17, [r4, #64] // C
-	faddd	d8, d8, d17
-	fldd	d17, [r4, #72] // C
-	faddd	d9, d9, d17
-	fldd	d17, [r4, #80] // C
-	faddd	d10, d10, d17
-	fldd	d17, [r4, #88] // C
-	faddd	d11, d11, d17
-
-	fldd	d17, [r4, #96] // C
-	faddd	d12, d12, d17
-	fldd	d17, [r4, #104] // C
-	faddd	d13, d13, d17
-	fldd	d17, [r4, #112] // C
-	faddd	d14, d14, d17
-	fldd	d17, [r4, #120] // C
-	faddd	d15, d15, d17
-
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_4x4_lib4, .-inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- D
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, %function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#endif
-#endif
-
-	fstd	d0, [r4, #0]
-	fstd	d1, [r4, #8]
-	fstd	d2, [r4, #16]
-	fstd	d3, [r4, #24]
-
-	fstd	d4, [r4, #32]
-	fstd	d5, [r4, #40]
-	fstd	d6, [r4, #48]
-	fstd	d7, [r4, #56]
-
-	fstd	d8, [r4, #64]
-	fstd	d9, [r4, #72]
-	fstd	d10, [r4, #80]
-	fstd	d11, [r4, #88]
-
-	fstd	d12, [r4, #96]
-	fstd	d13, [r4, #104]
-	fstd	d14, [r4, #112]
-	fstd	d15, [r4, #120]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- D
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_L_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_lib4, %function
-inner_store_l_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_lib4:
-#endif
-#endif
-
-	fstd	d0, [r4, #0]
-	fstd	d1, [r4, #8]
-	fstd	d2, [r4, #16]
-	fstd	d3, [r4, #24]
-
-//	fstd	d4, [r4, #32]
-	fstd	d5, [r4, #40]
-	fstd	d6, [r4, #48]
-	fstd	d7, [r4, #56]
-
-//	fstd	d8, [r4, #64]
-//	fstd	d9, [r4, #72]
-	fstd	d10, [r4, #80]
-	fstd	d11, [r4, #88]
-
-//	fstd	d12, [r4, #96]
-//	fstd	d13, [r4, #104]
-//	fstd	d14, [r4, #112]
-	fstd	d15, [r4, #120]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_lib4, .-inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// zero double word
-	.align 3
-.LC00: // { 0 }
-	.word 0
-	.word 0
-
-//                               r0        r1             r2         r3         sp+0          sp+4       sp+8
-// void kernel_dgemm_nt_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_dgemm_nt_4x4_lib4
-	.type	kernel_dgemm_nt_4x4_lib4, %function
-kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_dgemm_nt_4x4_lib4
-_kernel_dgemm_nt_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #0] // beta
-	ldr		r6, [fp, #4] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #8] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4, .-kernel_dgemm_nt_4x4_lib4
-#endif
-
-
-
-
-
-//                               r0        r1             r2         r3           sp+0       sp+4     sp+8          sp+12      sp+16
-// void kernel_dgemm_nn_4x4_lib4(int kmax, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_dgemm_nn_4x4_lib4
-	.type	kernel_dgemm_nn_4x4_lib4, %function
-kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_dgemm_nn_4x4_lib4
-_kernel_dgemm_nn_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	ldr		r6, [fp, #0] // B
-	ldr		r7, [fp, #4] // sdb
-	lsl		r7, r7, #5 // 4*sizeof(double)*sdb
-	mov		r8, r3 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #8] // beta
-	ldr		r6, [fp, #12] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #16] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_lib4, .-kernel_dgemm_nn_4x4_lib4
-#endif
-
-
-
-
-
-//                                 r0        r1             r2         r3         sp+0          sp+4       sp+8
-// void kernel_dsyrk_nt_l_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.type kernel_dsyrk_nt_l_4x4_lib4, %function
-kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_lib4
-_kernel_dsyrk_nt_l_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dsyrk l nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DSYRK_L_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dsyrk_l_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dsyrk_l_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #0] // beta
-	ldr		r6, [fp, #4] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store l
-	ldr		r4, [fp, #8] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_lib4, .-kernel_dsyrk_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                      r0        r1         r2         r3         sp+0       sp+4       rsp+8
-// void kernel_dtrsm_nt_rl_inv_4x4_lib4(int kmax, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_lib4, %function
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dsyrk l nt
-	mov		r4, r0 // kmax
-	mov		r5, r1 // A
-	mov		r6, r2 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for alpha=1.0 and beta=1.0
-	mov		r4, r3 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-	// factorization
-	ldr		r4, [fp, #4] // E
-	ldr		r5, [fp, #8] // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-
-	// store l
-	ldr		r4, [fp, #0] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-//                                  r0        r1         r2         r3         sp+0       sp+4
-// void kernel_dpotrf_nt_l_4x4_lib4(int kmax, double *A, double *B, double *C, double *D, double *inv_diag_D);
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.type kernel_dpotrf_nt_l_4x4_lib4, %function
-kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dpotrf_nt_l_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dsyrk l nt
-	mov		r4, r0 // kmax
-	mov		r5, r1 // A
-	mov		r6, r2 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DSYRK_L_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dsyrk_l_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dsyrk_l_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for alpha=1.0 and beta=1.0
-	mov		r4, r3 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-	// factorization
-	ldr		r4, [fp, #4] // inv_diag_D
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_edge_dpotrf_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-
-
-	// store l
-	ldr		r4, [fp, #0] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_lib4, .-kernel_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-//                                            r0      r1          r2          r3      sp+0        sp+4        sp+8       sp+12      sp+16      sp+20
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, %function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dsyrk l nt add
-	mov		r4, r0 // kp
-	mov		r5, r1 // Ap
-	mov		r6, r2 // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner kernel dsyrk l nt sub
-	mov		r4, r3 // kmax
-	ldr		r5, [fp, #0] // Am
-	ldr		r6, [fp, #4] // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for alpha=1.0 and beta=1.0
-	ldr		r4, [fp, #8] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-	// factorization
-	ldr		r4, [fp, #16] // E
-	ldr		r5, [fp, #20] // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-
-	// store l
-	ldr		r4, [fp, #12] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-//                                        r0      r1          r2          r3      sp+0        sp+4        sp+8       sp+12      sp+16
-// void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_lib4, %function
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	fldd	d0, .LC00
-	fcpyd	d1, d0
-	fcpyd	d2, d0
-	fcpyd	d3, d0
-	fcpyd	d4, d0
-	fcpyd	d5, d0
-	fcpyd	d6, d0
-	fcpyd	d7, d0
-	fcpyd	d8, d0
-	fcpyd	d9, d0
-	fcpyd	d10, d0
-	fcpyd	d11, d0
-	fcpyd	d12, d0
-	fcpyd	d13, d0
-	fcpyd	d14, d0
-	fcpyd	d15, d0
-
-
-
-	// call inner kernel dsyrk l nt
-	mov		r4, r0 // kp
-	mov		r5, r1 // Ap
-	mov		r6, r2 // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DSYRK_L_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dsyrk_l_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dsyrk_l_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner kernel dsyrk l nt sub
-	mov		r4, r3 // kmax
-	ldr		r5, [fp, #0] // Am
-	ldr		r6, [fp, #4] // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DSYRK_L_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_dsyrk_l_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_dsyrk_l_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for alpha=1.0 and beta=1.0
-	ldr		r4, [fp, #8] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-	// factorization
-	ldr		r4, [fp, #16] // inv_diag_D
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_edge_dpotrf_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-
-
-	// store l
-	ldr		r4, [fp, #12] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_12x4_lib4.S b/third_party/blasfeo/kernel/armv7a/kernel_sgemm_12x4_lib4.S
deleted file mode 100644
index 96ff7a4..0000000
--- a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_12x4_lib4.S
+++ /dev/null
@@ -1,589 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- sda
-// r7   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_12X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_12x4_lib4, %function
-inner_kernel_gemm_add_nt_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_12x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	add		r8, r5, r6 // A1
-	add		r9, r8, r6 // A2
-
-	// prefetch
-	pld			[r5, #0] // A0
-	pld			[r7, #0] // B
-	pld			[r8, #0] // A1
-	pld			[r9, #0] // A2
-
-	// preload
-	vld1.64		{d0, d1}, [r7:128] // B
-	vld1.64		{d2, d3}, [r5:128] // A0
-	vld1.64		{d4, d5}, [r8:128] // A1
-//	vld1.64		{d6, d7}, [r9:128] // A2
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	pld			[r5, #64] // A0
-	pld			[r7, #64] // B
-	pld			[r8, #64] // A1
-	pld			[r9, #64] // A2
-
-	// main loop
-1:
-	
-	// unroll 0
-	vmla.f32	q4, q1, d0[0]
-	vldr		d6, [r9, #0] // A2
-	vmla.f32	q5, q1, d0[1]
-	vldr		d7, [r9, #8] // A2
-	vmla.f32	q6, q1, d1[0]
-	pld			[r7, #128]
-	vmla.f32	q7, q1, d1[1]
-	vldr		d2, [r5, #16] // A0
-	vmla.f32	q8, q2, d0[0]
-	vldr		d3, [r5, #24] // A0
-	vmla.f32	q9, q2, d0[1]
-	pld			[r5, #128]
-	vmla.f32	q10, q2, d1[0]
-	pld			[r8, #128]
-	vmla.f32	q11, q2, d1[1]
-	vldr		d4, [r7, #16] // B
-	vmla.f32	q12, q3, d0[0]
-	vldr		d5, [r7, #24] // B
-	vmla.f32	q13, q3, d0[1]
-	vldr		d0, [r8, #16] // A1
-	vmla.f32	q14, q3, d1[0]
-	pld			[r9, #128]
-	vmla.f32	q15, q3, d1[1]
-	vldr		d1, [r8, #24] // A1
-
-	// unroll 1
-	vmla.f32	q4, q1, d4[0]
-	vldr		d6, [r9, #16] // A2
-	vmla.f32	q5, q1, d4[1]
-	vldr		d7, [r9, #24] // A2
-	vmla.f32	q6, q1, d5[0]
-	sub		r4, r4, #4
-	vmla.f32	q7, q1, d5[1]
-	vldr		d2, [r5, #32] // A0
-	vmla.f32	q8, q0, d4[0]
-	vldr		d3, [r5, #40] // A0
-	vmla.f32	q9, q0, d4[1]
-	vmla.f32	q10, q0, d5[0]
-	vmla.f32	q11, q0, d5[1]
-	vldr		d0, [r7, #32] // B
-	vmla.f32	q12, q3, d4[0]
-	vldr		d1, [r7, #40] // B
-	vmla.f32	q13, q3, d4[1]
-	vldr		d4, [r8, #32] // A1
-	vmla.f32	q14, q3, d5[0]
-	vmla.f32	q15, q3, d5[1]
-	vldr		d5, [r8, #40] // A1
-
-	// unroll 2
-	vmla.f32	q4, q1, d0[0]
-	vldr		d6, [r9, #32] // A2
-	vmla.f32	q5, q1, d0[1]
-	vldr		d7, [r9, #40] // A2
-	vmla.f32	q6, q1, d1[0]
-	vmla.f32	q7, q1, d1[1]
-	vldr		d2, [r5, #48] // A0
-	vmla.f32	q8, q2, d0[0]
-	vldr		d3, [r5, #56] // A0
-	vmla.f32	q9, q2, d0[1]
-	vmla.f32	q10, q2, d1[0]
-	add		r5, r5, #64
-	vmla.f32	q11, q2, d1[1]
-	vldr		d4, [r7, #48] // B
-	vmla.f32	q12, q3, d0[0]
-	vldr		d5, [r7, #56] // B
-	vmla.f32	q13, q3, d0[1]
-	vldr		d0, [r8, #48] // A1
-	vmla.f32	q14, q3, d1[0]
-	add		r7, r7, #64
-	vmla.f32	q15, q3, d1[1]
-	vldr		d1, [r8, #56] // A1
-
-	// unroll 3
-	vmla.f32	q4, q1, d4[0]
-	vldr		d6, [r9, #48] // A2
-	vmla.f32	q5, q1, d4[1]
-	vldr		d7, [r9, #56] // A2
-	vmla.f32	q6, q1, d5[0]
-	add		r8, r8, #64
-	vmla.f32	q7, q1, d5[1]
-	vldr		d2, [r5, #0] // A0
-	vmla.f32	q8, q0, d4[0]
-	vldr		d3, [r5, #8] // A0
-	vmla.f32	q9, q0, d4[1]
-	add		r9, r9, #64
-	vmla.f32	q10, q0, d5[0]
-	cmp		r4, #4
-	vmla.f32	q11, q0, d5[1]
-	vldr		d0, [r7, #0] // B
-	vmla.f32	q12, q3, d4[0]
-	vldr		d1, [r7, #8] // B
-	vmla.f32	q13, q3, d4[1]
-	vldr		d4, [r8, #0] // A1
-	vmla.f32	q14, q3, d5[0]
-	vmla.f32	q15, q3, d5[1]
-	vldr		d5, [r8, #8] // A1
-
-
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-
-	// unroll 0
-	vmla.f32	q4, q1, d0[0]
-	vldr		d6, [r9, #0] // A2
-	vmla.f32	q5, q1, d0[1]
-	vldr		d7, [r9, #8] // A2
-	vmla.f32	q6, q1, d1[0]
-	pld			[r7, #64]
-	vmla.f32	q7, q1, d1[1]
-	vldr		d2, [r5, #16] // A0
-	vmla.f32	q8, q2, d0[0]
-	vldr		d3, [r5, #24] // A0
-	vmla.f32	q9, q2, d0[1]
-	pld			[r5, #64]
-	vmla.f32	q10, q2, d1[0]
-	pld			[r8, #64]
-	vmla.f32	q11, q2, d1[1]
-	vldr		d4, [r7, #16] // B
-	vmla.f32	q12, q3, d0[0]
-	vldr		d5, [r7, #24] // B
-	vmla.f32	q13, q3, d0[1]
-	vldr		d0, [r8, #16] // A1
-	vmla.f32	q14, q3, d1[0]
-	pld			[r9, #64]
-	vmla.f32	q15, q3, d1[1]
-	vldr		d1, [r8, #24] // A1
-
-	// unroll 1
-	vmla.f32	q4, q1, d4[0]
-	vldr		d6, [r9, #16] // A2
-	vmla.f32	q5, q1, d4[1]
-	vldr		d7, [r9, #24] // A2
-	vmla.f32	q6, q1, d5[0]
-	sub		r4, r4, #4
-	vmla.f32	q7, q1, d5[1]
-	vldr		d2, [r5, #32] // A0
-	vmla.f32	q8, q0, d4[0]
-	vldr		d3, [r5, #40] // A0
-	vmla.f32	q9, q0, d4[1]
-	vmla.f32	q10, q0, d5[0]
-	vmla.f32	q11, q0, d5[1]
-	vldr		d0, [r7, #32] // B
-	vmla.f32	q12, q3, d4[0]
-	vldr		d1, [r7, #40] // B
-	vmla.f32	q13, q3, d4[1]
-	vldr		d4, [r8, #32] // A1
-	vmla.f32	q14, q3, d5[0]
-	vmla.f32	q15, q3, d5[1]
-	vldr		d5, [r8, #40] // A1
-
-	// unroll 2
-	vmla.f32	q4, q1, d0[0]
-	vldr		d6, [r9, #32] // A2
-	vmla.f32	q5, q1, d0[1]
-	vldr		d7, [r9, #40] // A2
-	vmla.f32	q6, q1, d1[0]
-	vmla.f32	q7, q1, d1[1]
-	vldr		d2, [r5, #48] // A0
-	vmla.f32	q8, q2, d0[0]
-	vldr		d3, [r5, #56] // A0
-	vmla.f32	q9, q2, d0[1]
-	vmla.f32	q10, q2, d1[0]
-	add		r5, r5, #64
-	vmla.f32	q11, q2, d1[1]
-	vldr		d4, [r7, #48] // B
-	vmla.f32	q12, q3, d0[0]
-	vldr		d5, [r7, #56] // B
-	vmla.f32	q13, q3, d0[1]
-	vldr		d0, [r8, #48] // A1
-	vmla.f32	q14, q3, d1[0]
-	add		r7, r7, #64
-	vmla.f32	q15, q3, d1[1]
-	vldr		d1, [r8, #56] // A1
-
-	// unroll 3
-	vmla.f32	q4, q1, d4[0]
-	vldr		d6, [r9, #48] // A2
-	vmla.f32	q5, q1, d4[1]
-	vldr		d7, [r9, #56] // A2
-	vmla.f32	q6, q1, d5[0]
-	add		r9, r9, #64
-	vmla.f32	q7, q1, d5[1]
-//	vldr		d2, [r5, #0] // A0
-	vmla.f32	q8, q0, d4[0]
-//	vldr		d3, [r5, #8] // A0
-	vmla.f32	q9, q0, d4[1]
-	vmla.f32	q10, q0, d5[0]
-	add		r8, r8, #64
-	vmla.f32	q11, q0, d5[1]
-//	vldr		d0, [r7, #0] // B
-	vmla.f32	q12, q3, d4[0]
-//	vldr		d1, [r7, #8] // B
-	vmla.f32	q13, q3, d4[1]
-//	vldr		d4, [r8, #0] // A1
-	vmla.f32	q14, q3, d5[0]
-	vmla.f32	q15, q3, d5[1]
-//	vldr		d5, [r8, #8] // A1
-
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-
-3: // clean1-up loop
-
-	// unroll 0
-	vld1.64		{d4, d5}, [r7:128]! // B
-	vld1.64		{d0, d1}, [r5:128]! // A0
-	vmla.f32	q4, q0, d4[0]
-	sub		r4, r4, #1
-	vmla.f32	q5, q0, d4[1]
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-	vld1.64		{d0, d1}, [r8:128]! // A1
-	vmla.f32	q8, q0, d4[0]
-	vmla.f32	q9, q0, d4[1]
-	vmla.f32	q10, q0, d5[0]
-	vmla.f32	q11, q0, d5[1]
-	vld1.64		{d0, d1}, [r8:128]! // A1
-	vmla.f32	q12, q0, d4[0]
-	vmla.f32	q13, q0, d4[1]
-	vmla.f32	q14, q0, d5[0]
-	vmla.f32	q15, q0, d5[1]
-
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_12x4_lib4, .-inner_kernel_gemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- alpha
-// r5   <- beta
-// r6   <- C
-// r7   <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_12X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_12x4_lib4, %function
-inner_scale_ab_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_12x4_lib4:
-#endif
-#endif
-
-	flds		s8, [r4, #0] // alpha
-
-	vmul.f32	q4, q4, d4[0]
-	flds		s9, [r5, #0] // beta
-	vmul.f32	q5, q5, d4[0]
-	flds		s10, .LC00 // 0.0
-	vmul.f32	q6, q6, d4[0]
-	vmul.f32	q7, q7, d4[0]
-	fcmpes		s9, s10
-	vmul.f32	q8, q8, d4[0]
-	vmul.f32	q9, q9, d4[0]
-	vmul.f32	q10, q10, d4[0]
-	vmul.f32	q11, q11, d4[0]
-	vmul.f32	q12, q12, d4[0]
-	vmul.f32	q13, q13, d4[0]
-	vmul.f32	q14, q14, d4[0]
-	vmul.f32	q15, q15, d4[0]
-	fmstat
-
-	beq		0f // end
-
-	add		r8, r6, r7
-	add		r9, r8, r7
-
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q4, q0, d4[1]
-	vmla.f32	q5, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q6, q0, d4[1]
-	vmla.f32	q7, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r8:128]!
-	vmla.f32	q8, q0, d4[1]
-	vmla.f32	q9, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r8:128]!
-	vmla.f32	q10, q0, d4[1]
-	vmla.f32	q11, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r9:128]!
-	vmla.f32	q12, q0, d4[1]
-	vmla.f32	q13, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r9:128]!
-	vmla.f32	q14, q0, d4[1]
-	vmla.f32	q15, q1, d4[1]
-
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_12x4_lib4, .-inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- D
-// r5   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_12X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_12x4_lib4, %function
-inner_store_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_12x4_lib4:
-#endif
-#endif
-
-	add		r6, r4, r5
-	add		r7, r6, r5
-
-	vst1.64		{d8, d9, d10, d11}, [r4:128]!
-	vst1.64		{d12, d13, d14, d15}, [r4:128]!
-	vst1.64		{d16, d17, d18, d19}, [r6:128]!
-	vst1.64		{d20, d21, d22, d23}, [r6:128]!
-	vst1.64		{d24, d25, d26, d27}, [r7:128]!
-	vst1.64		{d28, d29, d30, d31}, [r7:128]!
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_store_12x4_lib4, .-inner_store_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// zero double word
-	.align 3
-.LC00: // { 0 }
-	.word 0
-	.word 0
-	.word 0
-	.word 0
-
-//                                r0        r1             r2         r3       sp+0       sp+4          sp+8       sp+12    sp+16   sp+20
-// void kernel_sgemm_nt_12x4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_sgemm_nt_12x4_lib4
-	.type	kernel_sgemm_nt_12x4_lib4, %function
-kernel_sgemm_nt_12x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_sgemm_nt_12x4_lib4
-_kernel_sgemm_nt_12x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	vldr	d8, .LC00
-	vldr	d9, .LC00+8
-	vmov	q5, q4
-	vmov	q6, q4
-	vmov	q7, q4
-	vmov	q8, q4
-	vmov	q9, q4
-	vmov	q10, q4
-	vmov	q11, q4
-	vmov	q12, q4
-	vmov	q13, q4
-	vmov	q14, q4
-	vmov	q15, q4
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // sda
-	lsl		r6, r6, #4 // 4*sizeof(float)*sda
-	ldr		r7, [fp, #0] // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_gemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_gemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #4] // beta
-	ldr		r6, [fp, #8] // C
-	ldr		r7, [fp, #12] // sdc
-	lsl		r7, r7, #4 // 4*sizeof(float)*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #16] // D
-	ldr		r5, [fp, #20] // sdd
-	lsl		r5, r5, #4 // 4*sizeof(float)*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_12x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_12x4_lib4, .-kernel_sgemm_nt_12x4_lib4
-#endif
-
-
-
-
diff --git a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_4x4_lib4.S b/third_party/blasfeo/kernel/armv7a/kernel_sgemm_4x4_lib4.S
deleted file mode 100644
index e8a2e71..0000000
--- a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_4x4_lib4.S
+++ /dev/null
@@ -1,675 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_4x4_lib4, %function
-inner_kernel_gemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vld1.64		{d4, d5}, [r6:128]! // B
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, #64]
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d4[1]
-	vld1.64		{d6, d7}, [r6:128]! // B
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-
-	// unroll 1
-	vmla.f32	q4, q1, d6[0]
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q5, q1, d6[1]
-	vld1.64		{d4, d5}, [r6:128]! // B
-	vmla.f32	q6, q1, d7[0]
-	vmla.f32	q7, q1, d7[1]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d4[1]
-	vld1.64		{d6, d7}, [r6:128]! // B
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-
-	// unroll 3
-	vmla.f32	q4, q1, d6[0]
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q5, q1, d6[1]
-	vld1.64		{d4, d5}, [r6:128]! // B
-	vmla.f32	q6, q1, d7[0]
-	vmla.f32	q7, q1, d7[1]
-
-	sub		r4, r4, #4
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d4[1]
-	vld1.64		{d6, d7}, [r6:128]! // B
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-
-	// unroll 1
-	vmla.f32	q4, q1, d6[0]
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q5, q1, d6[1]
-	vld1.64		{d4, d5}, [r6:128]! // B
-	vmla.f32	q6, q1, d7[0]
-	vmla.f32	q7, q1, d7[1]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d4[1]
-	vld1.64		{d6, d7}, [r6:128]! // B
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-
-	// unroll 3
-	vmla.f32	q4, q1, d6[0]
-//	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q5, q1, d6[1]
-//	vld1.64		{d4, d5}, [r6:128]! // B
-	vmla.f32	q6, q1, d7[0]
-	vmla.f32	q7, q1, d7[1]
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-	sub		r5, r5, #16
-	sub		r6, r6, #16
-
-3: // clean1-up loop
-
-	// unroll 0
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vld1.64		{d4, d5}, [r6:128]! // B
-	vmla.f32	q4, q0, d4[0]
-	vmla.f32	q5, q0, d4[1]
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_4x4_lib4, .-inner_kernel_gemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-// r7   <- 4*sdb*sizeof(float)
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_4x4_lib4, %function
-inner_kernel_gemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vldr		d4, [r6, #0]   // B[0,1]
-	vldr		d5, [r6, #16]  // B[4,5]
-	vldr		d6, [r6, #32]  // B[8,9]
-	vldr		d7, [r6, #48]  // B[12,13]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, r7]
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 1
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #8]  // B[2,3]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #24] // B[6,7]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #40] // B[10,11]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #56] // B[14,15]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	add		r6, r6, r7
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 3
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #0]   // B[0,1]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #16]  // B[4,5]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #32]  // B[8,9]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #48]  // B[12,13]
-
-	sub		r4, r4, #4
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 1
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #8]  // B[2,3]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #24] // B[6,7]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #40] // B[10,11]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #56] // B[14,15]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	add		r6, r6, r7
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 3
-//	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-//	vldr		d4, [r6, #0]   // B[0,1]
-	vmla.f32	q5, q1, d5[1]
-//	vldr		d5, [r6, #16]  // B[4,5]
-	vmla.f32	q6, q1, d6[1]
-//	vldr		d6, [r6, #32]  // B[8,9]
-	vmla.f32	q7, q1, d7[1]
-//	vldr		d7, [r6, #48]  // B[12,13]
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-	sub		r5, r5, #16
-
-3: // clean1-up loop
-
-	// unroll 0
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vldr		s8, [r6, #0]  // B[0]
-	vmla.f32	q4, q0, d4[0]
-	vldr		s8, [r6, #16] // B[4]
-	vmla.f32	q5, q0, d4[0]
-	vldr		s8, [r6, #32] // B[8]
-	vmla.f32	q6, q0, d4[0]
-	vldr		s8, [r6, #48] // B[12]
-	vmla.f32	q7, q0, d4[0]
-
-	sub		r4, r4, #1
-	add		r6, r6, #4
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_4x4_lib4, .-inner_kernel_gemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- alpha
-// r5   <- beta
-// r6   <- C
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, %function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#endif
-#endif
-
-	flds		s8, [r4, #0] // alpha
-
-	vmul.f32	q4, q4, d4[0]
-	flds		s9, [r5, #0] // beta
-	vmul.f32	q5, q5, d4[0]
-	flds		s10, .LC00 // 0.0
-	vmul.f32	q6, q6, d4[0]
-	fcmpes		s9, s10
-	vmul.f32	q7, q7, d4[0]
-	fmstat
-
-	beq		0f // end
-
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q4, q0, d4[1]
-	vmla.f32	q5, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q6, q0, d4[1]
-	vmla.f32	q7, q1, d4[1]
-
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- D
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, %function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#endif
-#endif
-
-	vst1.64		{d8, d9, d10, d11}, [r4:128]!
-	vst1.64		{d12, d13, d14, d15}, [r4:128]!
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// zero double word
-	.align 3
-.LC00: // { 0 }
-	.word 0
-	.word 0
-	.word 0
-	.word 0
-
-//                               r0        r1             r2         r3         sp+0          sp+4       sp+8
-// void kernel_sgemm_nt_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_sgemm_nt_4x4_lib4
-	.type	kernel_sgemm_nt_4x4_lib4, %function
-kernel_sgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_sgemm_nt_4x4_lib4
-_kernel_sgemm_nt_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	vldr	d8, .LC00
-	vldr	d9, .LC00+8
-	vmov	q5, q4
-	vmov	q6, q4
-	vmov	q7, q4
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_gemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_gemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #0] // beta
-	ldr		r6, [fp, #4] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #8] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x4_lib4, .-kernel_sgemm_nt_4x4_lib4
-#endif
-
-
-
-//                               r0        r1             r2         r3         sp+0     sp+4          sp+8       sp+12
-// void kernel_sgemm_nn_4x4_lib4(int kmax, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_sgemm_nn_4x4_lib4
-	.type	kernel_sgemm_nn_4x4_lib4, %function
-kernel_sgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_sgemm_nn_4x4_lib4
-_kernel_sgemm_nn_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	vldr	d8, .LC00
-	vldr	d9, .LC00+8
-	vmov	q5, q4
-	vmov	q6, q4
-	vmov	q7, q4
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // B
-	ldr		r7, [fp, #0] // sdb
-	lsl		r7, r7, #4 // 4*sizeof(float)*sdb
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_gemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_gemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #4] // beta
-	ldr		r6, [fp, #8] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #12] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_4x4_lib4, .-kernel_sgemm_nn_4x4_lib4
-#endif
-
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_8x4_lib4.S b/third_party/blasfeo/kernel/armv7a/kernel_sgemm_8x4_lib4.S
deleted file mode 100644
index f356c9b..0000000
--- a/third_party/blasfeo/kernel/armv7a/kernel_sgemm_8x4_lib4.S
+++ /dev/null
@@ -1,795 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- sda
-// r7   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_8x4_lib4, %function
-inner_kernel_gemm_add_nt_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_8x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	add		r8, r5, r6 // A1
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r7, #0]
-	pld		[r8, #0]
-	pld		[r7, #64]
-
-	// preload
-	vld1.64		{d0, d1}, [r7:128]! // B // TODO preload B in d0-d3 too ?????
-	vld1.64		{d2, d3}, [r7:128]! // B
-	vld1.64		{d4, d5}, [r7:128]! // B // TODO preload B in d0-d3 too ?????
-	vld1.64		{d6, d7}, [r7:128]! // B
-	vld1.64		{d24, d25}, [r5:128]! // A0
-	vld1.64		{d28, d29}, [r5:128]! // A0
-	vld1.64		{d26, d27}, [r8:128] // A1
-
-	sub		r7, r7, #64
-	sub		r5, r5, #32
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// unroll 0
-	pld		[r5, #64] // A0
-	vmla.f32	q4, q12, d0[0]
-	vldr		d30, [r8, #16] // A1
-	vmla.f32	q5, q12, d0[1]
-	vldr		d31, [r8, #24] // A1
-	vmla.f32	q6, q12, d1[0]
-	pld		[r7, #128] // B
-	vmla.f32	q7, q12, d1[1]
-	vldr		d24, [r5, #32]
-	vmla.f32	q8, q13, d0[0]
-	vldr		d25, [r5, #40]
-	vmla.f32	q9, q13, d0[1]
-	vldr		d0, [r7, #64]
-	vmla.f32	q10, q13, d1[0]
-	pld		[r8, #64] // A1
-	vmla.f32	q11, q13, d1[1]
-	vldr		d1, [r7, #72]
-
-	// unroll 1
-	vmla.f32	q4, q14, d2[0]
-	vldr		d26, [r8, #32] // A1
-	vmla.f32	q5, q14, d2[1]
-	vldr		d27, [r8, #40] // A1
-	vmla.f32	q6, q14, d3[0]
-	vmla.f32	q7, q14, d3[1]
-	vldr		d28, [r5, #48]
-	vmla.f32	q8, q15, d2[0]
-	vldr		d29, [r5, #56]
-	vmla.f32	q9, q15, d2[1]
-	vldr		d2, [r7, #80]
-	vmla.f32	q10, q15, d3[0]
-	add		r5, r5, #64
-	vmla.f32	q11, q15, d3[1]
-	vldr		d3, [r7, #88]
-
-	// unroll 2
-	vmla.f32	q4, q12, d4[0]
-	vldr		d30, [r8, #48] // A1
-	vmla.f32	q5, q12, d4[1]
-	vldr		d31, [r8, #56] // A1
-	vmla.f32	q6, q12, d5[0]
-	add		r7, r7, #64
-	vmla.f32	q7, q12, d5[1]
-	vldr		d24, [r5, #0]
-	vmla.f32	q8, q13, d4[0]
-	vldr		d25, [r5, #8]
-	vmla.f32	q9, q13, d4[1]
-	vldr		d4, [r7, #32]
-	vmla.f32	q10, q13, d5[0]
-	add		r8, r8, #64
-	vmla.f32	q11, q13, d5[1]
-	vldr		d5, [r7, #40]
-
-	// unroll 3
-	vmla.f32	q4, q14, d6[0]
-	vldr		d26, [r8, #0] // A1
-	vmla.f32	q5, q14, d6[1]
-	vldr		d27, [r8, #8] // A1
-	vmla.f32	q6, q14, d7[0]
-	sub		r4, r4, #4
-	vmla.f32	q7, q14, d7[1]
-	vldr		d28, [r5, #16]
-	vmla.f32	q8, q15, d6[0]
-	vldr		d29, [r5, #24]
-	vmla.f32	q9, q15, d6[1]
-	vldr		d6, [r7, #48]
-	vmla.f32	q10, q15, d7[0]
-	vmla.f32	q11, q15, d7[1]
-	vldr		d7, [r7, #56]
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-
-	// unroll 0
-	vmla.f32	q4, q12, d0[0]
-	vldr		d30, [r8, #16] // A1
-	vmla.f32	q5, q12, d0[1]
-	vldr		d31, [r8, #24] // A1
-	vmla.f32	q6, q12, d1[0]
-	vmla.f32	q7, q12, d1[1]
-	vldr		d24, [r5, #32]
-	vmla.f32	q8, q13, d0[0]
-	vldr		d25, [r5, #40]
-	vmla.f32	q9, q13, d0[1]
-//	vldr		d4, [r7, #64]
-	vmla.f32	q10, q13, d1[0]
-	vmla.f32	q11, q13, d1[1]
-//	vldr		d5, [r7, #72]
-
-	// unroll 1
-	vmla.f32	q4, q14, d2[0]
-	vldr		d26, [r8, #32] // A1
-	vmla.f32	q5, q14, d2[1]
-	vldr		d27, [r8, #40] // A1
-	vmla.f32	q6, q14, d3[0]
-	vmla.f32	q7, q14, d3[1]
-	vldr		d28, [r5, #48]
-	vmla.f32	q8, q15, d2[0]
-	vldr		d29, [r5, #56]
-	vmla.f32	q9, q15, d2[1]
-//	vldr		d6, [r7, #80]
-	vmla.f32	q10, q15, d3[0]
-//	add		r5, r5, #64
-	vmla.f32	q11, q15, d3[1]
-//	vldr		d7, [r7, #88]
-
-	// unroll 2
-	vmla.f32	q4, q12, d4[0]
-	vldr		d30, [r8, #48] // A1
-	vmla.f32	q5, q12, d4[1]
-	vldr		d31, [r8, #56] // A1
-	vmla.f32	q6, q12, d5[0]
-//	add		r7, r7, #64
-	vmla.f32	q7, q12, d5[1]
-//	vldr		d24, [r5, #0]
-	vmla.f32	q8, q13, d4[0]
-//	vldr		d25, [r5, #8]
-	vmla.f32	q9, q13, d4[1]
-//	vldr		d4, [r7, #32]
-	vmla.f32	q10, q13, d5[0]
-//	add		r8, r8, #64
-	vmla.f32	q11, q13, d5[1]
-//	vldr		d5, [r7, #40]
-
-	// unroll 3
-	vmla.f32	q4, q14, d6[0]
-//	vldr		d26, [r8, #0] // A1
-	vmla.f32	q5, q14, d6[1]
-//	vldr		d27, [r8, #8] // A1
-	vmla.f32	q6, q14, d7[0]
-	sub		r4, r4, #4
-	vmla.f32	q7, q14, d7[1]
-//	vldr		d28, [r5, #16]
-	vmla.f32	q8, q15, d6[0]
-//	vldr		d29, [r5, #24]
-	vmla.f32	q9, q15, d6[1]
-//	vldr		d6, [r7, #48]
-	vmla.f32	q10, q15, d7[0]
-	vmla.f32	q11, q15, d7[1]
-//	vldr		d7, [r7, #56]
-
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-//	sub		r5, r5, #32 // A0
-//	sub		r7, r7, #32 // B
-//	sub		r8, r8, #16 // A1
-
-3: // clean1-up loop
-
-	// unroll 0
-	vld1.64		{d4, d5}, [r7:128]! // B
-	vld1.64		{d0, d1}, [r5:128]! // A0
-	vmla.f32	q4, q0, d4[0]
-	vmla.f32	q5, q0, d4[1]
-	vmla.f32	q6, q0, d5[0]
-	vmla.f32	q7, q0, d5[1]
-	vld1.64		{d0, d1}, [r8:128]! // A1
-	vmla.f32	q8, q0, d4[0]
-	vmla.f32	q9, q0, d4[1]
-	vmla.f32	q10, q0, d5[0]
-	vmla.f32	q11, q0, d5[1]
-
-	sub		r4, r4, #1
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_8x4_lib4, .-inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-#if 0
-// subroutine
-//
-// input arguments:
-// r4   <- k
-// r5   <- A
-// r6   <- B
-// r7   <- 4*sdb*sizeof(float)
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_4X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_4x4_lib4, %function
-inner_kernel_gemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_4x4_lib4:
-#endif
-#endif
-
-	// early return
-	cmp		r4, #0
-	ble		2f // return
-
-	// prefetch
-	pld		[r5, #0]
-	pld		[r6, #0]
-
-	// preload A
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vldr		d4, [r6, #0]   // B[0,1]
-	vldr		d5, [r6, #16]  // B[4,5]
-	vldr		d6, [r6, #32]  // B[8,9]
-	vldr		d7, [r6, #48]  // B[12,13]
-
-	cmp		r4, #4
-	ble		0f // consider clean up loop
-
-	// main loop
-1:
-	
-	// prefetch
-	pld		[r5, #64]
-	pld		[r6, r7]
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 1
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #8]  // B[2,3]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #24] // B[6,7]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #40] // B[10,11]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #56] // B[14,15]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	add		r6, r6, r7
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 3
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #0]   // B[0,1]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #16]  // B[4,5]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #32]  // B[8,9]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #48]  // B[12,13]
-
-	sub		r4, r4, #4
-
-	cmp		r4, #4
-	bgt		1b
-
-0:
-
-	cmp		r4, #3
-	ble		4f
-
-	// unroll 0
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 1
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-	vldr		d4, [r6, #8]  // B[2,3]
-	vmla.f32	q5, q1, d5[1]
-	vldr		d5, [r6, #24] // B[6,7]
-	vmla.f32	q6, q1, d6[1]
-	vldr		d6, [r6, #40] // B[10,11]
-	vmla.f32	q7, q1, d7[1]
-	vldr		d7, [r6, #56] // B[14,15]
-
-	// unroll 2
-	vmla.f32	q4, q0, d4[0]
-	vld1.64		{d2, d3}, [r5:128]! // A
-	vmla.f32	q5, q0, d5[0]
-	add		r6, r6, r7
-	vmla.f32	q6, q0, d6[0]
-	vmla.f32	q7, q0, d7[0]
-
-	// unroll 3
-//	vld1.64		{d0, d1}, [r5:128]! // A
-	vmla.f32	q4, q1, d4[1]
-//	vldr		d4, [r6, #0]   // B[0,1]
-	vmla.f32	q5, q1, d5[1]
-//	vldr		d5, [r6, #16]  // B[4,5]
-	vmla.f32	q6, q1, d6[1]
-//	vldr		d6, [r6, #32]  // B[8,9]
-	vmla.f32	q7, q1, d7[1]
-//	vldr		d7, [r6, #48]  // B[12,13]
-
-	sub		r4, r4, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		r4, #0
-	ble		2f // return
-
-	sub		r5, r5, #16
-
-3: // clean1-up loop
-
-	// unroll 0
-	vld1.64		{d0, d1}, [r5:128]! // A
-	vldr		s8, [r6, #0]  // B[0]
-	vmla.f32	q4, q0, d4[0]
-	vldr		s8, [r6, #16] // B[4]
-	vmla.f32	q5, q0, d4[0]
-	vldr		s8, [r6, #32] // B[8]
-	vmla.f32	q6, q0, d4[0]
-	vldr		s8, [r6, #48] // B[12]
-	vmla.f32	q7, q0, d4[0]
-
-	sub		r4, r4, #1
-	add		r6, r6, #4
-	cmp		r4, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_4x4_lib4, .-inner_kernel_gemm_add_nn_4x4_lib4
-#endif
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- alpha
-// r5   <- beta
-// r6   <- C
-// r7   <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_8X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_lib4, %function
-inner_scale_ab_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_lib4:
-#endif
-#endif
-
-	flds		s8, [r4, #0] // alpha
-
-	vmul.f32	q4, q4, d4[0]
-	flds		s9, [r5, #0] // beta
-	vmul.f32	q5, q5, d4[0]
-	flds		s10, .LC00 // 0.0
-	vmul.f32	q6, q6, d4[0]
-	vmul.f32	q7, q7, d4[0]
-	fcmpes		s9, s10
-	vmul.f32	q8, q8, d4[0]
-	vmul.f32	q9, q9, d4[0]
-	vmul.f32	q10, q10, d4[0]
-	vmul.f32	q11, q11, d4[0]
-	fmstat
-
-	beq		0f // end
-
-	add		r8, r6, r7
-
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q4, q0, d4[1]
-	vmla.f32	q5, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r6:128]!
-	vmla.f32	q6, q0, d4[1]
-	vmla.f32	q7, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r8:128]!
-	vmla.f32	q8, q0, d4[1]
-	vmla.f32	q9, q1, d4[1]
-	vld1.64		{d0, d1, d2, d3}, [r8:128]!
-	vmla.f32	q10, q0, d4[1]
-	vmla.f32	q11, q1, d4[1]
-
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_lib4, .-inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r4   <- D
-// r5   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_8X4_LIB4
-#else
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_lib4, %function
-inner_store_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_lib4:
-#endif
-#endif
-
-	add		r6, r4, r5
-
-	vst1.64		{d8, d9, d10, d11}, [r4:128]!
-	vst1.64		{d12, d13, d14, d15}, [r4:128]!
-	vst1.64		{d16, d17, d18, d19}, [r6:128]!
-	vst1.64		{d20, d21, d22, d23}, [r6:128]!
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	mov		pc, lr // return
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_lib4, .-inner_store_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// zero double word
-	.align 3
-.LC00: // { 0 }
-	.word 0
-	.word 0
-	.word 0
-	.word 0
-
-//                               r0        r1             r2         r3       sp+0       sp+4          sp+8       sp+12    sp+16   sp+20
-// void kernel_sgemm_nt_8x4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_sgemm_nt_8x4_lib4
-	.type	kernel_sgemm_nt_8x4_lib4, %function
-kernel_sgemm_nt_8x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_sgemm_nt_8x4_lib4
-_kernel_sgemm_nt_8x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	vldr	d8, .LC00
-	vldr	d9, .LC00+8
-	vmov	q5, q4
-	vmov	q6, q4
-	vmov	q7, q4
-	vmov	q8, q4
-	vmov	q9, q4
-	vmov	q10, q4
-	vmov	q11, q4
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // sda
-	lsl		r6, r6, #4 // 4*sizeof(float)*sda
-	ldr		r7, [fp, #0] // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_gemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #4] // beta
-	ldr		r6, [fp, #8] // C
-	ldr		r7, [fp, #12] // sdc
-	lsl		r7, r7, #4 // 4*sizeof(float)*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #16] // D
-	ldr		r5, [fp, #20] // sdd
-	lsl		r5, r5, #4 // 4*sizeof(float)*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_8x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_lib4, .-kernel_sgemm_nt_8x4_lib4
-#endif
-
-
-
-#if 0
-//                               r0        r1             r2         r3         sp+0     sp+4          sp+8       sp+12
-// void kernel_sgemm_nn_4x4_lib4(int kmax, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D)
-
-//	.p2align 4,,15
-#if defined(OS_LINUX)
-	.global	kernel_sgemm_nn_4x4_lib4
-	.type	kernel_sgemm_nn_4x4_lib4, %function
-kernel_sgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.global	kernel_sgemm_nn_4x4_lib4
-_kernel_sgemm_nn_4x4_lib4:
-#endif
-
-	// prologue
-
-	// save GP registers
-	stmdb	sp!, {r4 - r10, fp, lr} // save registers
-	add		fp, sp, #36 // fp to old sp position
-
-	// save FP registers
-	fstmfdd	sp!, {d8-d15}
-
-
-
-	// zero accumulation registers
-	vldr	d8, .LC00
-	vldr	d9, .LC00+8
-	vmov	q5, q4
-	vmov	q6, q4
-	vmov	q7, q4
-
-
-
-	// call inner kernel dgemm nt
-	mov		r4, r0 // kmax
-	mov		r5, r2 // A
-	mov		r6, r3 // B
-	ldr		r7, [fp, #0] // sdb
-	lsl		r7, r7, #4 // 4*sizeof(float)*sdb
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl	inner_kernel_gemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	bl	_inner_kernel_gemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		r4, r1 // alpha
-	ldr		r5, [fp, #4] // beta
-	ldr		r6, [fp, #8] // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-	// store n
-	ldr		r4, [fp, #12] // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX)
-	bl inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	bl _inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-	// epilogue
-
-	// load FP registers
-	fldmfdd	sp!, {d8-d15}
-
-	// load GP registers and return
-//	ldmia	sp!, {r4 - r10, fp, lr} // load registers
-//	mov		pc, lr // return
-	ldmia	sp!, {r4 - r10, fp, pc} // load registers and return
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_4x4_lib4, .-kernel_sgemm_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/armv8a/Makefile b/third_party/blasfeo/kernel/armv8a/Makefile
deleted file mode 100644
index 75e1faf..0000000
--- a/third_party/blasfeo/kernel/armv8a/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-OBJS += kernel_dgemm_8x4_lib4.o kernel_dgemm_4x4_lib4.o
-OBJS += kernel_sgemm_16x4_lib4.o kernel_sgemm_12x4_lib4.o kernel_sgemm_8x8_lib4.o kernel_sgemm_8x4_lib4.o kernel_sgemm_4x4_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index 2d43b10..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,414 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	add sp, sp, #-(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, %function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-
-// TODO more aggressive preload of A !!!
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	// prefetch
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x10, #0]
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// preload
-	ld1   {v24.2d, v25.2d}, [x9], #32
-	ld1   {v28.2d, v29.2d}, [x10], #32
-
-	// prefetch
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x10, #32]
-
-	// main loop
-1:
-	
-	// unroll 0
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	ld1		{v26.2d, v27.2d}, [x9], #32
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	ld1		{v30.2d, v31.2d}, [x10], #32
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	prfm	PLDL1KEEP, [x10, #64]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-	sub		w8, w8, #4
-
-	// unroll 1
-	fmla	v0.2d, v26.2d, v30.2d[0]
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	fmla	v1.2d, v27.2d, v30.2d[0]
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v2.2d, v26.2d, v30.2d[1]
-	fmla	v3.2d, v27.2d, v30.2d[1]
-	fmla	v4.2d, v26.2d, v31.2d[0]
-	fmla	v5.2d, v27.2d, v31.2d[0]
-	fmla	v6.2d, v26.2d, v31.2d[1]
-	fmla	v7.2d, v27.2d, v31.2d[1]
-
-	// unroll 2
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	ld1		{v26.2d, v27.2d}, [x9], #32
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	ld1		{v30.2d, v31.2d}, [x10], #32
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	prfm	PLDL1KEEP, [x10, #64]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	// unroll 3
-	fmla	v0.2d, v26.2d, v30.2d[0]
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	fmla	v1.2d, v27.2d, v30.2d[0]
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v2.2d, v26.2d, v30.2d[1]
-	fmla	v3.2d, v27.2d, v30.2d[1]
-	fmla	v4.2d, v26.2d, v31.2d[0]
-	fmla	v5.2d, v27.2d, v31.2d[0]
-	fmla	v6.2d, v26.2d, v31.2d[1]
-	fmla	v7.2d, v27.2d, v31.2d[1]
-
-	cmp		w8, #4
-	bgt		1b
-
-	sub		x9, x9, #32
-	sub		x10, x10, #32
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	// unroll 0
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	// unroll 1
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	// unroll 2
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	// unroll 3
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	sub		w8, w8, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	// unroll 0
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v24.2d, v28.2d[1]
-	fmla	v3.2d, v25.2d, v28.2d[1]
-	fmla	v4.2d, v24.2d, v29.2d[0]
-	fmla	v5.2d, v25.2d, v29.2d[0]
-	fmla	v6.2d, v24.2d, v29.2d[1]
-	fmla	v7.2d, v25.2d, v29.2d[1]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_4x4_lib4, %function
-inner_scale_ab_4x4_lib4:
-#endif
-
-	ld1		{v28.2d}, [x8]
-
-	fmul	v0.2d, v0.2d, v28.2d[0]
-	fmul	v1.2d, v1.2d, v28.2d[0]
-	fmul	v2.2d, v2.2d, v28.2d[0]
-	fmul	v3.2d, v3.2d, v28.2d[0]
-	fmul	v4.2d, v4.2d, v28.2d[0]
-	fmul	v5.2d, v5.2d, v28.2d[0]
-	fmul	v6.2d, v6.2d, v28.2d[0]
-	fmul	v7.2d, v7.2d, v28.2d[0]
-
-	ld1		{v28.2d}, [x9]
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x10], #64
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v26.2d, v28.2d[0]
-	fmla	v3.2d, v27.2d, v28.2d[0]
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x10], #64
-	fmla	v4.2d, v24.2d, v28.2d[0]
-	fmla	v5.2d, v25.2d, v28.2d[0]
-	fmla	v6.2d, v26.2d, v28.2d[0]
-	fmla	v7.2d, v27.2d, v28.2d[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.align 4
-	.type inner_store_4x4_lib4, %function
-inner_store_4x4_lib4:
-#endif
-
-	st1		{v0.2d, v1.2d, v2.2d, v3.2d}, [x8], #64
-	st1		{v4.2d, v5.2d, v6.2d, v7.2d}, [x8], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-
-
-
-
-
-//                               w0        x1             x2         x3         x4            x5         x6
-// void kernel_dgemm_nt_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-
-	.align	4
-	.global	kernel_dgemm_nt_4x4_lib4
-	.type	kernel_dgemm_nt_4x4_lib4, %function
-kernel_dgemm_nt_4x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-
-
-
-	// call inner kernel dgemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		x10, x3 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	bl	inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x4 // beta
-	mov		x10, x5 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-	bl inner_scale_ab_4x4_lib4
-#endif
-
-
-
-	// store n
-	mov		x8, x6
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-	bl inner_store_4x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_dgemm_8x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_dgemm_8x4_lib4.S
deleted file mode 100644
index 314489d..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_dgemm_8x4_lib4.S
+++ /dev/null
@@ -1,575 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	sub sp, sp, #(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10  <- sda
-// x11  <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_8x4_lib4, %function
-inner_kernel_gemm_add_nt_8x4_lib4:
-#endif
-
-// TODO more aggressive preload of A !!!
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	add		x12, x9, x10
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #0]
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x12, #0]
-
-	// preload
-	ldp		d24, d25, [x11], #16
-	ldp		d26, d27, [x11], #16
-	ldp		q16, q17, [x9], #32
-	ldp		q20, q21, [x12], #32
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #32]
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x12, #32]
-
-	// main loop
-1:
-	
-	// unroll 0
-	ldp		d28, d29, [x11], #16
-	fmla	v0.2d, v16.2d, v24.2d[0]
-	fmla	v1.2d, v17.2d, v24.2d[0]
-	ldp		d30, d31, [x11], #16
-	fmla	v2.2d, v16.2d, v25.2d[0]
-	fmla	v3.2d, v17.2d, v25.2d[0]
-	ldr		q18, [x9], #16
-	fmla	v4.2d, v16.2d, v26.2d[0]
-	fmla	v5.2d, v17.2d, v26.2d[0]
-	ldr		q19, [x9], #16
-	fmla	v6.2d, v16.2d, v27.2d[0]
-	fmla	v7.2d, v17.2d, v27.2d[0]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v8.2d, v20.2d, v24.2d[0]
-	fmla	v9.2d, v21.2d, v24.2d[0]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v10.2d, v20.2d, v25.2d[0]
-	fmla	v11.2d, v21.2d, v25.2d[0]
-	ldp		q22, q23, [x12], #32
-	fmla	v12.2d, v20.2d, v26.2d[0]
-	fmla	v13.2d, v21.2d, v26.2d[0]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v14.2d, v20.2d, v27.2d[0]
-	fmla	v15.2d, v21.2d, v27.2d[0]
-
-	// unroll 1
-	ldp		d24, d25, [x11], #16
-	fmla	v0.2d, v18.2d, v28.2d[0]
-	fmla	v1.2d, v19.2d, v28.2d[0]
-	ldp		d26, d27, [x11], #16
-	fmla	v2.2d, v18.2d, v29.2d[0]
-	fmla	v3.2d, v19.2d, v29.2d[0]
-	ldr		q16, [x9], #16
-	fmla	v4.2d, v18.2d, v30.2d[0]
-	fmla	v5.2d, v19.2d, v30.2d[0]
-	ldr		q17, [x9], #16
-	fmla	v6.2d, v18.2d, v31.2d[0]
-	fmla	v7.2d, v19.2d, v31.2d[0]
-	ldr		q20, [x12], #16
-	fmla	v8.2d, v22.2d, v28.2d[0]
-	fmla	v9.2d, v23.2d, v28.2d[0]
-	ldr		q21, [x12], #16
-	fmla	v10.2d, v22.2d, v29.2d[0]
-	fmla	v11.2d, v23.2d, v29.2d[0]
-	sub		w8, w8, #4
-	fmla	v12.2d, v22.2d, v30.2d[0]
-	fmla	v13.2d, v23.2d, v30.2d[0]
-	fmla	v14.2d, v22.2d, v31.2d[0]
-	fmla	v15.2d, v23.2d, v31.2d[0]
-
-	// unroll 2
-	ldp		d28, d29, [x11], #16
-	fmla	v0.2d, v16.2d, v24.2d[0]
-	fmla	v1.2d, v17.2d, v24.2d[0]
-	ldp		d30, d31, [x11], #16
-	fmla	v2.2d, v16.2d, v25.2d[0]
-	fmla	v3.2d, v17.2d, v25.2d[0]
-	ldr		q18, [x9], #16
-	fmla	v4.2d, v16.2d, v26.2d[0]
-	fmla	v5.2d, v17.2d, v26.2d[0]
-	ldr		q19, [x9], #16
-	fmla	v6.2d, v16.2d, v27.2d[0]
-	fmla	v7.2d, v17.2d, v27.2d[0]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v8.2d, v20.2d, v24.2d[0]
-	fmla	v9.2d, v21.2d, v24.2d[0]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v10.2d, v20.2d, v25.2d[0]
-	fmla	v11.2d, v21.2d, v25.2d[0]
-	ldp		q22, q23, [x12], #32
-	fmla	v12.2d, v20.2d, v26.2d[0]
-	fmla	v13.2d, v21.2d, v26.2d[0]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v14.2d, v20.2d, v27.2d[0]
-	fmla	v15.2d, v21.2d, v27.2d[0]
-
-	// unroll 3
-	ldp		d24, d25, [x11], #16
-	fmla	v0.2d, v18.2d, v28.2d[0]
-	fmla	v1.2d, v19.2d, v28.2d[0]
-	ldp		d26, d27, [x11], #16
-	fmla	v2.2d, v18.2d, v29.2d[0]
-	fmla	v3.2d, v19.2d, v29.2d[0]
-	ldr		q16, [x9], #16
-	fmla	v4.2d, v18.2d, v30.2d[0]
-	fmla	v5.2d, v19.2d, v30.2d[0]
-	ldr		q17, [x9], #16
-	fmla	v6.2d, v18.2d, v31.2d[0]
-	fmla	v7.2d, v19.2d, v31.2d[0]
-	ldr		q20, [x12], #16
-	fmla	v8.2d, v22.2d, v28.2d[0]
-	fmla	v9.2d, v23.2d, v28.2d[0]
-	ldr		q21, [x12], #16
-	fmla	v10.2d, v22.2d, v29.2d[0]
-	fmla	v11.2d, v23.2d, v29.2d[0]
-	cmp		w8, #4
-	fmla	v12.2d, v22.2d, v30.2d[0]
-	fmla	v13.2d, v23.2d, v30.2d[0]
-	fmla	v14.2d, v22.2d, v31.2d[0]
-	fmla	v15.2d, v23.2d, v31.2d[0]
-
-	bgt		1b
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	
-	// unroll 0
-	ldp		d28, d29, [x11], #16
-	fmla	v0.2d, v16.2d, v24.2d[0]
-	fmla	v1.2d, v17.2d, v24.2d[0]
-	ldp		d30, d31, [x11], #16
-	fmla	v2.2d, v16.2d, v25.2d[0]
-	fmla	v3.2d, v17.2d, v25.2d[0]
-	ldr		q18, [x9], #16
-	fmla	v4.2d, v16.2d, v26.2d[0]
-	fmla	v5.2d, v17.2d, v26.2d[0]
-	ldr		q19, [x9], #16
-	fmla	v6.2d, v16.2d, v27.2d[0]
-	fmla	v7.2d, v17.2d, v27.2d[0]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v8.2d, v20.2d, v24.2d[0]
-	fmla	v9.2d, v21.2d, v24.2d[0]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v10.2d, v20.2d, v25.2d[0]
-	fmla	v11.2d, v21.2d, v25.2d[0]
-	ldp		q22, q23, [x12], #32
-	fmla	v12.2d, v20.2d, v26.2d[0]
-	fmla	v13.2d, v21.2d, v26.2d[0]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v14.2d, v20.2d, v27.2d[0]
-	fmla	v15.2d, v21.2d, v27.2d[0]
-
-	// unroll 1
-	ldp		d24, d25, [x11], #16
-	fmla	v0.2d, v18.2d, v28.2d[0]
-	fmla	v1.2d, v19.2d, v28.2d[0]
-	ldp		d26, d27, [x11], #16
-	fmla	v2.2d, v18.2d, v29.2d[0]
-	fmla	v3.2d, v19.2d, v29.2d[0]
-	ldr		q16, [x9], #16
-	fmla	v4.2d, v18.2d, v30.2d[0]
-	fmla	v5.2d, v19.2d, v30.2d[0]
-	ldr		q17, [x9], #16
-	fmla	v6.2d, v18.2d, v31.2d[0]
-	fmla	v7.2d, v19.2d, v31.2d[0]
-	ldr		q20, [x12], #16
-	fmla	v8.2d, v22.2d, v28.2d[0]
-	fmla	v9.2d, v23.2d, v28.2d[0]
-	ldr		q21, [x12], #16
-	fmla	v10.2d, v22.2d, v29.2d[0]
-	fmla	v11.2d, v23.2d, v29.2d[0]
-	sub		w8, w8, #4
-	fmla	v12.2d, v22.2d, v30.2d[0]
-	fmla	v13.2d, v23.2d, v30.2d[0]
-	fmla	v14.2d, v22.2d, v31.2d[0]
-	fmla	v15.2d, v23.2d, v31.2d[0]
-
-	// unroll 2
-	ldp		d28, d29, [x11], #16
-	fmla	v0.2d, v16.2d, v24.2d[0]
-	fmla	v1.2d, v17.2d, v24.2d[0]
-	ldp		d30, d31, [x11], #16
-	fmla	v2.2d, v16.2d, v25.2d[0]
-	fmla	v3.2d, v17.2d, v25.2d[0]
-	ldr		q18, [x9], #16
-	fmla	v4.2d, v16.2d, v26.2d[0]
-	fmla	v5.2d, v17.2d, v26.2d[0]
-	ldr		q19, [x9], #16
-	fmla	v6.2d, v16.2d, v27.2d[0]
-	fmla	v7.2d, v17.2d, v27.2d[0]
-//	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v8.2d, v20.2d, v24.2d[0]
-	fmla	v9.2d, v21.2d, v24.2d[0]
-//	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v10.2d, v20.2d, v25.2d[0]
-	fmla	v11.2d, v21.2d, v25.2d[0]
-	ldp		q22, q23, [x12], #32
-	fmla	v12.2d, v20.2d, v26.2d[0]
-	fmla	v13.2d, v21.2d, v26.2d[0]
-//	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v14.2d, v20.2d, v27.2d[0]
-	fmla	v15.2d, v21.2d, v27.2d[0]
-
-	// unroll 3
-//	ldp		d24, d25, [x11], #16
-	fmla	v0.2d, v18.2d, v28.2d[0]
-	fmla	v1.2d, v19.2d, v28.2d[0]
-//	ldp		d26, d27, [x11], #16
-	fmla	v2.2d, v18.2d, v29.2d[0]
-	fmla	v3.2d, v19.2d, v29.2d[0]
-//	ldr		q16, [x9], #16
-	fmla	v4.2d, v18.2d, v30.2d[0]
-	fmla	v5.2d, v19.2d, v30.2d[0]
-//	ldr		q17, [x9], #16
-	fmla	v6.2d, v18.2d, v31.2d[0]
-	fmla	v7.2d, v19.2d, v31.2d[0]
-//	ldr		q20, [x12], #16
-	fmla	v8.2d, v22.2d, v28.2d[0]
-	fmla	v9.2d, v23.2d, v28.2d[0]
-//	ldr		q21, [x12], #16
-	fmla	v10.2d, v22.2d, v29.2d[0]
-	fmla	v11.2d, v23.2d, v29.2d[0]
-//	cmp		w8, #4
-	fmla	v12.2d, v22.2d, v30.2d[0]
-	fmla	v13.2d, v23.2d, v30.2d[0]
-	fmla	v14.2d, v22.2d, v31.2d[0]
-	fmla	v15.2d, v23.2d, v31.2d[0]
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-	sub		x9, x9, #32
-	sub		x11, x11, #32
-	sub		x12, x12, #32
-
-3: // clean1-up loop
-
-	// unroll 0
-	ld1		{v20.2d, v21.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x11], #32
-	fmla	v0.2d, v20.2d, v28.2d[0]
-	fmla	v1.2d, v21.2d, v28.2d[0]
-	fmla	v2.2d, v20.2d, v28.2d[1]
-	fmla	v3.2d, v21.2d, v28.2d[1]
-	fmla	v4.2d, v20.2d, v29.2d[0]
-	fmla	v5.2d, v21.2d, v29.2d[0]
-	fmla	v6.2d, v20.2d, v29.2d[1]
-	fmla	v7.2d, v21.2d, v29.2d[1]
-	ld1		{v22.2d, v23.2d}, [x12], #32
-	fmla	v8.2d, v22.2d, v28.2d[0]
-	fmla	v9.2d, v23.2d, v28.2d[0]
-	fmla	v10.2d, v22.2d, v28.2d[1]
-	fmla	v11.2d, v23.2d, v28.2d[1]
-	fmla	v12.2d, v22.2d, v29.2d[0]
-	fmla	v13.2d, v23.2d, v29.2d[0]
-	fmla	v14.2d, v22.2d, v29.2d[1]
-	fmla	v15.2d, v23.2d, v29.2d[1]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_8x4_lib4, .-inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-// x11  <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_8X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_8x4_lib4, %function
-inner_scale_ab_8x4_lib4:
-#endif
-
-	ld1		{v28.2d}, [x8]
-
-	fmul	v0.2d, v0.2d, v28.2d[0]
-	fmul	v1.2d, v1.2d, v28.2d[0]
-	fmul	v2.2d, v2.2d, v28.2d[0]
-	fmul	v3.2d, v3.2d, v28.2d[0]
-	fmul	v4.2d, v4.2d, v28.2d[0]
-	fmul	v5.2d, v5.2d, v28.2d[0]
-	fmul	v6.2d, v6.2d, v28.2d[0]
-	fmul	v7.2d, v7.2d, v28.2d[0]
-	fmul	v8.2d, v8.2d, v28.2d[0]
-	fmul	v9.2d, v9.2d, v28.2d[0]
-	fmul	v10.2d, v10.2d, v28.2d[0]
-	fmul	v11.2d, v11.2d, v28.2d[0]
-	fmul	v12.2d, v12.2d, v28.2d[0]
-	fmul	v13.2d, v13.2d, v28.2d[0]
-	fmul	v14.2d, v14.2d, v28.2d[0]
-	fmul	v15.2d, v15.2d, v28.2d[0]
-
-	ld1		{v28.2d}, [x9]
-
-	add		x12, x10, x11
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x10], #64
-	fmla	v0.2d, v24.2d, v28.2d[0]
-	fmla	v1.2d, v25.2d, v28.2d[0]
-	fmla	v2.2d, v26.2d, v28.2d[0]
-	fmla	v3.2d, v27.2d, v28.2d[0]
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x10], #64
-	fmla	v4.2d, v24.2d, v28.2d[0]
-	fmla	v5.2d, v25.2d, v28.2d[0]
-	fmla	v6.2d, v26.2d, v28.2d[0]
-	fmla	v7.2d, v27.2d, v28.2d[0]
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x12], #64
-	fmla	v8.2d, v24.2d, v28.2d[0]
-	fmla	v9.2d, v25.2d, v28.2d[0]
-	fmla	v10.2d, v26.2d, v28.2d[0]
-	fmla	v11.2d, v27.2d, v28.2d[0]
-
-	ld1		{v24.2d, v25.2d, v26.2d, v27.2d}, [x12], #64
-	fmla	v12.2d, v24.2d, v28.2d[0]
-	fmla	v13.2d, v25.2d, v28.2d[0]
-	fmla	v14.2d, v26.2d, v28.2d[0]
-	fmla	v15.2d, v27.2d, v28.2d[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_8x4_lib4, .-inner_scale_ab_8x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-// x9   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_8X4_LIB4
-#else
-	.align 4
-	.type inner_store_8x4_lib4, %function
-inner_store_8x4_lib4:
-#endif
-
-	add		x10, x8, x9
-
-	st1		{v0.2d, v1.2d, v2.2d, v3.2d}, [x8], #64
-	st1		{v4.2d, v5.2d, v6.2d, v7.2d}, [x8], #64
-	st1		{v8.2d, v9.2d, v10.2d, v11.2d}, [x10], #64
-	st1		{v12.2d, v13.2d, v14.2d, v15.2d}, [x10], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_8x4_lib4, .-inner_store_8x4_lib4
-#endif
-
-
-
-
-
-//                               w0        x1             x2         w3       x4         x5            x6         w7       sp+0       sp+8
-// void kernel_dgemm_nt_8x4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-
-	.align	4
-	.global	kernel_dgemm_nt_8x4_lib4
-	.type	kernel_dgemm_nt_8x4_lib4, %function
-kernel_dgemm_nt_8x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-	fmov    d8, d0
-	fmov    d9, d0
-	fmov    d10, d0
-	fmov    d11, d0
-	fmov    d12, d0
-	fmov    d13, d0
-	fmov    d14, d0
-	fmov    d15, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		w10, w3 // sda
-	lsl		w10, w10, #5 // 32*sda
-	mov		x11, x4 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x5 // beta
-	mov		x10, x6 // C
-	mov		w11, w7 // C
-	lsl		w11, w11, #5 // 32*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-	bl inner_scale_ab_8x4_lib4
-#endif
-
-
-
-	// store n
-	ldr		x8, [sp, #(STACKSIZE + 0)] // D
-	ldr		w9, [sp, #(STACKSIZE + 8)] // sdd
-	lsl		w9, w9, #5 // 32*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-	bl inner_store_8x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_12x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_sgemm_12x4_lib4.S
deleted file mode 100644
index ab66cad..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_12x4_lib4.S
+++ /dev/null
@@ -1,512 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	sub sp, sp, #(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10  <- sda
-// x11  <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_12X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_12x4_lib4, %function
-inner_kernel_gemm_add_nt_12x4_lib4:
-#endif
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	add		x12, x9, x10
-	add		x13, x12, x10
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #0]
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x12, #0]
-	prfm	PLDL1KEEP, [x13, #0]
-
-	// preload
-	ld1		{v24.4s, v25.4s}, [x9], #32
-	ld1		{v28.4s, v29.4s}, [x11], #32
-	ld1		{v20.4s, v21.4s}, [x12], #32
-	ld1		{v16.4s, v17.4s}, [x13], #32
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #32]
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x12, #32]
-	prfm	PLDL1KEEP, [x13, #32]
-
-	// main loop
-1:
-
-	// unroll 0
-	ld1		{v26.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v27.4s}, [x9], #16
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v30.4s}, [x11], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	ld1		{v31.4s}, [x11], #16
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v22.4s}, [x12], #16
-	fmla	v8.4s, v16.4s, v28.4s[0]
-	fmla	v9.4s, v16.4s, v28.4s[1]
-	ld1		{v23.4s}, [x12], #16
-	fmla	v10.4s, v16.4s, v28.4s[2]
-	fmla	v11.4s, v16.4s, v28.4s[3]
-
-	// unroll 1
-	ld1		{v18.4s}, [x13], #16
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	ld1		{v19.4s}, [x13], #16
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v8.4s, v17.4s, v29.4s[0]
-	fmla	v9.4s, v17.4s, v29.4s[1]
-	sub		w8, w8, #4
-	fmla	v10.4s, v17.4s, v29.4s[2]
-	fmla	v11.4s, v17.4s, v29.4s[3]
-
-	// unroll 2
-	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	fmla	v1.4s, v26.4s, v30.4s[1]
-	ld1		{v25.4s}, [x9], #16
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	fmla	v3.4s, v26.4s, v30.4s[3]
-	ld1		{v28.4s}, [x11], #16
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-	ld1		{v29.4s}, [x11], #16
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-	ld1		{v20.4s}, [x12], #16
-	fmla	v8.4s, v18.4s, v30.4s[0]
-	fmla	v9.4s, v18.4s, v30.4s[1]
-	ld1		{v21.4s}, [x12], #16
-	fmla	v10.4s, v18.4s, v30.4s[2]
-	fmla	v11.4s, v18.4s, v30.4s[3]
-
-	// unroll 3
-	ld1		{v16.4s}, [x13], #16
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	ld1		{v17.4s}, [x13], #16
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	cmp		w8, #4
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-	fmla	v8.4s, v19.4s, v31.4s[0]
-	fmla	v9.4s, v19.4s, v31.4s[1]
-	fmla	v10.4s, v19.4s, v31.4s[2]
-	fmla	v11.4s, v19.4s, v31.4s[3]
-
-	bgt		1b
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	// unroll 0
-	ld1		{v26.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v27.4s}, [x9], #16
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v30.4s}, [x11], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	ld1		{v31.4s}, [x11], #16
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v22.4s}, [x12], #16
-	fmla	v8.4s, v16.4s, v28.4s[0]
-	fmla	v9.4s, v16.4s, v28.4s[1]
-	ld1		{v23.4s}, [x12], #16
-	fmla	v10.4s, v16.4s, v28.4s[2]
-	fmla	v11.4s, v16.4s, v28.4s[3]
-
-	// unroll 1
-	ld1		{v18.4s}, [x13], #16
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	ld1		{v19.4s}, [x13], #16
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-//	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-//	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-//	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v8.4s, v17.4s, v29.4s[0]
-	fmla	v9.4s, v17.4s, v29.4s[1]
-	sub		w8, w8, #4
-	fmla	v10.4s, v17.4s, v29.4s[2]
-	fmla	v11.4s, v17.4s, v29.4s[3]
-
-	// unroll 2
-//	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	fmla	v1.4s, v26.4s, v30.4s[1]
-//	ld1		{v25.4s}, [x9], #16
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	fmla	v3.4s, v26.4s, v30.4s[3]
-//	ld1		{v28.4s}, [x11], #16
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-//	ld1		{v29.4s}, [x11], #16
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-//	ld1		{v20.4s}, [x12], #16
-	fmla	v8.4s, v18.4s, v30.4s[0]
-	fmla	v9.4s, v18.4s, v30.4s[1]
-//	ld1		{v21.4s}, [x12], #16
-	fmla	v10.4s, v18.4s, v30.4s[2]
-	fmla	v11.4s, v18.4s, v30.4s[3]
-
-	// unroll 3
-//	ld1		{v16.4s}, [x13], #16
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-//	ld1		{v17.4s}, [x13], #16
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	cmp		w8, #4
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-	fmla	v8.4s, v19.4s, v31.4s[0]
-	fmla	v9.4s, v19.4s, v31.4s[1]
-	fmla	v10.4s, v19.4s, v31.4s[2]
-	fmla	v11.4s, v19.4s, v31.4s[3]
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-	sub		x9, x9, #32
-	sub		x12, x12, #32
-	sub		x11, x11, #32
-	sub		x13, x13, #32
-
-3: // clean1-up loop
-
-	// unroll 0
-
-	ld1		{v28.4s}, [x11], #16
-	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v20.4s}, [x12], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v16.4s}, [x13], #16
-	fmla	v8.4s, v16.4s, v28.4s[0]
-	fmla	v9.4s, v16.4s, v28.4s[1]
-	fmla	v10.4s, v16.4s, v28.4s[2]
-	fmla	v11.4s, v16.4s, v28.4s[3]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_12x4_lib4, .-inner_kernel_gemm_add_nt_12x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-// x11  <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_12X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_12x4_lib4, %function
-inner_scale_ab_12x4_lib4:
-#endif
-
-	ld1		{v28.4s}, [x8]
-
-	fmul	v0.4s, v0.4s, v28.4s[0]
-	fmul	v1.4s, v1.4s, v28.4s[0]
-	fmul	v2.4s, v2.4s, v28.4s[0]
-	fmul	v3.4s, v3.4s, v28.4s[0]
-	fmul	v4.4s, v4.4s, v28.4s[0]
-	fmul	v5.4s, v5.4s, v28.4s[0]
-	fmul	v6.4s, v6.4s, v28.4s[0]
-	fmul	v7.4s, v7.4s, v28.4s[0]
-	fmul	v8.4s, v8.4s, v28.4s[0]
-	fmul	v9.4s, v9.4s, v28.4s[0]
-	fmul	v10.4s, v10.4s, v28.4s[0]
-	fmul	v11.4s, v11.4s, v28.4s[0]
-
-	ld1		{v28.4s}, [x9]
-
-	add		x12, x10, x11
-	add		x13, x12, x11
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v25.4s, v28.4s[0]
-	fmla	v2.4s, v26.4s, v28.4s[0]
-	fmla	v3.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x12], #64
-	fmla	v4.4s, v24.4s, v28.4s[0]
-	fmla	v5.4s, v25.4s, v28.4s[0]
-	fmla	v6.4s, v26.4s, v28.4s[0]
-	fmla	v7.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x13], #64
-	fmla	v8.4s, v24.4s, v28.4s[0]
-	fmla	v9.4s, v25.4s, v28.4s[0]
-	fmla	v10.4s, v26.4s, v28.4s[0]
-	fmla	v11.4s, v27.4s, v28.4s[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_12x4_lib4, .-inner_scale_ab_12x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-// x9   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_12X4_LIB4
-#else
-	.align 4
-	.type inner_store_12x4_lib4, %function
-inner_store_12x4_lib4:
-#endif
-
-	add		x10, x8, x9
-	add		x11, x10, x9
-
-	st1		{v0.4s, v1.4s, v2.4s, v3.4s}, [x8], #64
-	st1		{v4.4s, v5.4s, v6.4s, v7.4s}, [x10], #64
-	st1		{v8.4s, v9.4s, v10.4s, v11.4s}, [x11], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_12x4_lib4, .-inner_store_12x4_lib4
-#endif
-
-
-
-
-
-//                                w0        x1            x2        w3       x4        x5           x6        w7       sp+0      sp+8
-// void kernel_sgemm_nt_12x4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-
-	.align	4
-	.global	kernel_sgemm_nt_12x4_lib4
-	.type	kernel_sgemm_nt_12x4_lib4, %function
-kernel_sgemm_nt_12x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-	fmov    d8, d0
-	fmov    d9, d0
-	fmov    d10, d0
-	fmov    d11, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		w10, w3 // sda
-	lsl		w10, w10, #4 // 16*sda
-	mov		x11, x4 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_12X4_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_12x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x5 // beta
-	mov		x10, x6 // C
-	mov		w11, w7 // sdc
-	lsl		w11, w11, #4 // 16*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-	bl inner_scale_ab_12x4_lib4
-#endif
-
-
-
-	// store n
-	ldr		x8, [sp, #(STACKSIZE + 0)] // D
-	ldr		w9, [sp, #(STACKSIZE + 8)] // sdd
-	lsl		w9, w9, #4 // 16*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-	bl inner_store_12x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
-
-
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_16x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_sgemm_16x4_lib4.S
deleted file mode 100644
index edc06ac..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_16x4_lib4.S
+++ /dev/null
@@ -1,600 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	sub sp, sp, #(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10  <- sda
-// x11  <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_16X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_16x4_lib4, %function
-inner_kernel_gemm_add_nt_16x4_lib4:
-#endif
-
-// TODO more aggressive preload of A !!!
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	add		x12, x9, x10
-	add		x13, x12, x10
-	add		x14, x13, x10
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #0]
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x12, #0]
-	prfm	PLDL1KEEP, [x13, #0]
-	prfm	PLDL1KEEP, [x14, #0]
-
-	// preload
-	ldp		s24, s25, [x11], #8
-	ldp		s26, s27, [x11], #8
-	ldr		q16, [x9], #16
-	ldr		q17, [x12], #16
-	ldr		q18, [x13], #16
-	ldr		q19, [x14], #16
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #32]
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x12, #32]
-	prfm	PLDL1KEEP, [x13, #32]
-	prfm	PLDL1KEEP, [x14, #32]
-
-	// main loop
-1:
-	
-	// unroll 0
-	ldp		s28, s29, [x11], #8
-	fmla	v0.4s, v16.4s, v24.4s[0]
-	fmla	v1.4s, v16.4s, v25.4s[0]
-	ldp		s30, s31, [x11], #8
-	fmla	v2.4s, v16.4s, v26.4s[0]
-	fmla	v3.4s, v16.4s, v27.4s[0]
-	ldr		q20, [x9], #16
-	fmla	v4.4s, v17.4s, v24.4s[0]
-	fmla	v5.4s, v17.4s, v25.4s[0]
-	ldr		q21, [x12], #16
-	fmla	v6.4s, v17.4s, v26.4s[0]
-	fmla	v7.4s, v17.4s, v27.4s[0]
-	ldr		q22, [x13], #16
-	fmla	v8.4s, v18.4s, v24.4s[0]
-	fmla	v9.4s, v18.4s, v25.4s[0]
-	ldr		q23, [x14], #16
-	fmla	v10.4s, v18.4s, v26.4s[0]
-	fmla	v11.4s, v18.4s, v27.4s[0]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v12.4s, v19.4s, v24.4s[0]
-	fmla	v13.4s, v19.4s, v25.4s[0]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v14.4s, v19.4s, v26.4s[0]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v15.4s, v19.4s, v27.4s[0]
-
-
-	// unroll 1
-	ldp		s24, s25, [x11], #8
-	fmla	v0.4s, v20.4s, v28.4s[0]
-	fmla	v1.4s, v20.4s, v29.4s[0]
-	ldp		s26, s27, [x11], #8
-	fmla	v2.4s, v20.4s, v30.4s[0]
-	fmla	v3.4s, v20.4s, v31.4s[0]
-	ldr		q16, [x9], #16
-	fmla	v4.4s, v21.4s, v28.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[0]
-	ldr		q17, [x12], #16
-	fmla	v6.4s, v21.4s, v30.4s[0]
-	fmla	v7.4s, v21.4s, v31.4s[0]
-	ldr		q18, [x13], #16
-	fmla	v8.4s, v22.4s, v28.4s[0]
-	fmla	v9.4s, v22.4s, v29.4s[0]
-	ldr		q19, [x14], #16
-	fmla	v10.4s, v22.4s, v30.4s[0]
-	fmla	v11.4s, v22.4s, v31.4s[0]
-	prfm	PLDL1KEEP, [x13, #32]
-	fmla	v12.4s, v23.4s, v28.4s[0]
-	fmla	v13.4s, v23.4s, v29.4s[0]
-	prfm	PLDL1KEEP, [x14, #32]
-	fmla	v14.4s, v23.4s, v30.4s[0]
-	fmla	v15.4s, v23.4s, v31.4s[0]
-
-	// unroll 2
-	ldp		s28, s29, [x11], #8
-	fmla	v0.4s, v16.4s, v24.4s[0]
-	fmla	v1.4s, v16.4s, v25.4s[0]
-	ldp		s30, s31, [x11], #8
-	fmla	v2.4s, v16.4s, v26.4s[0]
-	fmla	v3.4s, v16.4s, v27.4s[0]
-	ldr		q20, [x9], #16
-	fmla	v4.4s, v17.4s, v24.4s[0]
-	fmla	v5.4s, v17.4s, v25.4s[0]
-	ldr		q21, [x12], #16
-	fmla	v6.4s, v17.4s, v26.4s[0]
-	fmla	v7.4s, v17.4s, v27.4s[0]
-	ldr		q22, [x13], #16
-	fmla	v8.4s, v18.4s, v24.4s[0]
-	fmla	v9.4s, v18.4s, v25.4s[0]
-	ldr		q23, [x14], #16
-	fmla	v10.4s, v18.4s, v26.4s[0]
-	fmla	v11.4s, v18.4s, v27.4s[0]
-	fmla	v12.4s, v19.4s, v24.4s[0]
-	fmla	v13.4s, v19.4s, v25.4s[0]
-	fmla	v14.4s, v19.4s, v26.4s[0]
-	fmla	v15.4s, v19.4s, v27.4s[0]
-
-
-	// unroll 3
-	ldp		s24, s25, [x11], #8
-	fmla	v0.4s, v20.4s, v28.4s[0]
-	fmla	v1.4s, v20.4s, v29.4s[0]
-	ldp		s26, s27, [x11], #8
-	fmla	v2.4s, v20.4s, v30.4s[0]
-	fmla	v3.4s, v20.4s, v31.4s[0]
-	ldr		q16, [x9], #16
-	fmla	v4.4s, v21.4s, v28.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[0]
-	ldr		q17, [x12], #16
-	fmla	v6.4s, v21.4s, v30.4s[0]
-	fmla	v7.4s, v21.4s, v31.4s[0]
-	ldr		q18, [x13], #16
-	fmla	v8.4s, v22.4s, v28.4s[0]
-	fmla	v9.4s, v22.4s, v29.4s[0]
-	ldr		q19, [x14], #16
-	fmla	v10.4s, v22.4s, v30.4s[0]
-	fmla	v11.4s, v22.4s, v31.4s[0]
-	sub		w8, w8, #4
-	fmla	v12.4s, v23.4s, v28.4s[0]
-	fmla	v13.4s, v23.4s, v29.4s[0]
-	cmp		w8, #4
-	fmla	v14.4s, v23.4s, v30.4s[0]
-	fmla	v15.4s, v23.4s, v31.4s[0]
-
-	bgt		1b
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	
-	// unroll 0
-	ldp		s28, s29, [x11], #8
-	fmla	v0.4s, v16.4s, v24.4s[0]
-	fmla	v1.4s, v16.4s, v25.4s[0]
-	ldp		s30, s31, [x11], #8
-	fmla	v2.4s, v16.4s, v26.4s[0]
-	fmla	v3.4s, v16.4s, v27.4s[0]
-	ldr		q20, [x9], #16
-	fmla	v4.4s, v17.4s, v24.4s[0]
-	fmla	v5.4s, v17.4s, v25.4s[0]
-	ldr		q21, [x12], #16
-	fmla	v6.4s, v17.4s, v26.4s[0]
-	fmla	v7.4s, v17.4s, v27.4s[0]
-	ldr		q22, [x13], #16
-	fmla	v8.4s, v18.4s, v24.4s[0]
-	fmla	v9.4s, v18.4s, v25.4s[0]
-	ldr		q23, [x14], #16
-	fmla	v10.4s, v18.4s, v26.4s[0]
-	fmla	v11.4s, v18.4s, v27.4s[0]
-//	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v12.4s, v19.4s, v24.4s[0]
-	fmla	v13.4s, v19.4s, v25.4s[0]
-//	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v14.4s, v19.4s, v26.4s[0]
-	fmla	v15.4s, v19.4s, v27.4s[0]
-
-
-	// unroll 1
-	ldp		s24, s25, [x11], #8
-	fmla	v0.4s, v20.4s, v28.4s[0]
-	fmla	v1.4s, v20.4s, v29.4s[0]
-	ldp		s26, s27, [x11], #8
-	fmla	v2.4s, v20.4s, v30.4s[0]
-	fmla	v3.4s, v20.4s, v31.4s[0]
-	ldr		q16, [x9], #16
-	fmla	v4.4s, v21.4s, v28.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[0]
-	ldr		q17, [x12], #16
-	fmla	v6.4s, v21.4s, v30.4s[0]
-	fmla	v7.4s, v21.4s, v31.4s[0]
-	ldr		q18, [x13], #16
-	fmla	v8.4s, v22.4s, v28.4s[0]
-	fmla	v9.4s, v22.4s, v29.4s[0]
-	ldr		q19, [x14], #16
-	fmla	v10.4s, v22.4s, v30.4s[0]
-	fmla	v11.4s, v22.4s, v31.4s[0]
-//	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v12.4s, v23.4s, v28.4s[0]
-	fmla	v13.4s, v23.4s, v29.4s[0]
-//	prfm	PLDL1KEEP, [x13, #64]
-	fmla	v14.4s, v23.4s, v30.4s[0]
-	fmla	v15.4s, v23.4s, v31.4s[0]
-
-	// unroll 2
-	ldp		s28, s29, [x11], #8
-	fmla	v0.4s, v16.4s, v24.4s[0]
-	fmla	v1.4s, v16.4s, v25.4s[0]
-	ldp		s30, s31, [x11], #8
-	fmla	v2.4s, v16.4s, v26.4s[0]
-	fmla	v3.4s, v16.4s, v27.4s[0]
-	ldr		q20, [x9], #16
-	fmla	v4.4s, v17.4s, v24.4s[0]
-	fmla	v5.4s, v17.4s, v25.4s[0]
-	ldr		q21, [x12], #16
-	fmla	v6.4s, v17.4s, v26.4s[0]
-	fmla	v7.4s, v17.4s, v27.4s[0]
-	ldr		q22, [x13], #16
-	fmla	v8.4s, v18.4s, v24.4s[0]
-	fmla	v9.4s, v18.4s, v25.4s[0]
-	ldr		q23, [x14], #16
-	fmla	v10.4s, v18.4s, v26.4s[0]
-	fmla	v11.4s, v18.4s, v27.4s[0]
-//	prfm	PLDL1KEEP, [x14, #64]
-	fmla	v12.4s, v19.4s, v24.4s[0]
-	fmla	v13.4s, v19.4s, v25.4s[0]
-	fmla	v14.4s, v19.4s, v26.4s[0]
-	fmla	v15.4s, v19.4s, v27.4s[0]
-
-
-	// unroll 3
-	ldp		s24, s25, [x11], #8
-	fmla	v0.4s, v20.4s, v28.4s[0]
-	fmla	v1.4s, v20.4s, v29.4s[0]
-	ldp		s26, s27, [x11], #8
-	fmla	v2.4s, v20.4s, v30.4s[0]
-	fmla	v3.4s, v20.4s, v31.4s[0]
-	ldr		q16, [x9], #16
-	fmla	v4.4s, v21.4s, v28.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[0]
-	ldr		q17, [x12], #16
-	fmla	v6.4s, v21.4s, v30.4s[0]
-	fmla	v7.4s, v21.4s, v31.4s[0]
-	ldr		q18, [x13], #16
-	fmla	v8.4s, v22.4s, v28.4s[0]
-	fmla	v9.4s, v22.4s, v29.4s[0]
-	ldr		q19, [x14], #16
-	fmla	v10.4s, v22.4s, v30.4s[0]
-	fmla	v11.4s, v22.4s, v31.4s[0]
-//	sub		w8, w8, #4
-	fmla	v12.4s, v23.4s, v28.4s[0]
-	fmla	v13.4s, v23.4s, v29.4s[0]
-//	cmp		w8, #4
-	fmla	v14.4s, v23.4s, v30.4s[0]
-	fmla	v15.4s, v23.4s, v31.4s[0]
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-	sub		x9, x9, #16
-	sub		x11, x11, #16
-	sub		x12, x12, #16
-	sub		x13, x13, #16
-	sub		x14, x14, #16
-
-3: // clean1-up loop
-
-	// unroll 0
-	// TODO
-	ldp		s24, s25, [x11], #8
-	ldr		q16, [x9], #16
-	fmla	v0.4s, v16.4s, v24.4s[0]
-	fmla	v1.4s, v16.4s, v25.4s[0]
-	ldp		s26, s27, [x11], #8
-	fmla	v2.4s, v16.4s, v26.4s[0]
-	fmla	v3.4s, v16.4s, v27.4s[0]
-	ldr		q17, [x12], #16
-	fmla	v4.4s, v17.4s, v24.4s[0]
-	fmla	v5.4s, v17.4s, v25.4s[0]
-	fmla	v6.4s, v17.4s, v26.4s[0]
-	fmla	v7.4s, v17.4s, v27.4s[0]
-	ldr		q18, [x13], #16
-	fmla	v8.4s, v18.4s, v24.4s[0]
-	fmla	v9.4s, v18.4s, v25.4s[0]
-	fmla	v10.4s, v18.4s, v26.4s[0]
-	fmla	v11.4s, v18.4s, v27.4s[0]
-	ldr		q19, [x14], #16
-	fmla	v12.4s, v19.4s, v24.4s[0]
-	fmla	v13.4s, v19.4s, v25.4s[0]
-	fmla	v14.4s, v19.4s, v26.4s[0]
-	fmla	v15.4s, v19.4s, v27.4s[0]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_16x4_lib4, .-inner_kernel_gemm_add_nt_16x4_lib4
-#endif
-
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-// x11  <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_16X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_16x4_lib4, %function
-inner_scale_ab_16x4_lib4:
-#endif
-
-	ld1		{v28.4s}, [x8]
-
-	fmul	v0.4s, v0.4s, v28.4s[0]
-	fmul	v1.4s, v1.4s, v28.4s[0]
-	fmul	v2.4s, v2.4s, v28.4s[0]
-	fmul	v3.4s, v3.4s, v28.4s[0]
-	fmul	v4.4s, v4.4s, v28.4s[0]
-	fmul	v5.4s, v5.4s, v28.4s[0]
-	fmul	v6.4s, v6.4s, v28.4s[0]
-	fmul	v7.4s, v7.4s, v28.4s[0]
-	fmul	v8.4s, v8.4s, v28.4s[0]
-	fmul	v9.4s, v9.4s, v28.4s[0]
-	fmul	v10.4s, v10.4s, v28.4s[0]
-	fmul	v11.4s, v11.4s, v28.4s[0]
-	fmul	v12.4s, v12.4s, v28.4s[0]
-	fmul	v13.4s, v13.4s, v28.4s[0]
-	fmul	v14.4s, v14.4s, v28.4s[0]
-	fmul	v15.4s, v15.4s, v28.4s[0]
-
-	ld1		{v28.4s}, [x9]
-
-	add		x12, x10, x11
-	add		x13, x12, x11
-	add		x14, x13, x11
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v25.4s, v28.4s[0]
-	fmla	v2.4s, v26.4s, v28.4s[0]
-	fmla	v3.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x12], #64
-	fmla	v4.4s, v24.4s, v28.4s[0]
-	fmla	v5.4s, v25.4s, v28.4s[0]
-	fmla	v6.4s, v26.4s, v28.4s[0]
-	fmla	v7.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x13], #64
-	fmla	v8.4s, v24.4s, v28.4s[0]
-	fmla	v9.4s, v25.4s, v28.4s[0]
-	fmla	v10.4s, v26.4s, v28.4s[0]
-	fmla	v11.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x14], #64
-	fmla	v12.4s, v24.4s, v28.4s[0]
-	fmla	v13.4s, v25.4s, v28.4s[0]
-	fmla	v14.4s, v26.4s, v28.4s[0]
-	fmla	v15.4s, v27.4s, v28.4s[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_16x4_lib4, .-inner_scale_ab_16x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-// x9   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_16X4_LIB4
-#else
-	.align 4
-	.type inner_store_16x4_lib4, %function
-inner_store_16x4_lib4:
-#endif
-
-	add		x10, x8, x9
-	add		x11, x10, x9
-	add		x12, x11, x9
-
-	st1		{v0.4s, v1.4s, v2.4s, v3.4s}, [x8], #64
-	st1		{v4.4s, v5.4s, v6.4s, v7.4s}, [x10], #64
-	st1		{v8.4s, v9.4s, v10.4s, v11.4s}, [x11], #64
-	st1		{v12.4s, v13.4s, v14.4s, v15.4s}, [x12], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_16x4_lib4, .-inner_store_16x4_lib4
-#endif
-
-
-
-
-
-//                                w0        x1            x2        w3       x4        x5           x6        w7       sp+0      sp+8
-// void kernel_sgemm_nt_16x4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-
-	.align	4
-	.global	kernel_sgemm_nt_16x4_lib4
-	.type	kernel_sgemm_nt_16x4_lib4, %function
-kernel_sgemm_nt_16x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-	fmov    d8, d0
-	fmov    d9, d0
-	fmov    d10, d0
-	fmov    d11, d0
-	fmov    d12, d0
-	fmov    d13, d0
-	fmov    d14, d0
-	fmov    d15, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		w10, w3 // sda
-	lsl		w10, w10, #4 // 16*sda
-	mov		x11, x4 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_16x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x5 // beta
-	mov		x10, x6 // C
-	mov		w11, w7 // sdc
-	lsl		w11, w11, #4 // 16*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB4
-#else
-	bl inner_scale_ab_16x4_lib4
-#endif
-
-
-
-	// store n
-	ldr		x8, [sp, #(STACKSIZE + 0)] // D
-	ldr		w9, [sp, #(STACKSIZE + 8)] // sdd
-	lsl		w9, w9, #4 // 16*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB4
-#else
-	bl inner_store_16x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_4x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_sgemm_4x4_lib4.S
deleted file mode 100644
index 6d3850d..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_4x4_lib4.S
+++ /dev/null
@@ -1,354 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	add sp, sp, #-(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10   <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_4X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_4x4_lib4, %function
-inner_kernel_gemm_add_nt_4x4_lib4:
-#endif
-
-// TODO more aggressive preload of A !!!
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	// prefetch
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x10, #0]
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// preload
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-
-	// prefetch
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x10, #32]
-
-	// main loop
-1:
-	
-
-	// unroll 0
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	ld1		{v26.2d, v27.2d}, [x9], #32
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v30.2d, v31.2d}, [x10], #32
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	prfm	PLDL1KEEP, [x10, #64]
-
-	// unroll 1
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	sub		w8, w8, #4
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-
-	// unroll 2
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	fmla	v1.4s, v26.4s, v30.4s[1]
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	fmla	v3.4s, v26.4s, v30.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-
-	cmp		w8, #4
-	bgt		1b
-
-	sub		x9, x9, #32
-	sub		x10, x10, #32
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	// unroll 0
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-
-	// unroll 1
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-
-	// unroll 2
-	ld1		{v24.2d, v25.2d}, [x9], #32
-	ld1		{v28.2d, v29.2d}, [x10], #32
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-
-	sub		w8, w8, #4
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-3: // clean1-up loop
-
-	// unroll 0
-	ld1		{v24.2d}, [x9], #16
-	ld1		{v28.2d}, [x10], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_4x4_lib4, .-inner_kernel_gemm_add_nt_4x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_4x4_lib4, %function
-inner_scale_ab_4x4_lib4:
-#endif
-
-	ld1		{v28.2d}, [x8]
-
-	fmul	v0.4s, v0.4s, v28.4s[0]
-	fmul	v1.4s, v1.4s, v28.4s[0]
-	fmul	v2.4s, v2.4s, v28.4s[0]
-	fmul	v3.4s, v3.4s, v28.4s[0]
-
-	ld1		{v28.2d}, [x9]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v25.4s, v28.4s[0]
-	fmla	v2.4s, v26.4s, v28.4s[0]
-	fmla	v3.4s, v27.4s, v28.4s[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.align 4
-	.type inner_store_4x4_lib4, %function
-inner_store_4x4_lib4:
-#endif
-
-	st1		{v0.4s, v1.4s, v2.4s, v3.4s}, [x8], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-
-
-
-
-
-//                               w0        x1            x2        x3        x4           x5        x6
-// void kernel_sgemm_nt_4x4_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-
-	.align	4
-	.global	kernel_sgemm_nt_4x4_lib4
-	.type	kernel_sgemm_nt_4x4_lib4, %function
-kernel_sgemm_nt_4x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		x10, x3 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_4X4_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_4x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x4 // beta
-	mov		x10, x5 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-	bl inner_scale_ab_4x4_lib4
-#endif
-
-
-
-	// store n
-	mov		x8, x6
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-	bl inner_store_4x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x4_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x4_lib4.S
deleted file mode 100644
index 016af72..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x4_lib4.S
+++ /dev/null
@@ -1,433 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	sub sp, sp, #(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10  <- sda
-// x11  <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_8x4_lib4, %function
-inner_kernel_gemm_add_nt_8x4_lib4:
-#endif
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	add		x12, x9, x10
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #0]
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x12, #0]
-
-	// preload
-	ld1		{v24.4s, v25.4s}, [x9], #32
-	ld1		{v28.4s, v29.4s}, [x11], #32
-	ld1		{v20.4s, v21.4s}, [x12], #32
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #32]
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x12, #32]
-
-	// main loop
-1:
-
-	// unroll 0
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	ld1		{v26.4s, v27.4s}, [x9], #32
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v30.4s, v31.4s}, [x11], #32
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	ld1		{v22.4s, v23.4s}, [x12], #32
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	sub		w8, w8, #4
-
-	// unroll 1
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-	cmp		w8, #4
-
-	// unroll 2
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	ld1		{v24.4s, v25.4s}, [x9], #32
-	fmla	v1.4s, v26.4s, v30.4s[1]
-	ld1		{v28.4s, v29.4s}, [x11], #32
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	ld1		{v20.4s, v21.4s}, [x12], #32
-	fmla	v3.4s, v26.4s, v30.4s[3]
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-
-	bgt		1b
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	// unroll 0
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	ld1		{v26.4s, v27.4s}, [x9], #32
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v30.4s, v31.4s}, [x11], #32
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	ld1		{v22.4s, v23.4s}, [x12], #32
-	fmla	v3.4s, v24.4s, v28.4s[3]
-//	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v4.4s, v20.4s, v28.4s[0]
-//	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-//	prfm	PLDL1KEEP, [x12, #64]
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	sub		w8, w8, #4
-
-	// unroll 1
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-//	cmp		w8, #4
-
-	// unroll 2
-	fmla	v0.4s, v26.4s, v30.4s[0]
-//	ld1		{v24.4s, v25.4s}, [x9], #32
-	fmla	v1.4s, v26.4s, v30.4s[1]
-//	ld1		{v28.4s, v29.4s}, [x11], #32
-	fmla	v2.4s, v26.4s, v30.4s[2]
-//	ld1		{v20.4s, v21.4s}, [x12], #32
-	fmla	v3.4s, v26.4s, v30.4s[3]
-//	ld1		{v16.4s, v17.4s}, [x13], #32
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-	sub		x9, x9, #32
-	sub		x12, x12, #32
-	sub		x11, x11, #32
-
-3: // clean1-up loop
-
-	// unroll 0
-
-	ld1		{v28.4s}, [x11], #16
-	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v20.4s}, [x12], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_8x4_lib4, .-inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-// x11  <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_8X4_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_8x4_lib4, %function
-inner_scale_ab_8x4_lib4:
-#endif
-
-	ld1		{v28.4s}, [x8]
-
-	fmul	v0.4s, v0.4s, v28.4s[0]
-	fmul	v1.4s, v1.4s, v28.4s[0]
-	fmul	v2.4s, v2.4s, v28.4s[0]
-	fmul	v3.4s, v3.4s, v28.4s[0]
-	fmul	v4.4s, v4.4s, v28.4s[0]
-	fmul	v5.4s, v5.4s, v28.4s[0]
-	fmul	v6.4s, v6.4s, v28.4s[0]
-	fmul	v7.4s, v7.4s, v28.4s[0]
-
-	ld1		{v28.4s}, [x9]
-
-	add		x12, x10, x11
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v25.4s, v28.4s[0]
-	fmla	v2.4s, v26.4s, v28.4s[0]
-	fmla	v3.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x12], #64
-	fmla	v4.4s, v24.4s, v28.4s[0]
-	fmla	v5.4s, v25.4s, v28.4s[0]
-	fmla	v6.4s, v26.4s, v28.4s[0]
-	fmla	v7.4s, v27.4s, v28.4s[0]
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_8x4_lib4, .-inner_scale_ab_8x4_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-// x9   <- sdd
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_STORE_8X4_LIB4
-#else
-	.align 4
-	.type inner_store_8x4_lib4, %function
-inner_store_8x4_lib4:
-#endif
-
-	add		x10, x8, x9
-
-	st1		{v0.4s, v1.4s, v2.4s, v3.4s}, [x8], #64
-	st1		{v4.4s, v5.4s, v6.4s, v7.4s}, [x10], #64
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_store_8x4_lib4, .-inner_store_8x4_lib4
-#endif
-
-
-
-
-
-//                               w0        x1            x2        w3       x4        x5           x6        w7       sp+0      sp+8
-// void kernel_sgemm_nt_8x4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-
-	.align	4
-	.global	kernel_sgemm_nt_8x4_lib4
-	.type	kernel_sgemm_nt_8x4_lib4, %function
-kernel_sgemm_nt_8x4_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		w10, w3 // sda
-	lsl		w10, w10, #4 // 16*sda
-	mov		x11, x4 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_8x4_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x5 // beta
-	mov		x10, x6 // C
-	mov		w11, w7 // sdc
-	lsl		w11, w11, #4 // 16*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-	bl inner_scale_ab_8x4_lib4
-#endif
-
-
-
-	// store n
-	ldr		x8, [sp, #(STACKSIZE + 0)] // D
-	ldr		w9, [sp, #(STACKSIZE + 8)] // sdd
-	lsl		w9, w9, #4 // 16*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-	bl inner_store_8x4_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
-
-
diff --git a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x8_lib4.S b/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x8_lib4.S
deleted file mode 100644
index 6c8c090..0000000
--- a/third_party/blasfeo/kernel/armv8a/kernel_sgemm_8x8_lib4.S
+++ /dev/null
@@ -1,565 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#define STACKSIZE 11*16
-#define PROLOGUE \
-	sub sp, sp, #(11 * 16); \
-	stp d8, d9, [sp, #(0 * 16)]; \
-	stp d10, d11, [sp, #(1 * 16)]; \
-	stp d12, d13, [sp, #(2 * 16)]; \
-	stp d14, d15, [sp, #(3 * 16)]; \
-	stp x18, x19, [sp, #(4 * 16)]; \
-	stp x20, x21, [sp, #(5 * 16)]; \
-	stp x22, x23, [sp, #(6 * 16)]; \
-	stp x24, x25, [sp, #(7 * 16)]; \
-	stp x26, x27, [sp, #(8 * 16)]; \
-	stp x28, x29, [sp, #(9 * 16)]; \
-	str x30, [sp, #(10 * 16)];
-#define EPILOGUE \
-	ldp d8, d9, [sp, #(0 * 16)]; \
-	ldp d10, d11, [sp, #(1 * 16)]; \
-	ldp d12, d13, [sp, #(2 * 16)]; \
-	ldp d14, d15, [sp, #(3 * 16)]; \
-	ldp x18, x19, [sp, #(4 * 16)]; \
-	ldp x20, x21, [sp, #(5 * 16)]; \
-	ldp x22, x23, [sp, #(6 * 16)]; \
-	ldp x24, x25, [sp, #(7 * 16)]; \
-	ldp x26, x27, [sp, #(8 * 16)]; \
-	ldp x28, x29, [sp, #(9 * 16)]; \
-	ldr x30, [sp, #(10 * 16)]; \
-	add sp, sp, #(11 * 16);
-
-
-
-
-
-	.text
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// w8   <- k
-// x9   <- A
-// x10  <- sda
-// x11  <- B
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X8_LIB4
-#else
-	.align	4
-	.type inner_kernel_gemm_add_nt_8x8_lib4, %function
-inner_kernel_gemm_add_nt_8x8_lib4:
-#endif
-
-	// early return
-	cmp		w8, #0
-	ble		2f // return
-
-	add		x13, x9, x10
-	add		x14, x11, x12
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #0]
-	prfm	PLDL1KEEP, [x9, #0]
-	prfm	PLDL1KEEP, [x13, #0]
-	prfm	PLDL1KEEP, [x14, #0]
-
-	// preload
-	ld1		{v24.4s, v25.4s}, [x9], #32
-	ld1		{v28.4s, v29.4s}, [x11], #32
-	ld1		{v20.4s, v21.4s}, [x13], #32
-	ld1		{v16.4s, v17.4s}, [x14], #32
-
-	cmp		w8, #4
-	ble		0f // consider clean up loop
-
-	// prefetch
-	prfm	PLDL1KEEP, [x11, #32]
-	prfm	PLDL1KEEP, [x9, #32]
-	prfm	PLDL1KEEP, [x13, #32]
-	prfm	PLDL1KEEP, [x14, #32]
-
-	// main loop
-1:
-
-	// unroll 0
-	ld1		{v26.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v27.4s}, [x9], #16
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v30.4s}, [x11], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	ld1		{v31.4s}, [x11], #16
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v22.4s}, [x13], #16
-	fmla	v8.4s, v24.4s, v16.4s[0]
-	fmla	v9.4s, v24.4s, v16.4s[1]
-	ld1		{v23.4s}, [x13], #16
-	fmla	v10.4s, v24.4s, v16.4s[2]
-	fmla	v11.4s, v24.4s, v16.4s[3]
-	ld1		{v18.4s}, [x14], #16
-	fmla	v12.4s, v20.4s, v16.4s[0]
-	fmla	v13.4s, v20.4s, v16.4s[1]
-	ld1		{v19.4s}, [x14], #16
-	fmla	v14.4s, v20.4s, v16.4s[2]
-	fmla	v15.4s, v20.4s, v16.4s[3]
-
-	// unroll 1
-	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-	prfm	PLDL1KEEP, [x13, #64]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-	prfm	PLDL1KEEP, [x14, #64]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-	sub		w8, w8, #4
-	fmla	v8.4s, v25.4s, v17.4s[0]
-	fmla	v9.4s, v25.4s, v17.4s[1]
-	fmla	v10.4s, v25.4s, v17.4s[2]
-	fmla	v11.4s, v25.4s, v17.4s[3]
-	fmla	v12.4s, v21.4s, v17.4s[0]
-	fmla	v13.4s, v21.4s, v17.4s[1]
-	cmp		w8, #4
-	fmla	v14.4s, v21.4s, v17.4s[2]
-	fmla	v15.4s, v21.4s, v17.4s[3]
-
-	// unroll 2
-	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	fmla	v1.4s, v26.4s, v30.4s[1]
-	ld1		{v25.4s}, [x9], #16
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	fmla	v3.4s, v26.4s, v30.4s[3]
-	ld1		{v28.4s}, [x11], #16
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-	ld1		{v29.4s}, [x11], #16
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-	ld1		{v20.4s}, [x13], #16
-	fmla	v8.4s, v26.4s, v18.4s[0]
-	fmla	v9.4s, v26.4s, v18.4s[1]
-	ld1		{v21.4s}, [x13], #16
-	fmla	v10.4s, v26.4s, v18.4s[2]
-	fmla	v11.4s, v26.4s, v18.4s[3]
-	ld1		{v16.4s}, [x14], #16
-	fmla	v12.4s, v22.4s, v18.4s[0]
-	fmla	v13.4s, v22.4s, v18.4s[1]
-	ld1		{v17.4s}, [x14], #16
-	fmla	v14.4s, v22.4s, v18.4s[2]
-	fmla	v15.4s, v22.4s, v18.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-	fmla	v8.4s, v27.4s, v19.4s[0]
-	fmla	v9.4s, v27.4s, v19.4s[1]
-	fmla	v10.4s, v27.4s, v19.4s[2]
-	fmla	v11.4s, v27.4s, v19.4s[3]
-	fmla	v12.4s, v23.4s, v19.4s[0]
-	fmla	v13.4s, v23.4s, v19.4s[1]
-	fmla	v14.4s, v23.4s, v19.4s[2]
-	fmla	v15.4s, v23.4s, v19.4s[3]
-
-	bgt		1b
-
-0:
-
-	cmp		w8, #3
-	ble		4f
-
-	// unroll 0
-	ld1		{v26.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	ld1		{v27.4s}, [x9], #16
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v30.4s}, [x11], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	ld1		{v31.4s}, [x11], #16
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v22.4s}, [x13], #16
-	fmla	v8.4s, v24.4s, v16.4s[0]
-	fmla	v9.4s, v24.4s, v16.4s[1]
-	ld1		{v23.4s}, [x13], #16
-	fmla	v10.4s, v24.4s, v16.4s[2]
-	fmla	v11.4s, v24.4s, v16.4s[3]
-	ld1		{v18.4s}, [x14], #16
-	fmla	v12.4s, v20.4s, v16.4s[0]
-	fmla	v13.4s, v20.4s, v16.4s[1]
-	ld1		{v19.4s}, [x14], #16
-	fmla	v14.4s, v20.4s, v16.4s[2]
-	fmla	v15.4s, v20.4s, v16.4s[3]
-
-	// unroll 1
-//	prfm	PLDL1KEEP, [x11, #64]
-	fmla	v0.4s, v25.4s, v29.4s[0]
-	fmla	v1.4s, v25.4s, v29.4s[1]
-//	prfm	PLDL1KEEP, [x9, #64]
-	fmla	v2.4s, v25.4s, v29.4s[2]
-	fmla	v3.4s, v25.4s, v29.4s[3]
-//	prfm	PLDL1KEEP, [x13, #64]
-	fmla	v4.4s, v21.4s, v29.4s[0]
-	fmla	v5.4s, v21.4s, v29.4s[1]
-//	prfm	PLDL1KEEP, [x14, #64]
-	fmla	v6.4s, v21.4s, v29.4s[2]
-	fmla	v7.4s, v21.4s, v29.4s[3]
-	sub		w8, w8, #4
-	fmla	v8.4s, v25.4s, v17.4s[0]
-	fmla	v9.4s, v25.4s, v17.4s[1]
-	fmla	v10.4s, v25.4s, v17.4s[2]
-	fmla	v11.4s, v25.4s, v17.4s[3]
-	fmla	v12.4s, v21.4s, v17.4s[0]
-	fmla	v13.4s, v21.4s, v17.4s[1]
-	cmp		w8, #4
-	fmla	v14.4s, v21.4s, v17.4s[2]
-	fmla	v15.4s, v21.4s, v17.4s[3]
-
-	// unroll 2
-//	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v26.4s, v30.4s[0]
-	fmla	v1.4s, v26.4s, v30.4s[1]
-//	ld1		{v25.4s}, [x9], #16
-	fmla	v2.4s, v26.4s, v30.4s[2]
-	fmla	v3.4s, v26.4s, v30.4s[3]
-//	ld1		{v28.4s}, [x11], #16
-	fmla	v4.4s, v22.4s, v30.4s[0]
-	fmla	v5.4s, v22.4s, v30.4s[1]
-//	ld1		{v29.4s}, [x11], #16
-	fmla	v6.4s, v22.4s, v30.4s[2]
-	fmla	v7.4s, v22.4s, v30.4s[3]
-//	ld1		{v20.4s}, [x13], #16
-	fmla	v8.4s, v26.4s, v18.4s[0]
-	fmla	v9.4s, v26.4s, v18.4s[1]
-//	ld1		{v21.4s}, [x13], #16
-	fmla	v10.4s, v26.4s, v18.4s[2]
-	fmla	v11.4s, v26.4s, v18.4s[3]
-//	ld1		{v16.4s}, [x14], #16
-	fmla	v12.4s, v22.4s, v18.4s[0]
-	fmla	v13.4s, v22.4s, v18.4s[1]
-//	ld1		{v17.4s}, [x14], #16
-	fmla	v14.4s, v22.4s, v18.4s[2]
-	fmla	v15.4s, v22.4s, v18.4s[3]
-
-	// unroll 3
-	fmla	v0.4s, v27.4s, v31.4s[0]
-	fmla	v1.4s, v27.4s, v31.4s[1]
-	fmla	v2.4s, v27.4s, v31.4s[2]
-	fmla	v3.4s, v27.4s, v31.4s[3]
-	fmla	v4.4s, v23.4s, v31.4s[0]
-	fmla	v5.4s, v23.4s, v31.4s[1]
-	fmla	v6.4s, v23.4s, v31.4s[2]
-	fmla	v7.4s, v23.4s, v31.4s[3]
-	fmla	v8.4s, v27.4s, v19.4s[0]
-	fmla	v9.4s, v27.4s, v19.4s[1]
-	fmla	v10.4s, v27.4s, v19.4s[2]
-	fmla	v11.4s, v27.4s, v19.4s[3]
-	fmla	v12.4s, v23.4s, v19.4s[0]
-	fmla	v13.4s, v23.4s, v19.4s[1]
-	fmla	v14.4s, v23.4s, v19.4s[2]
-	fmla	v15.4s, v23.4s, v19.4s[3]
-
-	b		2f // return
-
-4: // consider clean1-up loop
-
-	cmp		w8, #0
-	ble		2f // return
-
-	sub		x9, x9, #32
-	sub		x13, x13, #32
-	sub		x11, x11, #32
-	sub		x14, x14, #32
-
-3: // clean1-up loop
-
-	// unroll 0
-
-	ld1		{v28.4s}, [x11], #16
-	ld1		{v24.4s}, [x9], #16
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v24.4s, v28.4s[1]
-	fmla	v2.4s, v24.4s, v28.4s[2]
-	fmla	v3.4s, v24.4s, v28.4s[3]
-	ld1		{v20.4s}, [x13], #16
-	fmla	v4.4s, v20.4s, v28.4s[0]
-	fmla	v5.4s, v20.4s, v28.4s[1]
-	fmla	v6.4s, v20.4s, v28.4s[2]
-	fmla	v7.4s, v20.4s, v28.4s[3]
-	ld1		{v16.4s}, [x14], #16
-	fmla	v8.4s, v24.4s, v16.4s[0]
-	fmla	v9.4s, v24.4s, v16.4s[1]
-	fmla	v10.4s, v24.4s, v16.4s[2]
-	fmla	v11.4s, v24.4s, v16.4s[3]
-	fmla	v12.4s, v20.4s, v16.4s[0]
-	fmla	v13.4s, v20.4s, v16.4s[1]
-	fmla	v14.4s, v20.4s, v16.4s[2]
-	fmla	v15.4s, v20.4s, v16.4s[3]
-
-	sub		w8, w8, #1
-	cmp		w8, #0
-	bgt		3b
-
-2: // return
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-	.size	inner_kernel_gemm_add_nt_8x8_lib4, .-inner_kernel_gemm_add_nt_8x8_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- alpha
-// x9   <- beta
-// x10  <- C
-// x11  <- sdc
-//
-// output arguments:
-
-#if MACRO_LEVEL>=2
-	.macro INNER_SCALE_AB_8X8_LIB4
-#else
-	.align	4
-	.type inner_scale_ab_8x8_lib4, %function
-inner_scale_ab_8x8_lib4:
-#endif
-
-	ld1		{v28.4s}, [x8]
-
-	fmul	v0.4s, v0.4s, v28.4s[0]
-	fmul	v1.4s, v1.4s, v28.4s[0]
-	fmul	v2.4s, v2.4s, v28.4s[0]
-	fmul	v3.4s, v3.4s, v28.4s[0]
-	fmul	v4.4s, v4.4s, v28.4s[0]
-	fmul	v5.4s, v5.4s, v28.4s[0]
-	fmul	v6.4s, v6.4s, v28.4s[0]
-	fmul	v7.4s, v7.4s, v28.4s[0]
-	fmul	v8.4s, v8.4s, v28.4s[0]
-	fmul	v9.4s, v9.4s, v28.4s[0]
-	fmul	v10.4s, v10.4s, v28.4s[0]
-	fmul	v11.4s, v11.4s, v28.4s[0]
-	fmul	v12.4s, v12.4s, v28.4s[0]
-	fmul	v13.4s, v13.4s, v28.4s[0]
-	fmul	v14.4s, v14.4s, v28.4s[0]
-	fmul	v15.4s, v15.4s, v28.4s[0]
-
-	ld1		{v28.4s}, [x9]
-
-	add		x12, x10, x11
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v0.4s, v24.4s, v28.4s[0]
-	fmla	v1.4s, v25.4s, v28.4s[0]
-	fmla	v2.4s, v26.4s, v28.4s[0]
-	fmla	v3.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x12], #64
-	fmla	v4.4s, v24.4s, v28.4s[0]
-	fmla	v5.4s, v25.4s, v28.4s[0]
-	fmla	v6.4s, v26.4s, v28.4s[0]
-	fmla	v7.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x10], #64
-	fmla	v8.4s, v24.4s, v28.4s[0]
-	fmla	v9.4s, v25.4s, v28.4s[0]
-	fmla	v10.4s, v26.4s, v28.4s[0]
-	fmla	v11.4s, v27.4s, v28.4s[0]
-
-	ld1		{v24.4s, v25.4s, v26.4s, v27.4s}, [x12], #64
-	fmla	v12.4s, v24.4s, v28.4s[0]
-	fmla	v13.4s, v25.4s, v28.4s[0]
-	fmla	v14.4s, v26.4s, v28.4s[0]
-	fmla	v15.4s, v27.4s, v28.4s[0]
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-	.size	inner_scale_ab_8x8_lib4, .-inner_scale_ab_8x8_lib4
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// x8   <- D
-// x9   <- sdd
-//
-// output arguments:
-
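-// note: writes the accumulator v0-v15 to D as two 4-row panels, the second offset by the panel stride sdd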
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_LIB4
-#else
-	.align 4
-	.type inner_store_8x8_lib4, %function
-inner_store_8x8_lib4:
-#endif
-
-	add		x10, x8, x9
-
-	st1		{v0.4s, v1.4s, v2.4s, v3.4s}, [x8], #64
-	st1		{v4.4s, v5.4s, v6.4s, v7.4s}, [x10], #64
-	st1		{v8.4s, v9.4s, v10.4s, v11.4s}, [x8], #64
-	st1		{v12.4s, v13.4s, v14.4s, v15.4s}, [x10], #64
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-	.size	inner_store_8x8_lib4, .-inner_store_8x8_lib4
-#endif
-
-
-
-
-
-//                               w0        x1             x2         w3       x4         w5       x6             x7        sp+0     sp+8       sp+16
-// void kernel_sgemm_nt_8x8_lib4(int kmax, float *alpha, float *A, int sda, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd)
-
-	.align	4
-	.global	kernel_sgemm_nt_8x8_lib4
-	.type	kernel_sgemm_nt_8x8_lib4, %function
-kernel_sgemm_nt_8x8_lib4:
-	
-
-
-	PROLOGUE
-
-
-
-	// TODO zero the entire 128-bit register ???
-	fmov	d0, xzr
-	fmov    d1, d0
-	fmov    d2, d0
-	fmov    d3, d0
-	fmov    d4, d0
-	fmov    d5, d0
-	fmov    d6, d0
-	fmov    d7, d0
-	fmov    d8, d0
-	fmov    d9, d0
-	fmov    d10, d0
-	fmov    d11, d0
-	fmov    d12, d0
-	fmov    d13, d0
-	fmov    d14, d0
-	fmov    d15, d0
-
-
-
-	// call inner kernel gemm nt
-	mov		w8, w0 // kmax
-	mov		x9, x2 // A
-	mov		w10, w3 // sda
-	lsl		w10, w10, #4 // 16*sda
-	mov		x11, x4 // B
-	mov		w12, w5 // sdb
-	lsl		w12, w12, #4 // 16*sdb
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB4
-#else
-	bl	inner_kernel_gemm_add_nt_8x8_lib4
-#endif
-
-
-
-	// call inner blend for generic alpha and beta
-	mov		x8, x1 // alpha
-	mov		x9, x6 // beta
-	mov		x10, x7 // C
-	ldr		w11, [sp, #(STACKSIZE + 0)] // sdc
-	lsl		w11, w11, #4 // 16*sdc
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-	bl inner_scale_ab_8x8_lib4
-#endif
-
-
-
-	// store n
-	ldr		x8, [sp, #(STACKSIZE + 8)] // D
-	ldr		w9, [sp, #(STACKSIZE + 16)] // sdd
-	lsl		w9, w9, #4 // 16*sdd
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB4
-#else
-	bl inner_store_8x8_lib4
-#endif
-
-
-
-	EPILOGUE
-
-	mov	x0, #0
-
-	ret
-
-
-
-
diff --git a/third_party/blasfeo/kernel/avx/Makefile b/third_party/blasfeo/kernel/avx/Makefile
deleted file mode 100644
index f260086..0000000
--- a/third_party/blasfeo/kernel/avx/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_diag_lib8.o kernel_sgecp_lib8.o kernel_sgetr_lib8.o kernel_sgead_lib8.o kernel_sgesc_lib8.o kernel_sgemv_8_lib8.o kernel_sgemv_4_lib8.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += kernel_dgemm_8x4_lib4.o kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_12_lib4.o kernel_dgemv_8_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_6_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o kernel_dgebp_lib4.o
-OBJS += kernel_sgemm_16x4_lib8.o kernel_sgemm_8x8_lib8.o kernel_sgemm_8x4_lib8.o kernel_sgemm_diag_lib8.o kernel_sgecp_lib8.o kernel_sgetr_lib8.o kernel_sgead_lib8.o kernel_sgesc_lib8.o kernel_sgemv_8_lib8.o kernel_sgemv_4_lib8.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgebp_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgebp_lib4.S
deleted file mode 100644
index 0e8581e..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgebp_lib4.S
+++ /dev/null
@@ -1,935 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-//                               1      2          3        4          5          6
-// void kernel_dger4_sub_8r_lib4(int k, double *A, int sda, double *B, double *C, int sdc)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_8r_lib4
-	.type kernel_dger4_sub_8r_lib4, @function
-kernel_dger4_sub_8r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_8r_lib4
-_kernel_dger4_sub_8r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_8r_lib4
-	.def kernel_dger4_sub_8r_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_8r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmovapd	0(%r11, %r12, 1), %ymm4
-	vmovapd	32(%r11, %r12, 1), %ymm5
-	vmovapd	64(%r11, %r12, 1), %ymm6
-	vmovapd	96(%r11, %r12, 1), %ymm7
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	vmovapd			32(%r14), %ymm8
-	vmovapd			32(%r14, %r15, 1), %ymm9
-	vbroadcastsd	32(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	48(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	56(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 32(%r14)
-	vmovapd			%ymm9, 32(%r14, %r15, 1)
-
-	vmovapd			64(%r14), %ymm8
-	vmovapd			64(%r14, %r15, 1), %ymm9
-	vbroadcastsd	64(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	72(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	80(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	88(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 64(%r14)
-	vmovapd			%ymm9, 64(%r14, %r15, 1)
-
-	vmovapd			96(%r14), %ymm8
-	vmovapd			96(%r14, %r15, 1), %ymm9
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-24(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, -32(%r14)
-	vmovapd			%ymm9, -32(%r14, %r15, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_8r_lib4, .-kernel_dger4_sub_8r_lib4
-#endif
-
-
-
-
-
-//                                  1      2          3        4          5          6        7
-// void kernel_dger4_sub_8r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_8r_vs_lib4
-	.type kernel_dger4_sub_8r_vs_lib4, @function
-kernel_dger4_sub_8r_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_8r_vs_lib4
-_kernel_dger4_sub_8r_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_8r_vs_lib4
-	.def kernel_dger4_sub_8r_vs_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_8r_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-	movq	ARG7, %rax // km
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	vcvtsi2sd	%eax, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC01(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC01(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmaskmovpd	0(%r11, %r12, 1), %ymm15, %ymm4
-	vmaskmovpd	32(%r11, %r12, 1), %ymm15, %ymm5
-	vmaskmovpd	64(%r11, %r12, 1), %ymm15, %ymm6
-	vmaskmovpd	96(%r11, %r12, 1), %ymm15, %ymm7
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	vmovapd			32(%r14), %ymm8
-	vmovapd			32(%r14, %r15, 1), %ymm9
-	vbroadcastsd	32(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	48(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	56(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 32(%r14)
-	vmovapd			%ymm9, 32(%r14, %r15, 1)
-
-	vmovapd			64(%r14), %ymm8
-	vmovapd			64(%r14, %r15, 1), %ymm9
-	vbroadcastsd	64(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	72(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	80(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	88(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 64(%r14)
-	vmovapd			%ymm9, 64(%r14, %r15, 1)
-
-	vmovapd			96(%r14), %ymm8
-	vmovapd			96(%r14, %r15, 1), %ymm9
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-24(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, -32(%r14)
-	vmovapd			%ymm9, -32(%r14, %r15, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm4, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm5, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm6, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm8, %ymm8
-	vmulpd			%ymm7, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm9, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_8r_vs_lib4, .-kernel_dger4_sub_8r_vs_lib4
-#endif
-
-
-
-
-
-//                               1      2          3          4
-// void kernel_dger4_sub_4r_lib4(int n, double *A, double *B, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_4r_lib4
-	.type kernel_dger4_sub_4r_lib4, @function
-kernel_dger4_sub_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_4r_lib4
-_kernel_dger4_sub_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_4r_lib4
-	.def kernel_dger4_sub_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	vmovapd			32(%r13), %ymm4
-	vbroadcastsd	32(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	40(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	48(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	56(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 32(%r13)
-
-	vmovapd			64(%r13), %ymm4
-	vbroadcastsd	64(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	72(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	80(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	88(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 64(%r13)
-
-	vmovapd			96(%r13), %ymm4
-	vbroadcastsd	96(%r12), %ymm15
-	addq	$128, %r12
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-24(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-8(%r12), %ymm15
-	addq	$128, %r13
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, -32(%r13)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	addq	$32, %r12
-	addq	$32, %r13
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_4r_lib4, .-kernel_dger4_sub_4r_lib4
-#endif
-
-
-
-
-
-//                                  1      2          3          4          5
-// void kernel_dger4_sub_4r_vs_lib4(int n, double *A, double *B, double *C, int km)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_4r_vs_lib4
-	.type kernel_dger4_sub_4r_vs_lib4, @function
-kernel_dger4_sub_4r_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_4r_vs_lib4
-_kernel_dger4_sub_4r_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_4r_vs_lib4
-	.def kernel_dger4_sub_4r_vs_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_4r_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-	movq	ARG5, %r14
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC00(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC00(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	// load block from A
-	vmaskmovpd	0(%r11), %ymm15, %ymm0
-	vmaskmovpd	32(%r11), %ymm15, %ymm1
-	vmaskmovpd	64(%r11), %ymm15, %ymm2
-	vmaskmovpd	96(%r11), %ymm15, %ymm3
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	vmovapd			32(%r13), %ymm4
-	vbroadcastsd	32(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	40(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	48(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	56(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 32(%r13)
-
-	vmovapd			64(%r13), %ymm4
-	vbroadcastsd	64(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	72(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	80(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	88(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 64(%r13)
-
-	vmovapd			96(%r13), %ymm4
-	vbroadcastsd	96(%r12), %ymm15
-	addq	$128, %r12
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-24(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	-8(%r12), %ymm15
-	addq	$128, %r13
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, -32(%r13)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	vmulpd			%ymm1, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vmulpd			%ymm2, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vmulpd			%ymm3, %ymm15, %ymm14
-	vsubpd			%ymm14, %ymm4, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	addq	$32, %r12
-	addq	$32, %r13
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_4r_vs_lib4, .-kernel_dger4_sub_4r_vs_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00:
-#elif defined(OS_MAC)
-LC00:
-	.align 5
-#endif
-	.double 0.5
-	.double 1.5
-	.double 2.5
-	.double 3.5
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01:
-#elif defined(OS_MAC)
-LC01:
-	.align 5
-#endif
-	.double 4.5
-	.double 5.5
-	.double 6.5
-	.double 7.5
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02:
-#elif defined(OS_MAC)
-LC02:
-	.align 5
-#endif
-	.double 8.5
-	.double 9.5
-	.double 10.5
-	.double 11.5
-
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index 95ff6ea..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,9906 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
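-// note: the accumulator uses the permuted column layout listed above (e.g. ymm0 holds d00 d11 d22 d33);
-// a later blend/scale routine undoes this permutation before the result is stored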
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, @function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r12), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-//	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-
-//	cmpl	$3, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	addq	$32, %r11
-
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	addq	$32, %r12
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	subl	$1, %r10d
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_4x4_lib4, @function
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r12), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-	addq	$128, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-
-	// unroll 3
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	cmpl	$4, %r10d
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-	addq	$128, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-
-	// unroll 3
-//	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-//	cmpl	$3, %r10d
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	addq	$32, %r11
-
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	addq	$32, %r12
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_4x4_lib4, .-inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <= dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <= dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x4_lib4, @function
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x4_lib4, .-inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nn_4x4_lib4, @function
-inner_kernel_dgemm_sub_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nn_4x4_lib4, .-inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-
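For reference, the subtract kernel above reduces to the plain-C sketch below, based on the register comments (the function name and the D[4][4] view of the ymm0..ymm3 accumulators are illustrative only; bs = 4 is the lib4 panel height and sdb is the panel stride of B).

static void ref_dgemm_sub_nn_4x4_lib4(int k, const double *A, const double *B, int sdb,
                                      double D[4][4])
{
	for (int l = 0; l < k; l++) {
		// row l of B sits in panel l/4 at position l%4; column j of that panel is
		// 4 doubles (32 bytes) further on, matching the 0/32/64/96 vbroadcastsd offsets
		const double *Bl = B + (l / 4) * 4 * sdb + (l % 4);
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 4; i++)
				D[j][i] -= A[i + 4 * l] * Bl[4 * j]; // one subtracted rank-1 update
	}
}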
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- B
-// r12   <- C
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- ?
-// r12   <- ?
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_4x4_lib4, @function
-inner_kernel_dgebp_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_4x4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r12), %ymm12
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	8(%r11), %ymm13
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-
-	vmovapd			32(%r12), %ymm12
-	vbroadcastsd	32(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	40(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	48(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	56(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmovapd			%ymm12, 32(%r12)
-
-	vmovapd			64(%r12), %ymm12
-	vbroadcastsd	64(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	72(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	80(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	88(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmovapd			%ymm12, 64(%r12)
-
-	vmovapd			96(%r12), %ymm12
-	vbroadcastsd	96(%r11), %ymm13
-	addq	$128, %r11
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	-24(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	-16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	-8(%r11), %ymm13
-	addq	$128, %r12
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmovapd			%ymm12, -32(%r12)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r12), %ymm12
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_4x4_lib4, .-inner_kernel_dgebp_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
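The dgebp kernel above applies a 4x4 block kept in registers to k panel-major columns of C. A plain-C sketch of the same update (names illustrative; A[l] stands for accumulator ymm_l, i.e. column l of the block):

static void ref_dgebp_add_nn_4x4_lib4(int k, const double A[4][4], const double *B, double *C)
{
	// B and C are panel-major: column j is the 4 consecutive doubles at offset 4*j
	for (int j = 0; j < k; j++)
		for (int l = 0; l < 4; l++)
			for (int i = 0; i < 4; i++)
				C[4 * j + i] += A[l][i] * B[4 * j + l]; // C[:,j] += A * B[:,j]
}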
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x4_lib4, @function
-inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x4_lib4, .-inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
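The unaligned-B edge above performs just enough rank-1 updates to reach a panel boundary of B before the main kernel takes over. A plain-C sketch of that bookkeeping under the same lib4 layout (the function name and the pointer-in/pointer-out interface are illustrative):

static void ref_dgemm_add_nn_4x4_edge(int *k, const double **A, const double **B,
                                      int sdb, int offB, double D[4][4])
{
	if (offB == 0 || *k == 0)
		return;                               // nothing to align
	int kend = 4 - offB;
	if (kend > *k)
		kend = *k;                            // kend = min(k, 4-offB), the cmovgl above
	const double *a = *A, *b = *B + offB;     // start offB rows into the current B panel
	for (int l = 0; l < kend; l++, a += 4, b += 1)
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 4; i++)
				D[j][i] += a[i] * b[4 * j];   // rank-1 update with row offB+l of B
	*A = a;
	if (*k > kend)
		*B += 4 * sdb;                        // next, panel-aligned block row of B
	*k -= kend;
}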
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r10), %ymm8
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-
-	vmovapd			32(%r10), %ymm8
-	vbroadcastsd	32(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	40(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-
-	vmovapd			64(%r10), %ymm8
-	vbroadcastsd	64(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	72(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	80(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-
-	vmovapd			96(%r10), %ymm8
-	vbroadcastsd	96(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	104(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	112(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	120(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_lib4, .-inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-
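The dtrmm_nt_ru edge above multiplies by only the upper-triangular 4x4 corner of the (transposed-stored) right factor. As a plain-C sketch (illustrative name; D[4][4] stands for ymm0..ymm3):

static void ref_dtrmm_nt_ru_4x4_edge(const double *A, const double *B, double D[4][4])
{
	for (int l = 0; l < 4; l++)
		for (int j = 0; j <= l; j++)          // only entries with j <= l are read
			for (int i = 0; i < 4; i++)
				D[j][i] += A[4 * l + i] * B[4 * l + j];
}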
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	addq			$32, %r11
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	addq			$32, %r11
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	addq			$32, %r11
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_4x4_lib4, @function
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r14d
-	jg		0f
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r14d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	subl			$3, %r10d // k-3
-	addq			$96, %r11 // A+3*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$8, %r12 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r14d
-	jg		2f
-
-	// offB==2
-
-	addq			$16, %r12 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	subl			$2, %r10d // k-2
-	addq			$64, %r11 // A+2*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$16, %r12 // B+bs*sdb*sizeof(double)-2
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq			$24, %r12 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-3
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_4x4_lib4, .-inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-
-
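The offB==0 path of the dtrmm_nn_rl edge above amounts to the sketch below; the other branches walk the same lower triangle starting offB rows into the current B panel (name and array view illustrative):

static void ref_dtrmm_nn_rl_4x4_edge(const double *A, const double *B, double D[4][4])
{
	// lower-triangular right factor in normal storage: column j of the 4x4 corner is
	// nonzero only for rows l >= j, so row l of B contributes to columns 0..l of D
	for (int l = 0; l < 4; l++)
		for (int j = 0; j <= l; j++)
			for (int i = 0; i < 4; i++)
				D[j][i] += A[4 * l + i] * B[4 * j + l];
}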
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_4x4_gen_lib4, @function
-inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	cmpl			$0, %r14d
-	jg				0f // offB>0
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-0:
-	cmpl			$1, %r14d
-	jg				1f // offB>1
-
-	// offB==1
-
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-1:
-	cmpl			$2, %r14d
-	jg				2f // offB>2
-
-	// offB==2
-
-	addq			$16, %r12 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	subl			$1, %r10d // k-2
-	addq			$32, %r11 // A+2*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f
-
-2:
-	// offB==3
-
-	addq			$24, %r12 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-4
-	addq			$32, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_4x4_gen_lib4, .-inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for dlauum
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DLAUUM_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dlauum_nt_4x4_lib4, @function
-inner_edge_dlauum_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dlauum_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dlauum_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dlauum_nt_4x4_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r10), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-
-	vmovapd			32(%r10), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	32(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	40(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-
-	vmovapd			64(%r10), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	64(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	72(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	80(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-
-	vmovapd			96(%r10), %ymm8
-	vbroadcastsd	96(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	104(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	112(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	120(%r11), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dlauum_nt_4x4_lib4, .-inner_edge_dlauum_nt_4x4_lib4
-#endif
-#endif
-
-
-
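The dlauum edge above is the dtrmm_nt_ru edge with each A column additionally masked to its upper-triangular part by the vblendpd instructions. A plain-C sketch (illustrative name):

static void ref_dlauum_nt_4x4_edge(const double *A, const double *B, double D[4][4])
{
	for (int l = 0; l < 4; l++)
		for (int j = 0; j <= l; j++)
			for (int i = 0; i <= l; i++)      // rows below the diagonal of A are zeroed out
				D[j][i] += A[4 * l + i] * B[4 * l + j];
}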
-
-
-// common inner routine with file scope
-//
-// edge for dlauum
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DLAUUM_NT_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dlauum_nt_4x4_vs_lib4, @function
-inner_edge_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dlauum_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dlauum_nt_4x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	addq			$32, %r11
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dlauum_nt_4x4_vs_lib4, .-inner_edge_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_4x4_lib4, @function
-inner_blend_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_4x4_lib4:
-#endif	
-#endif	
-	
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_4x4_lib4, .-inner_blend_4x4_lib4
-#endif
-#endif
-
-
-
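Per the register comments above, accumulator ymm_r holds element d(i, i XOR r) in lane i; the two rounds of vblendpd simply undo that permutation. A plain-C sketch of the mapping (names illustrative):

static void ref_blend_4x4(const double acc[4][4], double D[4][4])
{
	// acc[r][i] is lane i of ymm_r in the rotated layout produced by the NT kernels;
	// on output D[j][i] = d(i, j), i.e. D[j] is column j as documented above
	for (int r = 0; r < 4; r++)
		for (int i = 0; i < 4; i++)
			D[i ^ r][i] = acc[r][i];
}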
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, @function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
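The scale routine above computes D = alpha*D + beta*C and skips the C loads entirely when beta == 0. A plain-C sketch (alpha and beta are passed by pointer in the kernel, by value here; name illustrative):

static void ref_scale_ab_4x4(double alpha, double beta, const double *C, double D[4][4])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			D[j][i] *= alpha;
			if (beta != 0.0)                  // the vucomisd/je pair above short-circuits this
				D[j][i] += beta * C[4 * j + i];
		}
}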
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_gen_lib4, @function
-inner_scale_ab_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_gen_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmovapd		32(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmovapd		96(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-
-	jmp		3f
-
-0:
-
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_gen_lib4, .-inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-
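The _gen variant above differs only in how C is addressed when the 4x4 block starts offset rows into a panel: rows offset..3 come from the current panel, the remaining rows from the next panel 4*sdc doubles further on, and the vblendpd/vperm2f128/vshufpd sequences rotate each loaded column so lane i again holds result row i. A plain-C sketch of that addressing (name illustrative):

static double ref_c_load_gen(const double *C, int sdc, int offset, int i, int j)
{
	int r = offset + i;                       // physical row within the panel pair
	const double *panel = (r < 4) ? C : C + 4 * sdc;
	return panel[(r % 4) + 4 * j];            // column j is 4 doubles apart within a panel
}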
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_4x4_lib4, @function
-inner_scale_a0_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_4x4_lib4, .-inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_4x4_lib4, @function
-inner_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_11_4x4_lib4:
-#endif	
-#endif	
-	
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_4x4_lib4, .-inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_lib4, @function
-inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_lib4, .-inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_gen_lib4, @function
-inner_blend_scale_ab_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_gen_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmovapd		32(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmovapd		96(%r13), %ymm12
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-
-	jmp		3f
-
-0:
-
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm1, %ymm13, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm3, %ymm13, %ymm3
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_gen_lib4, .-inner_blend_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-
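Editor's note: the epilogue that ends above rescales the accumulator by alpha and then adds beta times a 4x4 tile of C that may start partway into a 4-row panel, reading the tail of the tile from the next panel C1 = C0 + 4*sdc. The following is only a minimal plain-C sketch of that arithmetic, assuming the accumulator has already been unscrambled to column order; all names here are hypothetical, not part of the library.

/* Hypothetical reference for the blend/scale "gen" epilogue: acc[j][i] is the
 * (row i, column j) accumulator, C0/C1 are consecutive 4-row panels of C, and
 * the logical tile starts `offset` rows into C0 (0 <= offset <= 3). */
static void blend_scale_ab_gen_ref(double alpha, double beta, int offset,
                                   const double *C0, const double *C1,
                                   double acc[4][4])
{
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 4; i++) {
            /* row i of the tile lives in C0 until the panel ends, then in C1 */
            double c = (i < 4 - offset) ? C0[4 * j + offset + i]
                                        : C1[4 * j + i - (4 - offset)];
            acc[j][i] = alpha * acc[j][i] + beta * c; /* beta == 0.0 skips the C loads in the kernel */
        }
    }
}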
-
-
-// common inner routine with file scope
-//
-// blend accumulators and add C, for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_4x4_lib4, @function
-inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_4x4_lib4:
-#endif	
-#endif	
-	
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_4x4_lib4, .-inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
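Editor's note: the register comments above spell out the lane rotation the NT microkernel leaves in its accumulators. As a sanity check, here is a small plain-C sketch (hypothetical names; D stored column-major as in the lib4 panels) of the blend that the vblendpd pairs perform before C is added in:

/* acc[0] = [d00 d11 d22 d33], acc[1] = [d01 d10 d23 d32],
 * acc[2] = [d03 d12 d21 d30], acc[3] = [d02 d13 d20 d31]:
 * lane i of accumulator k holds the element of row i, column (i ^ xr[k]). */
static void blend_acc_ref(const double acc[4][4], const double *C, double D[4][4])
{
    static const int xr[4] = {0, 1, 3, 2};
    for (int k = 0; k < 4; k++)
        for (int i = 0; i < 4; i++)
            D[i ^ xr[k]][i] = acc[k][i];   /* D[column][row] */
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 4; i++)
            D[j][i] += C[4 * j + i];       /* alpha = beta = 1.0 */
}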
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_lib4, @function
-inner_edge_dpotrf_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_4x4_lib4:
-#endif
-#endif
-	
-	vxorpd	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd		%xmm0, %xmm0, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-2:
-	vmovsd		%xmm13, 0(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm0
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm1, %ymm1
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-
-	vpermilpd	$0x3, %xmm1, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-4:
-	vmovsd		%xmm13, 8(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm1
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-6:
-	vmovsd		%xmm13, 16(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm2
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilpd	$0x3, %xmm13, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-8:
-	vmovsd		%xmm13, 24(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-1:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_lib4, .-inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-
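Editor's note: a compact plain-C reference for the 4x4 Cholesky edge above (names are illustrative only; D is the column-major tile, only its lower triangle is meaningful, and inv_diag receives the reciprocal square roots the kernel stores to inv_diag_E). Like the kernel, a non-positive pivot is handled by zeroing the scaling factor rather than trapping.

#include <math.h>

static void potrf_4x4_ref(double D[4][4], double inv_diag[4])
{
    for (int j = 0; j < 4; j++) {
        double d   = D[j][j];
        double inv = d > 0.0 ? 1.0 / sqrt(d) : 0.0; /* kernel branches to a zeroed factor */
        inv_diag[j] = inv;
        for (int i = j; i < 4; i++)
            D[j][i] *= inv;                 /* column j of L (diagonal becomes sqrt(d)) */
        for (int k = j + 1; k < 4; k++)     /* trailing rank-1 update */
            for (int i = k; i < 4; i++)
                D[k][i] -= D[j][i] * D[j][k];
    }
}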
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_vs_lib4, @function
-inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_4x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd		%xmm0, %xmm0, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-2:
-	vmovsd		%xmm13, 0(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm0
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm1, %ymm1
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-
-	vpermilpd	$0x3, %xmm1, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-4:
-	vmovsd		%xmm13, 8(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm1
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-6:
-	vmovsd		%xmm13, 16(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm2
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilpd	$0x3, %xmm13, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-8:
-	vmovsd		%xmm13, 24(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-1:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_vs_lib4, .-inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
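Editor's note: the right/lower/transposed solve above computes, column by column, X such that X * E^T = D, using multiplications by the precomputed reciprocals in inv_diag_E instead of divisions. A plain-C sketch under the same column-major 4x4 layout; the function name is hypothetical.

/* E is lower triangular, stored column-major in a 4x4 panel (E[col][row]);
 * D holds the right-hand sides and is overwritten with the solution. */
static void trsm_rlt_inv_4x4_ref(const double E[4][4], const double inv_diag_E[4],
                                 double D[4][4])
{
    for (int j = 0; j < 4; j++) {
        for (int k = 0; k < j; k++)
            for (int i = 0; i < 4; i++)
                D[j][i] -= E[k][j] * D[k][i]; /* E[k][j] = e_jk, i.e. (E^T)_kj */
        for (int i = 0; i < 4; i++)
            D[j][i] *= inv_diag_E[j];         /* multiply instead of dividing by e_jj */
    }
}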
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_4x4_lib4, @function
-inner_edge_dtrsm_rlt_one_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_4x4_lib4, .-inner_edge_dtrsm_rlt_one_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$2, %r11d
-
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	cmpl			$3, %r11d
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	cmpl			$4, %r11d
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_4x4_lib4, @function
-inner_edge_dtrsm_rut_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_4x4_lib4, .-inner_edge_dtrsm_rut_inv_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$3, %r12d
-	jle				0f
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-0:
-	cmpl			$2, %r12d
-	jle				1f
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-1:
-	cmpl			$1, %r12d
-	jle				2f
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_run_inv_4x4_lib4, @function
-inner_edge_dtrsm_run_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_run_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_run_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_run_inv_4x4_lib4:
-#endif
-#endif
-
-	// first column
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-	// second column
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-
-	// third column
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-
-	// fourth column
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_run_inv_4x4_lib4, .-inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = lower
-// tran = normal
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lln_one_4x4_lib4, @function
-inner_edge_dtrsm_lln_one_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lln_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lln_one_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lln_one_4x4_lib4:
-#endif
-#endif
-
-	vxorpd		%ymm14, %ymm14, %ymm14
-
-	vmovapd		0(%r10), %ymm12
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vperm2f128	$0x00, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-
-	vmovapd		32(%r10), %ymm12
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vperm2f128	$0x00, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-
-	vmovapd		64(%r10), %ymm12
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vperm2f128	$0x11, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lln_one_4x4_lib4, .-inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
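Editor's note: the left/lower/normal edge above is a forward substitution E * X = D with an implicit unit diagonal: the vblendpd against a zero register clears the diagonal (and unused upper) part of each column of E before the rank-1 update. A plain-C sketch, with hypothetical names:

/* E unit-lower-triangular, column-major; D (the RHS columns) is solved in place. */
static void trsm_lln_one_4x4_ref(const double E[4][4], double D[4][4])
{
    for (int k = 0; k < 3; k++)               /* pivot row k; the last row needs no update */
        for (int j = 0; j < 4; j++)           /* every column of the RHS */
            for (int i = k + 1; i < 4; i++)
                D[j][i] -= E[k][i] * D[j][k]; /* E[k][i] = e_ik below the unit diagonal */
}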
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_4x4_lib4, @function
-inner_edge_dtrsm_lun_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r11), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vbroadcastsd	0(%r11), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_4x4_lib4, .-inner_edge_dtrsm_lun_inv_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl	$3, %r12d
-	jle		0f
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-0:
-	cmpl	$2, %r12d
-	jle		1f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-1:
-	cmpl	$1, %r12d
-	jle		2f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r11), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_4x4_vs_lib4, .-inner_edge_dtrsm_lun_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_4x4_lib4, @function
-inner_edge_dgetrf_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_4x4_lib4:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-	vmovddup	%xmm14, %xmm14
-
-	// first column
-//	vblendpd	$0x1, %ymm0, %ymm12, %ymm12
-	vmovapd		%ymm0, %ymm12
-	vmovddup	%xmm0, %xmm13
-	vdivpd		%xmm13, %xmm14, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd		%xmm13, 0(%r10)
-	vmulpd		%ymm0, %ymm13, %ymm0
-	vblendpd	$0x1, %ymm12, %ymm0, %ymm0
-
-	// second column
-	vmovddup	%xmm1, %xmm12
-	vperm2f128	$0x00, %ymm12, %ymm12, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vblendpd	$0x2, %ymm1, %ymm13, %ymm12
-
-	vpermilpd	$0x3, %xmm1, %xmm13
-	vdivpd		%xmm13, %xmm14, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd		%xmm13, 8(%r10)
-	vmulpd		%ymm1, %ymm13, %ymm1
-	vblendpd	$0x3, %ymm12, %ymm1, %ymm1
-
-	// third column
-	vmovddup	%xmm2, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vblendpd	$0x2, %ymm2, %ymm13, %ymm12
-
-	vpermilpd	$0x3, %xmm2, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vblendpd	$0x4, %ymm2, %ymm12, %ymm12
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vmovddup	%xmm13, %xmm13
-	vdivpd		%xmm13, %xmm14, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd		%xmm13, 16(%r10)
-	vmulpd		%ymm2, %ymm13, %ymm2
-	vblendpd	$0x7, %ymm12, %ymm2, %ymm2
-
-	// fourth column
-	vmovddup	%xmm3, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vblendpd	$0x2, %ymm3, %ymm13, %ymm12
-
-	vpermilpd	$0x3, %xmm3, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vblendpd	$0x4, %ymm3, %ymm12, %ymm12
-
-	vperm2f128	$0x11, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vblendpd	$0x8, %ymm3, %ymm12, %ymm12
-	
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilpd	$0x3, %xmm13, %xmm13
-	vdivpd		%xmm13, %xmm14, %xmm13
-//	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd		%xmm13, 24(%r10)
-//	vmulpd		%ymm3, %ymm13, %ymm3
-	vblendpd	$0x7, %ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_4x4_lib4, .-inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
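Editor's note: the LU edge above factors the 4x4 block without pivoting, leaving U on and above the diagonal, the L multipliers below it, and the reciprocal of each pivot in inv_diag_E. A plain-C reference of the same recurrence (illustrative names; no safeguard against a zero pivot, matching the kernel):

static void getrf_4x4_nopivot_ref(double D[4][4], double inv_diag[4])
{
    for (int j = 0; j < 4; j++) {
        for (int k = 0; k < j; k++)           /* apply earlier eliminations to column j */
            for (int i = k + 1; i < 4; i++)
                D[j][i] -= D[k][i] * D[j][k]; /* l_ik * u_kj */
        double inv = 1.0 / D[j][j];
        inv_diag[j] = inv;
        for (int i = j + 1; i < 4; i++)
            D[j][i] *= inv;                   /* store the multipliers of column j */
    }
}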
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, @function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_vs_lib4, @function
-inner_store_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	cmpl		$2, %r12d
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-	jl			0f // end
-	cmpl		$3, %r12d
-	vmaskmovpd	%ymm1, %ymm15, 32(%r10)
-	jl			0f // end
-	vmaskmovpd	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmaskmovpd	%ymm3, %ymm15, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_vs_lib4, .-inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
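Editor's note: the vs store above builds a lane mask from the row count km and uses vmaskmovpd so only the first km rows of each column touch memory. An equivalent AVX intrinsics sketch follows; the {0.5, 1.5, 2.5, 3.5} constant is an assumption about what .LC02 holds (only its ordering relative to km matters, since vmaskmovpd keys off the sign bit of each lane).

#include <immintrin.h>

/* Store the first km (1..4) doubles of the 4-double column `col` to dst. */
static void maskstore_column(double *dst, __m256d col, int km)
{
    __m256d idx  = _mm256_setr_pd(0.5, 1.5, 2.5, 3.5);  /* assumed .LC02 contents */
    __m256d kmv  = _mm256_set1_pd((double)km);
    __m256d diff = _mm256_sub_pd(idx, kmv);              /* sign bit set for lanes < km */
    _mm256_maskstore_pd(dst, _mm256_castpd_si256(diff), col);
}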
-
-
-
-// common inner routine with file scope
-//
-// store n lower triangular
-//
-// input arguments:
-// r10   <- D
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_lib4, @function
-inner_store_l_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_lib4, .-inner_store_l_4x4_lib4
-#endif
-#endif
-
-
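Editor's note: the "store l" routine above merges the freshly blended columns with what is already in memory, so the strictly upper triangle of the destination is left untouched (vblendpd against the loaded column). Conceptually, in plain C (hypothetical name):

static void store_l_4x4_ref(double *D, const double acc[4][4])
{
    for (int j = 0; j < 4; j++)
        for (int i = j; i < 4; i++)
            D[4 * j + i] = acc[j][i];  /* strictly upper entries of D are preserved */
}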
-
-
-
-// common inner routine with file scope
-//
-// store n vs lower triangular
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_vs_lib4, @function
-inner_store_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmaskmovpd	%ymm1, %ymm15, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmaskmovpd	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmaskmovpd	%ymm3, %ymm15, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_vs_lib4, .-inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_gen_lib4, @function
-inner_store_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm12, %ymm15
-	vandpd		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vmaskmovpd	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vmaskmovpd	%ymm2, %ymm15, 64(%r11)
-	je			3f // end
-	vmaskmovpd	%ymm3, %ymm15, 96(%r11)
-
-	jmp		3f
-
-0:
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	cmpl		$2, %r15d
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%rbx)
-	jl			3f // end
-	cmpl		$3, %r15d
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%rbx)
-	jl			3f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%rbx)
-	je			3f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%rbx)
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm1
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm2
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	cmpl		$2, %r15d
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%rbx)
-	jl			3f // end
-	cmpl		$3, %r15d
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%rbx)
-	jl			3f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%rbx)
-	je			3f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%rbx)
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm1, %ymm1
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm2, %ymm2
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	cmpl		$2, %r15d
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%rbx)
-	jl			3f // end
-	cmpl		$3, %r15d
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%rbx)
-	jl			3f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%rbx)
-	je			3f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%rbx)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_gen_lib4, .-inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
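Editor's note: the generalized store above clips the tile to a row window [m0, m1) and a column window, and, when offset is non-zero, splits each column across two 4-row panels D0 and D1 = D0 + 4*sdd. The masked-store machinery reduces to the following plain-C behaviour (hypothetical names; the column shift for n0 is assumed to have been applied to acc already, as the kernel does before this point):

static void store_4x4_gen_ref(int offset, double *D0, double *D1,
                              int m0, int m1, int n_cols, const double acc[4][4])
{
    if (n_cols > 4) n_cols = 4;
    for (int j = 0; j < n_cols; j++)
        for (int i = 0; i < 4; i++) {
            if (i < m0 || i >= m1) continue;  /* row mask built from m0/m1 */
            double *dst = (offset + i < 4) ? &D0[4 * j + offset + i]
                                           : &D1[4 * j + offset + i - 4];
            *dst = acc[j][i];
        }
}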
-
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_gen_lib4, @function
-inner_store_l_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm12, %ymm15
-	vandpd		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm2, %ymm15, 64(%r11)
-	je			3f // end
-	vblendpd	$0x4, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm3, %ymm15, 96(%r11)
-
-	jmp		3f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x2, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_gen_lib4, .-inner_store_l_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                               1      2              3          4          5             6          7
-// void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.type kernel_dgemm_nt_4x4_lib4, @function
-kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_lib4
-_kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.def kernel_dgemm_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4, .-kernel_dgemm_nt_4x4_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dgemm_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.type kernel_dgemm_nt_4x4_vs_lib4, @function
-kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_vs_lib4
-_kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.def kernel_dgemm_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_vs_lib4, .-kernel_dgemm_nt_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   1      2              3          4          5             6            7          8        9            10         11       12      13      14      15
-// void kernel_dgemm_nt_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.type kernel_dgemm_nt_4x4_gen_lib4, @function
-kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_gen_lib4
-_kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.def kernel_dgemm_nt_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_gen_lib4, .-kernel_dgemm_nt_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx          r8         r9       rsp+8         rsp+16     rsp+24
-// void kernel_dgemm_nn_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.type kernel_dgemm_nn_4x4_lib4, @function
-kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_lib4
-_kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.def kernel_dgemm_nn_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_lib4, .-kernel_dgemm_nn_4x4_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx       r8         r9       rsp+8         rsp+16    rsp+24     rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_dgemm_nn_4x4_gen_lib4(int k, double *alpha, double *A, int offB, double *B, int sdb, double *beta, int offC, double *C, int sdc, int offD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_gen_lib4
-	.type kernel_dgemm_nn_4x4_gen_lib4, @function
-kernel_dgemm_nn_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_gen_lib4
-_kernel_dgemm_nn_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_gen_lib4
-	.def kernel_dgemm_nn_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_gen_lib4, .-kernel_dgemm_nn_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dsyrk_nt_l_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.type kernel_dsyrk_nt_l_4x4_lib4, @function
-kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_lib4
-_kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.def kernel_dsyrk_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_lib4, .-kernel_dsyrk_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dsyrk_nt_l_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_vs_lib4
-_kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_vs_lib4, .-kernel_dsyrk_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9           rsp+8      rsp+16   rsp+24       rsp+32     rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_dsyrk_nt_l_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_gen_lib4
-	.type kernel_dsyrk_nt_l_4x4_gen_lib4, @function
-kernel_dsyrk_nt_l_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_gen_lib4
-_kernel_dsyrk_nt_l_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_gen_lib4
-	.def kernel_dsyrk_nt_l_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_gen_lib4, .-kernel_dsyrk_nt_l_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  1      2              3          4            5          6        7
-// void kernel_dtrmm_nn_rl_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.type kernel_dtrmm_nn_rl_4x4_lib4, @function
-kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_4x4_lib4
-_kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.def kernel_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_4x4_lib4, .-kernel_dtrmm_nn_rl_4x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi            rdx        rcx          r8         r9       rsp+8        rsp+16     rsp+24   rsp+32  rsp+40  rsp+48  rsp+56
-// void kernel_dtrmm_nn_rl_4x4_gen_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_4x4_gen_lib4
-	.type kernel_dtrmm_nn_rl_4x4_gen_lib4, @function
-kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_4x4_gen_lib4
-_kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_4x4_gen_lib4
-	.def kernel_dtrmm_nn_rl_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // offsetD
-	movq	ARG8, %r11 // D
-	movq	ARG9, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG10, %r13 // m0
-	movq	ARG11, %r14 // m1
-	movq	ARG12, %r15 // n0
-	movq	ARG13, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_4x4_gen_lib4, .-kernel_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.type kernel_dtrmm_nt_ru_4x4_lib4, @function
-kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_lib4
-_kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.def kernel_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10
-	movq	ARG4, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_lib4, .-kernel_dtrmm_nt_ru_4x4_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dtrmm_nt_ru_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_4x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_vs_lib4
-_kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_vs_lib4, .-kernel_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  edi    rsi        rdx        rcx        r8         r9
-// void kernel_dpotrf_nt_l_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.type kernel_dpotrf_nt_l_4x4_lib4, @function
-kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.def kernel_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_lib4, .-kernel_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                     edi    rsi        rdx        rcx        r8         r9                  rsp+8   rsp+16
-// void kernel_dpotrf_nt_l_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24
-// void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                           edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24             rsp+32   rsp+40
-// void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8     
-// void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                            edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8               rsp+16  rsp+24  
-// void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                               edi     rsi         rdx         ecx     r8          r9          rsp+8    rsp+16     rsp+24     rsp+32                rsp+40 rsp+48
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9
-// void kernel_dtrsm_nt_rl_one_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_4x4_lib4
-	.type kernel_dtrsm_nt_rl_one_4x4_lib4, @function
-kernel_dtrsm_nt_rl_one_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_4x4_lib4
-_kernel_dtrsm_nt_rl_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_4x4_lib4
-	.def kernel_dtrsm_nt_rl_one_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_4x4_lib4, .-kernel_dtrsm_nt_rl_one_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8   rsp+16
-// void kernel_dtrsm_nt_rl_one_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_one_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8
-// void kernel_dtrsm_nt_ru_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_lib4
-	.type kernel_dtrsm_nt_ru_inv_4x4_lib4, @function
-kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_4x4_lib4
-_kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_lib4
-	.def kernel_dtrsm_nt_ru_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11 // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_4x4_lib4, .-kernel_dtrsm_nt_ru_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8                rsp+16  rsp+24
-// void kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double  *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_ru_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_ru_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11 // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16
-// void kernel_dtrsm_nn_ru_inv_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_lib4
-	.type kernel_dtrsm_nn_ru_inv_4x4_lib4, @function
-kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_4x4_lib4
-_kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_lib4
-	.def kernel_dtrsm_nn_ru_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_4x4_lib4, .-kernel_dtrsm_nn_ru_inv_4x4_lib4
-#endif
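In the `nn` kernels, `sall $5, %r13d` converts the B panel stride sdb into bytes: 4*sdb*sizeof(double) = sdb << 5, matching the comment next to it. Under that layout (a reading of the broadcast offsets used later in this file, not taken from BLASFEO documentation), element (kk, j) of the current 4-column block of B would be fetched as:

    // Hypothetical helper illustrating packed lib4 addressing of B in the nn
    // kernels; sdb counts 4-double panels, as in the "4*sdb*sizeof(double)" comment.
    static inline double b_elem(const double *B, int sdb, int kk, int j)
    {
        int panel = (kk / 4) * 4 * sdb;     // skip whole 4-row panels
        return B[panel + (kk % 4) + 4 * j]; // row within panel, then column
    }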
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16              rsp+24  rsp+32
-// void kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nn_ru_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-_kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nn_ru_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG9, %r11  // km 
-	movq	ARG10, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_4x4_vs_lib4, .-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8
-// void kernel_dtrsm_nn_ll_one_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_4x4_lib4
-	.type kernel_dtrsm_nn_ll_one_4x4_lib4, @function
-kernel_dtrsm_nn_ll_one_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_4x4_lib4
-_kernel_dtrsm_nn_ll_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_4x4_lib4
-	.def kernel_dtrsm_nn_ll_one_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_4x4_lib4, .-kernel_dtrsm_nn_ll_one_4x4_lib4
-#endif
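The `ll_one` edge routine takes no inv_diag argument because the `one` suffix means a unit diagonal: solving with a unit lower triangular E needs no divisions at all. Reading `ll` as "left, lower" (E * D = C after the dgemm_sub update), the math is plain forward substitution; a hedged dense C sketch:

    // Forward substitution with unit lower triangular E (4x4, column-major,
    // diagonal implicitly 1), solving E * D = C in place on D (D starts as C).
    static void trsm_ll_one_4x4(const double *E, double D[4][4])
    {
        for (int j = 0; j < 4; j++)            // each right-hand-side column
            for (int i = 1; i < 4; i++)
                for (int p = 0; p < i; p++)
                    D[i][j] -= E[i + 4 * p] * D[p][j];
    }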
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16  rsp+24
-// void kernel_dtrsm_nn_ll_one_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-	.type kernel_dtrsm_nn_ll_one_4x4_vs_lib4, @function
-kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-_kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-	.def kernel_dtrsm_nn_ll_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG8, %r11  // km 
-	movq	ARG9, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_4x4_vs_lib4, .-kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16
-// void kernel_dtrsm_nn_lu_inv_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_lib4
-	.type kernel_dtrsm_nn_lu_inv_4x4_lib4, @function
-kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_4x4_lib4
-_kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_lib4
-	.def kernel_dtrsm_nn_lu_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_4x4_lib4, .-kernel_dtrsm_nn_lu_inv_4x4_lib4
-#endif
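The `lu_inv` edge routine is the upper triangular counterpart, and the `inv` suffix is why inv_diag_E exists: the kernel multiplies by precomputed reciprocals instead of dividing, so the divisions are paid once per factorization rather than once per kernel call. A hedged dense C sketch of that back substitution:

    // Back substitution with upper triangular E (4x4, column-major), solving
    // E * D = C in place on D, using precomputed 1/E[i][i] from inv_diag_E.
    static void trsm_lu_inv_4x4(const double *E, const double *inv_diag_E, double D[4][4])
    {
        for (int j = 0; j < 4; j++)
            for (int i = 3; i >= 0; i--) {
                for (int p = i + 1; p < 4; p++)
                    D[i][j] -= E[i + 4 * p] * D[p][j];
                D[i][j] *= inv_diag_E[i];   // multiply instead of divide
            }
    }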
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16              rsp+24  rsp+32
-// void kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nn_lu_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-_kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nn_lu_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-	movq	ARG9, %r12  // km 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_4X4_VS_LIB4 // TODO
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_4x4_vs_lib4 // TODO
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_4x4_vs_lib4 // TODO
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG9, %r11  // km 
-	movq	ARG10, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_4x4_vs_lib4, .-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                edi    rsi        rdx        rcx      r8         r9         rsp+8
-// void kernel_dgetrf_nn_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_4x4_lib4
-	.type kernel_dgetrf_nn_4x4_lib4, @function
-kernel_dgetrf_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_4x4_lib4
-_kernel_dgetrf_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_4x4_lib4
-	.def kernel_dgetrf_nn_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG7, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_4x4_lib4, .-kernel_dgetrf_nn_4x4_lib4
-#endif
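kernel_dgetrf_nn_4x4_lib4 ends with an in-register factorization of the updated block rather than a solve. A minimal C sketch of an unpivoted, right-looking LU on a column-major 4x4 block that also emits the reciprocal diagonal suggested by inv_diag_D (my reading of the interface, not the exact inner_edge_dgetrf routine):

    // Unpivoted LU of a 4x4 column-major block D: L is stored below the diagonal
    // (unit diagonal implied), U on and above it; inv_diag_D[j] = 1/U[j][j].
    static void getrf_4x4(double *D, double *inv_diag_D)
    {
        for (int j = 0; j < 4; j++) {
            inv_diag_D[j] = 1.0 / D[j + 4 * j];
            for (int i = j + 1; i < 4; i++)
                D[i + 4 * j] *= inv_diag_D[j];                     // L multipliers
            for (int jj = j + 1; jj < 4; jj++)
                for (int i = j + 1; i < 4; i++)
                    D[i + 4 * jj] -= D[i + 4 * j] * D[j + 4 * jj]; // trailing update
        }
    }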
-
-
-
-
-
-//                                   edi    rsi        rdx        rcx      r8         r9         rsp+8               rsp+16  rsp+24
-// void kernel_dgetrf_nn_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_4x4_vs_lib4
-	.type kernel_dgetrf_nn_4x4_vs_lib4, @function
-kernel_dgetrf_nn_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_4x4_vs_lib4
-_kernel_dgetrf_nn_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_4x4_vs_lib4
-	.def kernel_dgetrf_nn_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG7, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG8, %r11  // km 
-	movq	ARG9, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_4x4_vs_lib4, .-kernel_dgetrf_nn_4x4_vs_lib4
-#endif
-
-
-
-
-
-#if 0
-//                                   rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dlauum_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlauum_nt_4x4_lib4
-	.type kernel_dlauum_nt_4x4_lib4, @function
-kernel_dlauum_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlauum_nt_4x4_lib4
-_kernel_dlauum_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlauum_nt_4x4_lib4
-	.def kernel_dlauum_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dlauum_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DLAUUM_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dlauum_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dlauum_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlauum_nt_4x4_lib4, .-kernel_dlauum_nt_4x4_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx        r8            r9         rsp+8      rsp+16  rsp+24
-// void kernel_dlauum_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlauum_nt_4x4_vs_lib4
-	.type kernel_dlauum_nt_4x4_vs_lib4, @function
-kernel_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlauum_nt_4x4_vs_lib4
-_kernel_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlauum_nt_4x4_vs_lib4
-	.def kernel_dlauum_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dlauum_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DLAUUM_NT_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dlauum_nt_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlauum_nt_4x4_vs_lib4, .-kernel_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-//                             1         2           3           4
-// void kernel_dlarfb4_r_4_lib4(int kmax, double *pV, double *pT, double *pD);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb4_r_4_lib4
-	.type kernel_dlarfb4_r_4_lib4, @function
-kernel_dlarfb4_r_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb4_r_4_lib4
-_kernel_dlarfb4_r_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb4_r_4_lib4
-	.def kernel_dlarfb4_r_4_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb4_r_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG2, %r12 // V
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG2, %r12 // V
-
-	//
-	vmovapd			0(%r11), %ymm12
-	vaddpd			%ymm12, %ymm0, %ymm0
-	//
-	vmovapd			32(%r11), %ymm12
-	vaddpd			%ymm12, %ymm1, %ymm1
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	//
-	vmovapd			64(%r11), %ymm12
-	vaddpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	64(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	72(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	//
-	vmovapd			96(%r11), %ymm12
-	vaddpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	96(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	104(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	112(%r12), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-
-	movq	ARG3, %r10 // T
-
-	//
-	vbroadcastsd	120(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	//
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	80(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	//
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	40(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	//
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vbroadcastsd	0(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // V
-	movq	ARG4, %r12 // D
-
-	//
-	vmovapd			0(%r12), %ymm12
-	vaddpd			%ymm12, %ymm0, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-	//
-	vmovapd			32(%r12), %ymm12
-	vbroadcastsd	32(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vaddpd			%ymm12, %ymm1, %ymm12
-	vmovapd			%ymm12, 32(%r12)
-	//
-	vmovapd			64(%r12), %ymm12
-	vbroadcastsd	64(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	72(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vaddpd			%ymm12, %ymm2, %ymm12
-	vmovapd			%ymm12, 64(%r12)
-	//
-	vmovapd			96(%r12), %ymm12
-	vbroadcastsd	96(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	104(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vbroadcastsd	112(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vaddpd			%ymm12, %ymm3, %ymm12
-	vmovapd			%ymm12, 96(%r12)
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_4x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb4_r_4_lib4, .-kernel_dlarfb4_r_4_lib4
-#endif
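kernel_dlarfb4_r_4_lib4 reads like a compact-WY block reflector applied from the right: the first gemm plus the inline section accumulate W from D and the (unit lower triangular) head of V, the broadcasts from ARG3 fold in the 4x4 triangular factor T, and the tail of the routine together with the dgebp call writes W*Vᵀ back into D. The sign and transpose conventions below are an assumption for illustration, not taken from BLASFEO:

    // Generic dense sketch of a compact-WY update from the right:
    // D (4 x n, column-major, lda = 4)  <-  D + (D * V) * T * V^T,
    // with V (n x 4, column-major) and T (4 x 4, column-major) treated as dense.
    static void larfb4_right(int n, const double *V, const double *T, double *D)
    {
        double W[4][4] = {0}, WT[4][4] = {0};
        for (int i = 0; i < 4; i++)                  // W = D * V
            for (int j = 0; j < 4; j++)
                for (int p = 0; p < n; p++)
                    W[i][j] += D[i + 4 * p] * V[p + n * j];
        for (int i = 0; i < 4; i++)                  // WT = W * T
            for (int j = 0; j < 4; j++)
                for (int p = 0; p < 4; p++)
                    WT[i][j] += W[i][p] * T[p + 4 * j];
        for (int i = 0; i < 4; i++)                  // D += WT * V^T
            for (int p = 0; p < n; p++)
                for (int j = 0; j < 4; j++)
                    D[i + 4 * p] += WT[i][j] * V[p + n * j];
    }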
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { -1 -1 -1 1 }
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { -1 -1 -1 -1 }
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 3.5 2.5 1.5 0.5 }
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 7.5 6.5 5.5 4.5 }
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC04: // { 1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC05: // { 1.0 1.0 1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC06: // { 1.0 1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC06: // { 1.0 1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC08: // { -1.0 -1.0 -1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC08: // { -1.0 -1.0 -1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC10: // { -1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC10: // { -1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-
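The read-only tables after the kernels encode packed double constants as pairs of 32-bit words, low word first: `.long 0` / `.long 1072693248` is 0x3FF0000000000000, i.e. 1.0, and -1074790400 is 0xBFF00000, the high word of -1.0, which is how the sign-pattern vectors LC05..LC10 are built. A small C check of that encoding:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        // Reassemble one table entry: first .long is the low word, second the high word.
        uint64_t bits = ((uint64_t)1072693248u << 32) | 0u;
        double d;
        memcpy(&d, &bits, sizeof d);
        printf("%f\n", d);   // prints 1.000000
        return 0;
    }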
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemm_8x4_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgemm_8x4_lib4.S
deleted file mode 100644
index e9f1f34..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemm_8x4_lib4.S
+++ /dev/null
@@ -1,13154 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_8x4_lib4, @function
-inner_kernel_dgemm_add_nt_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-//	movq	%r11, %r15 // A1 <- A0
-//	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	// prefetch
-	vmovapd 0(%r11), %ymm8 // A0[0]
-//	vmovapd 0(%r15), %ymm9 // A1[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmovapd 0(%r13), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 32(%r15), %ymm11 // A1[4]
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 64(%r15), %ymm9 // A1[8]
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 96(%r15), %ymm11 // A1[12]
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	addq	$128, %r11
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	addq	$128, %r15
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-
-	// unroll 3
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 0(%r15), %ymm9 // A1[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 32(%r15), %ymm11 // A1[4]
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 64(%r15), %ymm9 // A1[8]
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 96(%r15), %ymm11 // A1[12]
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-	addq	$128, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-//	addq	$128, %r15
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-
-	// unroll 3
-//	vmovapd 0(%r13), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-//	vmovapd 0(%r15), %ymm9 // A1[0]
-//	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-
-//	cmpl	$3, %r10d
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-//	vmovapd 0(%r15), %ymm9 // A1[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	addq	$32, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	addq	$32, %r13
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-//	addq	$32, %r15
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	subl	$1, %r10d
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_8x4_lib4, .-inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
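The register comments above pin down the contract of the 8x4 `nt` inner kernel: r11 points at the first 4-row panel of A, r11 + r12 at the second (r12 = 4*sda*sizeof(double)), and r13 walks a 4-row panel of B that is read transposed. The shuffled in-register layout (d00 d11 d22 d33, ...) is only an artifact of the vshufpd/vperm2f128 trick and is undone by the later blend step, so the net accumulation is the one in this hedged scalar model (not BLASFEO's reference code):

    // Scalar model of the nt 8x4 accumulation: C += A * B^T on packed panels.
    // A0/A1 are the two 4-row panels of A (A1 = A0 + 4*sda doubles); column kk
    // of each panel is 4 contiguous doubles, and B[4*kk + j] is element (j, kk).
    static void dgemm_add_nt_8x4_model(int k, const double *A0, int sda,
                                       const double *B, double C[8][4])
    {
        const double *A1 = A0 + 4 * sda;
        for (int kk = 0; kk < k; kk++)
            for (int j = 0; j < 4; j++)
                for (int i = 0; i < 8; i++) {
                    double a = (i < 4) ? A0[4 * kk + i] : A1[4 * kk + (i - 4)];
                    C[i][j] += a * B[4 * kk + j];
                }
    }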
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_8x4_lib4, @function
-inner_kernel_dgemm_sub_nt_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmovapd 0(%r13), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-
-	// unroll 3
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-	cmpl	$4, %r10d
-
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-
-	// unroll 3
-//	vmovapd 0(%r13), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-//	cmpl	$3, %r10d
-
-//	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vmulpd	%ymm11, %ymm13, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	vmulpd	%ymm11, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm0, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm9, %ymm12, %ymm15
-	vsubpd	%ymm15, %ymm4, %ymm4
-	addq	$32, %r11
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm1, %ymm1
-	addq	$32, %r13
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm5, %ymm5
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm3, %ymm3
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm7, %ymm7
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm2, %ymm2
-	subl	$1, %r10d
-	vmulpd	%ymm9, %ymm14, %ymm15
-	vsubpd	%ymm15, %ymm6, %ymm6
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_8x4_lib4, .-inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_8x4_lib4, @function
-inner_kernel_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	%r14, %r13
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	addq	%r14, %r13
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_8x4_lib4, .-inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nn_8x4_lib4, @function
-inner_kernel_dgemm_sub_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	%r14, %r13
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	addq	%r14, %r13
-	vmulpd			%ymm11, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nn_8x4_lib4, .-inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
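The same idea for the 4x8 shape: one 4-row panel of A against eight columns of B per k iteration. A scalar sketch (the helper name and the 4x8 column-major D buffer are assumptions):

static void ref_dgemm_add_nn_4x8_lib4(int k, const double *A,
                                      const double *B, int sdb, double *D)
{
	for (int kk = 0; kk < k; kk++) {
		const double *a = A + 4 * kk;                        // column kk of A's panel
		const double *b = B + 4 * sdb * (kk / 4) + kk % 4;   // row kk of B
		for (int j = 0; j < 8; j++) {
			double bkj = b[4 * j];                           // B(kk, j)
			for (int i = 0; i < 4; i++)
				D[i + 4 * j] += a[i] * bkj;
		}
	}
}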
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x8_lib4, @function
-inner_kernel_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-	prefetcht0	128(%r12, %r13, 2) // software prefetch
-	prefetcht0	192(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-	// unroll 3
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-	// unroll 3
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vmulpd			%ymm14, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x8_lib4, .-inner_kernel_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- B
-// r12   <- C
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- ?
-// r12   <- ?
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
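A scalar sketch of this rank-4 'gebp' update: the 8x4 block of A stays in registers (ymm0-ymm7 above) while columns of C are read, updated and written back; Areg below stands in for that register block and, like the helper name, is an assumption of the sketch.

static void ref_dgebp_add_nn_8x4_lib4(int k, const double *Areg /* 8x4, column-major */,
                                      const double *B, double *C, int sdc)
{
	for (int n = 0; n < k; n++) {
		const double *b  = B + 4 * n;            // column n of B: 4 consecutive doubles
		double       *c0 = C + 4 * n;            // rows 0..3 of column n of C
		double       *c1 = C + 4 * sdc + 4 * n;  // rows 4..7 (next 4-row panel)
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 4; i++) {
				c0[i] += Areg[0 + i + 8 * j] * b[j];
				c1[i] += Areg[4 + i + 8 * j] * b[j];
			}
	}
}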
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_8x4_lib4, @function
-inner_kernel_dgebp_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_8x4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	8(%r11), %ymm13
-	subl	$4, %r10d
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm7, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vbroadcastsd	32(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	40(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	48(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	56(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm7, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vbroadcastsd	64(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	72(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	80(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	88(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm7, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vbroadcastsd	96(%r11), %ymm13
-	addq	$128, %r11
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	-24(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	-16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	-8(%r11), %ymm13
-	addq	$128, %r12
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm7, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vmovapd			%ymm12, -32(%r12)
-	vmovapd			%ymm14, -32(%r12, %r13, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm7, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_8x4_lib4, .-inner_kernel_dgebp_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
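This edge routine only peels the first min(k, 4-offB) iterations so that B is panel-aligned when the main kernel takes over. A scalar sketch of that peeling logic (helper name and D layout are assumptions):

static int dgemm_nn_8x4_peel_offB(int k, int offB, const double *A, int sda,
                                  const double *B, double *D /* 8x4, column-major */)
{
	if (offB == 0 || k == 0)
		return k;                          // nothing to peel
	int kend = 4 - offB;                   // rows left in B's current panel
	if (kend > k)
		kend = k;                          // kend = min(k, 4-offB)
	const double *b = B + offB;            // skip to row offB inside the panel
	for (int kk = 0; kk < kend; kk++, A += 4, b += 1)
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 4; i++) {
				D[0 + i + 8 * j] += A[i]           * b[4 * j];
				D[4 + i + 8 * j] += A[4 * sda + i] * b[4 * j];
			}
	return k - kend;                       // iterations left for the main kernel
}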
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_8x4_lib4, @function
-inner_edge_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %ebx
-	subl			%r15d, %ebx // 4-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,4-offsetB)
-
-	movl			%r15d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r13 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_8x4_lib4, .-inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x8_lib4, @function
-inner_edge_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x8_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x8_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vbroadcastsd	128(%r12), %ymm13 // B
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	160(%r12), %ymm13 // B
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	192(%r12), %ymm13 // B
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	224(%r12), %ymm13 // B
-	vmulpd			%ymm12, %ymm13, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x8_lib4, .-inner_edge_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
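Over these first four k iterations only the upper triangle of the leading 4x4 block of B contributes, with B entering transposed: D(:,j) += A(:,kk) * B(j,kk) for j <= kk. A scalar sketch under the same layout assumptions as above:

static void ref_dtrmm_nt_ru_8x4_edge(const double *A, int sda,
                                     const double *B, double *D /* 8x4, column-major */)
{
	for (int kk = 0; kk < 4; kk++) {
		const double *a0 = A + 4 * kk;            // rows 0..3 of column kk
		const double *a1 = A + 4 * sda + 4 * kk;  // rows 4..7
		for (int j = 0; j <= kk; j++) {           // upper triangle of B only
			double bjk = B[4 * kk + j];           // B(j, kk) in the 4x4 panel
			for (int i = 0; i < 4; i++) {
				D[0 + i + 8 * j] += a0[i] * bjk;
				D[4 + i + 8 * j] += a1[i] * bjk;
			}
		}
	}
}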
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_8x4_lib4, @function
-inner_edge_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm12
-	vmovapd			0(%r10), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r10, %r11, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			32(%r10), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			32(%r10, %r11, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	40(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-
-	vbroadcastsd	64(%r12), %ymm12
-	vmovapd			64(%r10), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			64(%r10, %r11, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	72(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	80(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-
-	vbroadcastsd	96(%r12), %ymm12
-	vmovapd			96(%r10), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			96(%r10, %r11, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	104(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	112(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	120(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-	addq			$128, %r10
-	addq			$128, %r12
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_8x4_lib4, .-inner_edge_dtrmm_nt_ru_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*4*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_8x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	addq			$32, %r11
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	addq			$32, %r13
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	addq			$32, %r11
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	addq			$32, %r13
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	addq			$32, %r11
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	addq			$32, %r13
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm4, %ymm15, %ymm4
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm5, %ymm15, %ymm5
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	addq			$32, %r11
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm6, %ymm15, %ymm6
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	addq			$32, %r13
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_8x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
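In the offB==0 branch below, only the lower triangle of the leading 4x4 panel of B contributes: D(:,j) += A(:,kk) * B(kk,j) for j <= kk; the other branches first peel 1-3 iterations according to offB. A scalar sketch of the offB==0 case (helper name and D layout are assumptions):

static void ref_dtrmm_nn_rl_8x4_edge_offb0(const double *A, int sda,
                                           const double *B, double *D /* 8x4, column-major */)
{
	for (int kk = 0; kk < 4; kk++) {
		const double *a0 = A + 4 * kk;
		const double *a1 = A + 4 * sda + 4 * kk;
		for (int j = 0; j <= kk; j++) {           // lower triangle of B
			double bkj = B[kk + 4 * j];           // B(kk, j) in the panel
			for (int i = 0; i < 4; i++) {
				D[0 + i + 8 * j] += a0[i] * bkj;
				D[4 + i + 8 * j] += a1[i] * bkj;
			}
		}
	}
}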
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_8x4_lib4, @function
-inner_edge_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r15d
-	jg		0f
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r15d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	subl			$3, %r10d // k-3
-	addq			$96, %r11 // A0+3*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$8, %r13 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r15d
-	jg		2f
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	subl			$2, %r10d // k-2
-	addq			$64, %r11 // A0+2*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$16, %r13 // B+bs*sdb*sizeof(double)-2
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	104(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-3
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	72(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	112(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_8x4_lib4, .-inner_edge_dtrmm_nn_rl_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_8x4_gen_lib4, @function
-inner_edge_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_8x4_gen_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	cmpl			$0, %r15d
-	jg				0f // offB>0
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-0:
-	cmpl			$1, %r15d
-	jg				1f // offB>1
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-1:
-	cmpl			$2, %r15d
-	jg				2f // offB>2
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_8x4_gen_lib4, .-inner_edge_dtrmm_nn_rl_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_8x4_lib4, @function
-inner_blend_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_8x4_lib4:
-#endif
-#endif
-	
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_8x4_lib4, .-inner_blend_8x4_lib4
-#endif
-#endif
-
-
-
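The NT inner loop accumulates each 4x4 quarter of the result in a "checkerboard" order: decoding the vblendpd masks above, accumulator r holds in lane i the element d(i, i^r) of its quarter, and the two blend passes regroup the lanes into plain columns (ymm0-3 for rows 0-3, ymm4-7 for rows 4-7). A small illustrative C model of that regrouping, under the i^r assumption just stated (array names are mine, not part of the library):

static void blend_4x4_ref(const double acc[4][4], double col[4][4])
{
	// acc[r][i] holds D[i][i ^ r]; gather each logical column j = i ^ r
	for (int r = 0; r < 4; r++)
		for (int i = 0; i < 4; i++)
			col[i ^ r][i] = acc[r][i];   // col[j][i] == D[i][j]
}

The 8x4 routine simply applies the same regrouping twice, once per 4-row half of the accumulator.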
-
-
-// common inner routine with file scope
-//
-// scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x4_lib4, @function
-inner_scale_11_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_11_8x4_lib4:
-#endif
-#endif
-	
-
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x4_lib4, .-inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-
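With alpha = beta = 1 no multiplication is needed: the routine just adds the corresponding 8x4 block of C to the accumulator, reading the top four rows at r10 and the bottom four from the panel 4*sdc doubles further on. A scalar sketch under the usual lib4 layout (4-row panels, column stride of 4 doubles inside a panel); the names are illustrative:

static void scale_11_8x4_ref(const double *C_top, const double *C_bot, double D[8][4])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			D[i][j]     += C_top[i + 4 * j];   // panel at C
			D[i + 4][j] += C_bot[i + 4 * j];   // panel 4*sdc doubles later
		}
}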
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_8x4_lib4, @function
-inner_scale_a0_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_8x4_lib4, .-inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-
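For beta == 0 the result block never has to be read: the accumulator is just multiplied through by alpha, which is what the eight vmulpd above do. The equivalent scalar loop (illustrative name):

static void scale_a0_8x4_ref(double alpha, double D[8][4])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			D[i][j] *= alpha;
}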
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_lib4, @function
-inner_scale_ab_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	// alg==1
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_lib4, .-inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
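The general case computes D = alpha*D + beta*C for the 8x4 block, but first compares beta against 0.0 so that a pure product never touches the C panels. A scalar sketch with the same early-out, again assuming the lib4 panel layout and illustrative names; the blend_scale_ab routines further down just run the blend shown earlier and then this same update:

static void scale_ab_8x4_ref(double alpha, double beta,
                             const double *C_top, const double *C_bot,
                             double D[8][4])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			D[i][j] *= alpha;
	if (beta == 0.0)                     // as in the kernel: C is not read at all
		return;
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			D[i][j]     += beta * C_top[i + 4 * j];
			D[i + 4][j] += beta * C_bot[i + 4 * j];
		}
}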
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_gen_lib4, @function
-inner_scale_ab_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_gen_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovapd		32(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-	vmovapd		64(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm2, %ymm14, %ymm2
-	vmovapd		96(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm3, %ymm14, %ymm3
-
-	vmovapd		0(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm4, %ymm14, %ymm4
-	vmovapd		32(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm5, %ymm14, %ymm5
-	vmovapd		64(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm6, %ymm14, %ymm6
-	vmovapd		96(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm7, %ymm14, %ymm7
-
-	jmp		3f
-
-0:
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_gen_lib4, .-inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-
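The _gen variant differs only in how C is addressed: the 8x4 block may start offset rows into its first 4-row panel, so each 8-row column straddles up to three consecutive panels, and the vblendpd/vperm2f128/vshufpd sequences above rotate the loaded quadwords into place. A scalar model of that addressing (helper and arguments are illustrative; sdc is the panel stride, so consecutive panels are 4*sdc doubles apart, matching the 4*sdc*sizeof(double) held in r14):

static double load_c_gen(const double *C, int sdc, int offset, int i, int j)
{
	int row   = offset + i;        // row i of the block, counted down the panel stack
	int panel = row / 4;           // which 4-row panel it falls in
	int r     = row % 4;           // row inside that panel
	return C[panel * 4 * sdc + 4 * j + r];
}

The scaling itself is then the same D = alpha*D + beta*C update as in the non-gen routine, still skipped entirely when beta == 0.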
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_lib4, @function
-inner_blend_scale_ab_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	// alg==1
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r12, %r13, 1), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_lib4, .-inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r10   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x8_lib4, @function
-inner_scale_ab_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x8_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x8_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm0, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm1, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm2, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm3, %ymm3
-	vmovapd		128(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm4, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm5, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm6, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm7, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x8_lib4, .-inner_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_lib4, @function
-inner_tran_scale_ab_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm0, %ymm1, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm2, %ymm3, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm12, %ymm14, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm13, %ymm15, %ymm3
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm4, %ymm5, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm6, %ymm7, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm12, %ymm14, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm13, %ymm15, %ymm7
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm0, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm1, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm2, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm3, %ymm3
-	vmovapd		128(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm4, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm5, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm6, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vmulpd		%ymm14, %ymm15, %ymm15
-	vaddpd		%ymm15, %ymm7, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_lib4, .-inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-
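This variant stores the 8x4 product transposed: ignoring the register-lane bookkeeping that the vunpcklpd/vunpckhpd/vperm2f128 sequence takes care of, the value written back is the 4x8 block D = alpha * acc^T + beta * C, with C held as eight consecutive 4-double columns starting at r12. A hedged scalar sketch of that contract (array names are mine):

static void tran_scale_ab_4x8_ref(double alpha, const double acc[8][4],
                                  double beta, const double *C, double D[4][8])
{
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 8; j++) {
			double d = alpha * acc[j][i];      // transposed accumulator element
			if (beta != 0.0)
				d += beta * C[i + 4 * j];      // column j of the 4x8 C block
			D[i][j] = d;
		}
}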
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_gen_lib4, @function
-inner_blend_scale_ab_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_gen_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovapd		32(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-	vmovapd		64(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm2, %ymm14, %ymm2
-	vmovapd		96(%r13), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm3, %ymm14, %ymm3
-
-	vmovapd		0(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm4, %ymm14, %ymm4
-	vmovapd		32(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm5, %ymm14, %ymm5
-	vmovapd		64(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm6, %ymm14, %ymm6
-	vmovapd		96(%r13, %r14, 1), %ymm14
-	vmulpd		%ymm14, %ymm15, %ymm14
-	vaddpd		%ymm7, %ymm14, %ymm7
-
-	jmp		3f
-
-0:
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r13, %r14, 1), %ymm13
-	vmovapd		0(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm0, %ymm12, %ymm0
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm4, %ymm13, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%r13, %r14, 1), %ymm13
-	vmovapd		32(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm1, %ymm12, %ymm1
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm5, %ymm13, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r13, %r14, 1), %ymm13
-	vmovapd		64(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm2, %ymm12, %ymm2
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm6, %ymm13, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%r13, %r14, 1), %ymm13
-	vmovapd		96(%r13, %r14, 2), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vmulpd		%ymm12, %ymm15, %ymm12
-	vaddpd		%ymm3, %ymm12, %ymm3
-	vmulpd		%ymm13, %ymm15, %ymm13
-	vaddpd		%ymm7, %ymm13, %ymm7
-
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_gen_lib4, .-inner_blend_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_lib4, @function
-inner_blend_scale_11_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_lib4:
-#endif
-#endif
-	
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	// alg==1
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r10, %r11, 1), %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_lib4, .-inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_8x4_lib4, @function
-inner_edge_dpotrf_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_8x4_lib4:
-#endif
-#endif
-	
-	vxorpd	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd		%xmm0, %xmm0, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-2:
-	vmovsd		%xmm13, 0(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm0
-	vmulpd		%ymm4, %ymm13, %ymm4
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm1, %ymm1
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm5, %ymm5
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm6, %ymm6
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-
-	vpermilpd	$0x3, %xmm1, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-4:
-	vmovsd		%xmm13, 8(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm1
-	vmulpd		%ymm5, %ymm13, %ymm5
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vmulpd		%ymm5, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm6, %ymm6
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm5, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-6:
-	vmovsd		%xmm13, 16(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm2
-	vmulpd		%ymm6, %ymm13, %ymm6
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm6, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilpd	$0x3, %xmm13, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-8:
-	vmovsd		%xmm13, 24(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm3, %ymm13, %ymm3
-	vmulpd		%ymm7, %ymm13, %ymm7
-
-	jmp				0f
-
-1:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_8x4_lib4, .-inner_edge_dpotrf_8x4_lib4
-#endif
-#endif
-
-
-
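The factorization edge works column by column on the 8x4 block already sitting in the accumulator: take the pivot d_jj, replace it by 1/sqrt(d_jj) (or by 0 if the pivot is not positive, which zeroes the column), store that reciprocal in inv_diag_E, scale the whole 8-row column, and subtract the resulting rank-1 contribution from the columns to its right. The _vs variant below is identical except that it stops after kn columns. A scalar sketch of the same recurrence (illustrative names, plain 2-D array instead of the register file):

#include <math.h>

static void potrf_8x4_edge_ref(double D[8][4], double inv_diag_E[4])
{
	for (int j = 0; j < 4; j++) {
		double pivot = D[j][j];
		double inv   = (pivot > 0.0) ? 1.0 / sqrt(pivot) : 0.0;  // 0 zeroes the column
		inv_diag_E[j] = inv;
		for (int i = 0; i < 8; i++)          // scale column j (rows j..7 are the meaningful ones)
			D[i][j] *= inv;
		for (int k = j + 1; k < 4; k++)      // rank-1 update of the remaining columns
			for (int i = 0; i < 8; i++)
				D[i][k] -= D[i][j] * D[k][j];
	}
}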
-
-
-// common inner routine with file scope
-//
-// cholesky factorization vs
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_8x4_vs_lib4, @function
-inner_edge_dpotrf_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_8x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd		%xmm0, %xmm0, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-2:
-	vmovsd		%xmm13, 0(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm0
-	vmulpd		%ymm4, %ymm13, %ymm4
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm1, %ymm1
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm5, %ymm5
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm6, %ymm6
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm0, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm4, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-	cmpl		$2, %r11d
-	jl			0f // ret
-
-	vpermilpd	$0x3, %xmm1, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-4:
-	vmovsd		%xmm13, 8(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm1
-	vmulpd		%ymm5, %ymm13, %ymm5
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm11
-	vpermilpd	$0x0, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm2, %ymm2
-	vmulpd		%ymm5, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm6, %ymm6
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm1, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm5, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-	cmpl		$3, %r11d
-	jl			0f // ret
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-6:
-	vmovsd		%xmm13, 16(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm2
-	vmulpd		%ymm6, %ymm13, %ymm6
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm11
-	vpermilpd	$0xf, %ymm11, %ymm13
-	vmulpd		%ymm2, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm3, %ymm3
-	vmulpd		%ymm6, %ymm13, %ymm12
-	vsubpd		%ymm12, %ymm7, %ymm7
-
-	cmpl		$4, %r11d
-	jl			0f // ret
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilpd	$0x3, %xmm13, %xmm13
-	vucomisd	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtsd		%xmm13, %xmm13, %xmm13
-	vdivsd		%xmm13, %xmm14, %xmm13
-8:
-	vmovsd		%xmm13, 24(%r10)
-	vmovddup	%xmm13, %xmm13
-	vperm2f128	$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd		%ymm3, %ymm13, %ymm3
-	vmulpd		%ymm7, %ymm13, %ymm7
-
-	jmp				0f
-
-1:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_8x4_vs_lib4, .-inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
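
Editor's note (not part of the deleted file): inner_edge_dpotrf_8x4_lib4 and its _vs_ variant above implement the factorization edge of an 8x4 panel: each of the four diagonal entries is square-rooted, its reciprocal is stored in inv_diag_D/E, the column is scaled, and the remaining columns receive a rank-1 update; a non-positive pivot takes the vxorpd path and zeroes the column, and the _vs_ variant stops after kn columns. A minimal scalar sketch of that computation, using plain 2-D arrays instead of BLASFEO's 4-row panel layout (names are illustrative only):

#include <math.h>

/* Scalar reference for the dpotrf 8x4 edge: factorize the top 4x4 block and
 * scale the 4 rows below it; reciprocal pivots are written to inv_diag_D. */
static void ref_dpotrf_8x4(double D[8][4], double inv_diag_D[4])
{
	for (int j = 0; j < 4; j++) {
		double djj = D[j][j];
		double inv = djj > 0.0 ? 1.0 / sqrt(djj) : 0.0; /* non-positive pivot: zero the column */
		inv_diag_D[j] = inv;
		for (int i = j; i < 8; i++)
			D[i][j] *= inv;                             /* scale column j */
		for (int k = j + 1; k < 4; k++)                 /* right-looking rank-1 update */
			for (int i = k; i < 8; i++)
				D[i][k] -= D[k][j] * D[i][j];
	}
}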
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm5, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x4_lib4, .-inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
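
Editor's note (not part of the deleted file): inner_edge_dtrsm_rlt_inv_8x4_lib4 above performs the right/lower/transposed triangular solve X * E^T = D on an 8x4 panel, using the precomputed reciprocals in inv_diag_E instead of divisions. A scalar sketch of the same recurrence on plain arrays (illustrative names; the _vs_ variant below stops after kn columns, and the _rlt_one_ routines further down skip the diagonal scaling because E has a unit diagonal):

/* Scalar reference for dtrsm_rlt_inv on an 8x4 panel:
 * solve X * E^T = D in place, E 4x4 lower triangular, 1/E[j][j] given. */
static void ref_dtrsm_rlt_inv_8x4(double D[8][4], const double E[4][4],
                                  const double inv_diag_E[4])
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 8; i++)
			D[i][j] *= inv_diag_E[j];         /* divide column j by E[j][j] */
		for (int k = j + 1; k < 4; k++)       /* eliminate column j from later columns */
			for (int i = 0; i < 8; i++)
				D[i][k] -= E[k][j] * D[i][j];
	}
}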
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm5, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	cmpl			$2, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	cmpl			$3, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-	cmpl			$4, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_8x4_lib4, @function
-inner_edge_dtrsm_rlt_one_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	8(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm5, %ymm5
-
-	vbroadcastsd	16(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_8x4_lib4, .-inner_edge_dtrsm_rlt_one_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_8x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$2, %r11d
-
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	cmpl			$3, %r11d
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm5, %ymm5
-
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	cmpl			$4, %r11d
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	48(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm6, %ymm6
-
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-	vbroadcastsd	56(%r10), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-	vbroadcastsd	88(%r10), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm13, %ymm12
-	vsubpd			%ymm12, %ymm7, %ymm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_8x4_vs_lib4, .-inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_8x4_lib4, @function
-inner_edge_dtrsm_rut_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_8x4_lib4, .-inner_edge_dtrsm_rut_inv_8x4_lib4
-#endif
-#endif
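
Editor's note (not part of the deleted file): the right/upper/transposed solve above (X * E^T = D with E upper triangular) runs the same recurrence backwards, starting from the last column, which is why the code begins with inv_diag_E[3] and the offsets 112/104/96 (column 3 of E). A scalar sketch under the same assumptions as the earlier ones:

/* Scalar reference for dtrsm_rut_inv on an 8x4 panel (backward over columns). */
static void ref_dtrsm_rut_inv_8x4(double D[8][4], const double E[4][4],
                                  const double inv_diag_E[4])
{
	for (int j = 3; j >= 0; j--) {
		for (int i = 0; i < 8; i++)
			D[i][j] *= inv_diag_E[j];         /* divide column j by E[j][j] */
		for (int k = 0; k < j; k++)           /* eliminate column j from earlier columns */
			for (int i = 0; i < 8; i++)
				D[i][k] -= E[k][j] * D[i][j];
	}
}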
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$3, %r12d
-	jle				0f
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm7, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-0:
-	cmpl			$2, %r12d
-	jle				1f
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-1:
-	cmpl			$1, %r12d
-	jle				2f
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_8x4_vs_lib4, .-inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_run_inv_8x4_lib4, @function
-inner_edge_dtrsm_run_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_run_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_run_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_run_inv_8x4_lib4:
-#endif
-#endif
-
-	// first column
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-	// second column
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-
-	// third column
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-
-	// fourth column
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_run_inv_8x4_lib4, .-inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
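
Editor's note (not part of the deleted file): inner_edge_dtrsm_run_inv_8x4_lib4 solves X * E = D with E upper triangular and no transpose, so each column is first corrected with the already-solved columns to its left and then scaled by the reciprocal diagonal entry, in the order the "first column" ... "fourth column" comments above follow. A scalar sketch:

/* Scalar reference for dtrsm_run_inv on an 8x4 panel: solve X * E = D in place. */
static void ref_dtrsm_run_inv_8x4(double D[8][4], const double E[4][4],
                                  const double inv_diag_E[4])
{
	for (int j = 0; j < 4; j++) {
		for (int k = 0; k < j; k++)           /* subtract contributions of solved columns */
			for (int i = 0; i < 8; i++)
				D[i][j] -= E[k][j] * D[i][k];
		for (int i = 0; i < 8; i++)
			D[i][j] *= inv_diag_E[j];         /* divide by E[j][j] */
	}
}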
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = lower
-// tran = normal
-// unit diagonal
-//
-// input arguments:
-// r10  <- E0
-// r11  <- 4*sde*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E0
-// r11  <- 4*sde*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lln_one_8x4_lib4, @function
-inner_edge_dtrsm_lln_one_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lln_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lln_one_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lln_one_8x4_lib4:
-#endif
-#endif
-
-	// solve top-left
-	vxorpd		%ymm14, %ymm14, %ymm14
-
-	vmovapd		0(%r10), %ymm12
-	vxorpd		%ymm14, %ymm14, %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		0(%r10, %r11, 1), %ymm14
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x00, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	vmovapd		32(%r10), %ymm12
-	vxorpd		%ymm14, %ymm14, %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r10, %r11, 1), %ymm14
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x00, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	vmovapd		64(%r10), %ymm12
-	vxorpd		%ymm14, %ymm14, %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		64(%r10, %r11, 1), %ymm14
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm0, %ymm0
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm1, %ymm1
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm2, %ymm2
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x11, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm3, %ymm3
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	vmovapd		96(%r10, %r11, 1), %ymm14
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x11, %ymm3, %ymm3, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm14, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	addq		$128, %r10
-
-
-	// solve bottom-right
-	vxorpd		%ymm14, %ymm14, %ymm14
-
-	vmovapd		0(%r10, %r11, 1), %ymm12
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x00, %ymm4, %ymm4, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x00, %ymm5, %ymm5, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x00, %ymm6, %ymm6, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x00, %ymm7, %ymm7, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	vmovapd		32(%r10, %r11, 1), %ymm12
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x00, %ymm4, %ymm4, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x00, %ymm5, %ymm5, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x00, %ymm6, %ymm6, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x00, %ymm7, %ymm7, %ymm13
-	vpermilpd	$0xf, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-	vmovapd		64(%r10, %r11, 1), %ymm12
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x11, %ymm4, %ymm4, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm4, %ymm4
-	vperm2f128	$0x11, %ymm5, %ymm5, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm5, %ymm5
-	vperm2f128	$0x11, %ymm6, %ymm6, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm6, %ymm6
-	vperm2f128	$0x11, %ymm7, %ymm7, %ymm13
-	vpermilpd	$0x0, %ymm13, %ymm13
-	vmulpd		%ymm12, %ymm13, %ymm15
-	vsubpd		%ymm15, %ymm7, %ymm7
-
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lln_one_8x4_lib4, .-inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
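
Editor's note (not part of the deleted file): the left/lower/normal unit-diagonal solve above is a forward substitution E * X = D, where E is the 8x8 unit-lower-triangular block spread over two 4-row panels (r11 carries the panel stride): the first half eliminates rows 0-3 from everything below them, the second half finishes rows 4-7. A scalar sketch on plain arrays:

/* Scalar reference for dtrsm_lln_one on an 8x4 panel:
 * solve E * X = D in place, E 8x8 lower triangular with unit diagonal. */
static void ref_dtrsm_lln_one_8x4(double D[8][4], const double E[8][8])
{
	for (int k = 0; k < 8; k++)               /* row k of X is already final */
		for (int i = k + 1; i < 8; i++)       /* eliminate it from the rows below */
			for (int j = 0; j < 4; j++)
				D[i][j] -= E[i][k] * D[k][j];
}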
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_8x4_lib4, @function
-inner_edge_dtrsm_lun_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_8x4_lib4:
-#endif
-#endif
-	
-	// bottom-right
-
-	vmovapd			224(%r10, %r11, 1), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm11
-
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 (likely redundant: the 128-bit vmovapd below already zeroes the upper lanes)
-	vmovapd			192(%r10, %r11, 1), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm11
-
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r10, %r11, 1), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm11
-
-	vperm2f128		$0x00, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x00, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x00, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x00, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm11
-
-	vperm2f128		$0x00, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x00, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x00, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x00, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_8x4_lib4, .-inner_edge_dtrsm_lun_inv_8x4_lib4
-#endif
-#endif
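
Editor's note (not part of the deleted file): inner_edge_dtrsm_lun_inv_8x4_lib4 is the matching backward substitution E * X = D, with E an 8x8 upper triangular block and reciprocal diagonal entries in inv_diag_E; it starts at row 7 (the "bottom-right" section above) and works up to row 0 ("top-left"). A scalar sketch on plain arrays:

/* Scalar reference for dtrsm_lun_inv on an 8x4 panel:
 * solve E * X = D in place, E 8x8 upper triangular, 1/E[k][k] given. */
static void ref_dtrsm_lun_inv_8x4(double D[8][4], const double E[8][8],
                                  const double inv_diag_E[8])
{
	for (int k = 7; k >= 0; k--)
		for (int j = 0; j < 4; j++) {
			D[k][j] *= inv_diag_E[k];          /* divide row k by E[k][k] */
			for (int i = 0; i < k; i++)
				D[i][j] -= E[i][k] * D[k][j];  /* eliminate it from the rows above */
		}
}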
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	// bottom-right
-
-	cmpl	$7, %r13d
-	jle		0f
-
-	vmovapd			224(%r10, %r11, 1), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm11
-
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-0:
-	cmpl	$6, %r13d
-	jle		1f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 (likely redundant: the 128-bit vmovapd below already zeroes the upper lanes)
-	vmovapd			192(%r10, %r11, 1), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm11
-
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-1:
-	cmpl	$5, %r13d
-	jle		2f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r10, %r11, 1), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm11
-
-	vperm2f128		$0x00, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x00, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x00, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x00, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-2:
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm11
-
-	vperm2f128		$0x00, %ymm4, %ymm4, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x00, %ymm5, %ymm5, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x00, %ymm6, %ymm6, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x00, %ymm7, %ymm7, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vmulpd			%ymm11, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_8x4_vs_lib4, .-inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-// left kernel
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_l_8x4_lib4, @function
-inner_edge_dgetrf_l_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_l_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_l_8x4_lib4:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm0, %ymm12, %ymm12
-	vmovapd			%ymm0, %ymm12
-	vdivsd			%xmm0, %xmm14, %xmm13
-//	vpermpd			$0x00, %ymm13, %ymm13
-	vmovddup		%xmm13, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r10)
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vblendpd		$0x1, %ymm12, %ymm0, %ymm0
-
-	// second column
-//	vpermpd			$0x00, %ymm1, %ymm13
-	vmovddup		%xmm1, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm5, %ymm5
-	vblendpd		$0x2, %ymm1, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-//	vpermpd			$0x00, %ymm13, %ymm13
-	vmovddup		%xmm13, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r10)
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vblendpd		$0x3, %ymm12, %ymm1, %ymm1
-
-	// third column
-//	vpermpd			$0x00, %ymm2, %ymm13
-	vmovddup		%xmm2, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vblendpd		$0x2, %ymm2, %ymm13, %ymm12
-
-//	vpermpd			$0x55, %ymm2, %ymm13
-	vperm2f128		$0x00, %ymm2, %ymm2, %ymm13
-	vpermilpd		$0xf, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm6, %ymm6
-	vblendpd		$0x4, %ymm2, %ymm12, %ymm12
-
-//	vpermpd			$0xaa, %ymm2, %ymm13
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm13
-	vpermilpd		$0x0, %ymm13, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-//	vpermpd			$0x00, %ymm13, %ymm13
-	vmovddup		%xmm13, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r10)
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vblendpd		$0x7, %ymm12, %ymm2, %ymm2
-
-	// fourth column
-//	vpermpd			$0x00, %ymm3, %ymm13
-	vmovddup		%xmm3, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vblendpd		$0x2, %ymm3, %ymm13, %ymm12
-
-//	vpermpd			$0x55, %ymm3, %ymm13
-	vperm2f128		$0x00, %ymm3, %ymm3, %ymm13
-	vpermilpd		$0xf, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vblendpd		$0x4, %ymm3, %ymm12, %ymm12
-
-//	vpermpd			$0xaa, %ymm3, %ymm13
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm11
-	vpermilpd		$0x0, %ymm11, %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vsubpd			%ymm15, %ymm7, %ymm7
-	vblendpd		$0x8, %ymm3, %ymm12, %ymm12
-	
-//	vpermpd			$0xff, %ymm3, %ymm13
-//	vperm2f128		$0x11, %ymm3, %ymm3, %ymm11
-	vpermilpd		$0xf, %ymm11, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-//	vpermpd			$0x00, %ymm13, %ymm13
-	vmovddup		%xmm13, %xmm13
-	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r10)
-//	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vblendpd		$0x7, %ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_l_8x4_lib4, .-inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
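
Editor's note (not part of the deleted file): the LU edge kernel above factorizes the 8x4 panel without pivoting: for each of the four columns it stores the reciprocal pivot in inv_diag_E, scales the sub-diagonal entries (the L part), and applies the Schur update to the columns to the right; the vblendpd instructions keep the already-final U rows untouched. A scalar sketch of that sequence, again on plain arrays with illustrative names:

/* Scalar reference for dgetrf_l on an 8x4 panel: unpivoted LU, L strictly below
 * the diagonal, U on and above it, reciprocal pivots written to inv_diag_E. */
static void ref_dgetrf_l_8x4(double D[8][4], double inv_diag_E[4])
{
	for (int j = 0; j < 4; j++) {
		double inv = 1.0 / D[j][j];            /* no pivoting: D[j][j] assumed nonzero */
		inv_diag_E[j] = inv;
		for (int i = j + 1; i < 8; i++)
			D[i][j] *= inv;                    /* column of L */
		for (int k = j + 1; k < 4; k++)        /* Schur complement update */
			for (int i = j + 1; i < 8; i++)
				D[i][k] -= D[i][j] * D[j][k];
	}
}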
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_lib4, @function
-inner_store_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmovapd %ymm4,  0(%r10, %r11, 1)
-	vmovapd %ymm5, 32(%r10, %r11, 1)
-	vmovapd %ymm6, 64(%r10, %r11, 1)
-	vmovapd %ymm7, 96(%r10, %r11, 1)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_lib4, .-inner_store_8x4_lib4
-#endif
-#endif
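
Editor's note (not part of the deleted file): the plain store above writes the 8x4 result as two 4-row panels: columns ymm0-3 go to the first panel at D and ymm4-7 to the second panel 4*sdd doubles later (r11 carries that offset in bytes). Assuming BLASFEO's usual lib4 panel-major layout, element (i, j) lives (i/4)*4*sdd + 4*j + i%4 doubles from D; a small scalar sketch of the equivalent store:

/* Scalar equivalent of inner_store_8x4_lib4, assuming the 4-row panel-major
 * layout where consecutive row panels are 4*sdd doubles apart. */
static void ref_store_8x4(double *D, int sdd, const double C[8][4])
{
	for (int i = 0; i < 8; i++)
		for (int j = 0; j < 4; j++)
			D[(i / 4) * 4 * sdd + 4 * j + i % 4] = C[i][j];
}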
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_lib4, @function
-inner_store_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_lib4; .scl 2; .type 32; .endef
-inner_store_4x8_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,   0(%r10)
-	vmovapd %ymm1,  32(%r10)
-	vmovapd %ymm2,  64(%r10)
-	vmovapd %ymm3,  96(%r10)
-
-	vmovapd %ymm4, 128(%r10)
-	vmovapd %ymm5, 160(%r10)
-	vmovapd %ymm6, 192(%r10)
-	vmovapd %ymm7, 224(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_lib4, .-inner_store_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_vs_lib4, @function
-inner_store_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	cmpl		$2, %r13d
-	vmovapd		%ymm0, 0(%r10)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r10, %r11, 1)
-	jl			0f // end
-	cmpl		$3, %r13d
-	vmovapd		%ymm1, 32(%r10)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	jl			0f // end
-	vmovapd		%ymm2, 64(%r10)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovapd		%ymm3, 96(%r10)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r10, %r11, 1)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_vs_lib4, .-inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-
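Similarly, a hedged C sketch of the variable-size store inner_store_8x4_vs_lib4 above: kn limits how many of the four columns are written and km how many of the eight rows, with only the second panel row-masked. The mask constant .LC03 is defined elsewhere in this file, so the row condition below is inferred from how the callers pass km; kn >= 1 and km >= 5 are assumed for this 8-row kernel shape.

// Model of inner_store_8x4_vs_lib4: partial 8x4 store controlled by km/kn.
static void store_8x4_vs_lib4_model(const double d[8][4], double *D, int sdd,
                                    int km, int kn)
{
    for (int j = 0; j < 4 && j < kn; j++) {
        for (int i = 0; i < 4; i++)
            D[i + 4 * j] = d[i][j];                   // unmasked vmovapd
        for (int i = 4; i < 8 && i < km; i++)
            D[4 * sdd + (i - 4) + 4 * j] = d[i][j];   // vmaskmovpd, row mask from km
    }
}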
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d  <- km
-// r12d  <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d04 d14 d24 d34]
-// ymm5  <- [d05 d15 d25 d35]
-// ymm6  <- [d06 d16 d26 d36]
-// ymm7  <- [d07 d17 d27 d37]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d  <- km
-// r12d  <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d04 d14 d24 d34]
-// ymm5  <- [d05 d15 d25 d35]
-// ymm6  <- [d06 d16 d26 d36]
-// ymm7  <- [d07 d17 d27 d37]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_vs_lib4, @function
-inner_store_4x8_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x8_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,   0(%r10)
-	vmaskmovpd	%ymm1, %ymm15,  32(%r10)
-	vmaskmovpd	%ymm2, %ymm15,  64(%r10)
-	vmaskmovpd	%ymm3, %ymm15,  96(%r10)
-
-	vmaskmovpd	%ymm4, %ymm15, 128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmaskmovpd	%ymm7, %ymm15, 224(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_vs_lib4, .-inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_lib4, @function
-inner_store_l_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_lib4:
-#endif
-#endif
-	
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-	vmovapd		%ymm4, 0(%r10, %r11, 1)
-	vmovapd		%ymm5, 32(%r10, %r11, 1)
-	vmovapd		%ymm6, 64(%r10, %r11, 1)
-	vmovapd		%ymm7, 96(%r10, %r11, 1)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_lib4, .-inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-
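A C sketch of the lower store inner_store_l_8x4_lib4 above: the vblendpd sequence preserves the strictly upper entries of the top 4x4 block, so only elements with i >= j are written there, while rows 4..7 are stored in full. The helper name is illustrative.

// Model of inner_store_l_8x4_lib4: lower-triangular 8x4 store.
static void store_l_8x4_lib4_model(const double d[8][4], double *D, int sdd)
{
    for (int j = 0; j < 4; j++) {
        for (int i = j; i < 4; i++)             // top panel: lower triangle only
            D[i + 4 * j] = d[i][j];
        for (int i = 4; i < 8; i++)             // second panel: full columns
            D[4 * sdd + (i - 4) + 4 * j] = d[i][j];
    }
}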
-
-
-// common inner routine with file scope
-//
-// store lower n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_vs_lib4, @function
-inner_store_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	cmpl		$2, %r13d
-	vmovapd		%ymm0, 0(%r10)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r10, %r11, 1)
-	jl			0f // end
-	cmpl		$3, %r13d
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	jl			0f // end
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r10, %r11, 1)
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_vs_lib4, .-inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_gen_lib4, @function
-inner_store_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-	vmovupd		.LC03(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-	vmovupd		LC03(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm3, %ymm2
-	vmovapd		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	cmpl		$2, %r15d
-	vmaskmovpd	%ymm0, %ymm14,  0(%r11)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	jl			4f // end
-	cmpl		$3, %r15d
-	vmaskmovpd	%ymm1, %ymm14, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm14, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm14, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r11, %r12, 1)
-
-	jmp		4f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm4, %ymm12, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm5, %ymm12, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm6, %ymm12, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm7, %ymm12, %ymm7
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm14
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x3, %ymm14, %ymm15, %ymm14
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x21, %ymm0, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm4, %ymm0
-	vperm2f128	$0x21, %ymm4, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x21, %ymm1, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm5, %ymm1
-	vperm2f128	$0x21, %ymm5, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x21, %ymm2, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm6, %ymm2
-	vperm2f128	$0x21, %ymm6, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x21, %ymm3, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm7, %ymm3
-	vperm2f128	$0x21, %ymm7, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x7, %ymm14, %ymm15, %ymm14
-
-3:
-
-	cmpl		$2, %r15d
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	jl			4f // end
-	cmpl		$3, %r15d
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_gen_lib4, .-inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-
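A hedged C sketch of the offset==0 path of inner_store_8x4_gen_lib4 above: columns are windowed to [n0, min(n1,4)) and rows are masked per panel (rows >= m0 in the first panel, rows < m1 in the second). The mask constants .LC02/.LC03 are defined elsewhere, so the row conditions are inferred; the offset != 0 branches, which rotate rows across up to three panels, are not modeled here.

// Model of the offset==0 path of inner_store_8x4_gen_lib4.
static void store_8x4_gen_lib4_model(const double d[8][4], double *D, int sdd,
                                     int m0, int m1, int n0, int n1)
{
    int n_end = n1 < 4 ? n1 : 4;      // at most 4 columns in this kernel
    for (int j = n0; j < n_end; j++) {
        for (int i = 0; i < 4; i++)   // first panel: rows >= m0
            if (i >= m0)
                D[i + 4 * j] = d[i][j];
        for (int i = 4; i < 8; i++)   // second panel: rows < m1
            if (i < m1)
                D[4 * sdd + (i - 4) + 4 * j] = d[i][j];
    }
}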
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_gen_lib4, @function
-inner_store_l_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-	vmovupd		.LC03(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-	vmovupd		LC03(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm3, %ymm2
-	vmovapd		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm13
-#endif
-
-	vmaskmovpd	%ymm0, %ymm14,  0(%r11)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm1, %ymm14, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm2, %ymm14, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x4, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm14, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm4, %ymm12, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm5, %ymm12, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm6, %ymm12, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm7, %ymm12, %ymm7
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x3, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x21, %ymm0, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm4, %ymm0
-	vperm2f128	$0x21, %ymm4, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x21, %ymm1, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm5, %ymm1
-	vperm2f128	$0x21, %ymm5, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x21, %ymm2, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm6, %ymm2
-	vperm2f128	$0x21, %ymm6, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x21, %ymm3, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm7, %ymm3
-	vperm2f128	$0x21, %ymm7, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x7, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x2, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_gen_lib4, .-inner_store_l_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                               1      2              3          4        5          6             7          8        9          10
-// void kernel_dgemm_nt_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_lib4
-	.type kernel_dgemm_nt_8x4_lib4, @function
-kernel_dgemm_nt_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_lib4
-_kernel_dgemm_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_lib4
-	.def kernel_dgemm_nt_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_lib4, .-kernel_dgemm_nt_8x4_lib4
-#endif
-
-
-
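As a semantic reference for the kernel above, inferred from its signature comment and the "nt" naming rather than taken from the library's own test code: it computes D = alpha * A * B^T + beta * C for an 8x4 block, with A stored as two 4-row panels of panel stride 4*sda doubles, B as a single 4 x k panel, and C/D laid out as in the store model earlier. Scalars are passed by value here for clarity, unlike the pointer arguments of the real kernel.

// Reference semantics of kernel_dgemm_nt_8x4_lib4 (illustrative helper name).
static void dgemm_nt_8x4_ref(int k, double alpha, const double *A, int sda,
                             const double *B, double beta,
                             const double *C, int sdc, double *D, int sdd)
{
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 8; i++) {
            double acc = 0.0;
            for (int l = 0; l < k; l++)
                acc += A[4 * sda * (i / 4) + (i % 4) + 4 * l] * B[j + 4 * l];
            double c = C[4 * sdc * (i / 4) + (i % 4) + 4 * j];
            D[4 * sdd * (i / 4) + (i % 4) + 4 * j] = alpha * acc + beta * c;
        }
    }
}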
-
-
-//                               1      2              3          4          5        6             7          8
-// void kernel_dgemm_nt_4x8_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x8_lib4
-	.type kernel_dgemm_nt_4x8_lib4, @function
-kernel_dgemm_nt_4x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x8_lib4
-_kernel_dgemm_nt_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x8_lib4
-	.def kernel_dgemm_nt_4x8_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x8_lib4, .-kernel_dgemm_nt_4x8_lib4
-#endif
-
-
-
-
-
-//                                  rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dgemm_nt_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_vs_lib4
-	.type kernel_dgemm_nt_8x4_vs_lib4, @function
-kernel_dgemm_nt_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_vs_lib4
-_kernel_dgemm_nt_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_vs_lib4
-	.def kernel_dgemm_nt_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_vs_lib4, .-kernel_dgemm_nt_8x4_vs_lib4
-#endif
-
-
-
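A hypothetical driver loop (not the library's actual driver) showing how the plain and _vs kernels above are typically combined: full 8x4 tiles use the fixed-size kernel, and the right/bottom border tiles pass the remaining row/column counts as km/kn. The prototypes are copied from the signature comments in this file; pA/pB/pC/pD are lib4 panel-major buffers with panel strides sda/sdb/sdc/sdd maintained by the caller.

void kernel_dgemm_nt_8x4_lib4(int k, double *alpha, double *A, int sda,
        double *B, double *beta, double *C, int sdc, double *D, int sdd);
void kernel_dgemm_nt_8x4_vs_lib4(int k, double *alpha, double *A, int sda,
        double *B, double *beta, double *C, int sdc, double *D, int sdd,
        int km, int kn);

// Hypothetical blocked driver over an m x n output, k inner dimension.
static void dgemm_nt_blocked(int m, int n, int k, double alpha,
        double *pA, int sda, double *pB, int sdb, double beta,
        double *pC, int sdc, double *pD, int sdd)
{
    for (int ii = 0; ii < m; ii += 8) {
        for (int jj = 0; jj < n; jj += 4) {
            if (m - ii >= 8 && n - jj >= 4)
                kernel_dgemm_nt_8x4_lib4(k, &alpha, &pA[ii * sda], sda,
                        &pB[jj * sdb], &beta, &pC[ii * sdc + jj * 4], sdc,
                        &pD[ii * sdd + jj * 4], sdd);
            else
                // NB: for m - ii <= 4 a real driver switches to 4-row kernels.
                kernel_dgemm_nt_8x4_vs_lib4(k, &alpha, &pA[ii * sda], sda,
                        &pB[jj * sdb], &beta, &pC[ii * sdc + jj * 4], sdc,
                        &pD[ii * sdd + jj * 4], sdd, m - ii, n - jj);
        }
    }
}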
-
-
-//                                  1      2              3          4          5        6             7          8          9       10
-// void kernel_dgemm_nt_4x8_vs_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x8_vs_lib4
-	.type kernel_dgemm_nt_4x8_vs_lib4, @function
-kernel_dgemm_nt_4x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x8_vs_lib4
-_kernel_dgemm_nt_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x8_vs_lib4
-	.def kernel_dgemm_nt_4x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // km
-	movq	ARG10, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x8_vs_lib4, .-kernel_dgemm_nt_4x8_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8         r9            rsp+8        rsp+16     rsp+24   rsp+32       rsp+40     rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_dgemm_nt_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_gen_lib4
-	.type kernel_dgemm_nt_8x4_gen_lib4, @function
-kernel_dgemm_nt_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_gen_lib4
-_kernel_dgemm_nt_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_gen_lib4
-	.def kernel_dgemm_nt_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_gen_lib4, .-kernel_dgemm_nt_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16        rsp+24     rsp+32   rsp+40     rsp+48
-// void kernel_dgemm_nn_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_8x4_lib4
-	.type kernel_dgemm_nn_8x4_lib4, @function
-kernel_dgemm_nn_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_8x4_lib4
-_kernel_dgemm_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_8x4_lib4
-	.def kernel_dgemm_nn_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // C
-	movq	ARG10, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_8x4_lib4, .-kernel_dgemm_nn_8x4_lib4
-#endif
-
-
-
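A hedged C reference for the nn kernel above: D = alpha * A * B + beta * C, where B is a k x 4 panel-major block with panel stride 4*sdb doubles and offsetB is read here as the row offset of the first B element inside its panel (my reading of what inner_edge_dgemm_add_nn_8x4_lib4 peels off before the main loop). The helper name and value-passed scalars are illustrative.

// Reference semantics of kernel_dgemm_nn_8x4_lib4 (assumed offsetB handling).
static void dgemm_nn_8x4_ref(int k, double alpha, const double *A, int sda,
                             int offsetB, const double *B, int sdb,
                             double beta, const double *C, int sdc,
                             double *D, int sdd)
{
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 8; i++) {
            double acc = 0.0;
            for (int l = 0; l < k; l++) {
                int r = offsetB + l;  // row inside B's panel structure
                acc += A[4 * sda * (i / 4) + (i % 4) + 4 * l]
                     * B[4 * sdb * (r / 4) + (r % 4) + 4 * j];
            }
            double c = C[4 * sdc * (i / 4) + (i % 4) + 4 * j];
            D[4 * sdd * (i / 4) + (i % 4) + 4 * j] = alpha * acc + beta * c;
        }
    }
}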
-
-
-//                               rdi    rsi            rdx        rcx          r8         r9       rsp+8         rsp+16     rsp+24
-// void kernel_dgemm_nn_4x8_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x8_lib4
-	.type kernel_dgemm_nn_4x8_lib4, @function
-kernel_dgemm_nn_4x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x8_lib4
-_kernel_dgemm_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x8_lib4
-	.def kernel_dgemm_nn_4x8_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x8_lib4, .-kernel_dgemm_nn_4x8_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8        r9         rsp+8    rsp+16        rsp+24    rsp+32     rsp+40   rsp+48    rsp+56     rsp+64   rsp+72  rsp+80  rsp+88  rsp+96
-// void kernel_dgemm_nn_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offB, double *B, int sdb, double *beta, int offC, double *C, int sdc, int offD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_8x4_gen_lib4
-	.type kernel_dgemm_nn_8x4_gen_lib4, @function
-kernel_dgemm_nn_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_8x4_gen_lib4
-_kernel_dgemm_nn_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_8x4_gen_lib4
-	.def kernel_dgemm_nn_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // offsetC
-	movq	ARG10, %r13 // C
-	movq	ARG11, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG12, %r10 // offsetD
-	movq	ARG13, %r11 // D
-	movq	ARG14, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG15, %r13 // m0
-	movq	ARG16, %r14 // m1
-	movq	ARG17, %r15 // n0
-	movq	ARG18, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_8x4_gen_lib4, .-kernel_dgemm_nn_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                 rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dsyrk_nt_l_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_lib4
-	.type kernel_dsyrk_nt_l_8x4_lib4, @function
-kernel_dsyrk_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_lib4
-_kernel_dsyrk_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_lib4
-	.def kernel_dsyrk_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_lib4, .-kernel_dsyrk_nt_l_8x4_lib4
-#endif
-
-
-
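The dsyrk kernel above shares the dgemm nt inner loop and differs only in the store, so a C reference is the same product restricted to the lower triangle of the 8x4 block (elements with i >= j), matching inner_store_l_8x4_lib4. The helper name and value-passed scalars are illustrative.

// Reference semantics of kernel_dsyrk_nt_l_8x4_lib4.
static void dsyrk_nt_l_8x4_ref(int k, double alpha, const double *A, int sda,
                               const double *B, double beta,
                               const double *C, int sdc, double *D, int sdd)
{
    for (int j = 0; j < 4; j++) {
        for (int i = j; i < 8; i++) {   // lower triangle only
            double acc = 0.0;
            for (int l = 0; l < k; l++)
                acc += A[4 * sda * (i / 4) + (i % 4) + 4 * l] * B[j + 4 * l];
            double c = C[4 * sdc * (i / 4) + (i % 4) + 4 * j];
            D[4 * sdd * (i / 4) + (i % 4) + 4 * j] = alpha * acc + beta * c;
        }
    }
}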
-
-
-//                                    rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dsyrk_nt_l_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_vs_lib4
-	.type kernel_dsyrk_nt_l_8x4_vs_lib4, @function
-kernel_dsyrk_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_vs_lib4
-_kernel_dsyrk_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_vs_lib4
-	.def kernel_dsyrk_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_vs_lib4, .-kernel_dsyrk_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx      r8         r9            rsp+8        rsp+16     rsp+24   rsp+32       rsp+40     rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_dsyrk_nt_l_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_gen_lib4
-	.type kernel_dsyrk_nt_l_8x4_gen_lib4, @function
-kernel_dsyrk_nt_l_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_gen_lib4
-_kernel_dsyrk_nt_l_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_gen_lib4
-	.def kernel_dsyrk_nt_l_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_gen_lib4, .-kernel_dsyrk_nt_l_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16     rsp+24
-// void kernel_dtrmm_nn_rl_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_8x4_lib4
-	.type kernel_dtrmm_nn_rl_8x4_lib4, @function
-kernel_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_8x4_lib4
-_kernel_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_8x4_lib4
-	.def kernel_dtrmm_nn_rl_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_8x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_8x4_lib4, .-kernel_dtrmm_nn_rl_8x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16       rsp+24     rsp+32   rsp+40  rsp+48  rsp+56  rsp+64
-// void kernel_dtrmm_nn_rl_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_8x4_gen_lib4
-	.type kernel_dtrmm_nn_rl_8x4_gen_lib4, @function
-kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_8x4_gen_lib4
-_kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_8x4_gen_lib4
-	.def kernel_dtrmm_nn_rl_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_8x4_gen_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // offsetD
-	movq	ARG9, %r11 // D
-	movq	ARG10, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG11, %r13 // m0
-	movq	ARG12, %r14 // m1
-	movq	ARG13, %r15 // n0
-	movq	ARG14, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_8x4_gen_lib4, .-kernel_dtrmm_nn_rl_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dtrmm_nt_ru_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_8x4_lib4
-	.type kernel_dtrmm_nt_ru_8x4_lib4, @function
-kernel_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_8x4_lib4
-_kernel_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_8x4_lib4
-	.def kernel_dtrmm_nt_ru_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_8x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10 // A
-	movq	ARG4, %r11 // sda
-	sall	$5, %r11d // 4*sda*sizeof(double)
-	movq	ARG5, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_8x4_lib4, .-kernel_dtrmm_nt_ru_8x4_lib4
-#endif
-
-
-
-
-
-//                                 rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dtrmm_nt_ru_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_8x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_8x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_8x4_vs_lib4
-_kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_8x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_8x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_8x4_vs_lib4, .-kernel_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dpotrf_nt_l_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x4_lib4
-	.type kernel_dpotrf_nt_l_8x4_lib4, @function
-kernel_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x4_lib4
-_kernel_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x4_lib4
-	.def kernel_dpotrf_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x4_lib4, .-kernel_dpotrf_nt_l_8x4_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24              rsp+32  rsp+40 
-// void kernel_dpotrf_nt_l_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x4_vs_lib4
-	.type kernel_dpotrf_nt_l_8x4_vs_lib4, @function
-kernel_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x4_vs_lib4
-_kernel_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x4_vs_lib4
-	.def kernel_dpotrf_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x4_vs_lib4, .-kernel_dpotrf_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56
-// void kernel_dsyrk_dpotrf_nt_l_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-#endif
-
-
-
-
-
-//                                           rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56              rsp+64  rsp+72
-// void kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                               rdi     rsi         rdx       rcx          r8     r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56     rsp+64              rsp+72  rsp+80
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32 
-// void kernel_dtrsm_nt_rl_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x4_lib4
-_kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x4_lib4, .-kernel_dtrsm_nt_rl_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                            rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56     rsp+64
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dtrsm_nt_rl_one_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_8x4_lib4
-	.type kernel_dtrsm_nt_rl_one_8x4_lib4, @function
-kernel_dtrsm_nt_rl_one_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_8x4_lib4
-_kernel_dtrsm_nt_rl_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_8x4_lib4
-	.def kernel_dtrsm_nt_rl_one_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_8x4_lib4, .-kernel_dtrsm_nt_rl_one_8x4_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32  rsp+40
-// void kernel_dtrsm_nt_rl_one_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_one_8x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-_kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_8x4_vs_lib4, .-kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32 
-// void kernel_dtrsm_nt_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_lib4
-	.type kernel_dtrsm_nt_ru_inv_8x4_lib4, @function
-kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_8x4_lib4
-_kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_lib4
-	.def kernel_dtrsm_nt_ru_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_8x4_lib4, .-kernel_dtrsm_nt_ru_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nt_ru_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-_kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nt_ru_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_8x4_vs_lib4, .-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_lib4
-	.type kernel_dtrsm_nn_ru_inv_8x4_lib4, @function
-kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_8x4_lib4
-_kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_lib4
-	.def kernel_dtrsm_nn_ru_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_8x4_lib4, .-kernel_dtrsm_nn_ru_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40              rsp+48  rsp+56
-// void kernel_dtrsm_nn_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nn_ru_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-_kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nn_ru_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_8x4_vs_lib4, .-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ll_one_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_8x4_lib4
-	.type kernel_dtrsm_nn_ll_one_8x4_lib4, @function
-kernel_dtrsm_nn_ll_one_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_8x4_lib4
-_kernel_dtrsm_nn_ll_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_8x4_lib4
-	.def kernel_dtrsm_nn_ll_one_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_8x4_lib4, .-kernel_dtrsm_nn_ll_one_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48  rsp+56
-// void kernel_dtrsm_nn_ll_one_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-	.type kernel_dtrsm_nn_ll_one_8x4_vs_lib4, @function
-kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-_kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-	.def kernel_dtrsm_nn_ll_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_8x4_vs_lib4, .-kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48
-// void kernel_dtrsm_nn_lu_inv_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_lib4
-	.type kernel_dtrsm_nn_lu_inv_8x4_lib4, @function
-kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_8x4_lib4
-_kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_lib4
-	.def kernel_dtrsm_nn_lu_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_8x4_lib4, .-kernel_dtrsm_nn_lu_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48              rsp+56  rsp+64
-// void kernel_dtrsm_nn_lu_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nn_lu_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-_kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nn_lu_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG13, %r13  // km
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG13, %r12  // km
-	movq	ARG14, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_8x4_vs_lib4, .-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32
-// void kernel_dgetrf_nn_l_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_8x4_lib4
-	.type kernel_dgetrf_nn_l_8x4_lib4, @function
-kernel_dgetrf_nn_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_8x4_lib4
-_kernel_dgetrf_nn_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_8x4_lib4
-	.def kernel_dgetrf_nn_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	// epilogue
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_8x4_lib4, .-kernel_dgetrf_nn_l_8x4_lib4
-#endif
-
-
-
-
-
-//                                   edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32              rsp+40  rsp+48
-// void kernel_dgetrf_nn_l_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_8x4_vs_lib4
-	.type kernel_dgetrf_nn_l_8x4_vs_lib4, @function
-kernel_dgetrf_nn_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_8x4_vs_lib4
-_kernel_dgetrf_nn_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_8x4_vs_lib4
-	.def kernel_dgetrf_nn_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12  // km
-	movq	ARG12, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_8x4_vs_lib4, .-kernel_dgetrf_nn_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                             1         2           3           4           5
-// void kernel_dlarfb4_r_8_lib4(int kmax, double *pV, double *pT, double *pD, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb4_r_8_lib4
-	.type kernel_dlarfb4_r_8_lib4, @function
-kernel_dlarfb4_r_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb4_r_8_lib4
-_kernel_dlarfb4_r_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb4_r_8_lib4
-	.def kernel_dlarfb4_r_8_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb4_r_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG5, %r12 // sdd
-	sall	$5, %r12d
-	movq	ARG2, %r13 // V
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_8x4_lib4
-#endif
-#endif
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG5, %r12 // sdd
-	sall	$5, %r12d
-	movq	ARG2, %r13 // V
-
-	//
-	vmovapd			0(%r11), %ymm12
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vaddpd			%ymm12, %ymm0, %ymm0
-	vaddpd			%ymm14, %ymm4, %ymm4
-	//
-	vmovapd			32(%r11), %ymm12
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vaddpd			%ymm12, %ymm1, %ymm1
-	vaddpd			%ymm14, %ymm5, %ymm5
-	vbroadcastsd	32(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	//
-	vmovapd			64(%r11), %ymm12
-	vmovapd			64(%r11, %r12, 1), %ymm14
-	vaddpd			%ymm12, %ymm2, %ymm2
-	vaddpd			%ymm14, %ymm6, %ymm6
-	vbroadcastsd	64(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	72(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	//
-	vmovapd			96(%r11), %ymm12
-	vmovapd			96(%r11, %r12, 1), %ymm14
-	vaddpd			%ymm12, %ymm3, %ymm3
-	vaddpd			%ymm14, %ymm7, %ymm7
-	vbroadcastsd	96(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm4, %ymm4
-	vbroadcastsd	104(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	112(%r13), %ymm13
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm13, %ymm14, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-
-	movq	ARG3, %r10 // T
-
-	//
-	vbroadcastsd	120(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	//
-	vbroadcastsd	112(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm6, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	80(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	//
-	vbroadcastsd	104(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	72(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm5, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	40(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	//
-	vbroadcastsd	96(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm3, %ymm3
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm7, %ymm7
-	vbroadcastsd	64(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm2, %ymm2
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm6, %ymm6
-	vbroadcastsd	32(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm4, %ymm12, %ymm15
-	vaddpd			%ymm15, %ymm5, %ymm5
-	vbroadcastsd	0(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // V
-	movq	ARG4, %r12 // D
-	movq	ARG5, %r13 // sdd
-	sall	$5, %r13d
-
-	//
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vaddpd			%ymm12, %ymm0, %ymm12
-	vaddpd			%ymm14, %ymm4, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-	//
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vbroadcastsd	32(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vaddpd			%ymm12, %ymm1, %ymm12
-	vaddpd			%ymm14, %ymm5, %ymm14
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-	//
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vbroadcastsd	64(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	72(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vaddpd			%ymm12, %ymm2, %ymm12
-	vaddpd			%ymm14, %ymm6, %ymm14
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-	//
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vbroadcastsd	96(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm4, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	104(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm5, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vbroadcastsd	112(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm12, %ymm12
-	vmulpd			%ymm6, %ymm13, %ymm15
-	vaddpd			%ymm15, %ymm14, %ymm14
-	vaddpd			%ymm12, %ymm3, %ymm12
-	vaddpd			%ymm14, %ymm7, %ymm14
-	vmovapd			%ymm12, 96(%r12)
-	vmovapd			%ymm14, 96(%r12, %r13, 1)
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_8x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb4_r_8_lib4, .-kernel_dlarfb4_r_8_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC05: // { 1.0 1.0 1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC06: // { 1.0 1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC06: // { 1.0 1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC08: // { -1.0 -1.0 -1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC08: // { -1.0 -1.0 -1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC10: // { -1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC10: // { -1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-
-
-
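The `.long` pairs in the constant tables above are just the little-endian 32-bit halves of IEEE-754 doubles: 0 / 1071644672 encodes 0.5, 0 / 1072693248 encodes 1.0, 0 / -1074790400 encodes -1.0, and so on. A minimal sketch that reproduces the encoding (show_halves is an illustrative name, not part of this source):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

// Print a double as the two .long values used in the read-only data above
// (low 32 bits first, matching the layout on a little-endian x86-64 target).
static void show_halves(double x)
	{
	uint64_t bits;
	memcpy(&bits, &x, sizeof bits);
	printf("%g -> .long %d, .long %d\n", x,
			(int32_t)(bits & 0xffffffffu), (int32_t)(bits >> 32));
	}

int main(void)
	{
	show_halves(0.5);   // .long 0, .long 1071644672  -- first entry of .LC02
	show_halves(3.5);   // .long 0, .long 1074528256  -- last entry of .LC02
	show_halves(1.0);   // .long 0, .long 1072693248  -- .LC04
	show_halves(-1.0);  // .long 0, .long -1074790400 -- .LC05 .. .LC10
	return 0;
	}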
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemm_diag_lib4.c b/third_party/blasfeo/kernel/avx/kernel_dgemm_diag_lib4.c
deleted file mode 100644
index d64f977..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemm_diag_lib4.c
+++ /dev/null
@@ -1,866 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-
-
-// B is the diagonal of a matrix, beta==0.0 case
-void kernel_dgemm_diag_right_4_a0_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22, b_33,
-		d_00, d_01, d_02, d_03;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	
-	b_00 = _mm256_broadcast_sd( &B[0] );
-	b_00 = _mm256_mul_pd( b_00, alpha0 );
-	b_11 = _mm256_broadcast_sd( &B[1] );
-	b_11 = _mm256_mul_pd( b_11, alpha0 );
-	b_22 = _mm256_broadcast_sd( &B[2] );
-	b_22 = _mm256_mul_pd( b_22, alpha0 );
-	b_33 = _mm256_broadcast_sd( &B[3] );
-	b_33 = _mm256_mul_pd( b_33, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-		a_00 = _mm256_load_pd( &A[12] );
-		d_03 = _mm256_mul_pd( a_00, b_33 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-		_mm256_store_pd( &D[8], d_02 );
-		_mm256_store_pd( &D[12], d_03 );
-
-		A += 4*sda;
-		D += 4*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const double mask_f[] = {0.5, 1.5, 2.5, 3.5};
-		double m_f = kmax-k;
-
-		mask_i = _mm256_castpd_si256( _mm256_sub_pd( _mm256_loadu_pd( mask_f ), _mm256_broadcast_sd( &m_f ) ) );
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-		a_00 = _mm256_load_pd( &A[12] );
-		d_03 = _mm256_mul_pd( a_00, b_33 );
-
-		_mm256_maskstore_pd( &D[0], mask_i, d_00 );
-		_mm256_maskstore_pd( &D[4], mask_i, d_01 );
-		_mm256_maskstore_pd( &D[8], mask_i, d_02 );
-		_mm256_maskstore_pd( &D[12], mask_i, d_03 );
-
-		}
-	
-	}
-
-
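The clean-up path above builds the maskstore control without a compare: subtracting the remaining count m from {0.5, 1.5, 2.5, 3.5} leaves the sign bit set exactly in the lanes with index i < m, and _mm256_maskstore_pd keys off that sign bit alone. A stand-alone sketch of the same trick (store_tail is a hypothetical name, not a BLASFEO routine):

#include <immintrin.h>
#include <stdio.h>

// Store only the first m (0 < m <= 4) lanes of v to dst, using the same
// compare-free mask construction as the kmax tail handling above.
static void store_tail(double *dst, __m256d v, int m)
	{
	const double idx[4] = {0.5, 1.5, 2.5, 3.5};
	double m_f = (double)m;
	__m256i mask = _mm256_castpd_si256(
			_mm256_sub_pd(_mm256_loadu_pd(idx), _mm256_broadcast_sd(&m_f)));
	_mm256_maskstore_pd(dst, mask, v);
	}

int main(void)
	{
	double out[4] = {0.0, 0.0, 0.0, 0.0};
	store_tail(out, _mm256_set_pd(4.0, 3.0, 2.0, 1.0), 3);  // lanes 0..2 only
	printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // prints: 1 2 3 0
	return 0;
	}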
-
-// B is the diagonal of a matrix
-void kernel_dgemm_diag_right_4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22, b_33,
-		c_00,
-		d_00, d_01, d_02, d_03;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	b_00 = _mm256_broadcast_sd( &B[0] );
-	b_00 = _mm256_mul_pd( b_00, alpha0 );
-	b_11 = _mm256_broadcast_sd( &B[1] );
-	b_11 = _mm256_mul_pd( b_11, alpha0 );
-	b_22 = _mm256_broadcast_sd( &B[2] );
-	b_22 = _mm256_mul_pd( b_22, alpha0 );
-	b_33 = _mm256_broadcast_sd( &B[3] );
-	b_33 = _mm256_mul_pd( b_33, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-		a_00 = _mm256_load_pd( &A[12] );
-		d_03 = _mm256_mul_pd( a_00, b_33 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-		c_00 = _mm256_load_pd( &C[12] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_03 = _mm256_add_pd( c_00, d_03 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-		_mm256_store_pd( &D[8], d_02 );
-		_mm256_store_pd( &D[12], d_03 );
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const double mask_f[] = {0.5, 1.5, 2.5, 3.5};
-		double m_f = kmax-k;
-
-		mask_i = _mm256_castpd_si256( _mm256_sub_pd( _mm256_loadu_pd( mask_f ), _mm256_broadcast_sd( &m_f ) ) );
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-		a_00 = _mm256_load_pd( &A[12] );
-		d_03 = _mm256_mul_pd( a_00, b_33 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-		c_00 = _mm256_load_pd( &C[12] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_03 = _mm256_add_pd( c_00, d_03 );
-
-		_mm256_maskstore_pd( &D[0], mask_i, d_00 );
-		_mm256_maskstore_pd( &D[4], mask_i, d_01 );
-		_mm256_maskstore_pd( &D[8], mask_i, d_02 );
-		_mm256_maskstore_pd( &D[12], mask_i, d_03 );
-
-		}
-	
-	}
-
-
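Written out in plain scalars, the diag_right kernels above combine every row k of a 4-column lib4 slab as D(k,j) = alpha*A(k,j)*B[j] + beta*C(k,j), i.e. a right-multiplication by diag(B). A reference sketch for illustration only (the _ref name is made up, and scalar alpha/beta replace the pointer arguments of the real interface):

// Illustrative scalar reference (not BLASFEO code): D = alpha*A*diag(B) + beta*C
// for a kmax-row, 4-column slab of panel-major (lib4) matrices.
void kernel_dgemm_diag_right_4_ref(int kmax, double alpha, const double *A, int sda,
		const double *B, double beta, const double *C, int sdc, double *D, int sdd)
	{
	const int bs = 4;
	for (int k = 0; k < kmax; k++)
		{
		int ra = (k / bs) * bs * sda + k % bs;  // row k inside its 4-row panel
		int rc = (k / bs) * bs * sdc + k % bs;
		int rd = (k / bs) * bs * sdd + k % bs;
		for (int j = 0; j < 4; j++)
			D[rd + bs * j] = alpha * A[ra + bs * j] * B[j] + beta * C[rc + bs * j];
		}
	}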
-
-// B is the diagonal of a matrix
-void kernel_dgemm_diag_right_3_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22,
-		c_00,
-		d_00, d_01, d_02;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	b_00 = _mm256_broadcast_sd( &B[0] );
-	b_00 = _mm256_mul_pd( b_00, alpha0 );
-	b_11 = _mm256_broadcast_sd( &B[1] );
-	b_11 = _mm256_mul_pd( b_11, alpha0 );
-	b_22 = _mm256_broadcast_sd( &B[2] );
-	b_22 = _mm256_mul_pd( b_22, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-		_mm256_store_pd( &D[8], d_02 );
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const double mask_f[] = {0.5, 1.5, 2.5, 3.5};
-		double m_f = kmax-k;
-
-		mask_i = _mm256_castpd_si256( _mm256_sub_pd( _mm256_loadu_pd( mask_f ), _mm256_broadcast_sd( &m_f ) ) );
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-		a_00 = _mm256_load_pd( &A[8] );
-		d_02 = _mm256_mul_pd( a_00, b_22 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-
-		_mm256_maskstore_pd( &D[0], mask_i, d_00 );
-		_mm256_maskstore_pd( &D[4], mask_i, d_01 );
-		_mm256_maskstore_pd( &D[8], mask_i, d_02 );
-
-		}
-	
-	}
-
-
-
-// B is the diagonal of a matrix
-void kernel_dgemm_diag_right_2_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11,
-		c_00,
-		d_00, d_01;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	b_00 = _mm256_broadcast_sd( &B[0] );
-	b_00 = _mm256_mul_pd( b_00, alpha0 );
-	b_11 = _mm256_broadcast_sd( &B[1] );
-	b_11 = _mm256_mul_pd( b_11, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const double mask_f[] = {0.5, 1.5, 2.5, 3.5};
-		double m_f = kmax-k;
-
-		mask_i = _mm256_castpd_si256( _mm256_sub_pd( _mm256_loadu_pd( mask_f ), _mm256_broadcast_sd( &m_f ) ) );
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		a_00 = _mm256_load_pd( &A[4] );
-		d_01 = _mm256_mul_pd( a_00, b_11 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-
-		_mm256_maskstore_pd( &D[0], mask_i, d_00 );
-		_mm256_maskstore_pd( &D[4], mask_i, d_01 );
-
-		}
-
-	}
-
-
-
-// B is the diagonal of a matrix
-void kernel_dgemm_diag_right_1_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00,
-		c_00,
-		d_00;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	b_00 = _mm256_broadcast_sd( &B[0] );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-
-		_mm256_store_pd( &D[0], d_00 );
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const double mask_f[] = {0.5, 1.5, 2.5, 3.5};
-		double m_f = kmax-k;
-
-		mask_i = _mm256_castpd_si256( _mm256_sub_pd( _mm256_loadu_pd( mask_f ), _mm256_broadcast_sd( &m_f ) ) );
-
-		a_00 = _mm256_load_pd( &A[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-
-		_mm256_maskstore_pd( &D[0], mask_i, d_00 );
-
-		}
-	
-	}
-
-
-
-// A is the diagonal of a matrix, beta=0.0 case
-void kernel_dgemm_diag_left_4_a0_lib4(int kmax, double *alpha, double *A, double *B, double *D)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0,
-		sign,
-		a_00,
-		b_00,
-		d_00, d_01, d_02, d_03;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	
-	a_00 = _mm256_load_pd( &A[0] );
-	a_00 = _mm256_mul_pd( a_00, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[4] );
-		d_01 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[8] );
-		d_02 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[12] );
-		d_03 = _mm256_mul_pd( a_00, b_00 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-		_mm256_store_pd( &D[8], d_02 );
-		_mm256_store_pd( &D[12], d_03 );
-
-		B += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-
-		_mm256_store_pd( &D[0], d_00 );
-
-		B += 4;
-		D += 4;
-		
-		}
-
-	}
-
-
-
-// A is the diagonal of a matrix
-void kernel_dgemm_diag_left_4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256d
-		alpha0, beta0,
-		sign,
-		a_00,
-		b_00,
-		c_00,
-		d_00, d_01, d_02, d_03;
-	
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	a_00 = _mm256_load_pd( &A[0] );
-	a_00 = _mm256_mul_pd( a_00, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[4] );
-		d_01 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[8] );
-		d_02 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[12] );
-		d_03 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-		c_00 = _mm256_load_pd( &C[12] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_03 = _mm256_add_pd( c_00, d_03 );
-
-		_mm256_store_pd( &D[0], d_00 );
-		_mm256_store_pd( &D[4], d_01 );
-		_mm256_store_pd( &D[8], d_02 );
-		_mm256_store_pd( &D[12], d_03 );
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-
-		_mm256_store_pd( &D[0], d_00 );
-
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-
-	}
-
-
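The diag_left kernels work the other way around: each of the 4 rows of a kmax-column panel is scaled by the matching diagonal entry of A. A matching scalar sketch (illustrative only, the _ref name is made up):

// Illustrative scalar reference (not BLASFEO code): D = alpha*diag(A)*B + beta*C
// for a single 4-row lib4 panel, kmax columns wide; A holds the 4 diagonal entries.
void kernel_dgemm_diag_left_4_ref(int kmax, double alpha, const double *A,
		const double *B, double beta, const double *C, double *D)
	{
	const int bs = 4;
	for (int k = 0; k < kmax; k++)
		for (int i = 0; i < 4; i++)
			D[i + bs * k] = alpha * A[i] * B[i + bs * k] + beta * C[i + bs * k];
	}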
-
-// A is the diagonal of a matrix
-void kernel_dgemm_diag_left_3_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256i
-		mask;
-
-	__m256d
-		alpha0, beta0,
-		sign,
-		a_00,
-		b_00,
-		c_00,
-		d_00, d_01, d_02, d_03;
-	
-	mask = _mm256_set_epi64x( 1, -1, -1, -1 );
-		
-	alpha0 = _mm256_broadcast_sd( alpha );
-	beta0  = _mm256_broadcast_sd( beta );
-	
-	a_00 = _mm256_load_pd( &A[0] );
-	a_00 = _mm256_mul_pd( a_00, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[4] );
-		d_01 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[8] );
-		d_02 = _mm256_mul_pd( a_00, b_00 );
-		b_00 = _mm256_load_pd( &B[12] );
-		d_03 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-		c_00 = _mm256_load_pd( &C[4] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_01 = _mm256_add_pd( c_00, d_01 );
-		c_00 = _mm256_load_pd( &C[8] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_02 = _mm256_add_pd( c_00, d_02 );
-		c_00 = _mm256_load_pd( &C[12] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_03 = _mm256_add_pd( c_00, d_03 );
-
-		_mm256_maskstore_pd( &D[0], mask, d_00 );
-		_mm256_maskstore_pd( &D[4], mask, d_01 );
-		_mm256_maskstore_pd( &D[8], mask, d_02 );
-		_mm256_maskstore_pd( &D[12], mask, d_03 );
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_00 = _mm256_load_pd( &B[0] );
-		d_00 = _mm256_mul_pd( a_00, b_00 );
-
-		c_00 = _mm256_load_pd( &C[0] );
-		c_00 = _mm256_mul_pd( c_00, beta0 );
-		d_00 = _mm256_add_pd( c_00, d_00 );
-
-		_mm256_maskstore_pd( &D[0], mask, d_00 );
-
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-
-	}
-
-
-
-// A is the diagonal of a matrix
-void kernel_dgemm_diag_left_2_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m128d
-		alpha0, beta0,
-		sign,
-		a_00,
-		b_00,
-		c_00,
-		d_00, d_01, d_02, d_03;
-		
-	alpha0 = _mm_loaddup_pd( alpha );
-	beta0  = _mm_loaddup_pd( beta );
-	
-	a_00 = _mm_load_pd( &A[0] );
-	a_00 = _mm_mul_pd( a_00, alpha0 );
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_00 = _mm_load_pd( &B[0] );
-		d_00 = _mm_mul_pd( a_00, b_00 );
-		b_00 = _mm_load_pd( &B[4] );
-		d_01 = _mm_mul_pd( a_00, b_00 );
-		b_00 = _mm_load_pd( &B[8] );
-		d_02 = _mm_mul_pd( a_00, b_00 );
-		b_00 = _mm_load_pd( &B[12] );
-		d_03 = _mm_mul_pd( a_00, b_00 );
-
-		c_00 = _mm_load_pd( &C[0] );
-		c_00 = _mm_mul_pd( c_00, beta0 );
-		d_00 = _mm_add_pd( c_00, d_00 );
-		c_00 = _mm_load_pd( &C[4] );
-		c_00 = _mm_mul_pd( c_00, beta0 );
-		d_01 = _mm_add_pd( c_00, d_01 );
-		c_00 = _mm_load_pd( &C[8] );
-		c_00 = _mm_mul_pd( c_00, beta0 );
-		d_02 = _mm_add_pd( c_00, d_02 );
-		c_00 = _mm_load_pd( &C[12] );
-		c_00 = _mm_mul_pd( c_00, beta0 );
-		d_03 = _mm_add_pd( c_00, d_03 );
-
-		_mm_store_pd( &D[0], d_00 );
-		_mm_store_pd( &D[4], d_01 );
-		_mm_store_pd( &D[8], d_02 );
-		_mm_store_pd( &D[12], d_03 );
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_00 = _mm_load_pd( &B[0] );
-		d_00 = _mm_mul_pd( a_00, b_00 );
-
-		c_00 = _mm_load_pd( &C[0] );
-		c_00 = _mm_mul_pd( c_00, beta0 );
-		d_00 = _mm_add_pd( c_00, d_00 );
-
-		_mm_store_pd( &D[0], d_00 );
-
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-
-	
-	}
-
-
-// A is the diagonal of a matrix
-void kernel_dgemm_diag_left_1_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0,
-		b_0,
-		c_0;
-	
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = A[0] * alpha0;
-		
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		b_0 = B[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-
-		D[0+bs*1] = c_0;
-		
-
-		b_0 = B[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-
-		D[0+bs*2] = c_0;
-		
-
-		b_0 = B[0+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-
-		D[0+bs*3] = c_0;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-		
-	}
-
-
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemv_12_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgemv_12_lib4.S
deleted file mode 100644
index c51ad9a..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemv_12_lib4.S
+++ /dev/null
@@ -1,1322 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z8 z9 za zb]_a
-// ymm3  <- [z0 z1 z2 z3]_b
-// ymm4  <- [z4 z5 z6 z7]_b
-// ymm5  <- [z8 z9 za zb]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x+k*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z8 z9 za zb]_a
-// ymm3  <- [z0 z1 z2 z3]_b
-// ymm4  <- [z4 z5 z6 z7]_b
-// ymm5  <- [z8 z9 za zb]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_N_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_n_12_lib4, @function
-inner_kernel_dgemv_add_n_12_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_n_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_n_12_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_n_12_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r14 // A1 <- A0
-	addq	%r12, %r14 // A1 <- A0 + 4*sda*sizeof(double)
-	movq	%r14, %r15 // A2 <- A1
-	addq	%r12, %r15 // A2 <- A1 + 4*sda*sizeof(double)
-
-	cmpl	$4, %r10d
-
-	prefetcht0	0(%r11) // software prefetch
-	prefetcht0	0(%r14) // software prefetch
-	prefetcht0	0(%r15) // software prefetch
-	prefetcht0	64(%r11) // software prefetch
-	prefetcht0	64(%r14) // software prefetch
-	prefetcht0	64(%r15) // software prefetch
-
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	128(%r11) // software prefetch
-	prefetcht0	128(%r14) // software prefetch
-	prefetcht0	128(%r15) // software prefetch
-
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	0(%r14), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd	0(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	
-	subl	$4, %r10d
-
-	vbroadcastsd	8(%r13), %ymm12
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmovapd	32(%r14), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	vmovapd	32(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	prefetcht0	192(%r11) // software prefetch
-	prefetcht0	192(%r14) // software prefetch
-	prefetcht0	192(%r15) // software prefetch
-
-	vbroadcastsd	16(%r13), %ymm12
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	64(%r14), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd	64(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vbroadcastsd	24(%r13), %ymm12
-	addq	$32, %r13 // x+4
-	vmovapd	96(%r11), %ymm8
-	addq	$128, %r11 // A0+4*bs
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmovapd	96(%r14), %ymm8
-	addq	$128, %r14 // A1+4*bs
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	vmovapd	96(%r15), %ymm8
-	addq	$128, %r15 // A2+4*bs
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	0(%r14), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd	0(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	
-	addq	$32, %r11
-	addq	$32, %r14
-	addq	$32, %r15
-	addq	$8, %r13
-	
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-
-	jg		0b // clean
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_n_12_lib4, .-inner_kernel_dgemv_add_n_12_lib4
-#endif
-#endif
-
-
-
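Stripped of the unrolling, prefetching and split accumulators, the inner routine above accumulates z[0..11] += A*x over a 12-row, k-column lib4 block. A scalar sketch for orientation (illustrative only; the _ref name is made up):

// Illustrative scalar reference (not BLASFEO code) of the inner dgemv 'n' loop:
// z[0..11] += A * x for a 12 x k block stored as three 4-row panels with
// panel stride sda (the 32*sda byte stride held in r12 above).
void inner_dgemv_add_n_12_ref(int k, const double *A, int sda, const double *x, double *z)
	{
	const int bs = 4;
	for (int j = 0; j < k; j++)
		for (int i = 0; i < 12; i++)
			z[i] += A[(i / bs) * bs * sda + j * bs + i % bs] * x[j];
	}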
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- [z8a z8b z8c z8d]
-// ymm9  <- [z9a z9b z9c z9d]
-// ymm10 <- [zaa zab zac zad]
-// ymm11 <- [zba zbb zbc zbd]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- [z8a z8b z8c z8d]
-// ymm9  <- [z9a z9b z9c z9d]
-// ymm10 <- [zaa zab zac zad]
-// ymm11 <- [zba zbb zbc zbd]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_T_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_t_12_lib4, @function
-inner_kernel_dgemv_add_t_12_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_t_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_t_12_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_t_12_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-
-	prefetcht0	0(%r11) // software prefetch
-	prefetcht0	64(%r11) // software prefetch
-	prefetcht0	128(%r11) // software prefetch
-	prefetcht0	192(%r11) // software prefetch
-	prefetcht0	256(%r11) // software prefetch
-	prefetcht0	320(%r11) // software prefetch
-
-	jl		0f // clean-up loop
-
-	movq	%r11, %r14
-	addq	%r12, %r14 // A+bs*sda
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r14) // software prefetch
-
-	vmovupd	0(%r13), %ymm12
-	addq	$32, %r13 // x+4
-
-	vmovapd	0(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	prefetcht0	64(%r14) // software prefetch
-
-	vmovapd	64(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-
-	prefetcht0	128(%r14) // software prefetch
-
-	vmovapd	128(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	
-	vmovapd	160(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	prefetcht0	192(%r14) // software prefetch
-
-	vmovapd	192(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	vmovapd	224(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	prefetcht0	256(%r14) // software prefetch
-
-	vmovapd	256(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm8, %ymm15, %ymm8
-	
-	vmovapd	288(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm9, %ymm15, %ymm9
-	
-	prefetcht0	320(%r14) // software prefetch
-
-	vmovapd	320(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm10, %ymm15, %ymm10
-
-	vmovapd	352(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm11, %ymm15, %ymm11
-	
-//	addq	%r12, %r11 // A+bs*sda
-	movq	%r14, %r11 // A+bs*sda
-	addq	%r12, %r14 // A+bs*sda+bs*sda
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmovapd	0(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	vmovapd	32(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-		
-	vmovapd	128(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	
-	vmovapd	160(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	vmovapd	192(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	vmovapd	224(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	vmovapd	256(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm8, %ymm15, %ymm8
-	
-	vmovapd	288(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm9, %ymm15, %ymm9
-	
-	vmovapd	320(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm10, %ymm15, %ymm10
-
-	vmovapd	352(%r11), %ymm13
-	vmulpd	%ymm13, %ymm12, %ymm15
-	vaddpd	%ymm11, %ymm15, %ymm11
-
-	sall	$3, %r10d
-//	movslq	%r10d, %r10
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_t_12_lib4, .-inner_kernel_dgemv_add_t_12_lib4
-#endif
-#endif
-
-
-
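The transposed variant accumulates one dot product per column instead, z[j] += sum_i A(i,j)*x[i] over 12 columns of the same layout; the matching scalar sketch (again with a made-up name):

// Illustrative scalar reference (not BLASFEO code) of the inner dgemv 't' loop:
// z[j] += sum over i of A(i, j) * x[i] for 12 columns of a lib4 block, k rows tall.
void inner_dgemv_add_t_12_ref(int k, const double *A, int sda, const double *x, double *z)
	{
	const int bs = 4;
	for (int i = 0; i < k; i++)
		for (int j = 0; j < 12; j++)
			z[j] += A[(i / bs) * bs * sda + j * bs + i % bs] * x[i];
	}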
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z8 z9 za zb]_a
-// ymm3  <- [z0 z1 z2 z3]_b
-// ymm4  <- [z4 z5 z6 z7]_b
-// ymm5  <- [z8 z9 za zb]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2  <- [z8 z9 za zb]
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_AB_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_ab_12_lib4, @function
-inner_blend_n_scale_ab_12_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_ab_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_ab_12_lib4; .scl 2; .type 32; .endef
-inner_blend_n_scale_ab_12_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm1, %ymm4, %ymm1
-	vaddpd	%ymm2, %ymm5, %ymm2
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm2, %ymm15, %ymm2
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-	vmovupd		64(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm2, %ymm14, %ymm2
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_ab_12_lib4, .-inner_blend_n_scale_ab_12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- [z8a z8b z8c z8d]
-// ymm9  <- [z9a z9b z9c z9d]
-// ymm10 <- [zaa zab zac zad]
-// ymm11 <- [zba zbb zbc zbd]
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2  <- [z8 z9 za zb]
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_12_lib4, @function
-inner_blend_t_scale_ab_12_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_12_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_12_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm9, %ymm8, %ymm8
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vhaddpd	%ymm11, %ymm10, %ymm10
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x2, %ymm8, %ymm10, %ymm9
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vperm2f128	$0x13, %ymm8, %ymm10, %ymm8
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-	vaddpd	%ymm8, %ymm9, %ymm2
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm2, %ymm15, %ymm2
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-	vmovupd		64(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm2, %ymm14, %ymm2
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_12_lib4, .-inner_blend_t_scale_ab_12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==n
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z8 z9 za zb]_a
-// ymm3  <- [z0 z1 z2 z3]_b
-// ymm4  <- [z4 z5 z6 z7]_b
-// ymm5  <- [z8 z9 za zb]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0  <- [z0 z1 z2 z3]
-// ymm1  <- [z4 z5 z6 z7]
-// ymm2  <- [z8 z9 za zb]
-// ymm3  <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_N_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_n_12_lib4, @function
-inner_blender_n_12_lib4:
-#elif defined(OS_MAC)
-_inner_blender_n_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_n_12_lib4; .scl 2; .type 32; .endef
-inner_blender_n_12_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm1, %ymm4, %ymm1
-	vaddpd	%ymm2, %ymm5, %ymm2
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovupd		64(%r11), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-	vmovupd		64(%r11), %ymm15
-	vsubpd		%ymm2, %ymm15, %ymm2
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_n_12_lib4, .-inner_blender_n_12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==t
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- [z8a z8b z8c z8d]
-// ymm9  <- [z9a z9b z9c z9d]
-// ymm10 <- [zaa zab zac zad]
-// ymm11 <- [zba zbb zbc zbd]
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0  <- [z0 z1 z2 z3]
-// ymm1  <- [z4 z5 z6 z7]
-// ymm2  <- [z8 z9 za zb]
-// ymm3  <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_T_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_t_12_lib4, @function
-inner_blender_t_12_lib4:
-#elif defined(OS_MAC)
-_inner_blender_t_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_t_12_lib4; .scl 2; .type 32; .endef
-inner_blender_t_12_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm9, %ymm8, %ymm8
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vhaddpd	%ymm11, %ymm10, %ymm10
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x2, %ymm8, %ymm10, %ymm9
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vperm2f128	$0x13, %ymm8, %ymm10, %ymm8
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-	vaddpd	%ymm8, %ymm9, %ymm2
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovupd		64(%r11), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-	vmovupd		64(%r11), %ymm15
-	vsubpd		%ymm2, %ymm15, %ymm2
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_t_12_lib4, .-inner_blender_t_12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- [z8 z9 za zb]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- [z8 z9 za zb]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_12_lib4, @function
-inner_store_12_lib4:
-#elif defined(OS_MAC)
-_inner_store_12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_12_lib4; .scl 2; .type 32; .endef
-inner_store_12_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0, 0(%r10)
-	vmovupd %ymm1, 32(%r10)
-	vmovupd %ymm2, 64(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_12_lib4, .-inner_store_12_lib4
-#endif
-#endif
-
-
-
-
-
-//                             rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_n_12_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_12_lib4
-	.type kernel_dgemv_n_12_lib4, @function
-kernel_dgemv_n_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_12_lib4
-_kernel_dgemv_n_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_12_lib4
-	.def kernel_dgemv_n_12_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_12_lib4
-#endif
-#endif
-
-
-	// call inner blender n
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_12_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_12_lib4, .-kernel_dgemv_n_12_lib4
-#endif
-
-
-
-
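Functionally, kernel_dgemv_n_12_lib4 above computes z = alpha*A*x + beta*y for a 12-row slice of A stored in 4-row panels with panel stride sda (the lib4 layout). A plain-C reference of the same contract, handy for checking the assembly (the ref_ name and the scalar loops are mine, not library code; the indexing reflects my reading of the panel-major format):

    static void ref_dgemv_n_12_lib4(int k, const double *alpha, const double *A,
                                    int sda, const double *x, const double *beta,
                                    const double *y, double *z) {
        for (int i = 0; i < 12; i++) {
            double t = 0.0;
            for (int j = 0; j < k; j++)
                t += A[(i / 4) * 4 * sda + i % 4 + j * 4] * x[j];  /* A(i,j), panel-major */
            z[i] = alpha[0] * t + beta[0] * y[i];
        }
    }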
-
-//                            rdi    rsi           rdx         rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_t_12_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_12_lib4
-	.type kernel_dgemv_t_12_lib4, @function
-kernel_dgemv_t_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_12_lib4
-_kernel_dgemv_t_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_12_lib4
-	.def kernel_dgemv_t_12_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_12_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_12_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_12_lib4, .-kernel_dgemv_t_12_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
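The constant tables above are plain IEEE-754 bit patterns written as 32-bit halves, low word first: .LC00 and .LC01 are mask constants used with vmaskmovpd elsewhere in these kernels, .LC02 encodes { 0.5 1.5 2.5 3.5 } in memory order, .LC03 encodes { 4.5 5.5 6.5 7.5 }, and .LC04 is four copies of 1.0 (1071644672 is 0x3FE00000, the high word of 0.5). A quick self-contained check, assuming a little-endian host with IEEE doubles:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        /* The (low, high) .long pairs of .LC02 above. */
        uint32_t lc02[8] = {0, 1071644672, 0, 1073217536, 0, 1074003968, 0, 1074528256};
        double d[4];
        memcpy(d, lc02, sizeof d);
        printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]);  /* 0.5 1.5 2.5 3.5 */
        return 0;
    }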
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemv_4_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgemv_4_lib4.S
deleted file mode 100644
index 656e220..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemv_4_lib4.S
+++ /dev/null
@@ -1,4503 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
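The two macro sets above paper over the ABI differences: System V passes the first six integer/pointer arguments in rdi, rsi, rdx, rcx, r8, r9 and the rest on the stack just above the return address, while Windows x64 passes only four in rcx, rdx, r8, r9, reserves 32 bytes of shadow space, and treats xmm6-xmm15 as callee-saved (hence the larger spill area). For a typical eight-argument kernel the mapping works out as below; after the PROLOGUE adjusts the stack the entry offsets become STACKSIZE+N(%rsp), which is exactly how ARG7..ARG18 are defined.

    arg #   System V (Linux/Mac)       Windows x64
    1-4     rdi, rsi, rdx, rcx         rcx, rdx, r8, r9
    5-6     r8, r9                     entry rsp+40, rsp+48
    7       entry rsp+8                entry rsp+56
    8       entry rsp+16               entry rsp+64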
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- x
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- x+k*sizeof(double)
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_n_4_lib4, @function
-inner_kernel_dgemv_add_n_4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_n_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_n_4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_n_4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovapd	0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	
-	addq	$128, %r11
-	addq	$32, %r12
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vmovapd	0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	addq	$32, %r11
-	addq	$8, %r12
-	
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-
-	jg		0b // clean
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_n_4_lib4, .-inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-
-
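The 'n' inner kernel above consumes four columns per main-loop iteration: each step loads one 4-element panel column of A, broadcasts the matching x entry, and multiply-accumulates into one of four independent accumulators (ymm0-ymm3) so the adds can overlap; the tail loop handles the remaining k%4 columns one at a time, and the four partial vectors are summed later in the blend routine. A scalar sketch of the same computation (names are illustrative):

    /* acc[u][i] mirrors lane i of ymm_u; columns rotate over the four
       accumulators just like the unrolled loop above. */
    static void ref_dgemv_add_n_4(int k, const double *A, const double *x,
                                  double acc[4][4]) {
        for (int j = 0; j < k; j++)
            for (int i = 0; i < 4; i++)
                acc[j % 4][i] += A[j * 4 + i] * x[j];  /* column j of the panel */
    }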
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_t_4_lib4, @function
-inner_kernel_dgemv_add_t_4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_t_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_t_4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_t_4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	
-	addq	%r12, %r11
-	addq	$32, %r13
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmaskmovpd	0(%r11), %ymm14, %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	vmaskmovpd	32(%r11), %ymm14, %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmaskmovpd	64(%r11), %ymm14, %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmaskmovpd	96(%r11), %ymm14, %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-		
-	sall	$3, %r10d
-//	movslq	%r10d, %r10
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_t_4_lib4, .-inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-
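The clean-up above builds its load mask without a branch: it broadcasts the leftover count (0 < k < 4) as a double, subtracts it from .LC02 = { 0.5 1.5 2.5 3.5 }, and lane i comes out negative, i.e. with the sign bit set, exactly when i < k, which is all vmaskmovpd inspects. The same trick with intrinsics (a sketch, not library code):

    #include <immintrin.h>

    /* Lane i of the returned mask has its sign bit set iff i < k_left. */
    static __m256i tail_mask(int k_left) {
        __m256d idx = _mm256_set_pd(3.5, 2.5, 1.5, 0.5);    /* lanes 3..0 */
        __m256d k   = _mm256_set1_pd((double)k_left);
        return _mm256_castpd_si256(_mm256_sub_pd(idx, k));  /* (i+0.5) - k < 0 */
    }

    /* usage: __m256d v = _mm256_maskload_pd(ptr, tail_mask(k_left)); */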
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_nt_4_lib4, @function
-inner_kernel_dgemv_add_nt_4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_nt_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_nt_4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_nt_4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-	vmovupd	0(%r14), %ymm13
-
-	vmovapd	0(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm14, %ymm6, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm14, %ymm7, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovapd	64(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm14, %ymm8, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-
-	vmovapd	96(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmulpd	%ymm14, %ymm9, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm11
-
-	vmaskmovpd	0(%r13), %ymm11, %ymm12
-	vmaskmovpd	0(%r14), %ymm11, %ymm13
-
-//	vmovupd	%ymm14, -32(%rsp) // spill mask to stack
-
-//	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	0(%r11), %ymm11, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm14, %ymm6, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-//	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	32(%r11), %ymm11, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm14, %ymm7, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-//	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	64(%r11), %ymm11, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm14, %ymm8, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-
-//	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	96(%r11), %ymm11, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmulpd	%ymm14, %ymm9, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-		
-//	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	%ymm13, %ymm11, 0(%r14)
-
-	sall	$3, %r10d // *sizeof(double)
-	addq	%r10, %r11
-	addq	%r10, %r13
-	addq	%r10, %r14
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_nt_4_lib4, .-inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-
-
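The 'nt' kernel above reads each 4-column block of the panel once and updates two results at the same time: the column-times-x_t products go into the in-register 't' accumulators, while A(:,j)*x_n[j] is added into z_n and streamed straight back to memory; dsymv is built on this so the matrix is touched only once. A scalar sketch of one such pass (indexing follows my reading of the lib4 layout):

    static void ref_dgemv_add_nt_4(int kmax, const double *A, int sda,
                                   const double *x_t, const double *x_n,
                                   double z_t[4], double *z_n) {
        for (int i = 0; i < kmax; i++)
            for (int j = 0; j < 4; j++) {
                double a = A[(i / 4) * 4 * sda + i % 4 + j * 4];  /* A(i,j) */
                z_t[j] += a * x_t[i];  /* transposed ('t') contribution */
                z_n[i] += a * x_n[j];  /* normal ('n') contribution     */
            }
    }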
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 
-// r11   <- 
-// r12   <- 
-// r13   <- 
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_DGEMV_ADD_T_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemv_add_t_4_lib4, @function
-inner_edge_dgemv_add_t_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemv_add_t_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemv_add_t_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemv_add_t_4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r14d
-	jle		0f // return
-
-	movl	%r14d, %r15d
-	sall	$3, %r15d // offA*sizeof(double)
-
-	subq	%r15, %r11 // A - offA
-	subq	%r15, %r13 // x - offA
-
-	movl	%r10d, %r15d // kmax
-	addl	%r14d, %r15d // kmax + offA
-
-	vcvtsi2sd	%r14d, %xmm14, %xmm14 // offA
-	vcvtsi2sd	%r15d, %xmm15, %xmm15 // offA + kmax
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm13, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-	vandpd		%ymm15, %ymm14, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-
-	addq	$32, %r13 // x + 4
-	addq	%r12, %r11 // A + bs*sda
-		
-	addl	%r14d, %r10d
-	subl	$4, %r10d // kmax - (4-offA)
-	
-0: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemv_add_t_4_lib4, .-inner_edge_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10   <- kmax
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- kmax-4
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dsymv_add_nt_4_lib4, @function
-inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dsymv_add_nt_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dsymv_add_nt_4_lib4:
-#endif
-#endif
-
-	vmovupd		0(%r13), %ymm12
-	vmovupd		0(%r14), %ymm13
-
-	vmovupd		0(%r11), %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm6, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovupd		32(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm7, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovupd		64(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm8, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-
-	vmovupd		96(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-//	vxorpd		%ymm15, %ymm15, %ymm15
-//	vblendpd	$0x0, %ymm14, %ymm15, %ymm14
-//	vmulpd		%ymm14, %ymm9, %ymm15
-//	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovupd		%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	subq	$4, %r10
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dsymv_add_nt_4_lib4, .-inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-
-
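The dsymv edge above handles the 4x4 diagonal block of a symmetric matrix of which only the lower triangle is stored: each column feeds its elements on and below the diagonal into the 't' accumulators and only the elements strictly below the diagonal into the 'n' update, so every entry of the symmetric block is counted exactly once. A scalar sketch (for dsymv, x_t and x_n are the same vector):

    /* Only the lower triangle of the 4x4 block (incl. diagonal) is read. */
    static void ref_dsymv_edge_4(const double *A, const double *x,
                                 double z_t[4], double *z_n) {
        for (int j = 0; j < 4; j++)
            for (int i = j; i < 4; i++) {
                double a = A[i + j * 4];     /* A(i,j), i >= j */
                z_t[j] += a * x[i];          /* on/below the diagonal       */
                if (i > j)
                    z_n[i] += a * x[j];      /* strictly below the diagonal */
            }
    }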
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10   <- kmax
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// r15   <- offA
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- kmax-4
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// r15   <- offA
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_DSYMV_ADD_NT_4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dsymv_add_nt_4_gen_lib4, @function
-inner_edge_dsymv_add_nt_4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dsymv_add_nt_4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dsymv_add_nt_4_gen_lib4; .scl 2; .type 32; .endef
-inner_edge_dsymv_add_nt_4_gen_lib4:
-#endif
-#endif
-
-	movl	$4, %eax
-	cmpl	%eax, %r10d
-	jge		0f
-	movl	%r10d, %eax
-0:
-	subl	%r15d, %eax
-
-	vcvtsi2sd	%eax, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm11
-
-	vmaskmovpd	0(%r13), %ymm11, %ymm12
-	vmaskmovpd	0(%r14), %ymm11, %ymm13
-
-	vmaskmovpd	0(%r11), %ymm11, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm6, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovpd	32(%r11), %ymm11, %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm7, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovpd	64(%r11), %ymm11, %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm8, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-
-	vmaskmovpd	96(%r11), %ymm11, %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-//	vxorpd		%ymm15, %ymm15, %ymm15
-//	vblendpd	$0x0, %ymm14, %ymm15, %ymm14
-//	vmulpd		%ymm14, %ymm9, %ymm15
-//	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovpd	%ymm13, %ymm11, 0(%r14)
-
-	subl	%eax, %r10d
-
-	salq	$3, %rax // *sizeof(double)
-	addq	%rax, %r11
-	subq	$32, %r11
-	addq	%r12, %r11
-	addq	%rax, %r13
-	addq	%rax, %r14
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dsymv_add_nt_4_gen_lib4, .-inner_edge_dsymv_add_nt_4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n
-//
-// input arguments:
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z0 z1 z2 z3]_b
-// ymm2 <- [z0 z1 z2 z3]_c
-// ymm3 <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_lib4, @function
-inner_blend_n_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_lib4; .scl 2; .type 32; .endef
-inner_blend_n_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm1, %ymm0
-	vaddpd	%ymm2, %ymm3, %ymm2
-	vaddpd	%ymm0, %ymm2, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_lib4, .-inner_blend_n_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t
-//
-// input arguments:
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_lib4, @function
-inner_blend_t_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_lib4; .scl 2; .type 32; .endef
-inner_blend_t_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_lib4, .-inner_blend_t_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z0 z1 z2 z3]_b
-// ymm2 <- [z0 z1 z2 z3]_c
-// ymm3 <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_AB_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_ab_4_lib4, @function
-inner_blend_n_scale_ab_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_ab_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_ab_4_lib4; .scl 2; .type 32; .endef
-inner_blend_n_scale_ab_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm1, %ymm0
-	vaddpd	%ymm2, %ymm3, %ymm2
-	vaddpd	%ymm0, %ymm2, %ymm0
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_ab_4_lib4, .-inner_blend_n_scale_ab_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for alpha=-1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z0 z1 z2 z3]_b
-// ymm2 <- [z0 z1 z2 z3]_c
-// ymm3 <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_M11_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_m11_4_lib4, @function
-inner_blend_n_scale_m11_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_m11_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_m11_4_lib4; .scl 2; .type 32; .endef
-inner_blend_n_scale_m11_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm1, %ymm0
-	vaddpd	%ymm2, %ymm3, %ymm2
-	vaddpd	%ymm0, %ymm2, %ymm0
-
-	// beta
-	vmovupd		0(%r10), %ymm14
-	vsubpd		%ymm0, %ymm14, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_m11_4_lib4, .-inner_blend_n_scale_m11_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_4_lib4, @function
-inner_blend_t_scale_ab_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_4_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_4_lib4, .-inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta=1.0
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_a1_4_lib4, @function
-inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_a1_4_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_a1_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-
-	// beta
-	vmovupd		0(%r11), %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_a1_4_lib4, .-inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for alpha=-1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_M11_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_m11_4_lib4, @function
-inner_blend_t_scale_m11_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_m11_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_m11_4_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_m11_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-	vmovupd		0(%r10), %ymm14
-	vsubpd		%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_m11_4_lib4, .-inner_blend_t_scale_m11_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LN_INV_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_ln_inv_4_lib4, @function
-inner_edge_dtrsv_ln_inv_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_ln_inv_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_ln_inv_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_ln_inv_4_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-
-	vmovapd			0(%r10), %ymm13
-	vblendpd		$0x1, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm0, %ymm0
-
-	vmovapd			32(%r10), %ymm13
-	vblendpd		$0x3, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x3, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x4, %ymm1, %ymm0, %ymm0
-
-	vmovapd			64(%r10), %ymm13
-	vblendpd		$0x7, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x8, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_ln_inv_4_lib4, .-inner_edge_dtrsv_ln_inv_4_lib4
-#endif
-#endif
-
-
-
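inner_edge_dtrsv_ln_inv_4_lib4 above is a column-oriented forward substitution on one 4x4 lower-triangular block, with the divisions replaced by multiplications by the precomputed reciprocals in inv_diag_E. The scalar recurrence it implements, as I read it:

    /* On entry z holds the right-hand side, on exit the solution of E*z = rhs
       (E lower triangular, inv_diag_E[i] = 1.0 / E(i,i)). */
    static void ref_dtrsv_ln_inv_4(const double *E, const double *inv_diag_E,
                                   double *z) {
        for (int j = 0; j < 4; j++) {
            z[j] *= inv_diag_E[j];
            for (int i = j + 1; i < 4; i++)
                z[i] -= E[i + j * 4] * z[j];  /* eliminate column j below the diagonal */
        }
    }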
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS, variable size version
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12d <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12d <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LN_INV_4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_ln_inv_4_vs_lib4, @function
-inner_edge_dtrsv_ln_inv_4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_ln_inv_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_ln_inv_4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_ln_inv_4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-	vmovapd			0(%r10), %ymm13
-	vblendpd		$0x1, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	cmpl			$2, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm0, %ymm0
-	vmovapd			32(%r10), %ymm13
-	vblendpd		$0x3, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x3, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	cmpl			$3, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x4, %ymm1, %ymm0, %ymm0
-	vmovapd			64(%r10), %ymm13
-	vblendpd		$0x7, %ymm14, %ymm13, %ymm13
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm13, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-	cmpl			$4, %r12d
-	jl				0f // ret
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm1
-	vblendpd		$0x8, %ymm1, %ymm0, %ymm0
-
-	// return
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_ln_inv_4_vs_lib4, .-inner_edge_dtrsv_ln_inv_4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LT_INV_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_lt_inv_4_lib4, @function
-inner_edge_dtrsv_lt_inv_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_lt_inv_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_lt_inv_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_lt_inv_4_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			16(%r10), %xmm12
-	vmovapd			48(%r10), %xmm13
-	vunpcklpd		%xmm13, %xmm12, %xmm9
-	vblendpd		$0xc, %ymm14, %ymm9, %ymm9
-	vunpckhpd		%xmm13, %xmm12, %xmm10
-	vmovsd			8(%r10), %xmm8
-	vblendpd		$0xe, %ymm14, %ymm8, %ymm8
-	vmovsd			88(%r10), %xmm11
-	vinsertf128		$0x1, %xmm11, %ymm10, %ymm10
-	vblendpd		$0x8, %ymm14, %ymm10, %ymm10
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x8, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0xf, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x4, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x3, %ymm0, %ymm12
-//	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-//	vbroadcastsd	8(%r11), %ymm12
-	vmovsd			0(%r11), %xmm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_lt_inv_4_lib4, .-inner_edge_dtrsv_lt_inv_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LT_INV_3_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_lt_inv_3_lib4, @function
-inner_edge_dtrsv_lt_inv_3_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_lt_inv_3_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_lt_inv_3_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_lt_inv_3_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			16(%r10), %xmm12
-	vmovapd			48(%r10), %xmm13
-	vunpcklpd		%xmm13, %xmm12, %xmm9
-	vblendpd		$0xc, %ymm14, %ymm9, %ymm9
-	vunpckhpd		%xmm13, %xmm12, %xmm10
-	vmovsd			8(%r10), %xmm8
-	vblendpd		$0xe, %ymm14, %ymm8, %ymm8
-	vmovsd			88(%r10), %xmm11
-	vinsertf128		$0x1, %xmm11, %ymm10, %ymm10
-	vblendpd		$0x8, %ymm14, %ymm10, %ymm10
-
-//	vbroadcastsd	24(%r11), %ymm12
-//	vmulpd			%ymm12, %ymm0, %ymm1
-//	vblendpd		$0x8, %ymm1, %ymm0, %ymm0
-
-	vmovupd			0(%r13), %ymm12
-	vblendpd		$0x8, %ymm12, %ymm0, %ymm0
-	
-	cmpl			$4, %r12d
-	jl				0f
-
-	vpermilpd		$0xf, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-0:
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x4, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x3, %ymm0, %ymm12
-//	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-//	vbroadcastsd	8(%r11), %ymm12
-	vmovsd			0(%r11), %xmm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_lt_inv_3_lib4, .-inner_edge_dtrsv_lt_inv_3_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LT_INV_2_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_lt_inv_2_lib4, @function
-inner_edge_dtrsv_lt_inv_2_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_lt_inv_2_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_lt_inv_2_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_lt_inv_2_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	cmpl			$3, %r12d
-
-	vmovapd			16(%r10), %xmm12
-	vmovapd			48(%r10), %xmm13
-	vunpcklpd		%xmm13, %xmm12, %xmm9
-	vblendpd		$0xc, %ymm14, %ymm9, %ymm9
-	vunpckhpd		%xmm13, %xmm12, %xmm10
-	vmovsd			8(%r10), %xmm8
-	vblendpd		$0xe, %ymm14, %ymm8, %ymm8
-//	vmovsd			88(%r10), %xmm11
-//	vinsertf128		$0x1, %xmm11, %ymm10, %ymm10
-//	vblendpd		$0x8, %ymm14, %ymm10, %ymm10
-	vblendpd		$0xc, %ymm14, %ymm10, %ymm10
-
-//	vbroadcastsd	24(%r11), %ymm12
-//	vmulpd			%ymm12, %ymm0, %ymm1
-//	vblendpd		$0x8, %ymm1, %ymm0, %ymm0
-
-	vmovupd			0(%r13), %ymm12
-	vblendpd		$0xc, %ymm12, %ymm0, %ymm0
-	
-	je				0f
-	jl				1f
-
-	vpermilpd		$0xf, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-0:
-
-//	vbroadcastsd	16(%r11), %ymm12
-//	vmulpd			%ymm12, %ymm0, %ymm1
-//	vblendpd		$0x4, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-1:
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm0, %ymm0
-
-	vpermilpd		$0x3, %ymm0, %ymm12
-//	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-//	vbroadcastsd	8(%r11), %ymm12
-
-	vmovsd			0(%r11), %xmm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_lt_inv_2_lib4, .-inner_edge_dtrsv_lt_inv_2_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope 
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- k
-// r13  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSV_LT_INV_1_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsv_lt_inv_1_lib4, @function
-inner_edge_dtrsv_lt_inv_1_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsv_lt_inv_1_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsv_lt_inv_1_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsv_lt_inv_1_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovupd			0(%r13), %ymm12
-	vblendpd		$0xe, %ymm12, %ymm0, %ymm0
-	
-	cmpl			$3, %r12d
-	je				0f
-
-	cmpl			$2, %r12d
-	je				1f
-	jl				2f
-
-	vmovsd			24(%r10), %xmm10
-	vblendpd		$0xe, %ymm14, %ymm10, %ymm10
-	vpermilpd		$0xf, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm10, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-0:
-
-	vmovsd			16(%r10), %xmm9
-	vblendpd		$0xe, %ymm14, %ymm9, %ymm9
-	vpermilpd		$0x0, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm9, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-
-1:
-
-	vmovsd			8(%r10), %xmm8
-	vblendpd		$0xe, %ymm14, %ymm8, %ymm8
-	vpermilpd		$0x3, %ymm0, %ymm12
-//	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vsubpd			%ymm15, %ymm0, %ymm0
-//	vbroadcastsd	8(%r11), %ymm12
-
-2:
-
-	vmovsd			0(%r11), %xmm12
-	vmulpd			%ymm12, %ymm0, %ymm1
-	vblendpd		$0x1, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsv_lt_inv_1_lib4, .-inner_edge_dtrsv_lt_inv_1_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- x
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k-4
-// r11   <- A+4*4*sizeof(double)
-// r12   <- x+4*sizeof(double)
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMV_UN_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmv_un_4_lib4, @function
-inner_edge_dtrmv_un_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmv_un_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmv_un_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmv_un_4_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r11), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	
-	subl			$4, %r10d
-
-	vmovapd			32(%r11), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	
-	vmovapd			64(%r11), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	
-	addq			$128, %r11
-	addq			$32, %r12
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmv_un_4_lib4, .-inner_edge_dtrmv_un_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_DTRMV_UT_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dtrmv_ut_4_lib4, @function
-inner_kernel_dtrmv_ut_4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dtrmv_ut_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dtrmv_ut_4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dtrmv_ut_4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jle		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	
-	addq	%r12, %r11
-	addq	$32, %r13
-	
-	cmpl	$4, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-//	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-//	vmovupd		LC02(%rip), %ymm13
-#endif
-//	vmovddup	%xmm14, %xmm14
-//	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-//	vsubpd		%ymm14, %ymm13, %ymm14
-//
-//	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmovupd		0(%r13), %ymm12
-
-	vxorpd		%ymm14, %ymm14, %ymm14
-
-	vmovapd		0(%r11), %ymm8
-	vblendpd	$0x1, %ymm8, %ymm14, %ymm8
-	vmulpd		%ymm8, %ymm12, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	
-	vmovapd	32(%r11), %ymm8
-	vblendpd	$0x3, %ymm8, %ymm14, %ymm8
-	vmulpd		%ymm8, %ymm12, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	
-	vmovapd		64(%r11), %ymm8
-	vblendpd	$0x7, %ymm8, %ymm14, %ymm8
-	vmulpd		%ymm8, %ymm12, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-
-	vmovapd		96(%r11), %ymm8
-	vmulpd		%ymm8, %ymm12, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-		
-	sall		$3, %r10d
-//	movslq		%r10d, %r10
-	addq		%r10, %r11
-	addq		%r10, %r13
-	xorl		%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dtrmv_ut_4_lib4, .-inner_kernel_dtrmv_ut_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_lib4, @function
-inner_store_4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_lib4; .scl 2; .type 32; .endef
-inner_store_4_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0,  0(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_lib4, .-inner_store_4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_vs_lib4, @function
-inner_store_4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_vs_lib4, .-inner_store_4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store gen
-//
-// input arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_gen_lib4, @function
-inner_store_4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r11d, %xmm14, %xmm14
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm12, %ymm15
-	vandpd		%ymm14, %ymm15, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_gen_lib4, .-inner_store_4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                            1      2              3          4          5             6          7
-// void kernel_dgemv_n_4_lib4(int k, double *alpha, double *A, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_4_lib4
-	.type kernel_dgemv_n_4_lib4, @function
-kernel_dgemv_n_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_4_lib4
-_kernel_dgemv_n_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_4_lib4
-	.def kernel_dgemv_n_4_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_4_lib4, .-kernel_dgemv_n_4_lib4
-#endif
-
-
-
-
-
-//                               1      2              3          4          5             6          7          8
-// void kernel_dgemv_n_4_vs_lib4(int k, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_4_vs_lib4
-	.type kernel_dgemv_n_4_vs_lib4, @function
-kernel_dgemv_n_4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_4_vs_lib4
-_kernel_dgemv_n_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_4_vs_lib4
-	.def kernel_dgemv_n_4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG8, %r11 // k1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_4_vs_lib4, .-kernel_dgemv_n_4_vs_lib4
-#endif
-
-
-
-
-
-//                                1      2              3          4          5             6          7          8       9
-// void kernel_dgemv_n_4_gen_lib4(int k, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k0, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_4_gen_lib4
-	.type kernel_dgemv_n_4_gen_lib4, @function
-kernel_dgemv_n_4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_4_gen_lib4
-_kernel_dgemv_n_4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_4_gen_lib4
-	.def kernel_dgemv_n_4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG8, %r11 // k0 
-	movq	ARG9, %r12 // k1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_4_gen_lib4, .-kernel_dgemv_n_4_gen_lib4
-#endif
-
-
-
-
-
-//                            1      2              3          4        5          6             7         8
-// void kernel_dgemv_t_4_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_4_lib4
-	.type kernel_dgemv_t_4_lib4, @function
-kernel_dgemv_t_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_4_lib4
-_kernel_dgemv_t_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_4_lib4
-	.def kernel_dgemv_t_4_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_4_lib4, .-kernel_dgemv_t_4_lib4
-#endif
-
-
-
-
-
-//                               1      2              3          4        5          6             7         8           9
-// void kernel_dgemv_t_4_vs_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_4_vs_lib4
-	.type kernel_dgemv_t_4_vs_lib4, @function
-kernel_dgemv_t_4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_4_vs_lib4
-_kernel_dgemv_t_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_4_vs_lib4
-	.def kernel_dgemv_t_4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-	movq	ARG9, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_4_vs_lib4, .-kernel_dgemv_t_4_vs_lib4
-#endif
-
-
-
-
-
-//                                1      2              3         4          5        6          7             8          9          10
-// void kernel_dgemv_t_4_gen_lib4(int k, double *alpha, int offA, double *A, int sda, double *x, double *beta, double *y, double *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_4_gen_lib4
-	.type kernel_dgemv_t_4_gen_lib4, @function
-kernel_dgemv_t_4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_4_gen_lib4
-_kernel_dgemv_t_4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_4_gen_lib4
-	.def kernel_dgemv_t_4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG6, %r13  // x
-	movq	ARG3, %r14 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemv_add_t_4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // beta
-	movq	ARG8, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG9, %r10 // z 
-	movq	ARG10, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_4_gen_lib4, .-kernel_dgemv_t_4_gen_lib4
-#endif
-
-
-
-
-
-//                                 1      2          3                   4          5          6
-// void kernel_dtrsv_ln_inv_4_lib4(int k, double *A, double *inv_diag_A, double *x, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_ln_inv_4_lib4
-	.type kernel_dtrsv_ln_inv_4_lib4, @function
-kernel_dtrsv_ln_inv_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_ln_inv_4_lib4
-_kernel_dtrsv_ln_inv_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_ln_inv_4_lib4
-	.def kernel_dtrsv_ln_inv_4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_ln_inv_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-	movq	%r11, %r13 // A+k*sizeof(double)
-
-
-	// call inner blender n
-
-	movq	ARG5, %r10   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	%r13, %r10 // A+k*sizeof(double)
-	movq	ARG3, %r11 // inv_diag_A
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LN_INV_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_ln_inv_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_ln_inv_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_ln_inv_4_lib4, .-kernel_dtrsv_ln_inv_4_lib4
-#endif
-
-
-
-
-
-//                                    1      2          3                   4          5          6          7       8
-// void kernel_dtrsv_ln_inv_4_vs_lib4(int k, double *A, double *inv_diag_A, double *x, double *y, double *z, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_ln_inv_4_vs_lib4
-	.type kernel_dtrsv_ln_inv_4_vs_lib4, @function
-kernel_dtrsv_ln_inv_4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_ln_inv_4_vs_lib4
-_kernel_dtrsv_ln_inv_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_ln_inv_4_vs_lib4
-	.def kernel_dtrsv_ln_inv_4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_ln_inv_4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-	movq	%r11, %r13
-
-
-	// call inner blender n
-
-	movq	ARG5, %r10   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	%r13, %r10 // A+k*sizeof(double)
-	movq	ARG3, %r11 // inv_diag_A
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LN_INV_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_ln_inv_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_ln_inv_4_vs_lib4
-#endif
-#endif
-
-
-	// store vs
-
-	movq	ARG6, %r10 // z 
-	movq	ARG7, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_ln_inv_4_vs_lib4, .-kernel_dtrsv_ln_inv_4_vs_lib4
-#endif
-
-
-
-
-
-//                                 1      2          3        4                   5          6          7
-// void kernel_dtrsv_lt_inv_4_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_lt_inv_4_lib4
-	.type kernel_dtrsv_lt_inv_4_lib4, @function
-kernel_dtrsv_lt_inv_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_lt_inv_4_lib4
-_kernel_dtrsv_lt_inv_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_lt_inv_4_lib4
-	.def kernel_dtrsv_lt_inv_4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_lt_inv_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	addq	%r12, %r11 // A+4*sda*sizeof(double)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+4 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LT_INV_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_lt_inv_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_lt_inv_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_lt_inv_4_lib4, .-kernel_dtrsv_lt_inv_4_lib4
-#endif
-
-
-
-
-
-//                                 rdi    rsi        rdx      rcx                 r8         r9         rsp+8   
-// void kernel_dtrsv_lt_inv_3_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_lt_inv_3_lib4
-	.type kernel_dtrsv_lt_inv_3_lib4, @function
-kernel_dtrsv_lt_inv_3_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_lt_inv_3_lib4
-_kernel_dtrsv_lt_inv_3_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_lt_inv_3_lib4
-	.def kernel_dtrsv_lt_inv_3_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_lt_inv_3_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	addq	%r12, %r11 // A+4*sda*sizeof(double)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+4 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-	movq	ARG1, %r12 // k
-	movq	ARG5, %r13 // x
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LT_INV_3_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_lt_inv_3_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_lt_inv_3_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	$3, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_lt_inv_3_lib4, .-kernel_dtrsv_lt_inv_3_lib4
-#endif
-
-
-
-
-
-//                                 rdi    rsi        rdx      rcx                 r8         r9         rsp+8 
-// void kernel_dtrsv_lt_inv_2_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_lt_inv_2_lib4
-	.type kernel_dtrsv_lt_inv_2_lib4, @function
-kernel_dtrsv_lt_inv_2_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_lt_inv_2_lib4
-_kernel_dtrsv_lt_inv_2_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_lt_inv_2_lib4
-	.def kernel_dtrsv_lt_inv_2_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_lt_inv_2_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movslq	%r12d, %r12
-	addq	%r12, %r11 // A+4*sda*sizeof(double)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+4 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-	movq	ARG1, %r12 // k
-	movq	ARG5, %r13 // x
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LT_INV_2_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_lt_inv_2_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_lt_inv_2_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	$2, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_lt_inv_2_lib4, .-kernel_dtrsv_lt_inv_2_lib4
-#endif
-
-
-
-
-
-//                                 rdi    rsi        rdx      rcx                 r8         r9         rsp+8 
-// void kernel_dtrsv_lt_inv_1_lib4(int k, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsv_lt_inv_1_lib4
-	.type kernel_dtrsv_lt_inv_1_lib4, @function
-kernel_dtrsv_lt_inv_1_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsv_lt_inv_1_lib4
-_kernel_dtrsv_lt_inv_1_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsv_lt_inv_1_lib4
-	.def kernel_dtrsv_lt_inv_1_lib4; .scl 2; .type 32; .endef
-kernel_dtrsv_lt_inv_1_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movslq	%r12d, %r12
-	addq	%r12, %r11 // A+4*sda*sizeof(double)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+4 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_4_lib4
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_4_lib4
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-	movq	ARG1, %r12 // k
-	movq	ARG5, %r13 // x
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSV_LT_INV_1_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsv_lt_inv_1_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsv_lt_inv_1_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	$1, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsv_lt_inv_1_lib4, .-kernel_dtrsv_lt_inv_1_lib4
-#endif
-
-
-
-
-
-//                            rdi    rsi        rdx        rcx
-// void kernel_dtrmv_un_4_lib4(int k, double *A, double *x, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmv_un_4_lib4
-	.type kernel_dtrmv_un_4_lib4, @function
-kernel_dtrmv_un_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmv_un_4_lib4
-_kernel_dtrmv_un_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmv_un_4_lib4
-	.def kernel_dtrmv_un_4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmv_un_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dtrmv edge & dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // x
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMV_UN_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmv_un_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmv_un_4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_4_lib4
-#endif
-#endif
-
-
-	// call inner blend n
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG4, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmv_un_4_lib4, .-kernel_dtrmv_un_4_lib4
-#endif
-
-
-
-
-
-//                             rdi    rsi        rdx      rcx        r8
-// void kernel_dtrmv_ut_4_lib4(int k, double *A, int sda, double *x, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmv_ut_4_lib4
-	.type kernel_dtrmv_ut_4_lib4, @function
-kernel_dtrmv_ut_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmv_ut_4_lib4
-_kernel_dtrmv_ut_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmv_ut_4_lib4
-	.def kernel_dtrmv_ut_4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmv_ut_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dtrmv kernel ut
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movslq	%r12d, %r12
-	movq	ARG4, %r13  // x
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_DTRMV_UT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dtrmv_ut_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dtrmv_ut_4_lib4
-#endif
-#endif
-
-
-	// call inner blend t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmv_ut_4_lib4, .-kernel_dtrmv_ut_4_lib4
-#endif
-
-
-
-
-
-//                                rdi    rsi        rdx      rcx        r8         r9
-// void kernel_dtrmv_ut_4_vs_lib4(int k, double *A, int sda, double *x, double *y, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmv_ut_4_vs_lib4
-	.type kernel_dtrmv_ut_4_vs_lib4, @function
-kernel_dtrmv_ut_4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmv_ut_4_vs_lib4
-_kernel_dtrmv_ut_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmv_ut_4_vs_lib4
-	.def kernel_dtrmv_ut_4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmv_ut_4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dtrmv kernel ut
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movslq	%r12d, %r12
-	movq	ARG4, %r13  // x
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_DTRMV_UT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dtrmv_ut_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dtrmv_ut_4_lib4
-#endif
-#endif
-
-
-	// call inner blend t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // z 
-	movq	ARG6, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmv_ut_4_vs_lib4, .-kernel_dtrmv_ut_4_vs_lib4
-#endif
-
-
-
-
-
-//                             1      2                3                4          5        6            7            8               9            10           11
-// void kernel_dgemv_nt_4_lib4(int k, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_nt_4_lib4
-	.type kernel_dgemv_nt_4_lib4, @function
-kernel_dgemv_nt_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_nt_4_lib4
-_kernel_dgemv_nt_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_nt_4_lib4
-	.def kernel_dgemv_nt_4_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_nt_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-
-
-	// inner kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// inner blend t scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_nt_4_lib4, .-kernel_dgemv_nt_4_lib4
-#endif
-
-
-
-
-
-//                                1      2                3                4          5        6            7            8               9            10           11           12
-// void kernel_dgemv_nt_4_vs_lib4(int k, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_nt_4_vs_lib4
-	.type kernel_dgemv_nt_4_vs_lib4, @function
-kernel_dgemv_nt_4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_nt_4_vs_lib4
-_kernel_dgemv_nt_4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_nt_4_vs_lib4
-	.def kernel_dgemv_nt_4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_nt_4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-	movq	ARG12, %r11 // km
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	cmpl	$2, %r11d
-	jl		0f
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	cmpl	$3, %r11d
-	jl		0f
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	je		0f
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-0:
-
-	// inner kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// inner blend t scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-	movq	ARG12, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_nt_4_vs_lib4, .-kernel_dgemv_nt_4_vs_lib4
-#endif
-
-
-
-
-
-//                            1      2              3          4        5           6
-// void kernel_dsymv_l_4_lib4(int k, double *alpha, double *A, int sda, double *x, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsymv_l_4_lib4
-	.type kernel_dsymv_l_4_lib4, @function
-kernel_dsymv_l_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsymv_l_4_lib4
-_kernel_dsymv_l_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsymv_l_4_lib4
-	.def kernel_dsymv_l_4_lib4; .scl 2; .type 32; .endef
-kernel_dsymv_l_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG5, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge dsymv & kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13  // x_t
-	movq	ARG6, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dsymv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// call inner blend t scale a1
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsymv_l_4_lib4, .-kernel_dsymv_l_4_lib4
-#endif
-
-
-
-
-
-//                                1      2              3         4          5        6           7          8
-// void kernel_dsymv_l_4_gen_lib4(int k, double *alpha, int offA, double *A, int sda, double *x, double *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsymv_l_4_gen_lib4
-	.type kernel_dsymv_l_4_gen_lib4, @function
-kernel_dsymv_l_4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsymv_l_4_gen_lib4
-_kernel_dsymv_l_4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsymv_l_4_gen_lib4
-	.def kernel_dsymv_l_4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dsymv_l_4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge dsymv & kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // x_t
-	movq	ARG7, %r14  // z_n
-	movq	ARG3, %r15 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_DSYMV_ADD_NT_4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dsymv_add_nt_4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dsymv_add_nt_4_gen_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// call inner blend t scale a1
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z_t 
-	movq	ARG8, %r11 // km
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsymv_l_4_gen_lib4, .-kernel_dsymv_l_4_gen_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgemv_8_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dgemv_8_lib4.S
deleted file mode 100644
index 53d371e..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgemv_8_lib4.S
+++ /dev/null
@@ -1,1575 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x+k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_n_8_lib4, @function
-inner_kernel_dgemv_add_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_n_8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_n_8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	cmpl	$4, %r10d
-
-	prefetcht0	0(%r11) // software prefetch
-	prefetcht0	0(%r15) // software prefetch
-	prefetcht0	64(%r11) // software prefetch
-	prefetcht0	64(%r15) // software prefetch
-
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	128(%r11) // software prefetch
-	prefetcht0	128(%r15) // software prefetch
-
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	0(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	subl	$4, %r10d
-
-	vbroadcastsd	8(%r13), %ymm12
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmovapd	32(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	
-	prefetcht0	192(%r11) // software prefetch
-	prefetcht0	192(%r15) // software prefetch
-
-	vbroadcastsd	16(%r13), %ymm12
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	64(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-
-	vbroadcastsd	24(%r13), %ymm12
-	addq	$32, %r13 // x+4
-	vmovapd	96(%r11), %ymm8
-	addq	$128, %r11 // A0+4*bs
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmovapd	96(%r15), %ymm8
-	addq	$128, %r15 // A1+4*bs
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmovapd	0(%r15), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	addq	$32, %r11
-	addq	$32, %r15
-	addq	$8, %r13
-	
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-
-	jg		0b // clean
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_n_8_lib4, .-inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_t_8_lib4, @function
-inner_kernel_dgemv_add_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_t_8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_t_8_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-
-	prefetcht0	0(%r11) // software prefetch
-	prefetcht0	64(%r11) // software prefetch
-	prefetcht0	128(%r11) // software prefetch
-	prefetcht0	192(%r11) // software prefetch
-
-	jl		0f // clean-up loop
-
-	movq	%r11, %r14
-	addq	%r12, %r14 // A+bs*sda
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r14) // software prefetch
-
-	vmovupd	0(%r13), %ymm12
-	addq	$32, %r13 // x+4
-
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	prefetcht0	64(%r14) // software prefetch
-
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-
-	prefetcht0	128(%r14) // software prefetch
-
-	vmovapd	128(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	
-	vmovapd	160(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	prefetcht0	192(%r14) // software prefetch
-
-	vmovapd	192(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	vmovapd	224(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-	
-//	addq	%r12, %r11 // A+bs*sda
-	movq	%r14, %r11 // A+bs*sda
-	addq	%r12, %r14 // A+bs*sda+bs*sda
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	
-	vmovapd	32(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-		
-	vmovapd	128(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	
-	vmovapd	160(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	
-	vmovapd	192(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm6, %ymm15, %ymm6
-
-	vmovapd	224(%r11), %ymm8
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm7, %ymm15, %ymm7
-
-	sall	$3, %r10d
-//	movslq	%r10d, %r10
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_t_8_lib4, .-inner_kernel_dgemv_add_t_8_lib4
-#endif
-#endif
-
-
-
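The clean-up path of the transposed kernel above builds a lane mask from the LC02 constant (lanes 0.5, 1.5, 2.5, 3.5) so that vmaskmovpd loads only the k1 remaining elements of x: lane l is enabled exactly when l + 0.5 - k1 is negative, i.e. l < k1. A minimal C intrinsics sketch of the same trick, assuming AVX and 0 < k1 < 4; the helper name is illustrative and not part of the library:

#include <immintrin.h>

// Illustrative sketch: enable lane l exactly when l < k1, then load only
// those doubles of x; the disabled lanes read as 0.0.
static inline __m256d masked_load_head(const double *x, int k1)
	{
	__m256d lane = _mm256_set_pd(3.5, 2.5, 1.5, 0.5); // lane l holds l + 0.5
	__m256d kk   = _mm256_set1_pd((double) k1);
	__m256d mask = _mm256_sub_pd(lane, kk);           // sign bit set iff l < k1
	return _mm256_maskload_pd(x, _mm256_castpd_si256(mask));
	}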
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k-8
-// r11   <- A+4*8*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x+8*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMV_UN_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmv_un_8_lib4, @function
-inner_edge_dtrmv_un_8_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmv_un_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmv_un_8_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmv_un_8_lib4:
-#endif
-#endif
-	
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	// first 4 columns
-	vmovapd			0(%r11), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	
-	subl			$4, %r10d
-
-	vmovapd			32(%r11), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	8(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	
-	vmovapd			64(%r11), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	16(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r13), %ymm12
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	
-	addq			$128, %r11
-	addq			$128, %r15
-	addq			$32, %r13
-
-
-
-	// last 4 columns
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r15), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	
-	subl			$4, %r10d
-
-	vbroadcastsd	8(%r13), %ymm12
-	vmovapd			32(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vmovapd			32(%r15), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	
-	vbroadcastsd	16(%r13), %ymm12
-	vmovapd			64(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			64(%r15), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-
-	vbroadcastsd	24(%r13), %ymm12
-	vmovapd			96(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm2, %ymm15, %ymm2
-	vmovapd			96(%r15), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm3, %ymm15, %ymm3
-	
-	addq			$128, %r11
-	addq			$128, %r15
-	addq			$32, %r13
-	
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmv_un_8_lib4, .-inner_edge_dtrmv_un_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n
-//
-// input arguments:
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_8_lib4, @function
-inner_blend_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_8_lib4; .scl 2; .type 32; .endef
-inner_blend_n_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_8_lib4, .-inner_blend_n_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t
-//
-// input arguments:
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_8_lib4, @function
-inner_blend_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_8_lib4; .scl 2; .type 32; .endef
-inner_blend_t_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_8_lib4, .-inner_blend_t_8_lib4
-#endif
-#endif
-
-
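The vhaddpd/vperm2f128 sequence above is a 4-way horizontal reduction: it collapses four per-column accumulators into one packed result vector. An equivalent C intrinsics sketch (assuming AVX; it uses different permute immediates than the assembly but produces the same grouping):

#include <immintrin.h>

// Illustrative: given accumulators a0..a3 whose four lanes hold partial sums
// of z0..z3 respectively, return the packed vector [z0 z1 z2 z3].
static inline __m256d blend_t_reduce(__m256d a0, __m256d a1, __m256d a2, __m256d a3)
	{
	__m256d h01 = _mm256_hadd_pd(a0, a1);                 // [a0:01 a1:01 a0:23 a1:23]
	__m256d h23 = _mm256_hadd_pd(a2, a3);                 // [a2:01 a3:01 a2:23 a3:23]
	__m256d lo  = _mm256_permute2f128_pd(h01, h23, 0x20); // [a0:01 a1:01 a2:01 a3:01]
	__m256d hi  = _mm256_permute2f128_pd(h01, h23, 0x31); // [a0:23 a1:23 a2:23 a3:23]
	return _mm256_add_pd(lo, hi);                         // [z0 z1 z2 z3]
	}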
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_AB_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_ab_8_lib4, @function
-inner_blend_n_scale_ab_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_ab_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_ab_8_lib4; .scl 2; .type 32; .endef
-inner_blend_n_scale_ab_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_ab_8_lib4, .-inner_blend_n_scale_ab_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_8_lib4, @function
-inner_blend_t_scale_ab_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_8_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vmulpd		%ymm15, %ymm14, %ymm14
-	vaddpd		%ymm1, %ymm14, %ymm1
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_8_lib4, .-inner_blend_t_scale_ab_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==n
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_n_8_lib4, @function
-inner_blender_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_blender_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_n_8_lib4; .scl 2; .type 32; .endef
-inner_blender_n_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_n_8_lib4, .-inner_blender_n_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==t
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_t_8_lib4, @function
-inner_blender_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_blender_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_t_8_lib4; .scl 2; .type 32; .endef
-inner_blender_t_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_t_8_lib4, .-inner_blender_t_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8_lib4, @function
-inner_store_8_lib4:
-#elif defined(OS_MAC)
-_inner_store_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8_lib4; .scl 2; .type 32; .endef
-inner_store_8_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0, 0(%r10)
-	vmovupd %ymm1, 32(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8_lib4, .-inner_store_8_lib4
-#endif
-#endif
-
-
-
-
-
-//                            rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_n_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_8_lib4
-	.type kernel_dgemv_n_8_lib4, @function
-kernel_dgemv_n_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_8_lib4
-_kernel_dgemv_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_8_lib4
-	.def kernel_dgemv_n_8_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_8_lib4
-#endif
-#endif
-
-
-
-	// store
-
-	movq	ARG8, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_8_lib4, .-kernel_dgemv_n_8_lib4
-#endif
-
-
-
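As a plain-C reference for what this kernel computes, a scalar sketch (illustrative only, assuming the 4-row panel-major layout the loads above imply, with panel stride 4*sda doubles):

// Illustrative scalar model of kernel_dgemv_n_8_lib4:
// z[i] = alpha * sum_{j<k} A(i,j)*x[j] + beta*y[i] for i = 0..7,
// where A(i,j) lives at A[(i%4) + (i/4)*4*sda + 4*j].
static void ref_dgemv_n_8(int k, double alpha, const double *A, int sda,
                          const double *x, double beta, const double *y, double *z)
	{
	int i, j;
	for(i=0; i<8; i++)
		{
		double acc = 0.0;
		for(j=0; j<k; j++)
			acc += A[(i%4) + (i/4)*4*sda + 4*j] * x[j];
		z[i] = alpha*acc + beta*y[i];
		}
	}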
-
-
-//                            rdi    rsi           rdx         rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_t_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_8_lib4
-	.type kernel_dgemv_t_8_lib4, @function
-kernel_dgemv_t_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_8_lib4
-_kernel_dgemv_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_8_lib4
-	.def kernel_dgemv_t_8_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_8_lib4
-#endif
-#endif
-
-
-	// call inner blend t scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_8_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_8_lib4, .-kernel_dgemv_t_8_lib4
-#endif
-
-
-
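The transposed kernel mirrors the previous sketch with the roles of rows and columns exchanged (illustrative, same panel-major layout assumed):

// Illustrative scalar model of kernel_dgemv_t_8_lib4:
// z[j] = alpha * sum_{i<k} A(i,j)*x[i] + beta*y[j] for j = 0..7.
static void ref_dgemv_t_8(int k, double alpha, const double *A, int sda,
                          const double *x, double beta, const double *y, double *z)
	{
	int i, j;
	for(j=0; j<8; j++)
		{
		double acc = 0.0;
		for(i=0; i<k; i++)
			acc += A[(i%4) + (i/4)*4*sda + 4*j] * x[i];
		z[j] = alpha*acc + beta*y[j];
		}
	}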
-
-
-//                             rdi    rsi        rdx      rcx        r8
-// void kernel_dtrmv_un_8_lib4(int k, double *A, int sda, double *x, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmv_un_8_lib4
-	.type kernel_dtrmv_un_8_lib4, @function
-kernel_dtrmv_un_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmv_un_8_lib4
-_kernel_dtrmv_un_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmv_un_8_lib4
-	.def kernel_dtrmv_un_8_lib4; .scl 2; .type 32; .endef
-kernel_dtrmv_un_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dtrmv edge & dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG4, %r13  // x
-
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMV_UN_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmv_un_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmv_un_8_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-	// call inner blend n
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_8_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmv_un_8_lib4, .-kernel_dtrmv_un_8_lib4
-#endif
-
-
-
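A scalar model of the triangular kernel above (illustrative; the edge routine masks out the strictly lower part of the leading 8x8 block via vblendpd, so only entries with j >= i contribute, with a non-unit diagonal taken from A):

// Illustrative scalar model of kernel_dtrmv_un_8_lib4:
// z[i] = sum_{i<=j<k} A(i,j)*x[j] for i = 0..7 (upper triangular, not transposed).
static void ref_dtrmv_un_8(int k, const double *A, int sda, const double *x, double *z)
	{
	int i, j;
	for(i=0; i<8; i++)
		{
		double acc = 0.0;
		for(j=i; j<k; j++)
			acc += A[(i%4) + (i/4)*4*sda + 4*j] * x[j];
		z[i] = acc;
		}
	}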
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
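Each constant above stores a double as two 32-bit words, low word first; for example 1071644672 = 0x3FE00000, and 0x3FE0000000000000 is the IEEE-754 bit pattern of 0.5. A quick stand-alone check in C:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
	{
	uint64_t bits = (uint64_t) 1071644672u << 32; // high word 0x3FE00000, low word 0
	double d;
	memcpy(&d, &bits, sizeof(d));
	printf("%f\n", d); // prints 0.500000
	return 0;
	}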
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgeqrf_4_lib4.c b/third_party/blasfeo/kernel/avx/kernel_dgeqrf_4_lib4.c
deleted file mode 100644
index a5faf20..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgeqrf_4_lib4.c
+++ /dev/null
@@ -1,2751 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-#include <stdio.h>
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-#include "../../include/blasfeo_d_kernel.h"
-
-
-
-void kernel_dgeqrf_4_lib4(int m, double *pD, int sdd, double *dD)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w1, w2, w3;
-	const int ps = 4;
-	// first column
-	beta = 0.0;
-	ii = 1;
-	if(m>1)
-		{
-		tmp = pD[1+ps*0];
-		beta += tmp*tmp;
-		if(m>2)
-			{
-			tmp = pD[2+ps*0];
-			beta += tmp*tmp;
-			if(m>3)
-				{
-				tmp = pD[3+ps*0];
-				beta += tmp*tmp;
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[0] = 0.0;
-		}
-	else
-		{
-		alpha = pD[0+ps*0];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau0
-		dD[0] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v0
-		pD[0+ps*0] = beta;
-		ii = 1;
-		if(m>1)
-			{
-			pD[1+ps*0] *= tmp;
-			if(m>2)
-				{
-				pD[2+ps*0] *= tmp;
-				if(m>3)
-					{
-					pD[3+ps*0] *= tmp;
-					}
-				}
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*0] *= tmp;
-			pD[1+ii*sdd+ps*0] *= tmp;
-			pD[2+ii*sdd+ps*0] *= tmp;
-			pD[3+ii*sdd+ps*0] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*0] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w1 = pD[0+ps*1];
-	w2 = pD[0+ps*2];
-	w3 = pD[0+ps*3];
-	if(m>1)
-		{
-		w1 += pD[1+ps*1] * pD[1+ps*0];
-		w2 += pD[1+ps*2] * pD[1+ps*0];
-		w3 += pD[1+ps*3] * pD[1+ps*0];
-		if(m>2)
-			{
-			w1 += pD[2+ps*1] * pD[2+ps*0];
-			w2 += pD[2+ps*2] * pD[2+ps*0];
-			w3 += pD[2+ps*3] * pD[2+ps*0];
-			if(m>3)
-				{
-				w1 += pD[3+ps*1] * pD[3+ps*0];
-				w2 += pD[3+ps*2] * pD[3+ps*0];
-				w3 += pD[3+ps*3] * pD[3+ps*0];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w1 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		w2 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		w1 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		w2 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		w1 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		w2 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		w1 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		w2 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w1 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		w2 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		}
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	pD[0+ps*1] += w1;
-	pD[0+ps*2] += w2;
-	pD[0+ps*3] += w3;
-	if(m>1)
-		{
-		pD[1+ps*1] += w1 * pD[1+ps*0];
-		pD[1+ps*2] += w2 * pD[1+ps*0];
-		pD[1+ps*3] += w3 * pD[1+ps*0];
-		if(m>2)
-			{
-			pD[2+ps*1] += w1 * pD[2+ps*0];
-			pD[2+ps*2] += w2 * pD[2+ps*0];
-			pD[2+ps*3] += w3 * pD[2+ps*0];
-			if(m>3)
-				{
-				pD[3+ps*1] += w1 * pD[3+ps*0];
-				pD[3+ps*2] += w2 * pD[3+ps*0];
-				pD[3+ps*3] += w3 * pD[3+ps*0];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*1] += w1 * pD[0+ii*sdd+ps*0];
-		pD[0+ii*sdd+ps*2] += w2 * pD[0+ii*sdd+ps*0];
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*1] += w1 * pD[1+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*2] += w2 * pD[1+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*1] += w1 * pD[2+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*2] += w2 * pD[2+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*1] += w1 * pD[3+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*2] += w2 * pD[3+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*0];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*1] += w1 * pD[ll+ii*sdd+ps*0];
-		pD[ll+ii*sdd+ps*2] += w2 * pD[ll+ii*sdd+ps*0];
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*0];
-		}
-	if(m==1)
-		return;
-	// second column
-	beta = 0.0;
-	if(m>2)
-		{
-		tmp = pD[2+ps*1];
-		beta += tmp*tmp;
-		if(m>3)
-			{
-			tmp = pD[3+ps*1];
-			beta += tmp*tmp;
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[1] = 0.0;
-		}
-	else
-		{
-		alpha = pD[1+ps*1];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau1
-		dD[1] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v1
-		pD[1+ps*1] = beta;
-		if(m>2)
-			{
-			pD[2+ps*1] *= tmp;
-			if(m>3)
-				{
-				pD[3+ps*1] *= tmp;
-				}
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*1] *= tmp;
-			pD[1+ii*sdd+ps*1] *= tmp;
-			pD[2+ii*sdd+ps*1] *= tmp;
-			pD[3+ii*sdd+ps*1] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*1] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w2 = pD[1+ps*2];
-	w3 = pD[1+ps*3];
-	if(m>2)
-		{
-		w2 += pD[2+ps*2] * pD[2+ps*1];
-		w3 += pD[2+ps*3] * pD[2+ps*1];
-		if(m>3)
-			{
-			w2 += pD[3+ps*2] * pD[3+ps*1];
-			w3 += pD[3+ps*3] * pD[3+ps*1];
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w2 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		w2 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		w2 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		w2 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w2 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		}
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	pD[1+ps*2] += w2;
-	pD[1+ps*3] += w3;
-	if(m>2)
-		{
-		pD[2+ps*2] += w2 * pD[2+ps*1];
-		pD[2+ps*3] += w3 * pD[2+ps*1];
-		if(m>3)
-			{
-			pD[3+ps*2] += w2 * pD[3+ps*1];
-			pD[3+ps*3] += w3 * pD[3+ps*1];
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*2] += w2 * pD[0+ii*sdd+ps*1];
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*1];
-		pD[1+ii*sdd+ps*2] += w2 * pD[1+ii*sdd+ps*1];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*1];
-		pD[2+ii*sdd+ps*2] += w2 * pD[2+ii*sdd+ps*1];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*1];
-		pD[3+ii*sdd+ps*2] += w2 * pD[3+ii*sdd+ps*1];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*1];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*2] += w2 * pD[ll+ii*sdd+ps*1];
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*1];
-		}
-	if(m==2)
-		return;
-	// third column
-	beta = 0.0;
-	if(m>3)
-		{
-		tmp = pD[3+ps*2];
-		beta += tmp*tmp;
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[2] = 0.0;
-		}
-	else
-		{
-		alpha = pD[2+ps*2];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau2
-		dD[2] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v2
-		pD[2+ps*2] = beta;
-		if(m>3)
-			{
-			pD[3+ps*2] *= tmp;
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*2] *= tmp;
-			pD[1+ii*sdd+ps*2] *= tmp;
-			pD[2+ii*sdd+ps*2] *= tmp;
-			pD[3+ii*sdd+ps*2] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*2] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w3 = pD[2+ps*3];
-	if(m>3)
-		{
-		w3 += pD[3+ps*3] * pD[3+ps*2];
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	w3 = - dD[2] * w3;
-	pD[2+ps*3] += w3;
-	if(m>3)
-		{
-		pD[3+ps*3] += w3 * pD[3+ps*2];
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*2];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*2];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*2];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*2];
-		}
-	if(m==3)
-		return;
-	// fourth column
-	beta = 0.0;
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[3] = 0.0;
-		}
-	else
-		{
-		alpha = pD[3+ps*3];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau3
-		dD[3] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v3
-		pD[3+ps*3] = beta;
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*3] *= tmp;
-			pD[1+ii*sdd+ps*3] *= tmp;
-			pD[2+ii*sdd+ps*3] *= tmp;
-			pD[3+ii*sdd+ps*3] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*3] *= tmp;
-			}
-		}
-	return;
-	}
-
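The unrolled per-column code above follows the standard Householder construction. A compact scalar sketch of one reflector generation, using the same conventions (tau stored in dD, the scaled vector overwriting the column below the diagonal, beta written to the diagonal); the function name is illustrative:

#include <math.h>

// Illustrative: for x[0..n-1], compute beta and tau such that applying
// H = I - tau*v*v^T (with v[0] = 1 implicit, v[1..] returned in x[1..])
// maps x to [beta, 0, ..., 0]^T.
static double ref_house(int n, double *x, double *tau)
	{
	int i;
	double alpha = x[0], sq = 0.0, beta, scal;
	for(i=1; i<n; i++)
		sq += x[i]*x[i];
	if(sq==0.0)
		{
		*tau = 0.0;
		return alpha;
		}
	beta = sqrt(alpha*alpha + sq);
	if(alpha>0)
		beta = -beta;
	*tau = (beta-alpha)/beta;
	scal = 1.0/(alpha-beta);
	for(i=1; i<n; i++)
		x[i] *= scal;
	return beta; // stored in place of the diagonal entry
	}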
-
-// unblocked algorithm
-void kernel_dgeqrf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk, ll, imax, jmax, jmax0, kmax, kmax0;
-	const int ps = 4;
-	imax = k;//m<n ? m : n;
-	double alpha, beta, tmp, w0;
-	double *pC00, *pC10, *pC01, *pC11;
-	int offset;
-	double *pD0 = pD-offD;
-	for(ii=0; ii<imax; ii++)
-		{
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		pC10 = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		jmax = m-ii-1;
-		jmax0 = (ps-((ii+1+offD)&(ps-1)))&(ps-1);
-		jmax0 = jmax<jmax0 ? jmax : jmax0;
-		offset = 0;
-		jj = 0;
-		if(jmax0>0)
-			{
-			for( ; jj<jmax0; jj++)
-				{
-				tmp = pC10[0+offset];
-				beta += tmp*tmp;
-				offset += 1;
-				}
-			offset += -ps+ps*sdd;
-			}
-		for( ; jj<jmax-3; jj+=4)
-			{
-			tmp = pC10[0+offset];
-			beta += tmp*tmp;
-			tmp = pC10[1+offset];
-			beta += tmp*tmp;
-			tmp = pC10[2+offset];
-			beta += tmp*tmp;
-			tmp = pC10[3+offset];
-			beta += tmp*tmp;
-			offset += ps*sdd;
-			}
-		for(ll=0; ll<jmax-jj; ll++)
-			{
-			tmp = pC10[0+offset];
-			beta += tmp*tmp;
-			offset += 1;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			offset = 0;
-			jj = 0;
-			if(jmax0>0)
-				{
-				for( ; jj<jmax0; jj++)
-					{
-					pC10[0+offset] *= tmp;
-					offset += 1;
-					}
-				offset += -ps+ps*sdd;
-				}
-			for( ; jj<jmax-3; jj+=4)
-				{
-				pC10[0+offset] *= tmp;
-				pC10[1+offset] *= tmp;
-				pC10[2+offset] *= tmp;
-				pC10[3+offset] *= tmp;
-				offset += ps*sdd;
-				}
-			for(ll=0; ll<jmax-jj; ll++)
-				{
-				pC10[0+offset] *= tmp;
-				offset += 1;
-				}
-			pC00[0] = beta;
-			}
-		if(ii<n)
-			{
-			pC01 = pC00 + ps;
-			pC11 = pC10 + ps;
-			kmax = jmax;
-			kmax0 = jmax0;
-			jmax = n-ii-1;
-			jj = 0;
-			for( ; jj<jmax; jj++)
-				{
-				w0 = pC01[0+ps*jj] * 1.0;
-				offset = 0;
-				kk = 0;
-				if(kmax0>0)
-					{
-					for( ; kk<kmax0; kk++)
-						{
-						w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-						offset += 1;
-						}
-					offset += -ps+ps*sdd;
-					}
-				for( ; kk<kmax-3; kk+=4)
-					{
-					w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-					w0 += pC11[1+offset+ps*jj] * pC10[1+offset];
-					w0 += pC11[2+offset+ps*jj] * pC10[2+offset];
-					w0 += pC11[3+offset+ps*jj] * pC10[3+offset];
-					offset += ps*sdd;
-					}
-				for(ll=0; ll<kmax-kk; ll++)
-					{
-					w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-					offset += 1;
-					}
-				w0 = - dD[ii] * w0;
-				pC01[0+ps*jj] += w0;
-				offset = 0;
-				kk = 0;
-				if(kmax0>0)
-					{
-					for( ; kk<kmax0; kk++)
-						{
-						pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-						offset += 1;
-						}
-					offset = offset-ps+ps*sdd;
-					}
-				for( ; kk<kmax-3; kk+=4)
-					{
-					pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-					pC11[1+offset+ps*jj] += w0 * pC10[1+offset];
-					pC11[2+offset+ps*jj] += w0 * pC10[2+offset];
-					pC11[3+offset+ps*jj] += w0 * pC10[3+offset];
-					offset += ps*sdd;
-					}
-				for(ll=0; ll<kmax-kk; ll++)
-					{
-					pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-					offset += 1;
-					}
-				}
-			}
-		}
-	return;
-	}
-
-
-
-void kernel_dlarf_4_lib4(int m, int n, double *pD, int sdd, double *dD, double *pC0, int sdc)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, ll;
-	const int ps = 4;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	double tmp, d0, d1, d2, d3;
-	double *pC;
-	double pT[16];// = {};
-	int ldt = 4;
-	double pW[8];// = {};
-	int ldw = 2;
-	// dot product of v
-	v10 = 0.0;
-	v20 = 0.0;
-	v30 = 0.0;
-	v21 = 0.0;
-	v31 = 0.0;
-	v32 = 0.0;
-	if(m>1)
-		{
-		v10 = 1.0 * pD[1+ps*0];
-		if(m>2)
-			{
-			v10 += pD[2+ps*1] * pD[2+ps*0];
-			v20 = 1.0 * pD[2+ps*0];
-			v21 = 1.0 * pD[2+ps*1];
-			if(m>3)
-				{
-				v10 += pD[3+ps*1] * pD[3+ps*0];
-				v20 += pD[3+ps*2] * pD[3+ps*0];
-				v21 += pD[3+ps*2] * pD[3+ps*1];
-				v30 = 1.0 * pD[3+ps*0];
-				v31 = 1.0 * pD[3+ps*1];
-				v32 = 1.0 * pD[3+ps*2];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		v10 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		v20 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		v21 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		v30 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		v31 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		v32 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		v10 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		v20 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		v21 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		v30 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		v31 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		v32 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		v10 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		v20 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		v21 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		v30 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		v31 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		v32 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		v10 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		v20 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		v21 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		v30 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		v31 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		v32 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		v10 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		v20 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		v21 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		v30 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		v31 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		v32 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	// compute lower triangular T containing tau for matrix update
-	pT[0+ldt*0] = dD[0];
-	pT[1+ldt*1] = dD[1];
-	pT[2+ldt*2] = dD[2];
-	pT[3+ldt*3] = dD[3];
-	pT[1+ldt*0] = - dD[1] * (v10*pT[0+ldt*0]);
-	pT[2+ldt*1] = - dD[2] * (v21*pT[1+ldt*1]);
-	pT[3+ldt*2] = - dD[3] * (v32*pT[2+ldt*2]);
-	pT[2+ldt*0] = - dD[2] * (v20*pT[0+ldt*0] + v21*pT[1+ldt*0]);
-	pT[3+ldt*1] = - dD[3] * (v31*pT[1+ldt*1] + v32*pT[2+ldt*1]);
-	pT[3+ldt*0] = - dD[3] * (v30*pT[0+ldt*0] + v31*pT[1+ldt*0] + v32*pT[2+ldt*0]);
-	// downgrade matrix
-	pW[0] = 0.0;
-	pW[1] = 0.0;
-	pW[2] = 0.0;
-	pW[3] = 0.0;
-	pW[4] = 0.0;
-	pW[5] = 0.0;
-	pW[6] = 0.0;
-	pW[7] = 0.0;
-	ii = 0;
-	for( ; ii<n-1; ii+=2)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		tmp = pC[0+ps*1];
-		pW[1+ldw*0] = tmp;
-		if(m>1)
-			{
-			d0 = pD[1+ps*0];
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] = tmp;
-			tmp = pC[1+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] = tmp;
-			if(m>2)
-				{
-				d0 = pD[2+ps*0];
-				d1 = pD[2+ps*1];
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += tmp * d0;
-				pW[0+ldw*1] += tmp * d1;
-				pW[0+ldw*2] = tmp;
-				tmp = pC[2+ps*1];
-				pW[1+ldw*0] += tmp * d0;
-				pW[1+ldw*1] += tmp * d1;
-				pW[1+ldw*2] = tmp;
-				if(m>3)
-					{
-					d0 = pD[3+ps*0];
-					d1 = pD[3+ps*1];
-					d2 = pD[3+ps*2];
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += tmp * d0;
-					pW[0+ldw*1] += tmp * d1;
-					pW[0+ldw*2] += tmp * d2;
-					pW[0+ldw*3] = tmp;
-					tmp = pC[3+ps*1];
-					pW[1+ldw*0] += tmp * d0;
-					pW[1+ldw*1] += tmp * d1;
-					pW[1+ldw*2] += tmp * d2;
-					pW[1+ldw*3] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pD[0+jj*sdd+ps*0];
-			d1 = pD[0+jj*sdd+ps*1];
-			d2 = pD[0+jj*sdd+ps*2];
-			d3 = pD[0+jj*sdd+ps*3];
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[0+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[1+jj*sdd+ps*0];
-			d1 = pD[1+jj*sdd+ps*1];
-			d2 = pD[1+jj*sdd+ps*2];
-			d3 = pD[1+jj*sdd+ps*3];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[1+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[2+jj*sdd+ps*0];
-			d1 = pD[2+jj*sdd+ps*1];
-			d2 = pD[2+jj*sdd+ps*2];
-			d3 = pD[2+jj*sdd+ps*3];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[2+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[3+jj*sdd+ps*0];
-			d1 = pD[3+jj*sdd+ps*1];
-			d2 = pD[3+jj*sdd+ps*2];
-			d3 = pD[3+jj*sdd+ps*3];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[3+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pD[ll+jj*sdd+ps*0];
-			d1 = pD[ll+jj*sdd+ps*1];
-			d2 = pD[ll+jj*sdd+ps*2];
-			d3 = pD[ll+jj*sdd+ps*3];
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[ll+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			}
-		// compute W^T *= T
-		pW[0+ldw*3] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[0+ldw*1] + pT[3+ldt*2]*pW[0+ldw*2] + pT[3+ldt*3]*pW[0+ldw*3];
-		pW[1+ldw*3] = pT[3+ldt*0]*pW[1+ldw*0] + pT[3+ldt*1]*pW[1+ldw*1] + pT[3+ldt*2]*pW[1+ldw*2] + pT[3+ldt*3]*pW[1+ldw*3];
-		pW[0+ldw*2] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[0+ldw*1] + pT[2+ldt*2]*pW[0+ldw*2];
-		pW[1+ldw*2] = pT[2+ldt*0]*pW[1+ldw*0] + pT[2+ldt*1]*pW[1+ldw*1] + pT[2+ldt*2]*pW[1+ldw*2];
-		pW[0+ldw*1] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[0+ldw*1];
-		pW[1+ldw*1] = pT[1+ldt*0]*pW[1+ldw*0] + pT[1+ldt*1]*pW[1+ldw*1];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		pW[1+ldw*0] = pT[0+ldt*0]*pW[1+ldw*0];
-		// compute C -= V * W^T
-		pC[0+ps*0] -= pW[0+ldw*0];
-		pC[0+ps*1] -= pW[1+ldw*0];
-		if(m>1)
-			{
-			pC[1+ps*0] -= pD[1+ps*0]*pW[0+ldw*0] + pW[0+ldw*1];
-			pC[1+ps*1] -= pD[1+ps*0]*pW[1+ldw*0] + pW[1+ldw*1];
-			if(m>2)
-				{
-				pC[2+ps*0] -= pD[2+ps*0]*pW[0+ldw*0] + pD[2+ps*1]*pW[0+ldw*1] + pW[0+ldw*2];
-				pC[2+ps*1] -= pD[2+ps*0]*pW[1+ldw*0] + pD[2+ps*1]*pW[1+ldw*1] + pW[1+ldw*2];
-				if(m>3)
-					{
-					pC[3+ps*0] -= pD[3+ps*0]*pW[0+ldw*0] + pD[3+ps*1]*pW[0+ldw*1] + pD[3+ps*2]*pW[0+ldw*2] + pW[0+ldw*3];
-					pC[3+ps*1] -= pD[3+ps*0]*pW[1+ldw*0] + pD[3+ps*1]*pW[1+ldw*1] + pD[3+ps*2]*pW[1+ldw*2] + pW[1+ldw*3];
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pD[0+jj*sdd+ps*0];
-			d1 = pD[0+jj*sdd+ps*1];
-			d2 = pD[0+jj*sdd+ps*2];
-			d3 = pD[0+jj*sdd+ps*3];
-			pC[0+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[0+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[1+jj*sdd+ps*0];
-			d1 = pD[1+jj*sdd+ps*1];
-			d2 = pD[1+jj*sdd+ps*2];
-			d3 = pD[1+jj*sdd+ps*3];
-			pC[1+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[1+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[2+jj*sdd+ps*0];
-			d1 = pD[2+jj*sdd+ps*1];
-			d2 = pD[2+jj*sdd+ps*2];
-			d3 = pD[2+jj*sdd+ps*3];
-			pC[2+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[2+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[3+jj*sdd+ps*0];
-			d1 = pD[3+jj*sdd+ps*1];
-			d2 = pD[3+jj*sdd+ps*2];
-			d3 = pD[3+jj*sdd+ps*3];
-			pC[3+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[3+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pD[ll+jj*sdd+ps*0];
-			d1 = pD[ll+jj*sdd+ps*1];
-			d2 = pD[ll+jj*sdd+ps*2];
-			d3 = pD[ll+jj*sdd+ps*3];
-			pC[ll+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[ll+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			}
-		}
-	for( ; ii<n; ii++)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		if(m>1)
-			{
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += tmp * pD[1+ps*0];
-			pW[0+ldw*1] = tmp;
-			if(m>2)
-				{
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += tmp * pD[2+ps*0];
-				pW[0+ldw*1] += tmp * pD[2+ps*1];
-				pW[0+ldw*2] = tmp;
-				if(m>3)
-					{
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += tmp * pD[3+ps*0];
-					pW[0+ldw*1] += tmp * pD[3+ps*1];
-					pW[0+ldw*2] += tmp * pD[3+ps*2];
-					pW[0+ldw*3] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[0+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[0+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[0+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[0+jj*sdd+ps*3];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[1+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[1+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[1+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[1+jj*sdd+ps*3];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[2+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[2+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[2+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[2+jj*sdd+ps*3];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[3+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[3+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[3+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[3+jj*sdd+ps*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[ll+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[ll+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[ll+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[ll+jj*sdd+ps*3];
-			}
-		// compute W^T *= T
-		pW[0+ldw*3] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[0+ldw*1] + pT[3+ldt*2]*pW[0+ldw*2] + pT[3+ldt*3]*pW[0+ldw*3];
-		pW[0+ldw*2] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[0+ldw*1] + pT[2+ldt*2]*pW[0+ldw*2];
-		pW[0+ldw*1] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[0+ldw*1];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		// compute C -= V * W^T
-		pC[0+ps*0] -= pW[0+ldw*0];
-		if(m>1)
-			{
-			pC[1+ps*0] -= pD[1+ps*0]*pW[0+ldw*0] + pW[0+ldw*1];
-			if(m>2)
-				{
-				pC[2+ps*0] -= pD[2+ps*0]*pW[0+ldw*0] + pD[2+ps*1]*pW[0+ldw*1] + pW[0+ldw*2];
-				if(m>3)
-					{
-					pC[3+ps*0] -= pD[3+ps*0]*pW[0+ldw*0] + pD[3+ps*1]*pW[0+ldw*1] + pD[3+ps*2]*pW[0+ldw*2] + pW[0+ldw*3];
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			pC[0+jj*sdc+ps*0] -= pD[0+jj*sdd+ps*0]*pW[0+ldw*0] + pD[0+jj*sdd+ps*1]*pW[0+ldw*1] + pD[0+jj*sdd+ps*2]*pW[0+ldw*2] + pD[0+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[1+jj*sdc+ps*0] -= pD[1+jj*sdd+ps*0]*pW[0+ldw*0] + pD[1+jj*sdd+ps*1]*pW[0+ldw*1] + pD[1+jj*sdd+ps*2]*pW[0+ldw*2] + pD[1+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[2+jj*sdc+ps*0] -= pD[2+jj*sdd+ps*0]*pW[0+ldw*0] + pD[2+jj*sdd+ps*1]*pW[0+ldw*1] + pD[2+jj*sdd+ps*2]*pW[0+ldw*2] + pD[2+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[3+jj*sdc+ps*0] -= pD[3+jj*sdd+ps*0]*pW[0+ldw*0] + pD[3+jj*sdd+ps*1]*pW[0+ldw*1] + pD[3+jj*sdd+ps*2]*pW[0+ldw*2] + pD[3+jj*sdd+ps*3]*pW[0+ldw*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			pC[ll+jj*sdc+ps*0] -= pD[ll+jj*sdd+ps*0]*pW[0+ldw*0] + pD[ll+jj*sdd+ps*1]*pW[0+ldw*1] + pD[ll+jj*sdd+ps*2]*pW[0+ldw*2] + pD[ll+jj*sdd+ps*3]*pW[0+ldw*3];
-			}
-		}
-
-	return;
-	}
-
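For orientation, the pT factor and the two passes over C in the routine above appear to implement the compact-WY application of the four reflectors at once: with V the m x 4 unit-lower-trapezoidal matrix of Householder vectors and T the 4 x 4 lower-triangular factor built from dD and the v dot products, the code forms W = C^T * V, scales it as W <- W * T^T, and then subtracts V * W^T from C, which amounts to

	C <- (I - V * T * V^T) * C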
-
-
-void kernel_dlarf_t_4_lib4(int m, int n, double *pD, int sdd, double *pVt, double *dD, double *pC0, int sdc, double *pW0)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, ll;
-	const int ps = 4;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	double c00, c01,
-	       c10, c11,
-	       c20, c21,
-	       c30, c31;
-	double a0, a1, a2, a3, b0, b1;
-	double tmp, d0, d1, d2, d3;
-	double *pC, *pW;
-	double pT[16];// = {};
-	int ldt = 4;
-	// dot product of v
-	v10 = 0.0;
-	v20 = 0.0;
-	v30 = 0.0;
-	v21 = 0.0;
-	v31 = 0.0;
-	v32 = 0.0;
-	if(m>1)
-		{
-		v10 = 1.0 * pD[1+ps*0];
-		if(m>2)
-			{
-			v10 += pD[2+ps*1] * pD[2+ps*0];
-			v20 = 1.0 * pD[2+ps*0];
-			v21 = 1.0 * pD[2+ps*1];
-			if(m>3)
-				{
-				v10 += pD[3+ps*1] * pD[3+ps*0];
-				v20 += pD[3+ps*2] * pD[3+ps*0];
-				v21 += pD[3+ps*2] * pD[3+ps*1];
-				v30 = 1.0 * pD[3+ps*0];
-				v31 = 1.0 * pD[3+ps*1];
-				v32 = 1.0 * pD[3+ps*2];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		v10 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		v20 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		v21 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		v30 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		v31 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		v32 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		v10 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		v20 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		v21 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		v30 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		v31 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		v32 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		v10 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		v20 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		v21 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		v30 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		v31 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		v32 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		v10 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		v20 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		v21 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		v30 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		v31 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		v32 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		v10 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		v20 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		v21 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		v30 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		v31 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		v32 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	// compute lower triangular T containing tau for matrix update
-	pT[0+ldt*0] = dD[0];
-	pT[1+ldt*1] = dD[1];
-	pT[2+ldt*2] = dD[2];
-	pT[3+ldt*3] = dD[3];
-	pT[1+ldt*0] = - dD[1] * (v10*pT[0+ldt*0]);
-	pT[2+ldt*1] = - dD[2] * (v21*pT[1+ldt*1]);
-	pT[3+ldt*2] = - dD[3] * (v32*pT[2+ldt*2]);
-	pT[2+ldt*0] = - dD[2] * (v20*pT[0+ldt*0] + v21*pT[1+ldt*0]);
-	pT[3+ldt*1] = - dD[3] * (v31*pT[1+ldt*1] + v32*pT[2+ldt*1]);
-	pT[3+ldt*0] = - dD[3] * (v30*pT[0+ldt*0] + v31*pT[1+ldt*0] + v32*pT[2+ldt*0]);
-	// downgrade matrix
-	__m256d
-		_w0, _w1, _w2, _w3, _d0, _t0, _tp, _c0, _c1, _c2, _c3, _a0, _b0, _tz;
-
-	ii = 0;
-#if 1
-	double alpha = 1.0;
-	double beta = 0.0;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for( ; ii<n-11; ii+=12)
-		{
-		kernel_dgemm_nn_4x12_lib4(m, &alpha, &pVt[0+ps*0], 0, &pC0[0+ps*ii], sdc, &beta, &pW0[0+ps*ii], &pW0[0+ps*ii]);
-		}
-#endif
-	for( ; ii<n-7; ii+=8)
-		{
-		kernel_dgemm_nn_4x8_lib4(m, &alpha, &pVt[0+ps*0], 0, &pC0[0+ps*ii], sdc, &beta, &pW0[0+ps*ii], &pW0[0+ps*ii]);
-		}
-	for( ; ii<n-3; ii+=4)
-		{
-		kernel_dgemm_nn_4x4_lib4(m, &alpha, &pVt[0+ps*0], 0, &pC0[0+ps*ii], sdc, &beta, &pW0[0+ps*ii], &pW0[0+ps*ii]);
-		}
-	if(ii<n)
-		{
-//		kernel_dgemm_nn_4x4_vs_lib4(m, &alpha, &pVt[0+ps*0], 0, &pC0[0+ps*ii], sdc, &beta, &pW0[0+ps*ii], &pW0[0+ps*ii], 4, n-ii);
-		kernel_dgemm_nn_4x4_gen_lib4(m, &alpha, &pVt[0+ps*0], 0, &pC0[0+ps*ii], sdc, &beta, 0, &pW0[0+ps*ii], 0, 0, &pW0[0+ps*ii], 0, 0, 4, 0, n-ii);
-		}
-#else
-	for( ; ii<n-3; ii+=4)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		_w0 = _mm256_setzero_pd();
-		_w1 = _mm256_setzero_pd();
-		_w2 = _mm256_setzero_pd();
-		_w3 = _mm256_setzero_pd();
-		for(jj=0; jj<m-3; jj+=4)
-			{
-			//
-			_d0 = _mm256_load_pd( &pVt[0+ps*(0+jj)] );
-			_t0 = _mm256_broadcast_sd( &pC[0+jj*sdc+ps*0] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w0 = _mm256_add_pd( _w0, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[0+jj*sdc+ps*1] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w1 = _mm256_add_pd( _w1, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[0+jj*sdc+ps*2] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w2 = _mm256_add_pd( _w2, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[0+jj*sdc+ps*3] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w3 = _mm256_add_pd( _w3, _tp );
-			//
-			_d0 = _mm256_load_pd( &pVt[0+ps*(1+jj)] );
-			_t0 = _mm256_broadcast_sd( &pC[1+jj*sdc+ps*0] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w0 = _mm256_add_pd( _w0, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[1+jj*sdc+ps*1] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w1 = _mm256_add_pd( _w1, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[1+jj*sdc+ps*2] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w2 = _mm256_add_pd( _w2, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[1+jj*sdc+ps*3] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w3 = _mm256_add_pd( _w3, _tp );
-			//
-			_d0 = _mm256_load_pd( &pVt[0+ps*(2+jj)] );
-			_t0 = _mm256_broadcast_sd( &pC[2+jj*sdc+ps*0] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w0 = _mm256_add_pd( _w0, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[2+jj*sdc+ps*1] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w1 = _mm256_add_pd( _w1, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[2+jj*sdc+ps*2] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w2 = _mm256_add_pd( _w2, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[2+jj*sdc+ps*3] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w3 = _mm256_add_pd( _w3, _tp );
-			//
-			_d0 = _mm256_load_pd( &pVt[0+ps*(3+jj)] );
-			_t0 = _mm256_broadcast_sd( &pC[3+jj*sdc+ps*0] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w0 = _mm256_add_pd( _w0, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[3+jj*sdc+ps*1] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w1 = _mm256_add_pd( _w1, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[3+jj*sdc+ps*2] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w2 = _mm256_add_pd( _w2, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[3+jj*sdc+ps*3] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w3 = _mm256_add_pd( _w3, _tp );
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			_d0 = _mm256_load_pd( &pVt[0+ps*(ll+jj)] );
-			_t0 = _mm256_broadcast_sd( &pC[ll+jj*sdc+ps*0] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w0 = _mm256_add_pd( _w0, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[ll+jj*sdc+ps*1] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w1 = _mm256_add_pd( _w1, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[ll+jj*sdc+ps*2] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w2 = _mm256_add_pd( _w2, _tp );
-			_t0 = _mm256_broadcast_sd( &pC[ll+jj*sdc+ps*3] );
-			_tp = _mm256_mul_pd( _d0, _t0 );
-			_w3 = _mm256_add_pd( _w3, _tp );
-			}
-		// TODO mask store
-		_mm256_storeu_pd( &pW[0+ps*0], _w0 );
-		_mm256_storeu_pd( &pW[0+ps*1], _w1 );
-		_mm256_storeu_pd( &pW[0+ps*2], _w2 );
-		_mm256_storeu_pd( &pW[0+ps*3], _w3 );
-		}
-	for( ; ii<n; ii++)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ps*0] = tmp;
-		if(m>1)
-			{
-			d0 = pVt[0+ps*1];
-			tmp = pC[1+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] = tmp;
-			if(m>2)
-				{
-				d0 = pVt[0+ps*2];
-				d1 = pVt[1+ps*2];
-				tmp = pC[2+ps*0];
-				pW[0+ps*0] += d0 * tmp;
-				pW[1+ps*0] += d1 * tmp;
-				pW[2+ps*0] = tmp;
-				if(m>3)
-					{
-					d0 = pVt[0+ps*3];
-					d1 = pVt[1+ps*3];
-					d2 = pVt[2+ps*3];
-					tmp = pC[3+ps*0];
-					pW[0+ps*0] += d0 * tmp;
-					pW[1+ps*0] += d1 * tmp;
-					pW[2+ps*0] += d2 * tmp;
-					pW[3+ps*0] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pVt[0+ps*(0+jj)];
-			d1 = pVt[1+ps*(0+jj)];
-			d2 = pVt[2+ps*(0+jj)];
-			d3 = pVt[3+ps*(0+jj)];
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] += d1 * tmp;
-			pW[2+ps*0] += d2 * tmp;
-			pW[3+ps*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(1+jj)];
-			d1 = pVt[1+ps*(1+jj)];
-			d2 = pVt[2+ps*(1+jj)];
-			d3 = pVt[3+ps*(1+jj)];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] += d1 * tmp;
-			pW[2+ps*0] += d2 * tmp;
-			pW[3+ps*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(2+jj)];
-			d1 = pVt[1+ps*(2+jj)];
-			d2 = pVt[2+ps*(2+jj)];
-			d3 = pVt[3+ps*(2+jj)];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] += d1 * tmp;
-			pW[2+ps*0] += d2 * tmp;
-			pW[3+ps*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(3+jj)];
-			d1 = pVt[1+ps*(3+jj)];
-			d2 = pVt[2+ps*(3+jj)];
-			d3 = pVt[3+ps*(3+jj)];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] += d1 * tmp;
-			pW[2+ps*0] += d2 * tmp;
-			pW[3+ps*0] += d3 * tmp;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pVt[0+ps*(ll+jj)];
-			d1 = pVt[1+ps*(ll+jj)];
-			d2 = pVt[2+ps*(ll+jj)];
-			d3 = pVt[3+ps*(ll+jj)];
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ps*0] += d0 * tmp;
-			pW[1+ps*0] += d1 * tmp;
-			pW[2+ps*0] += d2 * tmp;
-			pW[3+ps*0] += d3 * tmp;
-			}
-		}
-#endif
-
-	ii = 0;
-	for( ; ii<n-3; ii+=4)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-
-		// compute W^T *= T
-		_tz = _mm256_setzero_pd();
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*0] );
-		_tp = _mm256_broadcast_sd( &pW[0+ps*0] );
-		_w0 = _mm256_mul_pd( _t0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[0+ps*1] );
-		_w1 = _mm256_mul_pd( _t0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[0+ps*2] );
-		_w2 = _mm256_mul_pd( _t0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[0+ps*3] );
-		_w3 = _mm256_mul_pd( _t0, _tp );
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-		_t0 = _mm256_load_pd( &pT[0+ldt*1] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x1 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*0] );
-		_w0 = _mm256_fmadd_pd( _t0, _tp, _w0 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*1] );
-		_w1 = _mm256_fmadd_pd( _t0, _tp, _w1 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*2] );
-		_w2 = _mm256_fmadd_pd( _t0, _tp, _w2 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*3] );
-		_w3 = _mm256_fmadd_pd( _t0, _tp, _w3 );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*2] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x3 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*0] );
-		_w0 = _mm256_fmadd_pd( _t0, _tp, _w0 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*1] );
-		_w1 = _mm256_fmadd_pd( _t0, _tp, _w1 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*2] );
-		_w2 = _mm256_fmadd_pd( _t0, _tp, _w2 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*3] );
-		_w3 = _mm256_fmadd_pd( _t0, _tp, _w3 );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*3] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x7 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*0] );
-		_w0 = _mm256_fmadd_pd( _t0, _tp, _w0 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*1] );
-		_w1 = _mm256_fmadd_pd( _t0, _tp, _w1 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*2] );
-		_w2 = _mm256_fmadd_pd( _t0, _tp, _w2 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*3] );
-		_w3 = _mm256_fmadd_pd( _t0, _tp, _w3 );
-#else
-		_t0 = _mm256_load_pd( &pT[0+ldt*1] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x1 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*1] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w1 = _mm256_add_pd( _w1, _tp );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*2] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w2 = _mm256_add_pd( _w2, _tp );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*3] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w3 = _mm256_add_pd( _w3, _tp );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*2] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x3 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*1] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w1 = _mm256_add_pd( _w1, _tp );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*2] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w2 = _mm256_add_pd( _w2, _tp );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*3] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w3 = _mm256_add_pd( _w3, _tp );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*3] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x7 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*1] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w1 = _mm256_add_pd( _w1, _tp );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*2] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w2 = _mm256_add_pd( _w2, _tp );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*3] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w3 = _mm256_add_pd( _w3, _tp );
-#endif
-
-		_mm256_store_pd( &pW[0+ps*0], _w0 );
-		_mm256_store_pd( &pW[0+ps*1], _w1 );
-		_mm256_store_pd( &pW[0+ps*2], _w2 );
-		_mm256_store_pd( &pW[0+ps*3], _w3 );
-		}
-	for( ; ii<n; ii++)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-
-		// compute W^T *= T
-		_tz = _mm256_setzero_pd();
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*0] );
-		_tp = _mm256_broadcast_sd( &pW[0+ps*0] );
-		_w0 = _mm256_mul_pd( _t0, _tp );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*1] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x1 );
-		_tp = _mm256_broadcast_sd( &pW[1+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*2] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x3 );
-		_tp = _mm256_broadcast_sd( &pW[2+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-
-		_t0 = _mm256_load_pd( &pT[0+ldt*3] );
-		_t0 = _mm256_blend_pd( _t0, _tz, 0x7 );
-		_tp = _mm256_broadcast_sd( &pW[3+ps*0] );
-		_tp = _mm256_mul_pd( _t0, _tp );
-		_w0 = _mm256_add_pd( _w0, _tp );
-
-		_mm256_store_pd( &pW[0+ps*0], _w0 );
-		}
-
-	ii = 0;
-	for( ; ii<n-3; ii+=4)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		// compute C -= V * W^T
-		jj = 0;
-		// load
-		c00 = pC[0+jj*sdc+ps*0];
-		c10 = pC[1+jj*sdc+ps*0];
-		c20 = pC[2+jj*sdc+ps*0];
-		c30 = pC[3+jj*sdc+ps*0];
-		c01 = pC[0+jj*sdc+ps*1];
-		c11 = pC[1+jj*sdc+ps*1];
-		c21 = pC[2+jj*sdc+ps*1];
-		c31 = pC[3+jj*sdc+ps*1];
-		// rank1
-		a1 = pD[1+jj*sdd+ps*0];
-		a2 = pD[2+jj*sdd+ps*0];
-		a3 = pD[3+jj*sdd+ps*0];
-		b0 = pW[0+ps*0];
-		c00 -= b0;
-		c10 -= a1*b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[0+ps*1];
-		c01 -= b1;
-		c11 -= a1*b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank2
-		a2 = pD[2+jj*sdd+ps*1];
-		a3 = pD[3+jj*sdd+ps*1];
-		b0 = pW[1+ps*0];
-		c10 -= b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[1+ps*1];
-		c11 -= b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank3
-		a3 = pD[3+jj*sdd+ps*2];
-		b0 = pW[2+ps*0];
-		c20 -= b0;
-		c30 -= a3*b0;
-		b1 = pW[2+ps*1];
-		c21 -= b1;
-		c31 -= a3*b1;
-		// rank4
-		a3 = pD[3+jj*sdd+ps*3];
-		b0 = pW[3+ps*0];
-		c30 -= b0;
-		b1 = pW[3+ps*1];
-		c31 -= b1;
-		// store
-		pC[0+jj*sdc+ps*0] = c00;
-		pC[0+jj*sdc+ps*1] = c01;
-		if(m>1)
-			{
-			pC[1+jj*sdc+ps*0] = c10;
-			pC[1+jj*sdc+ps*1] = c11;
-			if(m>2)
-				{
-				pC[2+jj*sdc+ps*0] = c20;
-				pC[2+jj*sdc+ps*1] = c21;
-				if(m>3)
-					{
-					pC[3+jj*sdc+ps*0] = c30;
-					pC[3+jj*sdc+ps*1] = c31;
-					}
-				}
-			}
-		// load
-		c00 = pC[0+jj*sdc+ps*2];
-		c10 = pC[1+jj*sdc+ps*2];
-		c20 = pC[2+jj*sdc+ps*2];
-		c30 = pC[3+jj*sdc+ps*2];
-		c01 = pC[0+jj*sdc+ps*3];
-		c11 = pC[1+jj*sdc+ps*3];
-		c21 = pC[2+jj*sdc+ps*3];
-		c31 = pC[3+jj*sdc+ps*3];
-		// rank1
-		a1 = pD[1+jj*sdd+ps*0];
-		a2 = pD[2+jj*sdd+ps*0];
-		a3 = pD[3+jj*sdd+ps*0];
-		b0 = pW[0+ps*2];
-		c00 -= b0;
-		c10 -= a1*b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[0+ps*3];
-		c01 -= b1;
-		c11 -= a1*b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank2
-		a2 = pD[2+jj*sdd+ps*1];
-		a3 = pD[3+jj*sdd+ps*1];
-		b0 = pW[1+ps*2];
-		c10 -= b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[1+ps*3];
-		c11 -= b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank3
-		a3 = pD[3+jj*sdd+ps*2];
-		b0 = pW[2+ps*2];
-		c20 -= b0;
-		c30 -= a3*b0;
-		b1 = pW[2+ps*3];
-		c21 -= b1;
-		c31 -= a3*b1;
-		// rank4
-		a3 = pD[3+jj*sdd+ps*3];
-		b0 = pW[3+ps*2];
-		c30 -= b0;
-		b1 = pW[3+ps*3];
-		c31 -= b1;
-		// store
-		pC[0+jj*sdc+ps*2] = c00;
-		pC[0+jj*sdc+ps*3] = c01;
-		if(m>1)
-			{
-			pC[1+jj*sdc+ps*2] = c10;
-			pC[1+jj*sdc+ps*3] = c11;
-			if(m>2)
-				{
-				pC[2+jj*sdc+ps*2] = c20;
-				pC[2+jj*sdc+ps*3] = c21;
-				if(m>3)
-					{
-					pC[3+jj*sdc+ps*2] = c30;
-					pC[3+jj*sdc+ps*3] = c31;
-					}
-				}
-			}
-		}
-	for( ; ii<n; ii++)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		// compute C -= V * W^T
-		jj = 0;
-		// load
-		c00 = pC[0+jj*sdc+ps*0];
-		c10 = pC[1+jj*sdc+ps*0];
-		c20 = pC[2+jj*sdc+ps*0];
-		c30 = pC[3+jj*sdc+ps*0];
-		// rank1
-		a1 = pD[1+jj*sdd+ps*0];
-		a2 = pD[2+jj*sdd+ps*0];
-		a3 = pD[3+jj*sdd+ps*0];
-		b0 = pW[0+ps*0];
-		c00 -= b0;
-		c10 -= a1*b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		// rank2
-		a2 = pD[2+jj*sdd+ps*1];
-		a3 = pD[3+jj*sdd+ps*1];
-		b0 = pW[1+ps*0];
-		c10 -= b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		// rank3
-		a3 = pD[3+jj*sdd+ps*2];
-		b0 = pW[2+ps*0];
-		c20 -= b0;
-		c30 -= a3*b0;
-		// rank4
-		a3 = pD[3+jj*sdd+ps*3];
-		b0 = pW[3+ps*0];
-		c30 -= b0;
-		// store
-		pC[0+jj*sdc+ps*0] = c00;
-		if(m>1)
-			{
-			pC[1+jj*sdc+ps*0] = c10;
-			if(m>2)
-				{
-				pC[2+jj*sdc+ps*0] = c20;
-				if(m>3)
-					{
-					pC[3+jj*sdc+ps*0] = c30;
-					}
-				}
-			}
-		}
-
-#if 1
-	jj = 4;
-#if defined(TARGET_X64_INTEL_HASWELL)
-	for(; jj<m-11; jj+=12)
-		{
-		kernel_dger4_sub_12r_lib4(n, &pD[jj*sdd], sdd, &pW0[0], &pC0[jj*sdc], sdc);
-		}
-#endif
-	for(; jj<m-7; jj+=8)
-		{
-		kernel_dger4_sub_8r_lib4(n, &pD[jj*sdd], sdd, &pW0[0], &pC0[jj*sdc], sdc);
-		}
-	for(; jj<m-3; jj+=4)
-		{
-		kernel_dger4_sub_4r_lib4(n, &pD[jj*sdd], &pW0[0], &pC0[jj*sdc]);
-		}
-	if(jj<m)
-		{
-		kernel_dger4_sub_4r_vs_lib4(n, &pD[jj*sdd], &pW0[0], &pC0[jj*sdc], m-jj);
-		}
-#else
-	ii = 0;
-	for( ; ii<n-3; ii+=4)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			// load
-			_c0 = _mm256_load_pd( &pC[0+jj*sdc+ps*0] );
-			_c1 = _mm256_load_pd( &pC[0+jj*sdc+ps*1] );
-			_c2 = _mm256_load_pd( &pC[0+jj*sdc+ps*2] );
-			_c3 = _mm256_load_pd( &pC[0+jj*sdc+ps*3] );
-			//
-			_a0 = _mm256_load_pd( &pD[0+jj*sdd+ps*0] );
-			_b0 = _mm256_broadcast_sd( &pW[0+ps*0] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c0 = _mm256_sub_pd( _c0, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[0+ps*1] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c1 = _mm256_sub_pd( _c1, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[0+ps*2] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c2 = _mm256_sub_pd( _c2, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[0+ps*3] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c3 = _mm256_sub_pd( _c3, _tp );
-			//
-			_a0 = _mm256_load_pd( &pD[0+jj*sdd+ps*1] );
-			_b0 = _mm256_broadcast_sd( &pW[1+ps*0] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c0 = _mm256_sub_pd( _c0, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[1+ps*1] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c1 = _mm256_sub_pd( _c1, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[1+ps*2] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c2 = _mm256_sub_pd( _c2, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[1+ps*3] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c3 = _mm256_sub_pd( _c3, _tp );
-			//
-			_a0 = _mm256_load_pd( &pD[0+jj*sdd+ps*2] );
-			_b0 = _mm256_broadcast_sd( &pW[2+ps*0] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c0 = _mm256_sub_pd( _c0, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[2+ps*1] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c1 = _mm256_sub_pd( _c1, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[2+ps*2] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c2 = _mm256_sub_pd( _c2, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[2+ps*3] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c3 = _mm256_sub_pd( _c3, _tp );
-			//
-			_a0 = _mm256_load_pd( &pD[0+jj*sdd+ps*3] );
-			_b0 = _mm256_broadcast_sd( &pW[3+ps*0] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c0 = _mm256_sub_pd( _c0, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[3+ps*1] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c1 = _mm256_sub_pd( _c1, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[3+ps*2] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c2 = _mm256_sub_pd( _c2, _tp );
-			_b0 = _mm256_broadcast_sd( &pW[3+ps*3] );
-			_tp = _mm256_mul_pd( _a0, _b0 );
-			_c3 = _mm256_sub_pd( _c3, _tp );
-			// store
-			_mm256_store_pd( &pC[0+jj*sdc+ps*0], _c0 );
-			_mm256_store_pd( &pC[0+jj*sdc+ps*1], _c1 );
-			_mm256_store_pd( &pC[0+jj*sdc+ps*2], _c2 );
-			_mm256_store_pd( &pC[0+jj*sdc+ps*3], _c3 );
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			// load
-			c00 = pC[ll+jj*sdc+ps*0];
-			c01 = pC[ll+jj*sdc+ps*1];
-			//
-			a0 = pD[ll+jj*sdd+ps*0];
-			b0 = pW[0+ps*0];
-			c00 -= a0*b0;
-			b1 = pW[0+ps*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*1];
-			b0 = pW[1+ps*0];
-			c00 -= a0*b0;
-			b1 = pW[1+ps*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*2];
-			b0 = pW[2+ps*0];
-			c00 -= a0*b0;
-			b1 = pW[2+ps*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*3];
-			b0 = pW[3+ps*0];
-			c00 -= a0*b0;
-			b1 = pW[3+ps*1];
-			c01 -= a0*b1;
-			// store
-			pC[ll+jj*sdc+ps*0] = c00;
-			pC[ll+jj*sdc+ps*1] = c01;
-			// load
-			c00 = pC[ll+jj*sdc+ps*2];
-			c01 = pC[ll+jj*sdc+ps*3];
-			//
-			a0 = pD[ll+jj*sdd+ps*0];
-			b0 = pW[0+ps*2];
-			c00 -= a0*b0;
-			b1 = pW[0+ps*3];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*1];
-			b0 = pW[1+ps*2];
-			c00 -= a0*b0;
-			b1 = pW[1+ps*3];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*2];
-			b0 = pW[2+ps*2];
-			c00 -= a0*b0;
-			b1 = pW[2+ps*3];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*3];
-			b0 = pW[3+ps*2];
-			c00 -= a0*b0;
-			b1 = pW[3+ps*3];
-			c01 -= a0*b1;
-			// store
-			pC[ll+jj*sdc+ps*2] = c00;
-			pC[ll+jj*sdc+ps*3] = c01;
-			}
-		}
-	for( ; ii<n; ii++)
-		{
-		pW = pW0+ii*ps;
-		pC = pC0+ii*ps;
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			// load
-			c00 = pC[0+jj*sdc+ps*0];
-			c10 = pC[1+jj*sdc+ps*0];
-			c20 = pC[2+jj*sdc+ps*0];
-			c30 = pC[3+jj*sdc+ps*0];
-			//
-			a0 = pD[0+jj*sdd+ps*0];
-			a1 = pD[1+jj*sdd+ps*0];
-			a2 = pD[2+jj*sdd+ps*0];
-			a3 = pD[3+jj*sdd+ps*0];
-			b0 = pW[0+ps*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*1];
-			a1 = pD[1+jj*sdd+ps*1];
-			a2 = pD[2+jj*sdd+ps*1];
-			a3 = pD[3+jj*sdd+ps*1];
-			b0 = pW[1+ps*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*2];
-			a1 = pD[1+jj*sdd+ps*2];
-			a2 = pD[2+jj*sdd+ps*2];
-			a3 = pD[3+jj*sdd+ps*2];
-			b0 = pW[2+ps*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*3];
-			a1 = pD[1+jj*sdd+ps*3];
-			a2 = pD[2+jj*sdd+ps*3];
-			a3 = pD[3+jj*sdd+ps*3];
-			b0 = pW[3+ps*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			// store
-			pC[0+jj*sdc+ps*0] = c00;
-			pC[1+jj*sdc+ps*0] = c10;
-			pC[2+jj*sdc+ps*0] = c20;
-			pC[3+jj*sdc+ps*0] = c30;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			// load
-			c00 = pC[ll+jj*sdc+ps*0];
-			//
-			a0 = pD[ll+jj*sdd+ps*0];
-			b0 = pW[0+ps*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*1];
-			b0 = pW[1+ps*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*2];
-			b0 = pW[2+ps*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*3];
-			b0 = pW[3+ps*0];
-			c00 -= a0*b0;
-			// store
-			pC[ll+jj*sdc+ps*0] = c00;
-			}
-		}
-#endif
-
-	return;
-	}
-
-
-
-// assume n>=4
-void kernel_dgelqf_4_lib4(int n, double *pD, double *dD)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w1, w2, w3;
-	const int ps = 4;
-	// first column
-	beta = 0.0;
-	for(ii=1; ii<n; ii++)
-		{
-		tmp = pD[0+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		dD[0] = 0.0;
-		tmp = 0.0;
-		goto col2;
-		}
-	alpha = pD[0+ps*0];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[0] = (beta-alpha) / beta;
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[0+ps*0] = beta;
-	w1 = pD[1+ps*0];
-	w2 = pD[2+ps*0];
-	w3 = pD[3+ps*0];
-	//
-	pD[0+ps*1] *= tmp;
-	w1 += pD[1+ps*1] * pD[0+ps*1];
-	w2 += pD[2+ps*1] * pD[0+ps*1];
-	w3 += pD[3+ps*1] * pD[0+ps*1];
-	//
-	pD[0+ps*2] *= tmp;
-	w1 += pD[1+ps*2] * pD[0+ps*2];
-	w2 += pD[2+ps*2] * pD[0+ps*2];
-	w3 += pD[3+ps*2] * pD[0+ps*2];
-	//
-	pD[0+ps*3] *= tmp;
-	w1 += pD[1+ps*3] * pD[0+ps*3];
-	w2 += pD[2+ps*3] * pD[0+ps*3];
-	w3 += pD[3+ps*3] * pD[0+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[0+ps*ii] *= tmp;
-		w1 += pD[1+ps*ii] * pD[0+ps*ii];
-		w2 += pD[2+ps*ii] * pD[0+ps*ii];
-		w3 += pD[3+ps*ii] * pD[0+ps*ii];
-		}
-	//
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	//
-	pD[1+ps*0] += w1;
-	pD[2+ps*0] += w2;
-	pD[3+ps*0] += w3;
-	//
-	pD[1+ps*1] += w1 * pD[0+ps*1];
-	pD[2+ps*1] += w2 * pD[0+ps*1];
-	pD[3+ps*1] += w3 * pD[0+ps*1];
-	//
-	pD[1+ps*2] += w1 * pD[0+ps*2];
-	pD[2+ps*2] += w2 * pD[0+ps*2];
-	pD[3+ps*2] += w3 * pD[0+ps*2];
-	beta = pD[1+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] += w1 * pD[0+ps*3];
-	pD[2+ps*3] += w2 * pD[0+ps*3];
-	pD[3+ps*3] += w3 * pD[0+ps*3];
-	beta += pD[1+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] += w1 * pD[0+ps*ii];
-		pD[2+ps*ii] += w2 * pD[0+ps*ii];
-		pD[3+ps*ii] += w3 * pD[0+ps*ii];
-		beta += pD[1+ps*ii] * pD[1+ps*ii];
-		}
-	// second column
-col2:
-	if(beta==0.0)
-		{
-		dD[1] = 0.0;
-		tmp = 0.0;
-		goto col3;
-		}
-	alpha = pD[1+ps*1];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[1] = (beta-alpha) / beta;
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[1+ps*1] = beta;
-	w2 = pD[2+ps*1];
-	w3 = pD[3+ps*1];
-	//
-	pD[1+ps*2] *= tmp;
-	w2 += pD[2+ps*2] * pD[1+ps*2];
-	w3 += pD[3+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] *= tmp;
-	w2 += pD[2+ps*3] * pD[1+ps*3];
-	w3 += pD[3+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] *= tmp;
-		w2 += pD[2+ps*ii] * pD[1+ps*ii];
-		w3 += pD[3+ps*ii] * pD[1+ps*ii];
-		}
-	//
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	//
-	pD[2+ps*1] += w2;
-	pD[3+ps*1] += w3;
-	//
-	pD[2+ps*2] += w2 * pD[1+ps*2];
-	pD[3+ps*2] += w3 * pD[1+ps*2];
-	//
-	pD[2+ps*3] += w2 * pD[1+ps*3];
-	pD[3+ps*3] += w3 * pD[1+ps*3];
-	beta = pD[2+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] += w2 * pD[1+ps*ii];
-		pD[3+ps*ii] += w3 * pD[1+ps*ii];
-		beta += pD[2+ps*ii] * pD[2+ps*ii];
-		}
-	// third column
-col3:
-	if(beta==0.0)
-		{
-		dD[2] = 0.0;
-		tmp = 0.0;
-		goto col4;
-		}
-	alpha = pD[2+ps*2];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[2] = (beta-alpha) / beta;
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[2+ps*2] = beta;
-	w3 = pD[3+ps*2];
-	//
-	pD[2+ps*3] *= tmp;
-	w3 += pD[3+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] *= tmp;
-		w3 += pD[3+ps*ii] * pD[2+ps*ii];
-		}
-	//
-	w3 = - dD[2] * w3;
-	//
-	pD[3+ps*2] += w3;
-	//
-	pD[3+ps*3] += w3 * pD[2+ps*3];
-	//
-	beta = 0.0;
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] += w3 * pD[2+ps*ii];
-		beta += pD[3+ps*ii] * pD[3+ps*ii];
-		}
-	// fourth column
-col4:
-	if(beta==0.0)
-		{
-		dD[3] = 0.0;
-		tmp = 0.0;
-		return;
-		}
-	alpha = pD[3+ps*3];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[3] = (beta-alpha) / beta;
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[3+ps*3] = beta;
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] *= tmp;
-		}
-	return;
-	}
-
-
-
-// unblocked algorithm
-void kernel_dgelqf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD)
-	{
-	if(m<=0 || n<=0)
-		return;
-	int ii, jj, kk, ll, imax, jmax, jmax0, kmax, kmax0;
-	const int ps = 4;
-	imax = k;//m<n ? m : n;
-	double alpha, beta, tmp;
-	double w00, w01,
-		   w10, w11,
-		   w20, w21,
-		   w30, w31;
-	__m256d
-		_a0, _b0, _t0, _w0, _w1;
-	double *pC00, *pC10, *pC10a, *pC20, *pC20a, *pC01, *pC11;
-	double pT[4];
-	int ldt = 2;
-	double *pD0 = pD-offD;
-	ii = 0;
-#if 1 // rank 2
-	for(; ii<imax-1; ii+=2)
-		{
-		// first row
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC00[0] = beta;
-			for(jj=1; jj<n-ii; jj++)
-				pC00[0+ps*jj] *= tmp;
-			}
-		pC10 = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-		kmax = n-ii;
-		w00 = pC10[0+ps*0]; // pC00[0+ps*0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-			}
-		w00 = - w00*dD[ii];
-		pC10[0+ps*0] += w00; // pC00[0+ps*0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-			}
-		// second row
-		pC11 = pC10+ps*1;
-		beta = 0.0;
-		for(jj=1; jj<n-(ii+1); jj++)
-			{
-			tmp = pC11[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[(ii+1)] = 0.0;
-			}
-		else
-			{
-			alpha = pC11[0+ps*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[(ii+1)] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC11[0+ps*0] = beta;
-			for(jj=1; jj<n-(ii+1); jj++)
-				pC11[0+ps*jj] *= tmp;
-			}
-		// compute T
-		kmax = n-ii;
-		tmp = 1.0*0.0 + pC00[0+ps*1]*1.0;
-		for(kk=2; kk<kmax; kk++)
-			tmp += pC00[0+ps*kk]*pC10[0+ps*kk];
-		pT[0+ldt*0] = - dD[ii+0];
-		pT[0+ldt*1] = + dD[ii+1] * tmp * dD[ii+0];
-		pT[1+ldt*1] = - dD[ii+1];
-		// downgrade
-		kmax = n-ii;
-		jmax = m-ii-2;
-		jmax0 = (ps-((ii+2+offD)&(ps-1)))&(ps-1);
-		jmax0 = jmax<jmax0 ? jmax : jmax0;
-		jj = 0;
-		pC20a = &pD0[((offD+ii+2)&(ps-1))+((offD+ii+2)-((offD+ii+2)&(ps-1)))*sdd+ii*ps];
-		pC20 = pC20a;
-		if(jmax0>0)
-			{
-			for( ; jj<jmax0; jj++)
-				{
-				w00 = pC20[0+ps*0]*1.0 + pC20[0+ps*1]*pC00[0+ps*1];
-				w01 = pC20[0+ps*0]*0.0 + pC20[0+ps*1]*1.0;
-				for(kk=2; kk<kmax; kk++)
-					{
-					w00 += pC20[0+ps*kk]*pC00[0+ps*kk];
-					w01 += pC20[0+ps*kk]*pC10[0+ps*kk];
-					}
-				w01 = w00*pT[0+ldt*1] + w01*pT[1+ldt*1];
-				w00 = w00*pT[0+ldt*0];
-				pC20[0+ps*0] += w00*1.0          + w01*0.0;
-				pC20[0+ps*1] += w00*pC00[0+ps*1] + w01*1.0;
-				for(kk=2; kk<kmax; kk++)
-					{
-					pC20[0+ps*kk] += w00*pC00[0+ps*kk] + w01*pC10[0+ps*kk];
-					}
-				pC20 += 1;
-				}
-			pC20 += -ps+ps*sdd;
-			}
-		for( ; jj<jmax-3; jj+=4)
-			{
-			//
-			_w0 = _mm256_load_pd( &pC20[0+ps*0] );
-			_a0 = _mm256_load_pd( &pC20[0+ps*1] );
-			_b0 = _mm256_broadcast_sd( &pC00[0+ps*1] );
-			_t0 = _mm256_mul_pd( _a0, _b0 );
-			_w0 = _mm256_add_pd( _w0, _t0 );
-			_w1 = _mm256_load_pd( &pC20[0+ps*1] );
-			for(kk=2; kk<kmax; kk++)
-				{
-				_a0 = _mm256_load_pd( &pC20[0+ps*kk] );
-				_b0 = _mm256_broadcast_sd( &pC00[0+ps*kk] );
-				_t0 = _mm256_mul_pd( _a0, _b0 );
-				_w0 = _mm256_add_pd( _w0, _t0 );
-				_b0 = _mm256_broadcast_sd( &pC10[0+ps*kk] );
-				_t0 = _mm256_mul_pd( _a0, _b0 );
-				_w1 = _mm256_add_pd( _w1, _t0 );
-				}
-			//
-			_b0 = _mm256_broadcast_sd( &pT[1+ldt*1] );
-			_w1 = _mm256_mul_pd( _w1, _b0 );
-			_b0 = _mm256_broadcast_sd( &pT[0+ldt*1] );
-			_t0 = _mm256_mul_pd( _w0, _b0 );
-			_w1 = _mm256_add_pd( _w1, _t0 );
-			_b0 = _mm256_broadcast_sd( &pT[0+ldt*0] );
-			_w0 = _mm256_mul_pd( _w0, _b0 );
-			//
-			_a0 = _mm256_load_pd( &pC20[0+ps*0] );
-			_a0 = _mm256_add_pd( _a0, _w0 );
-			_mm256_store_pd( &pC20[0+ps*0], _a0 );
-			_a0 = _mm256_load_pd( &pC20[0+ps*1] );
-			_b0 = _mm256_broadcast_sd( &pC00[0+ps*1] );
-			_t0 = _mm256_mul_pd( _w0, _b0 );
-			_a0 = _mm256_add_pd( _a0, _t0 );
-			_a0 = _mm256_add_pd( _a0, _w1 );
-			_mm256_store_pd( &pC20[0+ps*1], _a0 );
-			for(kk=2; kk<kmax; kk++)
-				{
-				_a0 = _mm256_load_pd( &pC20[0+ps*kk] );
-				_b0 = _mm256_broadcast_sd( &pC00[0+ps*kk] );
-				_t0 = _mm256_mul_pd( _w0, _b0 );
-				_a0 = _mm256_add_pd( _a0, _t0 );
-				_b0 = _mm256_broadcast_sd( &pC10[0+ps*kk] );
-				_t0 = _mm256_mul_pd( _w1, _b0 );
-				_a0 = _mm256_add_pd( _a0, _t0 );
-				_mm256_store_pd( &pC20[0+ps*kk], _a0 );
-				}
-			pC20 += ps*sdd;
-			}
-		for(ll=0; ll<jmax-jj; ll++)
-			{
-			w00 = pC20[0+ps*0]*1.0 + pC20[0+ps*1]*pC00[0+ps*1];
-			w01 = pC20[0+ps*0]*0.0 + pC20[0+ps*1]*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				w00 += pC20[0+ps*kk]*pC00[0+ps*kk];
-				w01 += pC20[0+ps*kk]*pC10[0+ps*kk];
-				}
-			w01 = w00*pT[0+ldt*1] + w01*pT[1+ldt*1];
-			w00 = w00*pT[0+ldt*0];
-			pC20[0+ps*0] += w00*1.0          + w01*0.0;
-			pC20[0+ps*1] += w00*pC00[0+ps*1] + w01*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				pC20[0+ps*kk] += w00*pC00[0+ps*kk] + w01*pC10[0+ps*kk];
-				}
-			pC20 += 1;
-			}
-		}
-#endif
-	for(; ii<imax; ii++)
-		{
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC00[0] = beta;
-			for(jj=1; jj<n-ii; jj++)
-				pC00[0+ps*jj] *= tmp;
-			}
-		if(ii<n)
-			{
-			// compute T
-			pT[0+ldt*0] = - dD[ii+0];
-			// downgrade
-			kmax = n-ii;
-			jmax = m-ii-1;
-			jmax0 = (ps-((ii+1+offD)&(ps-1)))&(ps-1);
-			jmax0 = jmax<jmax0 ? jmax : jmax0;
-			jj = 0;
-			pC10a = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-			pC10 = pC10a;
-			if(jmax0>0)
-				{
-				for( ; jj<jmax0; jj++)
-					{
-					w00 = pC10[0+ps*0];
-					for(kk=1; kk<kmax; kk++)
-						{
-						w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-						}
-					w00 = w00*pT[0+ldt*0];
-					pC10[0+ps*0] += w00;
-					for(kk=1; kk<kmax; kk++)
-						{
-						pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-						}
-					pC10 += 1;
-					}
-				pC10 += -ps+ps*sdd;
-				}
-			for( ; jj<jmax-3; jj+=4)
-				{
-				//
-				_w0 = _mm256_load_pd( &pC10[0+ps*0] );
-				for(kk=1; kk<kmax; kk++)
-					{
-					_a0 = _mm256_load_pd( &pC10[0+ps*kk] );
-					_b0 = _mm256_broadcast_sd( &pC00[0+ps*kk] );
-					_t0 = _mm256_mul_pd( _a0, _b0 );
-					_w0 = _mm256_add_pd( _w0, _t0 );
-					}
-				//
-				_b0 = _mm256_broadcast_sd( &pT[0+ldt*0] );
-				_w0 = _mm256_mul_pd( _w0, _b0 );
-				//
-				_a0 = _mm256_load_pd( &pC10[0+ps*0] );
-				_a0 = _mm256_add_pd( _a0, _w0 );
-				_mm256_store_pd( &pC10[0+ps*0], _a0 );
-				for(kk=1; kk<kmax; kk++)
-					{
-					_a0 = _mm256_load_pd( &pC10[0+ps*kk] );
-					_b0 = _mm256_broadcast_sd( &pC00[0+ps*kk] );
-					_t0 = _mm256_mul_pd( _w0, _b0 );
-					_a0 = _mm256_add_pd( _a0, _t0 );
-					_mm256_store_pd( &pC10[0+ps*kk], _a0 );
-					}
-				pC10 += ps*sdd;
-				}
-			for(ll=0; ll<jmax-jj; ll++)
-				{
-				w00 = pC10[0+ps*0];
-				for(kk=1; kk<kmax; kk++)
-					{
-					w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-					}
-				w00 = w00*pT[0+ldt*0];
-				pC10[0+ps*0] += w00;
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-					}
-				pC10 += 1;
-				}
-			}
-		}
-	return;
-	}
-
-
-
-// assume kmax>=4
-void kernel_dlarft_4_lib4(int kmax, double *pD, double *dD, double *pT)
-	{
-	const int ps = 4;
-	int kk;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	// 0
-	// 1
-	v10 =  pD[0+ps*1];
-	// 2
-	v10 += pD[1+ps*2]*pD[0+ps*2];
-	v20 =  pD[0+ps*2];
-	v21 =  pD[1+ps*2];
-	// 3
-	v10 += pD[1+ps*3]*pD[0+ps*3];
-	v20 += pD[2+ps*3]*pD[0+ps*3];
-	v21 += pD[2+ps*3]*pD[1+ps*3];
-	v30 =  pD[0+ps*3];
-	v31 =  pD[1+ps*3];
-	v32 =  pD[2+ps*3];
-	//
-	for(kk=4; kk<kmax; kk++)
-		{
-		v10 += pD[1+ps*kk]*pD[0+ps*kk];
-		v20 += pD[2+ps*kk]*pD[0+ps*kk];
-		v30 += pD[3+ps*kk]*pD[0+ps*kk];
-		v21 += pD[2+ps*kk]*pD[1+ps*kk];
-		v31 += pD[3+ps*kk]*pD[1+ps*kk];
-		v32 += pD[3+ps*kk]*pD[2+ps*kk];
-		}
-	pT[0+ps*0] = - dD[0];
-	pT[1+ps*1] = - dD[1];
-	pT[2+ps*2] = - dD[2];
-	pT[3+ps*3] = - dD[3];
-	pT[0+ps*1] = - dD[1] * (v10*pT[0+ps*0]);
-	pT[1+ps*2] = - dD[2] * (v21*pT[1+ps*1]);
-	pT[2+ps*3] = - dD[3] * (v32*pT[2+ps*2]);
-	pT[0+ps*2] = - dD[2] * (v20*pT[0+ps*0] + v21*pT[0+ps*1]);
-	pT[1+ps*3] = - dD[3] * (v31*pT[1+ps*1] + v32*pT[1+ps*2]);
-	pT[0+ps*3] = - dD[3] * (v30*pT[0+ps*0] + v31*pT[0+ps*1] + v32*pT[0+ps*2]);
-	return;
-	}
-
-
-
-// assume n>=4
-#if ! defined(TARGET_X64_INTEL_HASWELL)
-void kernel_dgelqf_dlarft4_4_lib4(int n, double *pD, double *dD, double *pT)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w0, w1, w2, w3;
-	const int ps = 4;
-	// zero tau matrix
-	for(ii=0; ii<16; ii++)
-		pT[ii] = 0.0;
-	// first column
-	beta = 0.0;
-	for(ii=1; ii<n; ii++)
-		{
-		tmp = pD[0+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		dD[0] = 0.0;
-		tmp = 0.0;
-		goto col2;
-		}
-	alpha = pD[0+ps*0];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[0] = (beta-alpha) / beta;
-	pT[0+ps*0] = - dD[0];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[0+ps*0] = beta;
-	w1 = pD[1+ps*0];
-	w2 = pD[2+ps*0];
-	w3 = pD[3+ps*0];
-	//
-	pD[0+ps*1] *= tmp;
-	w1 += pD[1+ps*1] * pD[0+ps*1];
-	w2 += pD[2+ps*1] * pD[0+ps*1];
-	w3 += pD[3+ps*1] * pD[0+ps*1];
-	//
-	pD[0+ps*2] *= tmp;
-	w1 += pD[1+ps*2] * pD[0+ps*2];
-	w2 += pD[2+ps*2] * pD[0+ps*2];
-	w3 += pD[3+ps*2] * pD[0+ps*2];
-	//
-	pD[0+ps*3] *= tmp;
-	w1 += pD[1+ps*3] * pD[0+ps*3];
-	w2 += pD[2+ps*3] * pD[0+ps*3];
-	w3 += pD[3+ps*3] * pD[0+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[0+ps*ii] *= tmp;
-		w1 += pD[1+ps*ii] * pD[0+ps*ii];
-		w2 += pD[2+ps*ii] * pD[0+ps*ii];
-		w3 += pD[3+ps*ii] * pD[0+ps*ii];
-		}
-	//
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	//
-	pD[1+ps*0] += w1;
-	pD[2+ps*0] += w2;
-	pD[3+ps*0] += w3;
-	//
-	pD[1+ps*1] += w1 * pD[0+ps*1];
-	pD[2+ps*1] += w2 * pD[0+ps*1];
-	pD[3+ps*1] += w3 * pD[0+ps*1];
-	//
-	pD[1+ps*2] += w1 * pD[0+ps*2];
-	pD[2+ps*2] += w2 * pD[0+ps*2];
-	pD[3+ps*2] += w3 * pD[0+ps*2];
-	beta = pD[1+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] += w1 * pD[0+ps*3];
-	pD[2+ps*3] += w2 * pD[0+ps*3];
-	pD[3+ps*3] += w3 * pD[0+ps*3];
-	beta += pD[1+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] += w1 * pD[0+ps*ii];
-		pD[2+ps*ii] += w2 * pD[0+ps*ii];
-		pD[3+ps*ii] += w3 * pD[0+ps*ii];
-		beta += pD[1+ps*ii] * pD[1+ps*ii];
-		}
-	// second column
-col2:
-	if(beta==0.0)
-		{
-		dD[1] = 0.0;
-		tmp = 0.0;
-		goto col3;
-		}
-	alpha = pD[1+ps*1];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[1] = (beta-alpha) / beta;
-	pT[1+ps*1] = - dD[1];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[1+ps*1] = beta;
-	w0 = pD[0+ps*1]; //
-	w2 = pD[2+ps*1];
-	w3 = pD[3+ps*1];
-	//
-	pD[1+ps*2] *= tmp;
-	w0 += pD[0+ps*2] * pD[1+ps*2]; //
-	w2 += pD[2+ps*2] * pD[1+ps*2];
-	w3 += pD[3+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[1+ps*3]; //
-	w2 += pD[2+ps*3] * pD[1+ps*3];
-	w3 += pD[3+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[1+ps*ii]; //
-		w2 += pD[2+ps*ii] * pD[1+ps*ii];
-		w3 += pD[3+ps*ii] * pD[1+ps*ii];
-		}
-	//
-	pT[0+ps*1] = - dD[1] * (w0*pT[0+ps*0]);
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	//
-	pD[2+ps*1] += w2;
-	pD[3+ps*1] += w3;
-	//
-	pD[2+ps*2] += w2 * pD[1+ps*2];
-	pD[3+ps*2] += w3 * pD[1+ps*2];
-	//
-	pD[2+ps*3] += w2 * pD[1+ps*3];
-	pD[3+ps*3] += w3 * pD[1+ps*3];
-	beta = pD[2+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] += w2 * pD[1+ps*ii];
-		pD[3+ps*ii] += w3 * pD[1+ps*ii];
-		beta += pD[2+ps*ii] * pD[2+ps*ii];
-		}
-	// third column
-col3:
-	if(beta==0.0)
-		{
-		dD[2] = 0.0;
-		tmp = 0.0;
-		goto col4;
-		}
-	alpha = pD[2+ps*2];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[2] = (beta-alpha) / beta;
-	pT[2+ps*2] = - dD[2];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[2+ps*2] = beta;
-	w0 = pD[0+ps*2];
-	w1 = pD[1+ps*2];
-	w3 = pD[3+ps*2];
-	//
-	pD[2+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[2+ps*3];
-	w1 += pD[1+ps*3] * pD[2+ps*3];
-	w3 += pD[3+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[2+ps*ii];
-		w1 += pD[1+ps*ii] * pD[2+ps*ii];
-		w3 += pD[3+ps*ii] * pD[2+ps*ii];
-		}
-	//
-	pT[1+ps*2] = - dD[2] * (w1*pT[1+ps*1]);
-	pT[0+ps*2] = - dD[2] * (w0*pT[0+ps*0] + w1*pT[0+ps*1]);
-	w3 = - dD[2] * w3;
-	//
-	pD[3+ps*2] += w3;
-	//
-	pD[3+ps*3] += w3 * pD[2+ps*3];
-	//
-	beta = 0.0;
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] += w3 * pD[2+ps*ii];
-		beta += pD[3+ps*ii] * pD[3+ps*ii];
-		}
-	// fourth column
-col4:
-	if(beta==0.0)
-		{
-		dD[3] = 0.0;
-		tmp = 0.0;
-		return;
-		}
-	alpha = pD[3+ps*3];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[3] = (beta-alpha) / beta;
-	pT[3+ps*3] = - dD[3];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[3+ps*3] = beta;
-	w0 =  pD[0+ps*3];
-	w1 =  pD[1+ps*3];
-	w2 =  pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[3+ps*ii];
-		w1 += pD[1+ps*ii] * pD[3+ps*ii];
-		w2 += pD[2+ps*ii] * pD[3+ps*ii];
-		}
-	//
-	pT[2+ps*3] = - dD[3] * (w2*pT[2+ps*2]);
-	pT[1+ps*3] = - dD[3] * (w1*pT[1+ps*1] + w2*pT[1+ps*2]);
-	pT[0+ps*3] = - dD[3] * (w0*pT[0+ps*0] + w1*pT[0+ps*1] + w2*pT[0+ps*2]);
-	return;
-	}
-#endif
-
-
-
-void kernel_dlarfb4_r_1_lib4(int kmax, double *pV, double *pT, double *pD)
-	{
-	const int ps = 4;
-	double pW[16];
-	int kk;
-	// 0
-	pW[0+ps*0] = pD[0+ps*0];
-	// 1
-	pW[0+ps*0] += pD[0+ps*1]*pV[0+ps*1];
-	pW[0+ps*1] = pD[0+ps*1];
-	// 2
-	pW[0+ps*0] += pD[0+ps*2]*pV[0+ps*2];
-	pW[0+ps*1] += pD[0+ps*2]*pV[1+ps*2];
-	pW[0+ps*2] = pD[0+ps*2];
-	// 3
-	pW[0+ps*0] += pD[0+ps*3]*pV[0+ps*3];
-	pW[0+ps*1] += pD[0+ps*3]*pV[1+ps*3];
-	pW[0+ps*2] += pD[0+ps*3]*pV[2+ps*3];
-	pW[0+ps*3] = pD[0+ps*3];
-	//
-	for(kk=4; kk<kmax; kk++)
-		{
-		pW[0+ps*0] += pD[0+ps*kk]*pV[0+ps*kk];
-		pW[0+ps*1] += pD[0+ps*kk]*pV[1+ps*kk];
-		pW[0+ps*2] += pD[0+ps*kk]*pV[2+ps*kk];
-		pW[0+ps*3] += pD[0+ps*kk]*pV[3+ps*kk];
-		}
-	//
-	pW[0+ps*3] = pW[0+ps*0]*pT[0+ps*3] + pW[0+ps*1]*pT[1+ps*3] + pW[0+ps*2]*pT[2+ps*3] + pW[0+ps*3]*pT[3+ps*3];
-	//
-	pW[0+ps*2] = pW[0+ps*0]*pT[0+ps*2] + pW[0+ps*1]*pT[1+ps*2] + pW[0+ps*2]*pT[2+ps*2];
-	//
-	pW[0+ps*1] = pW[0+ps*0]*pT[0+ps*1] + pW[0+ps*1]*pT[1+ps*1];
-	//
-	pW[0+ps*0] = pW[0+ps*0]*pT[0+ps*0];
-	//
-	pD[0+ps*0] += pW[0+ps*0];
-	//
-	pD[0+ps*1] += pW[0+ps*0]*pV[0+ps*1] + pW[0+ps*1];
-	//
-	pD[0+ps*2] += pW[0+ps*0]*pV[0+ps*2] + pW[0+ps*1]*pV[1+ps*2] + pW[0+ps*2];
-	//
-	pD[0+ps*3] += pW[0+ps*0]*pV[0+ps*3] + pW[0+ps*1]*pV[1+ps*3] + pW[0+ps*2]*pV[2+ps*3] + pW[0+ps*3];
-	for(kk=4; kk<kmax; kk++)
-		{
-		pD[0+ps*kk] += pW[0+ps*0]*pV[0+ps*kk] + pW[0+ps*1]*pV[1+ps*kk] + pW[0+ps*2]*pV[2+ps*kk] + pW[0+ps*3]*pV[3+ps*kk];
-		}
-	return;
-	}
-
-
-
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_dgetrf_pivot_4_lib4.c b/third_party/blasfeo/kernel/avx/kernel_dgetrf_pivot_4_lib4.c
deleted file mode 100644
index 91d1cc0..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dgetrf_pivot_4_lib4.c
+++ /dev/null
@@ -1,1434 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-
-
-
-// C numbering (starting from zero) in the ipiv
-void kernel_dgetrf_pivot_4_lib4(int m, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	__m128d
-		max0, max1, msk0, imx0, imx1,
-		inv;
-	
-		
-	__m256d
-		lft, msk,
-		sgn, vna, max, imx, idx,
-		ones,
-		tmp,
-		a_0,
-		b_0, b_1, b_2,
-		scl,
-		c_0,
-		d_0;
-	
-	double
-		dlft;
-
-	sgn = _mm256_set_pd( -0.0, -0.0, -0.0, -0.0 );
-	vna = _mm256_set_pd( 4.0, 4.0, 4.0, 4.0 );
-	lft  = _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-
-	double
-		tmp0;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	int B_pref = bs*sda;
-	
-
-	// first column
-
-	// find pivot
-	pB = &pA[0+bs*0];
-	idx = lft; // _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	k = 0;
-	for( ; k<m-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for( ; k<m-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<m)
-		{
-		dlft = m-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		a_0 = _mm256_load_pd( &pB[0] );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_blendv_pd( a_0, sgn, msk );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		inv = _mm_loaddup_pd( &pA[0+bs*0] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[0], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[0] = 0.0;
-		}
-
-
-	// second column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 2.2, 1.2, 0.2, -0.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	c_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x1 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	d_0 = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( d_0, c_0, 0x1 );
-	_mm256_store_pd( &pA[0+bs*0], a_0 );
-	_mm256_store_pd( &pA[0+bs*1], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x1 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[1] = idamax+1;
-	if(tmp0!=0)
-		{
-		if(ipiv[1]!=1)
-			drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		inv = _mm_loaddup_pd( &pA[1+bs*1] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[1], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[1] = 0.0;
-		}
-
-
-	// third column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 1.2, 0.2, -0.8, -1.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*2] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x1 );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x3 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x3 );
-	_mm256_store_pd( &pA[0+bs*1], a_0 );
-	_mm256_store_pd( &pA[0+bs*2], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x3 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[2] = idamax+2;
-	if(tmp0!=0)
-		{
-		if(ipiv[2]!=2)
-			drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-		inv = _mm_loaddup_pd( &pA[2+bs*2] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[2], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[2] = 0.0;
-		}
-
-
-	// fourth column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 0.2, -0.8, -1.8, -2.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*3] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x1 );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x3 );
-	a_0 = _mm256_load_pd( &pA[0+bs*2] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_2 = _mm256_permute2f128_pd( c_0, c_0, 0x11 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x7 );
-	b_2 = _mm256_permute_pd( b_2, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_2 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x7 );
-	_mm256_store_pd( &pA[0+bs*2], a_0 );
-	_mm256_store_pd( &pA[0+bs*3], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x7 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[3] = idamax+3;
-	if(tmp0!=0)
-		{
-		if(ipiv[3]!=3)
-			drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-		inv = _mm_loaddup_pd( &pA[3+bs*3] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[3], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[3] = 0.0;
-		}
-
-	// scale
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		tmp = _mm256_mul_pd( c_0, scl );
-		c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-//		pB += B_pref;
-		}
-
-	return;
-
-	}
-
-	
-
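The variable-size variant that follows handles m and n smaller than 4 by masking out the invalid vector lanes. The idiom it uses throughout (the lft vector of lane offsets 0.2..3.2, the broadcast remaining-row count dlft, compare predicate 14, and _mm256_blendv_pd) is easiest to see in isolation. Below is a minimal standalone sketch of that idiom in plain C with AVX intrinsics on a 4-entry buffer; the function name and setup are illustrative only, not part of BLASFEO.

#include <immintrin.h>
#include <stdio.h>

// Scale only the first r entries of x[0:4] by s, leaving the rest untouched,
// using the same lane-offset / compare / blendv idiom as the kernels above.
// (Illustrative helper, not a BLASFEO routine.)
void scale_head_4(double *x, int r, double s)
	{
	__m256d lft = _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 ); // lane i carries i+0.2
	double dlft = r;
	__m256d msk = _mm256_cmp_pd( lft, _mm256_broadcast_sd( &dlft ), _CMP_GT_OS ); // lane beyond r ?
	__m256d x_0 = _mm256_loadu_pd( x );
	__m256d tmp = _mm256_mul_pd( x_0, _mm256_set1_pd( s ) );
	x_0 = _mm256_blendv_pd( tmp, x_0, msk ); // masked-out lanes keep the old value
	_mm256_storeu_pd( x, x_0 );
	}

int main()
	{
	double x[4] = {1.0, 2.0, 3.0, 4.0};
	scale_head_4(x, 3, 10.0);
	printf("%g %g %g %g\n", x[0], x[1], x[2], x[3]); // prints 10 20 30 4
	return 0;
	}

Built with -mavx this prints 10 20 30 4; the raw immediate 14 in the kernels above is the same _CMP_GT_OS predicate spelled as a number.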
-void kernel_dgetrf_pivot_4_vs_lib4(int m, int n, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	__m128d
-		max0, max1, msk0, imx0, imx1,
-		inv;
-	
-		
-	__m256d
-		lft, msk,
-		sgn, vna, max, imx, idx,
-		ones,
-		tmp,
-		a_0,
-		b_0, b_1, b_2,
-		scl,
-		c_0,
-		d_0;
-	
-	double
-		dlft;
-
-	sgn = _mm256_set_pd( -0.0, -0.0, -0.0, -0.0 );
-	vna = _mm256_set_pd( 4.0, 4.0, 4.0, 4.0 );
-	lft  = _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-
-	double
-		tmp0;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	int B_pref = bs*sda;
-	
-
-	// first column
-
-	// find pivot
-	pB = &pA[0+bs*0];
-	idx = lft; // _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	k = 0;
-	for( ; k<m-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for( ; k<m-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<m)
-		{
-		dlft = m-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		a_0 = _mm256_load_pd( &pB[0] );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_blendv_pd( a_0, sgn, msk );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		inv = _mm_loaddup_pd( &pA[0+bs*0] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[0], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[0] = 0.0;
-		}
-	
-	if(n==1)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x1 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*0], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-
-	// second column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 2.2, 1.2, 0.2, -0.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	c_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x1 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	d_0 = _mm256_sub_pd( c_0, tmp );
-	d_0 = _mm256_blend_pd( d_0, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*0], a_0 );
-	_mm256_store_pd( &pA[0+bs*1], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x1 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>1)
-		{
-		ipiv[1] = idamax+1;
-		if(tmp0!=0)
-			{
-			if(ipiv[1]!=1)
-				drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-			inv = _mm_loaddup_pd( &pA[1+bs*1] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[1], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[1] = 0.0;
-			}
-		}
-
-	if(n==2)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x3 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*1], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-	// third column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 1.2, 0.2, -0.8, -1.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*2] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x3 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x3 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*1], a_0 );
-	_mm256_store_pd( &pA[0+bs*2], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x3 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>2)
-		{
-		ipiv[2] = idamax+2;
-		if(tmp0!=0)
-			{
-			if(ipiv[2]!=2)
-				drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-			inv = _mm_loaddup_pd( &pA[2+bs*2] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[2], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[2] = 0.0;
-			}
-		}
-
-	if(n==3)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x7 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*2], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-	// fourth column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 0.2, -0.8, -1.8, -2.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*3] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x3 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	a_0 = _mm256_load_pd( &pA[0+bs*2] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_2 = _mm256_permute2f128_pd( c_0, c_0, 0x11 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x7 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_2 = _mm256_permute_pd( b_2, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_2 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x7 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*2], a_0 );
-	_mm256_store_pd( &pA[0+bs*3], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x7 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>3)
-		{
-		ipiv[3] = idamax+3;
-		if(tmp0!=0)
-			{
-			if(ipiv[3]!=3)
-				drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-			inv = _mm_loaddup_pd( &pA[3+bs*3] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[3], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[3] = 0.0;
-			}
-		}
-
-	// scale
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		tmp = _mm256_mul_pd( c_0, scl );
-		c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-//		pB += B_pref;
-		}
-
-	return;
-
-	}
-
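To make the removed code easier to follow, the operation that kernel_dgetrf_pivot_4_lib4 and kernel_dgetrf_pivot_4_vs_lib4 perform can be written as a plain scalar reference. This is only an illustrative sketch: it uses ordinary column-major storage with leading dimension lda instead of the panel-major (bs=4, sda) layout, it does not fuse the scaling and trailing update with the next pivot search the way the AVX code does, and it assumes m >= n.

#include <math.h>

// Scalar reference for the partial-pivot LU factorization of an m x n (n <= 4)
// panel: pivot search, row swap, reciprocal of the pivot into inv_diag_A,
// scaling of the sub-diagonal entries, and update of the remaining columns.
void ref_getrf_pivot_panel(int m, int n, double *A, int lda, double *inv_diag_A, int *ipiv)
	{
	int i, j, jj;
	for(j=0; j<n; j++)
		{
		// pivot search: largest magnitude in column j, rows j..m-1
		int ip = j;
		double amax = fabs(A[j+lda*j]);
		for(i=j+1; i<m; i++)
			{
			if(fabs(A[i+lda*j])>amax)
				{
				amax = fabs(A[i+lda*j]);
				ip = i;
				}
			}
		ipiv[j] = ip;
		if(ip!=j)
			{
			// swap rows j and ip across the panel columns
			for(jj=0; jj<n; jj++)
				{
				double t = A[j+lda*jj];
				A[j+lda*jj] = A[ip+lda*jj];
				A[ip+lda*jj] = t;
				}
			}
		if(A[j+lda*j]!=0.0)
			{
			inv_diag_A[j] = 1.0/A[j+lda*j];
			for(i=j+1; i<m; i++)
				A[i+lda*j] *= inv_diag_A[j]; // L column
			}
		else
			{
			inv_diag_A[j] = 0.0; // zero pivot, as in the AVX code
			}
		// rank-1 update of the trailing columns of the panel
		for(jj=j+1; jj<n; jj++)
			for(i=j+1; i<m; i++)
				A[i+lda*jj] -= A[i+lda*j]*A[j+lda*jj];
		}
	}

As in the vectorized kernels, ipiv[j] is the 0-based row index of the pivot and inv_diag_A[j] holds 1.0/pivot (0.0 on a zero pivot).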
diff --git a/third_party/blasfeo/kernel/avx/kernel_dsymv_6_lib4.S b/third_party/blasfeo/kernel/avx/kernel_dsymv_6_lib4.S
deleted file mode 100644
index b55690a..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_dsymv_6_lib4.S
+++ /dev/null
@@ -1,1031 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm4  <- [z_t_4a z_t_4b z_t_4c z_t_4d]
-// ymm5  <- [z_t_5a z_t_5b z_t_5c z_t_5d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- x_n_4
-// ymm11 <- x_n_5
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm4  <- [z_t_4a z_t_4b z_t_4c z_t_4d]
-// ymm5  <- [z_t_5a z_t_5b z_t_5c z_t_5d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- x_n_4
-// ymm11 <- x_n_5
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_NT_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_nt_6_lib4, @function
-inner_kernel_dgemv_add_nt_6_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_nt_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_nt_6_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_nt_6_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-	vmovupd	0(%r14), %ymm13
-
-	vmovapd	0(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm14, %ymm6, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm14, %ymm7, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovapd	64(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm14, %ymm8, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-
-	vmovapd	96(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmulpd	%ymm14, %ymm9, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovapd	128(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	vmulpd	%ymm14, %ymm10, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovapd	160(%r11), %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	vmulpd	%ymm14, %ymm11, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-	vmaskmovpd	0(%r14), %ymm14, %ymm13
-
-	vmovupd	%ymm14, -32(%rsp) // spill mask to stack
-
-//	vmaskmovpd	-32(%rsp), %ymm14
-	vmaskmovpd	0(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm14, %ymm6, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	32(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmulpd	%ymm14, %ymm7, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	64(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	vmulpd	%ymm14, %ymm8, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	96(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vmulpd	%ymm14, %ymm9, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-		
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	128(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm4, %ymm15, %ymm4
-	vmulpd	%ymm14, %ymm10, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	160(%r11), %ymm14, %ymm14
-	vmulpd	%ymm14, %ymm12, %ymm15
-	vaddpd	%ymm5, %ymm15, %ymm5
-	vmulpd	%ymm14, %ymm11, %ymm15
-	vaddpd	%ymm13, %ymm15, %ymm13
-	
-	vmovupd	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovpd	%ymm13, %ymm14, 0(%r14)
-
-	sall	$3, %r10d
-	addq	%r10, %r11
-	addq	%r10, %r13
-	addq	%r10, %r14
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_nt_6_lib4, .-inner_kernel_dgemv_add_nt_6_lib4
-#endif
-#endif
-
-
-
-
-
-
-#if 0
-
-// TODO
-// common inner routine with file scope
-//
-// input arguments:
-// r10   <- kmax
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- kmax-4
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dsymv_add_nt_4_lib4, @function
-inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dsymv_add_nt_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dsymv_add_nt_4_lib4:
-#endif
-#endif
-
-	vmovupd		0(%r13), %ymm12
-	vmovupd		0(%r14), %ymm13
-
-	vmovapd		0(%r11), %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm6, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovapd		32(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm7, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovapd		64(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm8, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-
-	vmovapd		96(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-//	vxorpd		%ymm15, %ymm15, %ymm15
-//	vblendpd	$0x0, %ymm14, %ymm15, %ymm14
-//	vmulpd		%ymm14, %ymm9, %ymm15
-//	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovupd		%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	subq	$4, %r10
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dsymv_add_nt_4_lib4, .-inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 xx xx]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_6_lib4, @function
-inner_blend_t_scale_ab_6_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_6_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_6_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd			%ymm1, %ymm0, %ymm0
-	vhaddpd			%ymm3, %ymm2, %ymm2
-	vhaddpd			%ymm5, %ymm4, %ymm4
-//	vhaddpd			%ymm3, %ymm2, %ymm2
-	vperm2f128		$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128		$0x13, %ymm0, %ymm2, %ymm0
-	vextractf128	$0x1, %ymm4, %xmm5
-	vaddpd			%ymm0, %ymm1, %ymm0
-	vaddpd			%ymm4, %ymm5, %ymm4
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm0
-	vmulpd			%ymm4, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd			0(%r12), %ymm14
-	vmovupd			32(%r12), %ymm13
-	vmulpd			%ymm15, %ymm14, %ymm14
-	vaddpd			%ymm0, %ymm14, %ymm0
-	vmulpd			%ymm15, %ymm13, %ymm13
-	vaddpd			%ymm1, %ymm13, %ymm1
-	
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x3, %ymm1, %ymm15, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_6_lib4, .-inner_blend_t_scale_ab_6_lib4
-#endif
-#endif
-
-
-
-
-
-#if 0
-
-//TODO
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta=1.0
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_a1_4_lib4, @function
-inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_a1_4_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_a1_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-
-	// beta
-	vmovupd		0(%r11), %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_a1_4_lib4, .-inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-#endif
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_6_lib4, @function
-inner_store_6_lib4:
-#elif defined(OS_MAC)
-_inner_store_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_6_lib4; .scl 2; .type 32; .endef
-inner_store_6_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0, 0(%r10)
-	vmovupd %xmm1, 32(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_6_lib4, .-inner_store_6_lib4
-#endif
-#endif
-
-
-
-
-
-//                             rdi    rsi              rdx              rcx        r8       r9           rsp+8        rsp+16          rsp+24       rsp+32       rsp+40
-// void kernel_dgemv_nt_6_lib4(int k, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_nt_6_lib4
-	.type kernel_dgemv_nt_6_lib4, @function
-kernel_dgemv_nt_6_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_nt_6_lib4
-_kernel_dgemv_nt_6_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_nt_6_lib4
-	.def kernel_dgemv_nt_6_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_nt_6_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-	vbroadcastsd 32(%r10), %ymm10
-	vmulpd		%ymm15, %ymm10, %ymm10
-	vbroadcastsd 40(%r10), %ymm11
-	vmulpd		%ymm15, %ymm11, %ymm11
-
-
-	// inner kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_6_lib4
-#endif
-#endif
-
-
-	// inner blend t scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_6_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_6_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_nt_6_lib4, .-kernel_dgemv_nt_6_lib4
-#endif
-
-
-
-
-
-#if 0
-// TODO
-//                            rdi    rsi            rdx        rcx      r8           r9           rsp+8        rsp+16 
-// void kernel_dsymv_l_4_lib4(int k, double *alpha, double *A, int sda, double *x_n, double *x_t, double *z_n, double *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsymv_l_4_lib4
-	.type kernel_dsymv_l_4_lib4, @function
-kernel_dsymv_l_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsymv_l_4_lib4
-_kernel_dsymv_l_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsymv_l_4_lib4
-	.def kernel_dsymv_l_4_lib4; .scl 2; .type 32; .endef
-kernel_dsymv_l_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG5, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge dsyrk & kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG6, %r13  // x_t
-	movq	ARG7, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dsymv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// call inner blend t scale a1
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsymv_l_4_lib4, .-kernel_dsymv_l_4_lib4
-#endif
-
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
-
-
-
-
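The routine removed above fuses a non-transposed and a transposed matrix-vector product (the "nt" in the name) over the same k x 6 block of A, so A is streamed from memory only once while both results are formed. A plain-C reference of the arithmetic is given below, kept deliberately simple: scalar alpha/beta instead of the pointer arguments, and ordinary column-major storage with leading dimension lda instead of the lib4 panel layout.

// Scalar reference for the operation of kernel_dgemv_nt_6_lib4:
//   z_n := z_n + alpha_n * A   * x_n     (x_n has 6 entries, z_n has k)
//   z_t := beta_t * y_t + alpha_t * A^T * x_t     (x_t has k entries, z_t has 6)
void ref_dgemv_nt_6(int k, double alpha_n, double alpha_t, const double *A, int lda,
		const double *x_n, const double *x_t, double beta_t, const double *y_t,
		double *z_n, double *z_t)
	{
	int i, j;
	double acc_t[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
	for(i=0; i<k; i++)
		{
		double acc_n = 0.0;
		for(j=0; j<6; j++)
			{
			acc_n    += A[i+lda*j]*x_n[j]; // row i of A times x_n
			acc_t[j] += A[i+lda*j]*x_t[i]; // column j of A times x_t
			}
		z_n[i] += alpha_n*acc_n;
		}
	for(j=0; j<6; j++)
		z_t[j] = beta_t*y_t[j] + alpha_t*acc_t[j];
	}

This single pass is what the register comments in the deleted file describe: ymm0..ymm5 accumulate the six columns' dot products with x_t, while the z_n update travels through ymm13 and is stored back every four rows.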
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgead_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgead_lib8.S
deleted file mode 100644
index 4cafa0a..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgead_lib8.S
+++ /dev/null
@@ -1,3096 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_0_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_0_lib8, @function
-inner_kernel_sgead_8_0_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_0_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_0_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_0_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%r13), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%r13), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r12
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		64(%r13), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%r13), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%r13), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_0_lib8, .-inner_kernel_sgead_8_0_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_0_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_0_gen_lib8, @function
-inner_kernel_sgead_8_0_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_0_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_0_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovups		0(%r12), %ymm0
-	vmaskmovps	0(%r13), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovups		32(%r12), %ymm0
-	vmaskmovps	32(%r13), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r12
-
-	vmovups		-64(%r12), %ymm0
-	vmaskmovps	64(%r13), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovups		-32(%r12), %ymm0
-	vmaskmovps	-32(%r13), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovups		0(%r12), %ymm0
-	vmaskmovps	0(%r13), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_0_lib8, .-inner_kernel_sgead_8_0_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_1_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_1_lib8, @function
-inner_kernel_sgead_8_1_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_1_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_1_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_1_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-#if 1
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-#else
-	vmovups		4(%r12), %ymm0
-	vmovups		-28(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovups		36(%r12), %ymm0
-	vmovups		4(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovups		-60(%r12), %ymm0
-	vmovups		-92(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovups		-28(%r12), %ymm0
-	vmovups		-60(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-#endif
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_1_lib8, .-inner_kernel_sgead_8_1_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_1_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_1_gen_lib8, @function
-inner_kernel_sgead_8_1_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_1_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_1_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_1_gen_lib8, .-inner_kernel_sgead_8_1_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_2_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_2_lib8, @function
-inner_kernel_sgead_8_2_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_2_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_2_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_2_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_2_lib8, .-inner_kernel_sgead_8_2_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_2_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_2_gen_lib8, @function
-inner_kernel_sgead_8_2_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_2_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_2_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_2_gen_lib8, .-inner_kernel_sgead_8_2_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_3_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_3_lib8, @function
-inner_kernel_sgead_8_3_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_3_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_3_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_3_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x03, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_3_lib8, .-inner_kernel_sgead_8_3_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_3_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_3_gen_lib8, @function
-inner_kernel_sgead_8_3_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_3_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_3_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_3_gen_lib8, .-inner_kernel_sgead_8_3_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_4_lib8, @function
-inner_kernel_sgead_8_4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_4_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_4_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		16(%r12), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		48(%r12), %xmm0
-	vmovaps		32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-
-	vmovaps		-48(%r12), %xmm0
-	vmovaps		64(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %rax
-
-	vmovaps		-16(%r12), %xmm0
-	vmovaps		-32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		96(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 96(%r14)
-	addq		$128, %r14
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		16(%r12), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_4_lib8, .-inner_kernel_sgead_8_4_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_4_gen_lib8, @function
-inner_kernel_sgead_8_4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_4_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_4_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		16(%r12), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		48(%r12), %xmm0
-	vmovaps		32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-
-	vmovaps		-48(%r12), %xmm0
-	vmovaps		64(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %rax
-
-	vmovaps		-16(%r12), %xmm0
-	vmovaps		-32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	96(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 96(%r14)
-	addq		$128, %r14
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		16(%r12), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_4_gen_lib8, .-inner_kernel_sgead_8_4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_5_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_5_lib8, @function
-inner_kernel_sgead_8_5_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_5_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_5_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_5_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_5_lib8, .-inner_kernel_sgead_8_5_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_5_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_5_gen_lib8, @function
-inner_kernel_sgead_8_5_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_5_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_5_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_5_gen_lib8, .-inner_kernel_sgead_8_5_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_6_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_6_lib8, @function
-inner_kernel_sgead_8_6_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_6_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_6_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_6_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_6_lib8, .-inner_kernel_sgead_8_6_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_6_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_6_gen_lib8, @function
-inner_kernel_sgead_8_6_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_6_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_6_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_6_gen_lib8, .-inner_kernel_sgead_8_6_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_7_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_7_lib8, @function
-inner_kernel_sgead_8_7_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_7_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_7_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_7_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		64(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		-32(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x03, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		0(%r14), %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_7_lib8, .-inner_kernel_sgead_8_7_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r12    <- A
-// r13d   <- 8*sda*sizeof(float)
-// r14    <- B
-// r15d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGEAD_8_7_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgead_8_7_gen_lib8, @function
-inner_kernel_sgead_8_7_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgead_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgead_8_7_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgead_8_7_gen_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm14
-
-	// compute mask for rows
-	vcvtsi2ss	%r15d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r12, %rax // A1 <- A0
-	addq	%r13, %rax // A1 <- A0 + 4*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r14)
-	addq		$128, %r12
-	addq		$128, %rax
-
-	vmovaps		-64(%r12), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	64(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r14)
-	addq		$128, %r14
-
-	vmovaps		-32(%r12), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	-32(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r14)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	0(%r14), %ymm15, %ymm13
-	vmulps		%ymm14, %ymm0, %ymm0
-	vaddps		%ymm13, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r14)
-	subl		$1, %r10d
-	addq		$32, %r12
-	addq		$32, %rax
-	addq		$32, %r14
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgead_8_7_gen_lib8, .-inner_kernel_sgead_8_7_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                            1      2             3         4
-// void kernel_sgead_8_0_lib8(int k, float *alpha, float *A, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_0_lib8
-	.type kernel_sgead_8_0_lib8, @function
-kernel_sgead_8_0_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_0_lib8
-_kernel_sgead_8_0_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_0_lib8
-	.def kernel_sgead_8_0_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_0_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_0_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_0_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_0_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_0_lib8, .-kernel_sgead_8_0_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx       rcx
-// void kernel_sgead_8_0_gen_lib8(int k, float *A, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_0_gen_lib8
-	.type kernel_sgead_8_0_gen_lib8, @function
-kernel_sgead_8_0_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_0_gen_lib8
-_kernel_sgead_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_0_gen_lib8
-	.def kernel_sgead_8_0_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_0_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_0_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_0_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_0_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_0_gen_lib8, .-kernel_sgead_8_0_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgead_8_1_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_1_lib8
-	.type kernel_sgead_8_1_lib8, @function
-kernel_sgead_8_1_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_1_lib8
-_kernel_sgead_8_1_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_1_lib8
-	.def kernel_sgead_8_1_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_1_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_1_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_1_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_1_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_1_lib8, .-kernel_sgead_8_1_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgead_8_1_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_1_gen_lib8
-	.type kernel_sgead_8_1_gen_lib8, @function
-kernel_sgead_8_1_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_1_gen_lib8
-_kernel_sgead_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_1_gen_lib8
-	.def kernel_sgead_8_1_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_1_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_1_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_1_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_1_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_1_gen_lib8, .-kernel_sgead_8_1_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgead_8_2_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_2_lib8
-	.type kernel_sgead_8_2_lib8, @function
-kernel_sgead_8_2_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_2_lib8
-_kernel_sgead_8_2_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_2_lib8
-	.def kernel_sgead_8_2_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_2_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_2_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_2_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_2_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_2_lib8, .-kernel_sgead_8_2_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgead_8_2_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_2_gen_lib8
-	.type kernel_sgead_8_2_gen_lib8, @function
-kernel_sgead_8_2_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_2_gen_lib8
-_kernel_sgead_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_2_gen_lib8
-	.def kernel_sgead_8_2_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_2_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_2_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_2_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_2_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_2_gen_lib8, .-kernel_sgead_8_2_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgead_8_3_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_3_lib8
-	.type kernel_sgead_8_3_lib8, @function
-kernel_sgead_8_3_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_3_lib8
-_kernel_sgead_8_3_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_3_lib8
-	.def kernel_sgead_8_3_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_3_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_3_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_3_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_3_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_3_lib8, .-kernel_sgead_8_3_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgead_8_3_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_3_gen_lib8
-	.type kernel_sgead_8_3_gen_lib8, @function
-kernel_sgead_8_3_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_3_gen_lib8
-_kernel_sgead_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_3_gen_lib8
-	.def kernel_sgead_8_3_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_3_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_3_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_3_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_3_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_3_gen_lib8, .-kernel_sgead_8_3_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi           rdx       rcx      r8
-// void kernel_sgead_8_4_lib8(int k, float *alpha, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_4_lib8
-	.type kernel_sgead_8_4_lib8, @function
-kernel_sgead_8_4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_4_lib8
-_kernel_sgead_8_4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_4_lib8
-	.def kernel_sgead_8_4_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_4_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_4_lib8, .-kernel_sgead_8_4_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx      r8        r9
-// void kernel_sgead_8_4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_4_gen_lib8
-	.type kernel_sgead_8_4_gen_lib8, @function
-kernel_sgead_8_4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_4_gen_lib8
-_kernel_sgead_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_4_gen_lib8
-	.def kernel_sgead_8_4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_4_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_4_gen_lib8, .-kernel_sgead_8_4_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi           rdx       rcx      r8
-// void kernel_sgead_8_5_lib8(int k, float *alpha, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_5_lib8
-	.type kernel_sgead_8_5_lib8, @function
-kernel_sgead_8_5_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_5_lib8
-_kernel_sgead_8_5_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_5_lib8
-	.def kernel_sgead_8_5_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_5_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_5_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_5_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_5_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_5_lib8, .-kernel_sgead_8_5_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx      r8        r9
-// void kernel_sgead_8_5_gen_lib8(int k, float *alpha, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_5_gen_lib8
-	.type kernel_sgead_8_5_gen_lib8, @function
-kernel_sgead_8_5_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_5_gen_lib8
-_kernel_sgead_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_5_gen_lib8
-	.def kernel_sgead_8_5_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_5_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_5_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_5_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_5_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_5_gen_lib8, .-kernel_sgead_8_5_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi           rdx       rcx      r8
-// void kernel_sgead_8_6_lib8(int k, float *alpha, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_6_lib8
-	.type kernel_sgead_8_6_lib8, @function
-kernel_sgead_8_6_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_6_lib8
-_kernel_sgead_8_6_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_6_lib8
-	.def kernel_sgead_8_6_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_6_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_6_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_6_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_6_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_6_lib8, .-kernel_sgead_8_6_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx      r8        r9
-// void kernel_sgead_8_6_gen_lib8(int k, float *alpha, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_6_gen_lib8
-	.type kernel_sgead_8_6_gen_lib8, @function
-kernel_sgead_8_6_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_6_gen_lib8
-_kernel_sgead_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_6_gen_lib8
-	.def kernel_sgead_8_6_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_6_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_6_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_6_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_6_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_6_gen_lib8, .-kernel_sgead_8_6_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi           rdx       rcx      r8
-// void kernel_sgead_8_7_lib8(int k, float *alpha, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_7_lib8
-	.type kernel_sgead_8_7_lib8, @function
-kernel_sgead_8_7_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_7_lib8
-_kernel_sgead_8_7_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_7_lib8
-	.def kernel_sgead_8_7_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_7_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_7_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_7_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_7_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_7_lib8, .-kernel_sgead_8_7_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx      r8        r9
-// void kernel_sgead_8_7_gen_lib8(int k, float *alpha, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgead_8_7_gen_lib8
-	.type kernel_sgead_8_7_gen_lib8, @function
-kernel_sgead_8_7_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgead_8_7_gen_lib8
-_kernel_sgead_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgead_8_7_gen_lib8
-	.def kernel_sgead_8_7_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgead_8_7_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgead kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13  // 8*sda*sizeof(float)
-	sall	$5, %r13d
-	movq	ARG5, %r14  // B
-	movq	ARG6, %r15 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGEAD_8_7_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgead_8_7_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgead_8_7_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgead_8_7_gen_lib8, .-kernel_sgead_8_7_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgecp_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgecp_lib8.S
deleted file mode 100644
index 5cd2c00..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgecp_lib8.S
+++ /dev/null
@@ -1,2796 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_0_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_0_lib8, @function
-inner_kernel_sgecp_8_0_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_0_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_0_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_0_lib8:
-#endif
-#endif
-	
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		%ymm0, 0(%r12)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		%ymm0, 32(%r12)
-	addq		$128, %r11
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		%ymm0, 64(%r12)
-	addq		$128, %r12
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		%ymm0, -32(%r12)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		%ymm0, 0(%r12)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %r12
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_0_lib8, .-inner_kernel_sgecp_8_0_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- B
-// r13d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_0_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_0_gen_lib8, @function
-inner_kernel_sgecp_8_0_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_0_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_0_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovups		0(%r11), %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r12)
-	subl		$4, %r10d
-
-	vmovups		32(%r11), %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r12)
-	addq		$128, %r11
-
-	vmovups		-64(%r11), %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r12)
-	addq		$128, %r12
-
-	vmovups		-32(%r11), %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r12)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovups		0(%r11), %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r12)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %r12
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_0_gen_lib8, .-inner_kernel_sgecp_8_0_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_1_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_1_lib8, @function
-inner_kernel_sgecp_8_1_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_1_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_1_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_1_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-#if 1
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-#else
-	vmovups		4(%r11), %ymm0
-	vmovups		-28(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovups		36(%r11), %ymm0
-	vmovups		4(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovups		-60(%r11), %ymm0
-	vmovups		-92(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovups		-28(%r11), %ymm0
-	vmovups		-60(%rax), %ymm1
-	vblendps	$0x80, %ymm1, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-#endif
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_1_lib8, .-inner_kernel_sgecp_8_1_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_1_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_1_gen_lib8, @function
-inner_kernel_sgecp_8_1_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_1_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_1_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x01, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x77, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_1_gen_lib8, .-inner_kernel_sgecp_8_1_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_2_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_2_lib8, @function
-inner_kernel_sgecp_8_2_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_2_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_2_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_2_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_2_lib8, .-inner_kernel_sgecp_8_2_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_2_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_2_gen_lib8, @function
-inner_kernel_sgecp_8_2_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_2_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_2_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x03, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x33, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_2_gen_lib8, .-inner_kernel_sgecp_8_2_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_3_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_3_lib8, @function
-inner_kernel_sgecp_8_3_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_3_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_3_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_3_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_3_lib8, .-inner_kernel_sgecp_8_3_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_3_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_3_gen_lib8, @function
-inner_kernel_sgecp_8_3_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_3_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_3_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x07, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x11, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_3_gen_lib8, .-inner_kernel_sgecp_8_3_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_4_lib8, @function
-inner_kernel_sgecp_8_4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_4_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_4_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		16(%r11), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		48(%r11), %xmm0
-	vmovaps		32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-
-	vmovaps		-48(%r11), %xmm0
-	vmovaps		64(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %rax
-
-	vmovaps		-16(%r11), %xmm0
-	vmovaps		-32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 96(%r13)
-	addq		$128, %r13
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		16(%r11), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_4_lib8, .-inner_kernel_sgecp_8_4_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_4_gen_lib8, @function
-inner_kernel_sgecp_8_4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_4_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		16(%r11), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		48(%r11), %xmm0
-	vmovaps		32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-
-	vmovaps		-48(%r11), %xmm0
-	vmovaps		64(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %rax
-
-	vmovaps		-16(%r11), %xmm0
-	vmovaps		-32(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 96(%r13)
-	addq		$128, %r13
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		16(%r11), %xmm0
-	vmovaps		0(%rax), %xmm1
-	vinsertf128	$0x01, %xmm1, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_4_gen_lib8, .-inner_kernel_sgecp_8_4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_5_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_5_lib8, @function
-inner_kernel_sgecp_8_5_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_5_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_5_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_5_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_5_lib8, .-inner_kernel_sgecp_8_5_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_5_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_5_gen_lib8, @function
-inner_kernel_sgecp_8_5_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_5_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_5_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x1f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x39, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0x88, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_5_gen_lib8, .-inner_kernel_sgecp_8_5_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_6_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_6_lib8, @function
-inner_kernel_sgecp_8_6_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_6_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_6_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_6_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_6_lib8, .-inner_kernel_sgecp_8_6_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_6_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_6_gen_lib8, @function
-inner_kernel_sgecp_8_6_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_6_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_6_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x3f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x4e, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xcc, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_6_gen_lib8, .-inner_kernel_sgecp_8_6_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_7_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_7_lib8, @function
-inner_kernel_sgecp_8_7_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_7_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_7_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_7_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmovaps		%ymm0, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_7_lib8, .-inner_kernel_sgecp_8_7_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12d   <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGECP_8_7_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgecp_8_7_gen_lib8, @function
-inner_kernel_sgecp_8_7_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgecp_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgecp_8_7_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgecp_8_7_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	movq	%r11, %rax // A1 <- A0
-	addq	%r12, %rax // A1 <- A0 + 8*sda*sizeof(float)
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$4, %r10d
-
-	vmovaps		32(%r11), %ymm0
-	vmovaps		32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 32(%r13)
-	addq		$128, %r11
-	addq		$128, %rax
-
-	vmovaps		-64(%r11), %ymm0
-	vmovaps		-64(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 64(%r13)
-	addq		$128, %r13
-
-	vmovaps		-32(%r11), %ymm0
-	vmovaps		-32(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, -32(%r13)
-
-	cmpl		$3, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		0(%rax), %ymm1
-	vblendps	$0x7f, %ymm1, %ymm0, %ymm0
-	vpermilps	$0x93, %ymm0, %ymm0
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm1
-	vblendps	$0xee, %ymm0, %ymm1, %ymm0
-	vmaskmovps	%ymm0, %ymm15, 0(%r13)
-	subl		$1, %r10d
-	addq		$32, %r11
-	addq		$32, %rax
-	addq		$32, %r13
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgecp_8_7_gen_lib8, .-inner_kernel_sgecp_8_7_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx
-// void kernel_sgecp_8_0_lib8(int k, float *A, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_0_lib8
-	.type kernel_sgecp_8_0_lib8, @function
-kernel_sgecp_8_0_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_0_lib8
-_kernel_sgecp_8_0_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_0_lib8
-	.def kernel_sgecp_8_0_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_0_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_0_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_0_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_0_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_0_lib8, .-kernel_sgecp_8_0_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx       rcx
-// void kernel_sgecp_8_0_gen_lib8(int k, float *A, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_0_gen_lib8
-	.type kernel_sgecp_8_0_gen_lib8, @function
-kernel_sgecp_8_0_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_0_gen_lib8
-_kernel_sgecp_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_0_gen_lib8
-	.def kernel_sgecp_8_0_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_0_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_0_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_0_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_0_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_0_gen_lib8, .-kernel_sgecp_8_0_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_1_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_1_lib8
-	.type kernel_sgecp_8_1_lib8, @function
-kernel_sgecp_8_1_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_1_lib8
-_kernel_sgecp_8_1_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_1_lib8
-	.def kernel_sgecp_8_1_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_1_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_1_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_1_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_1_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_1_lib8, .-kernel_sgecp_8_1_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_1_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_1_gen_lib8
-	.type kernel_sgecp_8_1_gen_lib8, @function
-kernel_sgecp_8_1_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_1_gen_lib8
-_kernel_sgecp_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_1_gen_lib8
-	.def kernel_sgecp_8_1_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_1_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_1_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_1_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_1_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_1_gen_lib8, .-kernel_sgecp_8_1_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_2_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_2_lib8
-	.type kernel_sgecp_8_2_lib8, @function
-kernel_sgecp_8_2_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_2_lib8
-_kernel_sgecp_8_2_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_2_lib8
-	.def kernel_sgecp_8_2_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_2_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_2_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_2_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_2_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_2_lib8, .-kernel_sgecp_8_2_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_2_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_2_gen_lib8
-	.type kernel_sgecp_8_2_gen_lib8, @function
-kernel_sgecp_8_2_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_2_gen_lib8
-_kernel_sgecp_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_2_gen_lib8
-	.def kernel_sgecp_8_2_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_2_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_2_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_2_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_2_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_2_gen_lib8, .-kernel_sgecp_8_2_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_3_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_3_lib8
-	.type kernel_sgecp_8_3_lib8, @function
-kernel_sgecp_8_3_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_3_lib8
-_kernel_sgecp_8_3_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_3_lib8
-	.def kernel_sgecp_8_3_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_3_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_3_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_3_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_3_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_3_lib8, .-kernel_sgecp_8_3_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_3_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_3_gen_lib8
-	.type kernel_sgecp_8_3_gen_lib8, @function
-kernel_sgecp_8_3_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_3_gen_lib8
-_kernel_sgecp_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_3_gen_lib8
-	.def kernel_sgecp_8_3_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_3_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_3_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_3_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_3_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_3_gen_lib8, .-kernel_sgecp_8_3_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_4_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_4_lib8
-	.type kernel_sgecp_8_4_lib8, @function
-kernel_sgecp_8_4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_4_lib8
-_kernel_sgecp_8_4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_4_lib8
-	.def kernel_sgecp_8_4_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_4_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_4_lib8, .-kernel_sgecp_8_4_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_4_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_4_gen_lib8
-	.type kernel_sgecp_8_4_gen_lib8, @function
-kernel_sgecp_8_4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_4_gen_lib8
-_kernel_sgecp_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_4_gen_lib8
-	.def kernel_sgecp_8_4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_4_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_4_gen_lib8, .-kernel_sgecp_8_4_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_5_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_5_lib8
-	.type kernel_sgecp_8_5_lib8, @function
-kernel_sgecp_8_5_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_5_lib8
-_kernel_sgecp_8_5_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_5_lib8
-	.def kernel_sgecp_8_5_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_5_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_5_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_5_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_5_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_5_lib8, .-kernel_sgecp_8_5_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_5_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_5_gen_lib8
-	.type kernel_sgecp_8_5_gen_lib8, @function
-kernel_sgecp_8_5_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_5_gen_lib8
-_kernel_sgecp_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_5_gen_lib8
-	.def kernel_sgecp_8_5_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_5_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_5_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_5_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_5_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_5_gen_lib8, .-kernel_sgecp_8_5_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_6_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_6_lib8
-	.type kernel_sgecp_8_6_lib8, @function
-kernel_sgecp_8_6_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_6_lib8
-_kernel_sgecp_8_6_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_6_lib8
-	.def kernel_sgecp_8_6_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_6_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_6_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_6_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_6_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_6_lib8, .-kernel_sgecp_8_6_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_6_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_6_gen_lib8
-	.type kernel_sgecp_8_6_gen_lib8, @function
-kernel_sgecp_8_6_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_6_gen_lib8
-_kernel_sgecp_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_6_gen_lib8
-	.def kernel_sgecp_8_6_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_6_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_6_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_6_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_6_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_6_gen_lib8, .-kernel_sgecp_8_6_gen_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi       rdx      rcx
-// void kernel_sgecp_8_7_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_7_lib8
-	.type kernel_sgecp_8_7_lib8, @function
-kernel_sgecp_8_7_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_7_lib8
-_kernel_sgecp_8_7_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_7_lib8
-	.def kernel_sgecp_8_7_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_7_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_7_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_7_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_7_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_7_lib8, .-kernel_sgecp_8_7_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi       rdx      rcx       r8
-// void kernel_sgecp_8_7_gen_lib8(int k, float *A, int sda, float *B, int m0);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgecp_8_7_gen_lib8
-	.type kernel_sgecp_8_7_gen_lib8, @function
-kernel_sgecp_8_7_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgecp_8_7_gen_lib8
-_kernel_sgecp_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgecp_8_7_gen_lib8
-	.def kernel_sgecp_8_7_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgecp_8_7_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgecp kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // 8*sda*sizeof(float)
-	sall	$5, %r12d
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGECP_8_7_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgecp_8_7_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgecp_8_7_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgecp_8_7_gen_lib8, .-kernel_sgecp_8_7_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemm_16x4_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgemm_16x4_lib8.S
deleted file mode 100644
index 5c2d6c4..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemm_16x4_lib8.S
+++ /dev/null
@@ -1,7057 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_16x4_lib8, @function
-inner_kernel_gemm_add_nt_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_16x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 8*sda*sizeof(float)
-
-	// preload
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-//  8 A0
-//  9 A1
-// 10 A0+
-// 11 A1+
-// 12 B
-// 13 B+
-// 14 Bt
-// 15 tmp
-	
-	// unroll 0
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl	$4, %r10d
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r11), %ymm10 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r15), %ymm11 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 1
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r11), %ymm8 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r15), %ymm9 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 2
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	addq	$128, %r13
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r11), %ymm10 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	addq	$128, %r11
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r15), %ymm11 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	addq	$128, %r15
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 3
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			0(%r11), %ymm8 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			0(%r15), %ymm9 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl	$4, %r10d
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r11), %ymm10 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r15), %ymm11 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 1
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r11), %ymm8 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r15), %ymm9 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 2
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	addq	$128, %r13
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r11), %ymm10 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	addq	$128, %r11
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r15), %ymm11 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	addq	$128, %r15
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 3
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vbroadcastf128	0(%r13), %ymm12 // B
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vmovaps			0(%r11), %ymm8 // A0
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vmovaps			0(%r15), %ymm9 // A1
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-//	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r13
-	addq	$32, %r15
-
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_16x4_lib8, .-inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_16x4_lib8, @function
-inner_kernel_gemm_sub_nt_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_16x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 8*sda*sizeof(float)
-
-	// preload
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-//  8 A0
-//  9 A1
-// 10 A0+
-// 11 A1+
-// 12 B
-// 13 B+
-// 14 Bt
-// 15 tmp
-	
-	// unroll 0
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	subl	$4, %r10d
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r11), %ymm10 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r15), %ymm11 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 1
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r11), %ymm8 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r15), %ymm9 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 2
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	addq	$128, %r13
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r11), %ymm10 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	addq	$128, %r11
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r15), %ymm11 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	addq	$128, %r15
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 3
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			0(%r11), %ymm8 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			0(%r15), %ymm9 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	subl	$4, %r10d
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r11), %ymm10 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			32(%r15), %ymm11 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 1
-	vmulps			%ymm10, %ymm14, %ymm15
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r11), %ymm8 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vmovaps			64(%r15), %ymm9 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 2
-	vmulps			%ymm8, %ymm14, %ymm15
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	addq	$128, %r13
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r11), %ymm10 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	addq	$128, %r11
-	vmulps			%ymm8, %ymm14, %ymm15
-	vmovaps			96(%r15), %ymm11 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	addq	$128, %r15
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 3
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vbroadcastf128	0(%r13), %ymm12 // B
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vmovaps			0(%r11), %ymm8 // A0
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vmulps			%ymm10, %ymm14, %ymm15
-//	vmovaps			0(%r15), %ymm9 // A1
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm14, %ymm15
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	vmulps			%ymm10, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm14, %ymm15
-//	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vsubps			%ymm15, %ymm7, %ymm7
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm4, %ymm4
-
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm5, %ymm5
-
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm6, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r13
-	addq	$32, %r15
-
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vmulps			%ymm8, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm9, %ymm14, %ymm15
-	vsubps			%ymm15, %ymm7, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_16x4_lib8, .-inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_16x4_lib8, @function
-inner_kernel_gemm_add_nn_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_16x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-
-	cmpl	$8, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r13, %r14, 1) // software prefetch
-	prefetcht0	64(%r13, %r14, 1) // software prefetch
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	32(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	64(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	96(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	subl	$8, %r10d
-
-	// unroll 1
-	vbroadcastss	4(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	100(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastss	8(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	104(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 3
-	vbroadcastss	12(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			128(%r11), %ymm13 // A
-	vbroadcastss	44(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			128(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	76(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	108(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 4
-	vbroadcastss	16(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			160(%r11), %ymm13 // A
-	vbroadcastss	48(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			160(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	80(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	112(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 5
-	vbroadcastss	20(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			192(%r11), %ymm13 // A
-	vbroadcastss	52(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			192(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	84(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	116(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 6
-	vbroadcastss	24(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			224(%r11), %ymm13 // A
-	vbroadcastss	56(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			224(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	88(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	120(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	addq	$256, %r11
-
-	// unroll 7
-	vbroadcastss	28(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	60(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	92(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	124(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	addq	%r14, %r13
-
-	cmpl	$8, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$7, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	32(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	64(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	96(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	subl	$8, %r10d
-
-	// unroll 1
-	vbroadcastss	4(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	100(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 2
-	vbroadcastss	8(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			96(%r11), %ymm10 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	104(%r13), %ymm12 // B
-	vmulps			%ymm13, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm14, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 3
-	vbroadcastss	12(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			128(%r11), %ymm13 // A
-	vbroadcastss	44(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			128(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	76(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	108(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 4
-	vbroadcastss	16(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			160(%r11), %ymm13 // A
-	vbroadcastss	48(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			160(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	80(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	112(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 5
-	vbroadcastss	20(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			192(%r11), %ymm13 // A
-	vbroadcastss	52(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			192(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	84(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	116(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	// unroll 6
-	vbroadcastss	24(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vmovapd			224(%r11), %ymm13 // A
-	vbroadcastss	56(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vmovapd			224(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	88(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	120(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	addq	$256, %r11
-
-	// unroll 7
-	vbroadcastss	28(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	60(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	92(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	124(%r13), %ymm12 // B
-	vmulps			%ymm10, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm11, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-	addq	%r14, %r13
-
-	jmp		2f // return
-
-
-4: // consider clean-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vbroadcastss	0(%r13), %ymm14 // B[0]
-	vmulps			%ymm12, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm13, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	32(%r13), %ymm14 // B[1]
-	vmulps			%ymm12, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm13, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	64(%r13), %ymm14 // B[2]
-	vmulps			%ymm12, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm13, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	96(%r13), %ymm14 // B[3]
-	vmulps			%ymm12, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vmulps			%ymm13, %ymm14, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_16x4_lib8, .-inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_16x4_lib8, @function
-inner_edge_gemm_add_nn_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_16x4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_16x4_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %ebx
-	subl			%r15d, %ebx // 8-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,8-offsetB)
-
-	movl			%r15d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r13 // B+offsetB*sizeof(float)
-
-1:
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vmulps			%ymm12, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm0, %ymm0
-	vmulps			%ymm13, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm4, %ymm4
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vmulps			%ymm12, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm1, %ymm1
-	vmulps			%ymm13, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm5, %ymm5
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vmulps			%ymm12, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm2, %ymm2
-	vmulps			%ymm13, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm6, %ymm6
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vmulps			%ymm12, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm3, %ymm3
-	vmulps			%ymm13, %ymm15, %ymm14
-	vaddps			%ymm14, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // end-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r13 // B+1*sizeof(float)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_16x4_lib8, .-inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
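
The edge routine above peels off the first kend = min(k, 8 - offB) inner iterations one at a time, so that the main nn loop afterwards always sees B aligned to the top of an 8-row panel. A loose C model of that logic is sketched below; the function and variable names are illustrative and not taken from the library, and the pointer bookkeeping of the assembly is reduced to index arithmetic.

/* Loose C model of the unaligned-B edge (illustrative only).
 * A[l] is column l of the 16-row A block, B[l][j] is element (l, j) of the
 * k x 4 block of B, acc is the 16x4 accumulator. */
static void edge_gemm_nn_16x4_model(int k, int offB,
                                    const float A[][16], const float B[][4],
                                    float acc[16][4],
                                    int *k_left, int *l_start)
{
	int kend = 8 - offB;
	if (kend > k)
		kend = k;                       /* kend = min(k, 8 - offB) */
	if (offB == 0 || k == 0)
		kend = 0;                       /* nothing to peel off     */
	for (int l = 0; l < kend; l++)
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 16; i++)
				acc[i][j] += A[l][i] * B[l][j];
	*k_left  = k - kend;                /* iterations left for the main loop */
	*l_start = kend;                    /* main loop resumes at this index   */
}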
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trmm_nn_rl_16x4_lib8, @function
-inner_edge_trmm_nn_rl_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trmm_nn_rl_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trmm_nn_rl_16x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trmm_nn_rl_16x4_lib8:
-#endif
-#endif
-	
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	movl		%r15d, %eax
-	sall		$2, %eax // offsetB*sizeof(float)
-	movq		%r13, %rbx // B
-	addq		%rax, %rbx // B+offsetB*sizeof(float)
-
-
-	cmpl	$4, %r15d
-	jg		1f
-
-	// offB==0, 1, 2, 3, 4
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$5, %r15d
-	jg		1f
-
-	// offB==5
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movl		$0, %r15d // offsetB=0
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$6, %r15d
-	jg		1f
-
-	// offB==6
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	64(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-//	cmpl	$7, %r15d
-//	jg		0f
-
-	// offB==7
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	68(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vmulps			%ymm9, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-//	jmp			0f // end
-
-
-	// end
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trmm_nn_rl_16x4_lib8, .-inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
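
Because the trmm variant multiplies by a lower-triangular B from the right, the edge above only updates a growing prefix of the four result columns during the first inner iterations. Ignoring the offB > 4 paths, which additionally wrap to the next B panel, the arithmetic reduces to the following sketch (names are illustrative, not the library's):

/* Illustrative model of the trmm nn / right / lower edge: at inner index l
 * only columns 0..l of the 16x4 block receive a contribution, since
 * B[l][j] == 0 for j > l in a lower-triangular B. */
static void edge_trmm_nn_rl_16x4_model(int kend, const float A[][16],
                                       const float B[][4], float acc[16][4])
{
	for (int l = 0; l < kend && l < 3; l++)     /* the asm unrolls 3 steps */
		for (int j = 0; j <= l; j++)
			for (int i = 0; i < 16; i++)
				acc[i][j] += A[l][i] * B[l][j];
}

From l = 3 onwards every column is touched, so the remaining iterations are handled by the generic gemm inner loop.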
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_16x4_vs_lib8, @function
-inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vmulps			%ymm5, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vmulps			%ymm5, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vmulps			%ymm6, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_16x4_vs_lib8, .-inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
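
The trsm edge above performs a right triangular solve against the transpose of a 4x4 lower-triangular factor whose diagonal has already been inverted (the header comments call the operands D and inv_diag_D). A C-level reconstruction, with E and inv_diag as assumed names for the arrays reached through r10 and r11, is:

/* Illustrative model of the right / lower / transposed / not-unit solve:
 * acc (16x4) is overwritten with acc * E^{-T}, where E is 4x4 lower
 * triangular and inv_diag[j] = 1 / E[j][j]. kn limits the columns used. */
static void edge_trsm_rlt_inv_16x4_model(float acc[16][4], const float E[4][4],
                                         const float inv_diag[4], int kn)
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 16; i++)
			acc[i][j] *= inv_diag[j];        /* divide by E[j][j]            */
		if (j + 1 >= kn)
			return;                          /* later columns are not stored */
		for (int jj = j + 1; jj < 4; jj++)
			for (int i = 0; i < 16; i++)
				acc[i][jj] -= acc[i][j] * E[jj][j];
	}
}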
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization vs
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_16x4_vs_lib8, @function
-inner_edge_potrf_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_16x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	vmulps		%ymm4, %ymm13, %ymm4
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	vmulps		%ymm5, %ymm13, %ymm5
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	vmulps		%ymm6, %ymm13, %ymm6
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm6, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovss		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-	vmulps		%ymm7, %ymm13, %ymm7
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_16x4_vs_lib8, .-inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
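
The potrf edge factors the 4x4 diagonal block of the 16x4 accumulator panel: each pivot is inverted through sqrt, the column is scaled, a rank-1 update is applied to the remaining columns, and a non-positive pivot falls back to a zero factor (the jbe branches above). A compact C model, with assumed names, is shown below; the 12x4 variant that follows differs only in which lanes the pivots are read from.

#include <math.h>

/* Illustrative model of the Cholesky edge for one 16x4 column panel. */
static void edge_potrf_16x4_model(float acc[16][4], float inv_diag[4], int kn)
{
	for (int j = 0; j < 4; j++) {
		float d = acc[j][j];
		float f = (d > 0.0f) ? 1.0f / sqrtf(d) : 0.0f;   /* guard d_jj <= 0 */
		inv_diag[j] = f;
		for (int i = 0; i < 16; i++)
			acc[i][j] *= f;                  /* scale column j              */
		if (j + 1 >= kn)
			return;
		for (int jj = j + 1; jj < 4; jj++) { /* rank-1 update of the rest   */
			float l_jj = acc[jj][j];
			for (int i = 0; i < 16; i++)
				acc[i][jj] -= acc[i][j] * l_jj;
		}
	}
}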
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization vs
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_12x4_vs_lib8, @function
-inner_edge_potrf_12x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_12x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_12x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vextractf128	$0x1, %ymm0, %xmm13
-//	vpermilps		$0x00, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm11
-	vpermilps		$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm1, %xmm13
-	vpermilps		$0x55, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm11
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vpermilps		$0xaa, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm11
-	vpermilps		$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vmulps		%ymm6, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilps		$0xff, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-
-	jmp		0f
-
-
-1:
-	vxorps			%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_12x4_vs_lib8, .-inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 8*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 8*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_16x4_lib8, @function
-inner_scale_ab_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_16x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_16x4_lib8, .-inner_scale_ab_16x4_lib8
-#endif
-#endif
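
The scale routine above is the alpha/beta stage of D = alpha*A*B + beta*C: the accumulators are scaled by alpha and, unless beta equals zero, beta*C is added, with C read as two 8x4 panels. A plain C equivalent, with C0/C1 standing for the two panel base pointers (assumed names), is:

/* Illustrative model of inner_scale_ab for a 16x4 block. */
static void scale_ab_16x4_model(float acc[16][4], float alpha, float beta,
                                const float *C0, const float *C1)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 16; i++)
			acc[i][j] *= alpha;
	if (beta == 0.0f)
		return;                              /* the asm skips the C loads   */
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++) {
			acc[i][j]     += beta * C0[i + 8 * j];   /* top 8x4 panel    */
			acc[i + 8][j] += beta * C1[i + 8 * j];   /* bottom 8x4 panel */
		}
}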
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_16x4_gen_lib8, @function
-inner_scale_ab_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_16x4_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	movq	%r13, %rax // C1 <- C0
-	addq	%r14, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	vmovaps		0(%rax), %ymm14
-	vmulps		%ymm14, %ymm15, %ymm14
-	vaddps		%ymm4, %ymm14, %ymm4
-	vmovaps		32(%rax), %ymm14
-	vmulps		%ymm14, %ymm15, %ymm14
-	vaddps		%ymm5, %ymm14, %ymm5
-	vmovaps		64(%rax), %ymm14
-	vmulps		%ymm14, %ymm15, %ymm14
-	vaddps		%ymm6, %ymm14, %ymm6
-	vmovaps		96(%rax), %ymm14
-	vmulps		%ymm14, %ymm15, %ymm14
-	vaddps		%ymm7, %ymm14, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_16x4_gen_lib8, .-inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_16x4_lib8, @function
-inner_scale_a0_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_a0_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_a0_16x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_16x4_lib8, .-inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 8*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 8*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_16x4_lib8, @function
-inner_scale_11_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_11_16x4_lib8:
-#endif
-#endif
-	
-	movq	%r10, %r15 // C1 <- C0
-	addq	%r11, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_16x4_lib8, .-inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_16x4_gen_lib8, @function
-inner_scale_11_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_11_16x4_gen_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // C1 <- C0
-	addq	%r12, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	vmovaps		0(%rax), %ymm14
-	vaddps		%ymm4, %ymm14, %ymm4
-	vmovaps		32(%rax), %ymm14
-	vaddps		%ymm5, %ymm14, %ymm5
-	vmovaps		64(%rax), %ymm14
-	vaddps		%ymm6, %ymm14, %ymm6
-	vmovaps		96(%rax), %ymm14
-	vaddps		%ymm7, %ymm14, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r12, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_16x4_gen_lib8, .-inner_scale_11_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_lib8, @function
-inner_store_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_lib8:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r15)
-	vmovaps 	%ymm5, 32(%r15)
-	vmovaps 	%ymm6, 64(%r15)
-	vmovaps 	%ymm7, 96(%r15)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_lib8, .-inner_store_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_vs_lib8, @function
-inner_store_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			7f // end
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			7f // end
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			7f // end
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-	jmp		0f
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_vs_lib8, .-inner_store_16x4_vs_lib8
-#endif
-#endif
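
The vs store above writes only the valid part of the 16x4 block: the top 8 rows are stored unmasked, the bottom 8 rows go through a vmaskmovps mask derived from km and a constant table (.LC01, defined elsewhere in this file), and trailing columns are skipped once kn is reached. In scalar C the same effect is a pair of bounds checks; D0/D1 below are assumed names for the two destination panels.

/* Illustrative model of the masked 16x4 store with row/column limits. */
static void store_16x4_vs_model(const float acc[16][4], float *D0, float *D1,
                                int km, int kn)
{
	for (int j = 0; j < 4 && j < kn; j++) {
		for (int i = 0; i < 8; i++)
			D0[i + 8 * j] = acc[i][j];           /* rows 0..7: unmasked      */
		for (int i = 0; i < 8 && i + 8 < km; i++)
			D1[i + 8 * j] = acc[i + 8][j];       /* rows 8..15: masked by km */
	}
}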
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_gen_lib8, @function
-inner_store_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute D1
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(float)
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	cmpl		$2, %r15d
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%rbx)
-	jl			7f // end
-	cmpl		$3, %r15d
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%rbx)
-	jl			7f // end
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%rbx)
-	je			7f // end
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%rbx)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbp // D1
-	addq	%r12, %rbp // D2 <- D1 + 4*sdd*sizeof(float)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_gen_lib8, .-inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l
-//
-// input arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_lib8, @function
-inner_store_l_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_lib8:
-#endif
-#endif
-	
-	vmovaps		32(%r10), %ymm12
-	vmovaps		64(%r10), %ymm13
-	vmovaps		96(%r10), %ymm14
-
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vblendps	$0x03, %ymm13, %ymm2, %ymm2
-	vblendps	$0x07, %ymm14, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_lib8, .-inner_store_l_16x4_lib8
-#endif
-#endif
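
The lower store above writes the full bottom panel but, in the top panel, keeps the entries above the diagonal from the existing D: that is what the vblendps masks 0x01, 0x03 and 0x07 select for columns 1, 2 and 3. A scalar C rendering, with D0/D1 as assumed panel pointers, is:

/* Illustrative model of the lower-triangular 16x4 store. */
static void store_l_16x4_model(const float acc[16][4], float *D0, float *D1)
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 8; i++)
			if (i >= j)                          /* keep D0 above the diagonal */
				D0[i + 8 * j] = acc[i][j];
		for (int i = 0; i < 8; i++)
			D1[i + 8 * j] = acc[i + 8][j];       /* rows 8..15: full store     */
	}
}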
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_vs_lib8, @function
-inner_store_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_vs_lib8, .-inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_gen_lib8, @function
-inner_store_l_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%r11, %r12, 1)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_gen_lib8, .-inner_store_l_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l
-//
-// input arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 8*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_lib8, @function
-inner_store_l_12x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_lib8:
-#endif
-#endif
-	
-	vmovaps		0(%r10), %ymm12
-	vmovaps		32(%r10), %ymm13
-	vmovaps		64(%r10), %ymm14
-	vmovaps		96(%r10), %ymm15
-
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vblendps	$0x1f, %ymm13, %ymm1, %ymm1
-	vblendps	$0x3f, %ymm14, %ymm2, %ymm2
-	vblendps	$0x7f, %ymm15, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_lib8, .-inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_vs_lib8, @function
-inner_store_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		0(%r10), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_vs_lib8, .-inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_gen_lib8, @function
-inner_store_l_12x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmovaps		0(%r11), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%r11, %r12, 1)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_gen_lib8, .-inner_store_l_12x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                                rdi    rsi           rdx       rcx      r8        r9           rsp+8     rsp+16   rsp+24    rsp+32
-// void kernel_sgemm_nt_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_lib8
-	.type kernel_sgemm_nt_16x4_lib8, @function
-kernel_sgemm_nt_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_lib8
-_kernel_sgemm_nt_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_lib8, .-kernel_sgemm_nt_16x4_lib8
-#endif
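
Functionally the exported kernel computes D = alpha*A*B^T + beta*C on a 16x4 block stored in 8-row panels. The reference C below spells that out; the panel addressing (second panel at an offset of 8*sda floats, element (i, l) at (i % 8) + 8*l inside a panel) is inferred from the sall $5 scaling above and is an assumption, as are all names.

/* Illustrative scalar reference for kernel_sgemm_nt_16x4_lib8. */
static void sgemm_nt_16x4_ref(int k, float alpha, const float *A, int sda,
                              const float *B, float beta,
                              const float *C, int sdc, float *D, int sdd)
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 16; i++) {
			const float *Ap = A + (i / 8) * (8 * sda) + (i % 8);
			const float *Cp = C + (i / 8) * (8 * sdc) + (i % 8);
			float       *Dp = D + (i / 8) * (8 * sdd) + (i % 8);
			float acc = 0.0f;
			for (int l = 0; l < k; l++)
				acc += Ap[8 * l] * B[j + 8 * l];   /* B is accessed transposed */
			Dp[8 * j] = alpha * acc + beta * Cp[8 * j];
		}
	}
}

With *alpha and *beta dereferenced by the caller, sgemm_nt_16x4_ref(k, alpha, A, sda, B, beta, C, sdc, D, sdd) mirrors the argument order of the prototype in the comment above.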
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10        11      12
-// void kernel_sgemm_nt_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_vs_lib8
-	.type kernel_sgemm_nt_16x4_vs_lib8, @function
-kernel_sgemm_nt_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_vs_lib8
-_kernel_sgemm_nt_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_16x4_vs_lib8
-	.def kernel_sgemm_nt_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_vs_lib8, .-kernel_sgemm_nt_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9           rsp+8        rsp+16    rsp+24   rsp+32       rsp+40    rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_sgemm_nt_16x4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
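-//
-// The _gen variant is assumed to handle unaligned sub-blocks: offsetC/offsetD
-// give the starting row inside the first panel of C/D, and [m0,m1) x [n0,n1)
-// bound the rows/columns that are actually read from C and written to D.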
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_gen_lib8
-	.type kernel_sgemm_nt_16x4_gen_lib8, @function
-kernel_sgemm_nt_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_gen_lib8
-_kernel_sgemm_nt_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_16x4_gen_lib8
-	.def kernel_sgemm_nt_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_gen_lib8, .-kernel_sgemm_nt_16x4_gen_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi           rdx       rcx      r8           r9        rsp+8    rsp+16       rsp+24    rsp+32   rsp+40    rsp+48
-// void kernel_sgemm_nn_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_lib8
-	.type kernel_sgemm_nn_16x4_lib8, @function
-kernel_sgemm_nn_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_lib8
-_kernel_sgemm_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_lib8
-	.def kernel_sgemm_nn_16x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_lib8, .-kernel_sgemm_nn_16x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8            9         10       11        12       13      14
-// void kernel_sgemm_nn_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_vs_lib8
-	.type kernel_sgemm_nn_16x4_vs_lib8, @function
-kernel_sgemm_nn_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_vs_lib8
-_kernel_sgemm_nn_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_vs_lib8
-	.def kernel_sgemm_nn_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG13, %r12 // km
-	movq	ARG14, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_vs_lib8, .-kernel_sgemm_nn_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9        rsp+8    rsp+16       rsp+24    rsp+32    rsp+40   rsp+48    rsp+56    rsp+64   rsp+72  rsp+80  rsp+88  rsp+96
-// void kernel_sgemm_nn_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_gen_lib8
-	.type kernel_sgemm_nn_16x4_gen_lib8, @function
-kernel_sgemm_nn_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_gen_lib8
-_kernel_sgemm_nn_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_gen_lib8
-	.def kernel_sgemm_nn_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // offsetC
-	movq	ARG10, %r13 // C
-	movq	ARG11, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG12, %r10 // offsetD
-	movq	ARG13, %r11 // D
-	movq	ARG14, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG15, %r13 // m0
-	movq	ARG16, %r14 // m1
-	movq	ARG17, %r15 // n0
-	movq	ARG18, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_gen_lib8, .-kernel_sgemm_nn_16x4_gen_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
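-//
-// Same inner add-NT kernel and scaling as the sgemm_nt kernels above; the
-// difference is the store, which goes through inner_store_l_16x4_lib8 and is
-// presumed to write back only the lower-triangular part of the 16x4 block.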
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_16x4_lib8
-	.type kernel_ssyrk_nt_l_16x4_lib8, @function
-kernel_ssyrk_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_16x4_lib8
-_kernel_ssyrk_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_16x4_lib8, .-kernel_ssyrk_nt_l_16x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_16x4_vs_lib8
-	.type kernel_ssyrk_nt_l_16x4_vs_lib8, @function
-kernel_ssyrk_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_16x4_vs_lib8
-_kernel_ssyrk_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_16x4_vs_lib8
-	.def kernel_ssyrk_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_16x4_vs_lib8, .-kernel_ssyrk_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_12x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_12x4_lib8
-	.type kernel_ssyrk_nt_l_12x4_lib8, @function
-kernel_ssyrk_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_12x4_lib8
-_kernel_ssyrk_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_12x4_lib8, .-kernel_ssyrk_nt_l_12x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_12x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_12x4_vs_lib8
-	.type kernel_ssyrk_nt_l_12x4_vs_lib8, @function
-kernel_ssyrk_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_12x4_vs_lib8
-_kernel_ssyrk_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_12x4_vs_lib8
-	.def kernel_ssyrk_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_12x4_vs_lib8, .-kernel_ssyrk_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                       rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32 
-// void kernel_strsm_nt_rl_inv_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
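-//
-// Fused gemm + triangular solve (a sketch of the intended math): the sub-NT
-// stage forms C - A * B^T, and the trsm edge then appears to solve
-// D * E^T = C - A * B^T for D, with E lower triangular, using the precomputed
-// reciprocals in inv_diag_E in place of divisions.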
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_16x4_lib8
-	.type kernel_strsm_nt_rl_inv_16x4_lib8, @function
-kernel_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_16x4_lib8
-_kernel_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_16x4_lib8
-	.def kernel_strsm_nt_rl_inv_16x4_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_16x4_lib8, .-kernel_strsm_nt_rl_inv_16x4_lib8
-#endif
-
-
-
-
-
-//                                          rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32             rsp+40  rsp+48
-// void kernel_strsm_nt_rl_inv_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_16x4_vs_lib8
-	.type kernel_strsm_nt_rl_inv_16x4_vs_lib8, @function
-kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_16x4_vs_lib8
-_kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_16x4_vs_lib8
-	.def kernel_strsm_nt_rl_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12 // m1 
-	movq	ARG12, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_16x4_vs_lib8, .-kernel_strsm_nt_rl_inv_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                             1       2          3         4          5       6          7         8          9         10       11        12       13        14
-// void kernel_sgemm_strsm_nt_rl_inv_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_16x4_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-_kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_16x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sda*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_16x4_lib8, .-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-#endif
-
-
-
-
-
-//                                                1       2          3         4          5       6          7         8          9         10       11        12       13        14                 15      16
-// void kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sda*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_12x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
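-//
-// Cholesky kernel (sketch): C - A * B^T is formed first, then the potrf edge
-// routine is assumed to factor the 4 new columns of the lower-triangular
-// result in place, writing the reciprocals of the fresh diagonal entries to
-// inv_diag_D for use by subsequent trsm/potrf kernels.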
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_12x4_lib8
-	.type kernel_spotrf_nt_l_12x4_lib8, @function
-kernel_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_12x4_lib8
-_kernel_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_12x4_lib8
-	.def kernel_spotrf_nt_l_12x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_12x4_lib8, .-kernel_spotrf_nt_l_12x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_12x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_12x4_vs_lib8
-	.type kernel_spotrf_nt_l_12x4_vs_lib8, @function
-kernel_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_12x4_vs_lib8
-_kernel_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_12x4_vs_lib8
-	.def kernel_spotrf_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_12x4_vs_lib8, .-kernel_spotrf_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_16x4_lib8
-	.type kernel_spotrf_nt_l_16x4_lib8, @function
-kernel_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_16x4_lib8
-_kernel_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_16x4_lib8
-	.def kernel_spotrf_nt_l_16x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_16x4_lib8, .-kernel_spotrf_nt_l_16x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_16x4_vs_lib8
-	.type kernel_spotrf_nt_l_16x4_vs_lib8, @function
-kernel_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_16x4_vs_lib8
-_kernel_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_16x4_vs_lib8
-	.def kernel_spotrf_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_16x4_vs_lib8, .-kernel_spotrf_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_12x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
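-//
-// Fused syrk + potrf (sketch): one pass accumulates + Ap * Bp^T over kp
-// columns and - Am * Bm^T over km columns on top of C, then factors the
-// result in the same way as the plain spotrf kernels above.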
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_12x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_12x4_lib8
-_kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_12x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_12x4_lib8, .-kernel_ssyrk_spotrf_nt_l_12x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_16x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_16x4_lib8
-_kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_16x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_16x4_lib8, .-kernel_ssyrk_spotrf_nt_l_16x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8         9
-// void kernel_strmm_nn_rl_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd);
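-//
-// Triangular matrix multiply (sketch): D = alpha * A * B with B lower
-// triangular on the right ("nn_rl"); offsetB gives B's starting row inside
-// its first panel, and the trmm edge routine below handles the triangular
-// corner before falling through to the regular nn gemm loop.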
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_lib8
-	.type kernel_strmm_nn_rl_16x4_lib8, @function
-kernel_strmm_nn_rl_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_lib8
-_kernel_strmm_nn_rl_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_lib8
-	.def kernel_strmm_nn_rl_16x4_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_lib8, .-kernel_strmm_nn_rl_16x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2             3         4        5            6         7        8         9        10      11
-// void kernel_strmm_nn_rl_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_vs_lib8
-	.type kernel_strmm_nn_rl_16x4_vs_lib8, @function
-kernel_strmm_nn_rl_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_vs_lib8
-_kernel_strmm_nn_rl_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_vs_lib8
-	.def kernel_strmm_nn_rl_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // km
-	movq	ARG11, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_vs_lib8, .-kernel_strmm_nn_rl_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                       1      2             3         4        5            6         7        8            9         10       11      12      13      14
-// void kernel_strmm_nn_rl_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_gen_lib8
-	.type kernel_strmm_nn_rl_16x4_gen_lib8, @function
-kernel_strmm_nn_rl_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_gen_lib8
-_kernel_strmm_nn_rl_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_gen_lib8
-	.def kernel_strmm_nn_rl_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // offsetD
-	movq	ARG9, %r11 // D
-	movq	ARG10, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG11, %r13 // m0
-	movq	ARG12, %r14 // m1
-	movq	ARG13, %r15 // n0
-	movq	ARG14, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_gen_lib8, .-kernel_strmm_nn_rl_16x4_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
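
For orientation, the assembly removed above (the 16x4 lib8 strmm/ssyrk kernels) and in kernel_sgemm_8x4_lib8.S below implements BLASFEO's AVX single-precision kernels, which accumulate small register-blocked tiles of matrix products over an 8-row panel-major layout. The scalar C sketch below is only an illustration of the arithmetic an 8x4 "nt" tile update performs; the function name ref_sgemm_nt_8x4 and the plain column-major tile layout are assumptions for this sketch, not BLASFEO's API, and the real kernels additionally handle panel strides (sda/sdb/sdd), offsets, and edge cases.

/* Scalar reference sketch (assumption: plain column-major tiles, not
 * BLASFEO's panel-major storage). Computes D = alpha*A*B^T + beta*C
 * for an 8x4 tile, i.e. the arithmetic of an 8x4 "nt" kernel. */
#include <stdio.h>

static void ref_sgemm_nt_8x4(int k, float alpha,
                             const float *A,  /* 8 x k, column-major */
                             const float *B,  /* 4 x k, column-major */
                             float beta, const float *C, float *D)  /* 8x4 tiles */
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 8; i++) {
			float acc = 0.0f;
			for (int p = 0; p < k; p++)
				acc += A[i + 8 * p] * B[j + 4 * p];  /* A(i,p)*B(j,p): A*B^T */
			D[i + 8 * j] = alpha * acc + beta * C[i + 8 * j];
		}
	}
}

int main(void)
{
	/* smoke test: k = 2, A = all ones, B = all twos -> every D entry is 4 */
	float A[16], B[8], C[32] = {0}, D[32];
	for (int i = 0; i < 16; i++) A[i] = 1.0f;
	for (int j = 0; j < 8; j++) B[j] = 2.0f;
	ref_sgemm_nt_8x4(2, 1.0f, A, B, 0.0f, C, D);
	printf("D[0] = %g\n", D[0]);  /* expect 4 */
	return 0;
}
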
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemm_8x4_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgemm_8x4_lib8.S
deleted file mode 100644
index d319a83..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemm_8x4_lib8.S
+++ /dev/null
@@ -1,6673 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_8x4_lib8, @function
-inner_kernel_gemm_add_nt_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	128(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			32(%r11), %ymm13 // A
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-//	vbroadcastf128	128(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-//	vbroadcastf128	32(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_8x4_lib8, .-inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_8x4_lib8, @function
-inner_kernel_gemm_sub_nt_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	128(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			32(%r11), %ymm13 // A
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-//	vbroadcastf128	128(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm15, %ymm11
-//	vbroadcastf128	32(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_8x4_lib8, .-inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_8x4_lib8, @function
-inner_kernel_gemm_add_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r12, %r14 // B_next <- B
-	addq	%r13, %r14 // B_next <- B + 4*sda*sizeof(double)
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r14) // software prefetch
-	prefetcht0	64(%r14) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 7
-	vmovaps			224(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-	subl	$8, %r10d
-	addq	$256, %r11
-
-	mov		%r14, %r12
-	addq	%r13, %r14
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_8x4_lib8, .-inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nn_8x4_lib8, @function
-inner_kernel_gemm_sub_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r12, %r14 // B_next <- B
-	addq	%r13, %r14 // B_next <- B + 4*sda*sizeof(double)
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r14) // software prefetch
-	prefetcht0	64(%r14) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-
-	// unroll 7
-	vmovaps			224(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-	subl	$8, %r10d
-	addq	$256, %r11
-
-	mov		%r14, %r12
-	addq	%r13, %r14
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vsubps			%ymm15, %ymm3, %ymm3
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nn_8x4_lib8, .-inner_kernel_gemm_sub_nn_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_8x4_lib8, @function
-inner_edge_gemm_add_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %r15d
-	subl			%r14d, %r15d // 8-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,8-offsetB)
-
-	movl			%r14d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r12 // B+offsetB*sizeof(float)
-
-1:
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r12 // B+1*sizeof(float)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_8x4_lib8, .-inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trmm_nn_rl_8x4_lib8, @function
-inner_edge_trmm_nn_rl_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trmm_nn_rl_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trmm_nn_rl_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trmm_nn_rl_8x4_lib8:
-#endif
-#endif
-	
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	movl		%r14d, %eax
-	sall		$2, %eax // offsetB*sizeof(float)
-	movq		%r12, %rbx // B
-	addq		%rax, %rbx // B+offsetB*sizeof(float)
-
-
-	cmpl	$4, %r14d
-	jg		1f
-
-	// offB==0, 1, 2, 3, 4
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$5, %r14d
-	jg		1f
-
-	// offB==5
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movl		$0, %r14d // offsetB=0
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$6, %r14d
-	jg		1f
-
-	// offB==6
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movq		%r12, %rbx // B
-	movl		$0, %r14d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-//	cmpl	$7, %r14d
-//	jg		0f
-
-	// offB==7
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movq		%r12, %rbx // B
-	movl		$0, %r14d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	68(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-//	jmp			0f // end
-
-
-	// end
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trmm_nn_rl_8x4_lib8, .-inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x4_lib8, @function
-inner_edge_trsm_rlt_inv_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x4_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x4_lib8, .-inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x4_vs_lib8, @function
-inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x4_vs_lib8, .-inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x4_lib8, @function
-inner_edge_potrf_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x4_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovsd		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x4_lib8, .-inner_edge_potrf_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization gen
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x4_vs_lib8, @function
-inner_edge_potrf_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovss		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x4_vs_lib8, .-inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-
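// For reference, a minimal plain-C sketch of the factorization step performed
// by the inner_edge_potrf_8x4_* routines above (illustrative only), assuming
// an accumulator acc[4][8] holding the four leading columns (8 rows each) and
// an inv_diag output array; register blocking, the kn early exits of the _vs
// variant and row masking are omitted.
#include <math.h>

static void potrf_8x4_edge_sketch(float acc[4][8], float inv_diag[4])
	{
	for(int j=0; j<4; j++)
		{
		float djj = acc[j][j];
		// non-positive pivot: store 0.0 instead, exactly like the jbe/vxorps path
		float dinv = (djj>0.0f) ? 1.0f/sqrtf(djj) : 0.0f;
		inv_diag[j] = dinv;
		for(int i=0; i<8; i++)
			acc[j][i] *= dinv; // scale column j by 1/sqrt(d_jj)
		for(int jj=j+1; jj<4; jj++)
			{
			float l = acc[j][jj]; // l_{jj,j} of the scaled column
			for(int i=0; i<8; i++)
				acc[jj][i] -= l*acc[j][i]; // rank-1 update of the trailing columns
			}
		}
	}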
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_lib8, @function
-inner_scale_ab_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_lib8, .-inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-
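// Equivalent plain C for the scale step above (illustrative only): the 8x4
// accumulator is scaled by alpha and, unless beta==0.0, beta*C is added.
// C is assumed panel-major: column j of the 8x4 block starts at C+8*j.
static void scale_ab_8x4_sketch(const float *alpha, const float *beta,
		const float *C, float acc[4][8])
	{
	for(int j=0; j<4; j++)
		for(int i=0; i<8; i++)
			{
			acc[j][i] *= alpha[0];
			if(beta[0]!=0.0f) // the branch to 0f skips the C term entirely
				acc[j][i] += beta[0]*C[8*j+i];
			}
	}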
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_lib8, @function
-inner_tran_scale_ab_4x8_lib8:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_lib8; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm4
-	vmulps		%ymm1, %ymm15, %ymm5
-	vmulps		%ymm2, %ymm15, %ymm6
-	vmulps		%ymm3, %ymm15, %ymm7
-
-	// transpose
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm0
-	vblendps	$0xaa, %ymm4, %ymm5, %ymm1
-	vblendps	$0xaa, %ymm6, %ymm7, %ymm2
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm3
-
-	vunpcklps	%ymm1, %ymm0, %ymm4
-	vunpckhps	%ymm1, %ymm0, %ymm5
-	vunpcklps	%ymm3, %ymm2, %ymm6
-	vunpckhps	%ymm3, %ymm2, %ymm7
-
-	vunpcklpd	%ymm5, %ymm7, %ymm2
-	vunpckhpd	%ymm5, %ymm7, %ymm3
-	vunpcklpd	%ymm6, %ymm4, %ymm0
-	vunpckhpd	%ymm6, %ymm4, %ymm1
-
-	vextractf128 $0x1, %ymm0, %xmm4
-	vextractf128 $0x1, %ymm1, %xmm5
-	vextractf128 $0x1, %ymm2, %xmm6
-	vextractf128 $0x1, %ymm3, %xmm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm0, %xmm0
-	vmovaps		32(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm1, %xmm1
-	vmovaps		64(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm2, %xmm2
-	vmovaps		96(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm3, %xmm3
-	vmovaps		128(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm4, %xmm4
-	vmovaps		160(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm5, %xmm5
-	vmovaps		192(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm6, %xmm6
-	vmovaps		224(%r12), %xmm15
-	vmulps		%xmm15, %xmm14, %xmm15
-	vaddps		%xmm15, %xmm7, %xmm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_lib8, .-inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
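// Plain-C view of the transpose-and-scale step above (illustrative only): the
// 8x4 accumulator produced with the A and B operands swapped is scaled by
// alpha, transposed into a 4x8 block, and beta*C is added unless beta==0.0;
// columns of C are assumed 8 floats apart (lib8 panel), with only the first
// 4 rows of each column used.
static void tran_scale_ab_4x8_sketch(const float *alpha, const float *beta,
		const float *C, const float acc[4][8], float D[8][4])
	{
	for(int j=0; j<8; j++)
		for(int i=0; i<4; i++)
			{
			D[j][i] = alpha[0]*acc[i][j]; // transpose while scaling
			if(beta[0]!=0.0f)
				D[j][i] += beta[0]*C[8*j+i];
			}
	}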
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_gen_lib8, @function
-inner_scale_ab_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_gen_lib8, .-inner_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_gen_lib8, @function
-inner_tran_scale_ab_4x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_gen_lib8; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm4
-	vmulps		%ymm1, %ymm15, %ymm5
-	vmulps		%ymm2, %ymm15, %ymm6
-	vmulps		%ymm3, %ymm15, %ymm7
-
-	// transpose
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm0
-	vblendps	$0xaa, %ymm4, %ymm5, %ymm1
-	vblendps	$0xaa, %ymm6, %ymm7, %ymm2
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm3
-
-	vunpcklps	%ymm1, %ymm0, %ymm4
-	vunpckhps	%ymm1, %ymm0, %ymm5
-	vunpcklps	%ymm3, %ymm2, %ymm6
-	vunpckhps	%ymm3, %ymm2, %ymm7
-
-	vunpcklpd	%ymm5, %ymm7, %ymm2
-	vunpckhpd	%ymm5, %ymm7, %ymm3
-	vunpcklpd	%ymm6, %ymm4, %ymm0
-	vunpckhpd	%ymm6, %ymm4, %ymm1
-
-	vextractf128 $0x1, %ymm0, %xmm4
-	vextractf128 $0x1, %ymm1, %xmm5
-	vextractf128 $0x1, %ymm2, %xmm6
-	vextractf128 $0x1, %ymm3, %xmm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm0, %xmm0
-	vmovaps		32(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm1, %xmm1
-	vmovaps		64(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm2, %xmm2
-	vmovaps		96(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm3, %xmm3
-	vmovaps		128(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm4, %xmm4
-	vmovaps		160(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm5, %xmm5
-	vmovaps		192(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm6, %xmm6
-	vmovaps		224(%r13), %xmm12
-	vmulps		%xmm12, %xmm15, %xmm12
-	vaddps		%xmm12, %xmm7, %xmm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_gen_lib8, .-inner_tran_scale_ab_4x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_8x4_lib8, @function
-inner_scale_a0_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_a0_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_8x4_lib8; .scl 2; .type 32; .endef
-inner_scale_a0_8x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_8x4_lib8, .-inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_lib8, @function
-inner_blend_scale_ab_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_lib8, .-inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-
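// The NT micro-kernel accumulates the four columns in a lane-rotated order
// (spelled out in the register comments of the _gen variant below); this is a
// plain-C model of the two vblendps rounds that restore ordinary columns
// before scaling (illustrative only, with acc[j][i] = accumulator j, row i).
static void blend_8x4_sketch(const float acc[4][8], float col[4][8])
	{
	// xor pattern giving, for accumulator j and row i, the column index held
	// in that lane; the pattern repeats every 4 rows (once per 128-bit lane)
	static const int perm[4] = {0, 1, 3, 2};
	for(int j=0; j<4; j++)
		for(int i=0; i<8; i++)
			col[(i%4)^perm[j]][i] = acc[j][i];
	}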
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_gen_lib8, @function
-inner_blend_scale_ab_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_gen_lib8, .-inner_blend_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_lib8, @function
-inner_blend_scale_11_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_lib8, .-inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_gen_lib8, @function
-inner_blend_scale_11_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %r15 // C0
-	addq	%r12, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_gen_lib8, .-inner_blend_scale_11_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_lib8, @function
-inner_store_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_lib8, .-inner_store_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_lib8, @function
-inner_store_4x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%xmm0,  0(%r10)
-	vmovaps 	%xmm1, 32(%r10)
-	vmovaps 	%xmm2, 64(%r10)
-	vmovaps 	%xmm3, 96(%r10)
-	vmovaps 	%xmm4, 128(%r10)
-	vmovaps 	%xmm5, 160(%r10)
-	vmovaps 	%xmm6, 192(%r10)
-	vmovaps 	%xmm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_lib8, .-inner_store_4x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_vs_lib8, @function
-inner_store_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm12, %ymm14
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm14,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm1, %ymm14, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm2, %ymm14, 64(%r10)
-	je			0f // end
-	vmaskmovps	%ymm3, %ymm14, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_vs_lib8, .-inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-
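// Plain-C model of the masked store above (illustrative only): lane i of each
// column is written when i < km, and only the first min(kn,4) columns are
// written.  The LC00 constant is assumed to hold increasing per-lane values of
// the form i+0.5, so that LC00[i]-km is negative exactly for the first km
// lanes and vmaskmovps can key off the sign bit.
static void store_8x4_vs_sketch(const float acc[4][8], float *D, int km, int kn)
	{
	for(int j=0; j<4 && j<kn; j++)
		for(int i=0; i<8; i++)
			if(i<km)
				D[8*j+i] = acc[j][i]; // masked-out entries of D stay untouched
	}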
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_vs_lib8, @function
-inner_store_4x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%xmm14, %xmm12, %xmm14
-
-	// offset==0
-	vmaskmovps	%xmm0, %xmm14,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm1, %xmm14, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm2, %xmm14, 64(%r10)
-	cmpl		$4, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm3, %xmm14, 96(%r10)
-	cmpl		$5, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm4, %xmm14, 128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm5, %xmm14, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm6, %xmm14, 192(%r10)
-	je			0f // end
-	vmaskmovps	%xmm7, %xmm14, 224(%r10)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_vs_lib8, .-inner_store_4x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_gen_lib8, @function
-inner_store_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm2, %ymm15, 64(%r11)
-	je			7f // end
-	vmaskmovps	%ymm3, %ymm15, 96(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_gen_lib8, .-inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
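// Plain-C model of the generalized store above, for the offset==0 case that
// is implemented (the offset>0 branches are still TODO): rows [m0,m1) and
// columns [n0,min(n1,4)) of the 8x4 block are written and all other entries
// of D are left untouched.  Illustrative only; the same i+0.5 convention for
// LC00 as in the _vs store is assumed for the row mask.
static void store_8x4_gen_sketch(const float acc[4][8], float *D,
		int m0, int m1, int n0, int n1)
	{
	if(m1>8) m1 = 8;
	if(n1>4) n1 = 4;
	for(int j=n0; j<n1; j++)
		for(int i=m0; i<m1; i++)
			D[8*j+i] = acc[j][i];
	}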
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_gen_lib8, @function
-inner_store_4x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%xmm12, %xmm14, %xmm14
-	vsubps		%xmm15, %xmm12, %xmm15
-	vandps		%xmm14, %xmm15, %xmm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	vmovaps		%xmm6, %xmm5
-	vmovaps		%xmm7, %xmm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	vmovaps		%xmm6, %xmm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	addq		$32, %r11
-
-	cmpl	$3, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	addq		$32, %r11
-
-	cmpl	$4, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	addq		$32, %r11
-
-	cmpl	$5, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	addq		$32, %r11
-
-	cmpl	$6, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%xmm0, %xmm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm1, %xmm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm2, %xmm15, 64(%r11)
-	cmpl		$4, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm3, %xmm15, 96(%r11)
-	cmpl		$5, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm4, %xmm15, 128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm5, %xmm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm6, %xmm15, 192(%r11)
-	je			7f // end
-	vmaskmovps	%xmm7, %xmm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_gen_lib8, .-inner_store_4x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_lib8, @function
-inner_store_l_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_lib8:
-#endif
-#endif
-	
-	vmovaps 	32(%r10), %ymm12
-	vmovaps 	64(%r10), %ymm13
-	vmovaps 	96(%r10), %ymm14
-
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vblendps	$0x3, %ymm13, %ymm2, %ymm2
-	vblendps	$0x7, %ymm14, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_lib8, .-inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-
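// Plain-C model of the "store lower" variant above (illustrative only): only
// the lower-triangular part of the 8x4 block (row index >= column index) is
// written; the vblendps with the old contents of D preserves the strictly
// upper entries.
static void store_l_8x4_sketch(const float acc[4][8], float *D)
	{
	for(int j=0; j<4; j++)
		for(int i=0; i<8; i++)
			if(i>=j)
				D[8*j+i] = acc[j][i];
	}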
-
-
-// common inner routine with file scope
-//
-// store lower vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_vs_lib8, @function
-inner_store_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmovaps 	32(%r10), %ymm12
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmovaps 	64(%r10), %ymm12
-	vblendps	$0x3, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmovaps 	96(%r10), %ymm12
-	vblendps	$0x7, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15, 96(%r10)
-	//
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_vs_lib8, .-inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_gen_lib8, @function
-inner_store_l_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps 	32(%r11), %ymm12
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps 	64(%r11), %ymm12
-	vblendps	$0x3, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15, 64(%r11)
-	je			7f // end
-	vmovaps 	96(%r11), %ymm12
-	vblendps	$0x7, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15, 96(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_gen_lib8, .-inner_store_l_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_sgemm_nt_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_lib8
-	.type kernel_sgemm_nt_8x4_lib8, @function
-kernel_sgemm_nt_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_lib8
-_kernel_sgemm_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_lib8
-	.def kernel_sgemm_nt_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_lib8, .-kernel_sgemm_nt_8x4_lib8
-#endif
-
-
-
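// For reference, the semantics of kernel_sgemm_nt_8x4_lib8 in plain C, as a
// sketch under the lib8 panel-major convention used in this file (element
// (i,j) of an 8-row panel stored at offset 8*j+i); it documents what the
// assembly computes rather than providing a drop-in replacement: an 8x4 block
// D = alpha*A*B^T + beta*C, where A is an 8xk panel and the 4 rows of the
// second operand sit in the first 4 rows of an 8-row panel.
static void sgemm_nt_8x4_ref(int k, const float *alpha, const float *A,
		const float *B, const float *beta, const float *C, float *D)
	{
	for(int j=0; j<4; j++)
		for(int i=0; i<8; i++)
			{
			float acc = 0.0f;
			for(int l=0; l<k; l++)
				acc += A[8*l+i]*B[8*l+j]; // both operands stored as 8-wide panels
			float d = alpha[0]*acc;
			if(beta[0]!=0.0f) // C is not read when beta==0.0
				d += beta[0]*C[8*j+i];
			D[8*j+i] = d;
			}
	}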
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_sgemm_nt_4x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_lib8
-	.type kernel_sgemm_nt_4x8_lib8, @function
-kernel_sgemm_nt_4x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_lib8
-_kernel_sgemm_nt_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_lib8
-	.def kernel_sgemm_nt_4x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_lib8, .-kernel_sgemm_nt_4x8_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx       r8           r9        rsp+8     rsp+16   rsp+24
-// void kernel_sgemm_nt_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_vs_lib8
-	.type kernel_sgemm_nt_8x4_vs_lib8, @function
-kernel_sgemm_nt_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_vs_lib8
-_kernel_sgemm_nt_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_vs_lib8
-	.def kernel_sgemm_nt_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_vs_lib8, .-kernel_sgemm_nt_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx       r8           r9        rsp+8     rsp+16   rsp+24
-// void kernel_sgemm_nt_4x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_vs_lib8
-	.type kernel_sgemm_nt_4x8_vs_lib8, @function
-kernel_sgemm_nt_4x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_vs_lib8
-_kernel_sgemm_nt_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_vs_lib8
-	.def kernel_sgemm_nt_4x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_vs_lib8, .-kernel_sgemm_nt_4x8_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_8x4_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_gen_lib8
-	.type kernel_sgemm_nt_8x4_gen_lib8, @function
-kernel_sgemm_nt_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_gen_lib8
-_kernel_sgemm_nt_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_gen_lib8
-	.def kernel_sgemm_nt_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_gen_lib8, .-kernel_sgemm_nt_8x4_gen_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_4x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_gen_lib8
-	.type kernel_sgemm_nt_4x8_gen_lib8, @function
-kernel_sgemm_nt_4x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_gen_lib8
-_kernel_sgemm_nt_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_gen_lib8
-	.def kernel_sgemm_nt_4x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_gen_lib8, .-kernel_sgemm_nt_4x8_gen_lib8
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx        rcx         r8         r9      rsp+8        rsp+16    rsp+24
-// void kernel_sgemm_nn_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_lib8
-	.type kernel_sgemm_nn_8x4_lib8, @function
-kernel_sgemm_nn_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_lib8
-_kernel_sgemm_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_lib8
-	.def kernel_sgemm_nn_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_lib8, .-kernel_sgemm_nn_8x4_lib8
-#endif
-
-
-
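// For reference, a plain-C sketch of what kernel_sgemm_nn_8x4_lib8 computes.
// The B addressing below is an assumption inferred from the offsetB/sdb
// handling in the edge routine (8-row panels, panel stride 8*sdb floats, with
// the first offsetB rows of the leading panel skipped); the sketch is
// illustrative, not a drop-in replacement.
static void sgemm_nn_8x4_ref(int k, const float *alpha, const float *A,
		int offsetB, const float *B, int sdb, const float *beta,
		const float *C, float *D)
	{
	for(int j=0; j<4; j++)
		for(int i=0; i<8; i++)
			{
			float acc = 0.0f;
			for(int l=0; l<k; l++)
				{
				int t = l + offsetB; // row of B counted inside its panels
				acc += A[8*l+i]*B[(t/8)*(8*sdb) + 8*j + t%8];
				}
			float d = alpha[0]*acc;
			if(beta[0]!=0.0f) // C is not read when beta==0.0
				d += beta[0]*C[8*j+i];
			D[8*j+i] = d;
			}
	}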
-
-
-//                               1      2             3         4            5         6        7            8         9         10      11
-// void kernel_sgemm_nn_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_vs_lib8
-	.type kernel_sgemm_nn_8x4_vs_lib8, @function
-kernel_sgemm_nn_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_vs_lib8
-_kernel_sgemm_nn_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_vs_lib8
-	.def kernel_sgemm_nn_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // km
-	movq	ARG11, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_vs_lib8, .-kernel_sgemm_nn_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8        r9       rsp+8        rsp+16    rsp+24    rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_sgemm_nn_8x4_gen_lib8(int k, float *alpha, float *A, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_gen_lib8
-	.type kernel_sgemm_nn_8x4_gen_lib8, @function
-kernel_sgemm_nn_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_gen_lib8
-_kernel_sgemm_nn_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_gen_lib8
-	.def kernel_sgemm_nn_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_gen_lib8, .-kernel_sgemm_nn_8x4_gen_lib8
-#endif
-
-
-
-
-
-//                                 rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_ssyrk_nt_l_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x4_lib8
-	.type kernel_ssyrk_nt_l_8x4_lib8, @function
-kernel_ssyrk_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x4_lib8
-_kernel_ssyrk_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x4_lib8
-	.def kernel_ssyrk_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x4_lib8, .-kernel_ssyrk_nt_l_8x4_lib8
-#endif
-
-
-
-
-
-//                                    1      2             3         4         5            6         7         8       9
-// void kernel_ssyrk_nt_l_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x4_vs_lib8
-	.type kernel_ssyrk_nt_l_8x4_vs_lib8, @function
-kernel_ssyrk_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x4_vs_lib8
-_kernel_ssyrk_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x4_vs_lib8
-	.def kernel_ssyrk_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x4_vs_lib8, .-kernel_ssyrk_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                      edi    rsi       rdx       rcx       r8        r9        rsp+8
-// void kernel_strsm_nt_rl_inv_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x4_lib8
-	.type kernel_strsm_nt_rl_inv_8x4_lib8, @function
-kernel_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x4_lib8
-_kernel_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x4_lib8
-	.def kernel_strsm_nt_rl_inv_8x4_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x4_lib8, .-kernel_strsm_nt_rl_inv_8x4_lib8
-#endif
-
-
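A hedged C restatement of the strsm kernel ending just above (my reading of the nt_rl_inv convention, hypothetical ref_ name, the vs/masking variant ignored): it forms C - A*B^T and then right-solves against the transposed lower-triangular 4x4 factor E, using the precomputed reciprocals in inv_diag_E instead of divisions.

// All blocks are 8-row panels: element (i,j) at p[j*8 + i]; B holds the
// 4 x k factor in such a panel, E the 4x4 lower-triangular factor.
static void ref_strsm_nt_rl_inv_8x4_lib8(int k, const float *A, const float *B,
                                         const float *C, float *D,
                                         const float *E, const float *inv_diag_E)
	{
	float T[8*4];
	// T = C - A * B^T
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			{
			float acc = C[j*8 + i];
			for (int l = 0; l < k; l++)
				acc -= A[l*8 + i] * B[l*8 + j];
			T[j*8 + i] = acc;
			}
	// Solve X * E^T = T column by column; D receives X.
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			{
			float x = T[j*8 + i];
			for (int jj = 0; jj < j; jj++)
				x -= D[jj*8 + i] * E[jj*8 + j];   // E(j,jj), lower triangle
			D[j*8 + i] = x * inv_diag_E[j];       // multiply by 1/E(j,j)
			}
	}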
-
-
-
-//                                         edi    rsi       rdx       rcx       r8        r9        rsp+8               rsp+16  rsp+24
-// void kernel_strsm_nt_rl_inv_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x4_vs_lib8
-	.type kernel_strsm_nt_rl_inv_8x4_vs_lib8, @function
-kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x4_vs_lib8
-_kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x4_vs_lib8
-	.def kernel_strsm_nt_rl_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x4_vs_lib8, .-kernel_strsm_nt_rl_inv_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                            1       2          3          4       5          6          7         8         9         10
-// void kernel_sgemm_strsm_nt_rl_inv_8x4_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x4_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x4_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-#endif
-
-
-
-
-
-//                                               1       2          3          4       5          6          7         8         9         10                 11      12
-// void kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  edi    rsi       rdx       rcx       r8        r9
-// void kernel_spotrf_nt_l_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x4_lib8
-	.type kernel_spotrf_nt_l_8x4_lib8, @function
-kernel_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x4_lib8
-_kernel_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x4_lib8
-	.def kernel_spotrf_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x4_lib8, .-kernel_spotrf_nt_l_8x4_lib8
-#endif
-
-
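For the spotrf kernel ending just above, a hedged plain-C sketch of the factorization step (my reading of its semantics, hypothetical ref_ name, and the kernel's handling of non-positive pivots is ignored here): after T = C - A*B^T, column j of the lower Cholesky factor is scaled by the reciprocal square root of the pivot, that reciprocal is exported through inv_diag_D, and the remaining columns are updated right-looking.

#include <math.h>

// Panels are 8 rows: element (i,j) at p[j*8 + i]; only the lower part of the
// leading 4x4 is written, plus the full rows 4..7 (the "_l" store).
static void ref_spotrf_nt_l_8x4_lib8(int k, const float *A, const float *B,
                                     const float *C, float *D, float *inv_diag_D)
	{
	float T[8*4];
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			{
			float acc = C[j*8 + i];
			for (int l = 0; l < k; l++)
				acc -= A[l*8 + i] * B[l*8 + j];
			T[j*8 + i] = acc;
			}
	for (int j = 0; j < 4; j++)
		{
		float djj = sqrtf(T[j*8 + j]);
		float inv = 1.0f / djj;
		inv_diag_D[j] = inv;
		for (int i = j; i < 8; i++)
			D[j*8 + i] = T[j*8 + i] * inv;       // column j of the factor
		for (int jj = j + 1; jj < 4; jj++)       // update the trailing columns
			for (int i = jj; i < 8; i++)
				T[jj*8 + i] -= D[j*8 + i] * D[j*8 + jj];
		}
	}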
-
-
-
-//                                     edi    rsi       rdx       rcx       r8        r9                  rsp+8   rsp+16
-// void kernel_spotrf_nt_l_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x4_vs_lib8
-	.type kernel_spotrf_nt_l_8x4_vs_lib8, @function
-kernel_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x4_vs_lib8
-_kernel_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x4_vs_lib8
-	.def kernel_spotrf_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km
-	movq	ARG8, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x4_vs_lib8, .-kernel_spotrf_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1       2          3          4       5          6          7         8         9
-// void kernel_ssyrk_spotrf_nt_l_8x4_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x4_lib8
-_kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x4_lib8, .-kernel_ssyrk_spotrf_nt_l_8x4_lib8
-#endif
-
-
-
-
-
-//                                           1       2          3          4       5          6          7         8         9                  10      11
-// void kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4            5         6        7
-// void kernel_strmm_nn_rl_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_lib8
-	.type kernel_strmm_nn_rl_8x4_lib8, @function
-kernel_strmm_nn_rl_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_lib8
-_kernel_strmm_nn_rl_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_lib8
-	.def kernel_strmm_nn_rl_8x4_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_lib8, .-kernel_strmm_nn_rl_8x4_lib8
-#endif
-
-
-
-
-
-//                                     1      2             3         4            5         6        7         8       9
-// void kernel_strmm_nn_rl_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_vs_lib8
-	.type kernel_strmm_nn_rl_8x4_vs_lib8, @function
-kernel_strmm_nn_rl_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_vs_lib8
-_kernel_strmm_nn_rl_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_vs_lib8
-	.def kernel_strmm_nn_rl_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_vs_lib8, .-kernel_strmm_nn_rl_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                      1      2             3         4            5         6        7            8         9        10      11      12      13
-// void kernel_strmm_nn_rl_8x4_gen_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_gen_lib8
-	.type kernel_strmm_nn_rl_8x4_gen_lib8, @function
-kernel_strmm_nn_rl_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_gen_lib8
-_kernel_strmm_nn_rl_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_gen_lib8
-	.def kernel_strmm_nn_rl_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // offsetD
-	movq	ARG8, %r11 // D
-	movq	ARG9, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG10, %r13 // m0
-	movq	ARG11, %r14 // m1
-	movq	ARG12, %r15 // n0
-	movq	ARG13, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_gen_lib8, .-kernel_strmm_nn_rl_8x4_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
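The .long blocks above are IEEE-754 single-precision bit patterns for the vectors named in their comments (lane indices 0.5..23.5 and a 1.0/-1.0 vector); they appear to be used by the vs/gen store and edge routines as comparison masks, though that use is an inference. A minimal stand-alone check that a few of the bit patterns decode as stated:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
	{
	// Sample values taken from .LC00 and .LC03 above.
	uint32_t bits[4] = {1056964608u, 1089470464u, 3212836864u, 1065353216u};
	for (int i = 0; i < 4; i++)
		{
		float f;
		memcpy(&f, &bits[i], sizeof f);        // reinterpret the bit pattern
		printf("0x%08x -> %g\n", bits[i], f);  // prints 0.5, 7.5, -1, 1
		}
	return 0;
	}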
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemm_8x8_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgemm_8x8_lib8.S
deleted file mode 100644
index 354fa83..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemm_8x8_lib8.S
+++ /dev/null
@@ -1,5514 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
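A small stand-alone C model of one rank-1 step of the 8x8 nt micro-kernel documented above (the perm table is derived from the vshufps $0xb1 and $0x4e immediates and reflects my reading, not the vendor code): b[0..3] is broadcast to both 128-bit halves and rotated in place, so lane l of accumulator r receives a[l]*b[perm[r][l%4]], which reproduces the d-index patterns listed for ymm0..ymm3; ymm4..ymm7 repeat the same pattern with b[4..7].

#include <stdio.h>

static const int perm[4][4] = {
	{0, 1, 2, 3},   // ymm0: d00 d11 d22 d33 | d40 d51 d62 d73
	{1, 0, 3, 2},   // ymm1: after vshufps $0xb1
	{3, 2, 1, 0},   // ymm2: after a further vshufps $0x4e
	{2, 3, 0, 1},   // ymm3: after a further vshufps $0xb1
};

int main(void)
	{
	float a[8], b[4], c[4][8] = {{0}};
	for (int i = 0; i < 8; i++) a[i] = (float)(i + 1);   // one column of A
	for (int j = 0; j < 4; j++) b[j] = (float)(10 + j);  // one row chunk of B
	for (int r = 0; r < 4; r++)
		for (int l = 0; l < 8; l++)
			c[r][l] += a[l] * b[perm[r][l % 4]];         // lane l of accumulator r
	for (int r = 0; r < 4; r++)
		{
		for (int l = 0; l < 8; l++) printf("%6.1f ", c[r][l]);
		printf("\n");
		}
	return 0;
	}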
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_8x8_lib8, @function
-inner_kernel_gemm_add_nt_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_8x8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	32(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	96(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	112(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	32(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	96(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	112(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-//	vbroadcastf128	0(%r12), %ymm14 // B
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-//	vbroadcastf128	16(%r12), %ymm15 // B
-	vaddps			%ymm11, %ymm7, %ymm7
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	vbroadcastf128	16(%r12), %ymm14 // B
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm4, %ymm4
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm5, %ymm5
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm6, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm7, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_8x8_lib8, .-inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_8x8_lib8, @function
-inner_kernel_gemm_sub_nt_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_8x8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	32(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	96(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	112(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	32(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm12, %ymm14, %ymm11
-	vbroadcastf128	96(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm12, %ymm15, %ymm11
-	vbroadcastf128	112(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vmulps			%ymm13, %ymm14, %ymm11
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vmulps			%ymm13, %ymm14, %ymm11
-//	vbroadcastf128	0(%r12), %ymm14 // B
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vmulps			%ymm13, %ymm15, %ymm11
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	vmulps			%ymm13, %ymm15, %ymm11
-//	vbroadcastf128	16(%r12), %ymm15 // B
-	vsubps			%ymm11, %ymm7, %ymm7
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm0, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm1, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm2, %ymm2
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm3, %ymm3
-
-	vbroadcastf128	16(%r12), %ymm14 // B
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm4, %ymm4
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm5, %ymm5
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm6, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vsubps			%ymm11, %ymm7, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_8x8_lib8, .-inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
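-// reference semantics, as a scalar sketch (an assumption inferred from the
-// loads and broadcasts below; acc names the accumulator kept in ymm0..ymm7,
-// one register per column, with bs=8 panel-major storage for A and B):
-//
-//   for(kk=0; kk<k; kk++)
-//     for(jj=0; jj<8; jj++)
-//       for(ii=0; ii<8; ii++)
-//         acc[ii+8*jj] += A[ii+8*kk] * B[(kk%8) + 8*jj + (kk/8)*8*sdb];
-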
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_8x8_lib8, @function
-inner_kernel_gemm_add_nn_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_8x8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r12, %r14 // B_next <- B
-	addq	%r13, %r14 // B_next <- B + 4*sda*sizeof(double)
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r14) // software prefetch
-	prefetcht0	64(%r14) // software prefetch
-	prefetcht0	128(%r14) // software prefetch
-	prefetcht0	192(%r14) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	132(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	164(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	196(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	228(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	136(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	168(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	200(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	232(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	140(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	172(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	204(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	236(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	144(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	176(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	208(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	240(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	148(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	180(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	212(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	244(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	152(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	184(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	216(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	248(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-
-	// unroll 7
-	vmovaps			224(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	156(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	188(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	220(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	252(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	subl	$8, %r10d
-	addq	$256, %r11
-
-	mov		%r14, %r12
-	addq	%r13, %r14
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_8x8_lib8, .-inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
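-// sketch of the intent (an assumption drawn from the code below): when B
-// starts offB floats into its panel, peel min(k, 8-offB) iterations one at a
-// time so that the following main kernel sees B aligned to a panel boundary,
-// then advance B to the start of the next panel.
-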
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_8x8_lib8, @function
-inner_edge_gemm_add_nn_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_8x8_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_8x8_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %ebx
-	subl			%r14d, %ebx // 8-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,8-offsetB)
-
-	movl			%r14d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r12 // B+offsetB*sizeof(float)
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm4, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm5, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm6, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vmulps			%ymm12, %ymm13, %ymm15
-	vaddps			%ymm15, %ymm7, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r12 // B+1*sizeof(float)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_8x8_lib8, .-inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
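-// reference semantics, as a scalar sketch (an assumption; x[jj] stands for
-// the jj-th 8-element column held in ymm_jj, D is panel-major, and the tail
-// of the loop is cut short according to kn):
-//
-//   for(jj=0; jj<8; jj++) {
-//     x[jj] *= inv_diag_D[jj];
-//     for(ii=jj+1; ii<8; ii++)
-//       x[ii] -= D[ii+8*jj] * x[jj];
-//   }
-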
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x8_vs_lib8, @function
-inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vbroadcastss	16(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm4, %ymm4
-	vbroadcastss	20(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	24(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	28(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vbroadcastss	48(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm4, %ymm4
-	vbroadcastss	52(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	56(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	60(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-	vbroadcastss	80(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm4, %ymm4
-	vbroadcastss	84(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	88(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	92(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vbroadcastss	112(%r10), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm4, %ymm4
-	vbroadcastss	116(%r10), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	120(%r10), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	124(%r10), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	16(%r11), %ymm13
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl			$6, %r12d
-	jl				0f // ret
-	vbroadcastss	148(%r10), %ymm13
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm5, %ymm5
-	vbroadcastss	152(%r10), %ymm13
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	156(%r10), %ymm13
-	vmulps			%ymm4, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	20(%r11), %ymm13
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl			$7, %r12d
-	jl				0f // ret
-	vbroadcastss	184(%r10), %ymm13
-	vmulps			%ymm5, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm6, %ymm6
-	vbroadcastss	188(%r10), %ymm13
-	vmulps			%ymm5, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	24(%r11), %ymm13
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl			$8, %r12d
-	jl				0f // ret
-	vbroadcastss	220(%r10), %ymm13
-	vmulps			%ymm6, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm7, %ymm7
-
-	vbroadcastss	28(%r11), %ymm13
-	vmulps			%ymm7, %ymm13, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x8_vs_lib8, .-inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
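-// reference semantics, as a scalar sketch (an assumption; L[ii+8*jj] stands
-// for the block held in ymm0..ymm7 and factored in place, a non-positive
-// pivot is replaced by 0.0 via the numbered branches below, and the trailing
-// columns are gated by kn):
-//
-//   for(jj=0; jj<8; jj++) {
-//     tmp = L[jj+8*jj] > 0.0 ? 1.0/sqrtf(L[jj+8*jj]) : 0.0;
-//     inv_diag_E[jj] = tmp;
-//     for(ii=0; ii<8; ii++) L[ii+8*jj] *= tmp;
-//     for(kk=jj+1; kk<8; kk++)
-//       for(ii=0; ii<8; ii++) L[ii+8*kk] -= L[kk+8*jj] * L[ii+8*jj];
-//   }
-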
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x8_vs_lib8, @function
-inner_edge_potrf_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x8_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vperm2f128	$0x11, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x00, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm4, %ymm4
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vperm2f128	$0x11, %ymm1, %ymm1, %ymm11
-	vpermilps	$0x00, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm4, %ymm4
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-	vperm2f128	$0x11, %ymm2, %ymm2, %ymm11
-	vpermilps	$0x00, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm4, %ymm4
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovss		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-	vperm2f128	$0x11, %ymm3, %ymm3, %ymm11
-	vpermilps	$0x00, %ymm11, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm4, %ymm4
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm4, %xmm13
-//	vpermilps	$0x00, %xmm13, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_44 > 0.0 ?
-	jbe			9f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-10:
-	vmovss		%xmm13, 16(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm4, %ymm13, %ymm4
-	cmpl		$6, %r11d
-	jl			0f // ret
-	vperm2f128	$0x11, %ymm4, %ymm4, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm5, %ymm5
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm4, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm5, %xmm13
-	vpermilps	$0x55, %xmm13, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_55 > 0.0 ?
-	jbe			11f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-12:
-	vmovss		%xmm13, 20(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm5, %ymm13, %ymm5
-	cmpl		$7, %r11d
-	jl			0f // ret
-	vperm2f128	$0x11, %ymm5, %ymm5, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm6, %ymm6
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm5, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm6, %xmm13
-	vpermilps	$0xaa, %xmm13, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_66 > 0.0 ?
-	jbe			13f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-14:
-	vmovss		%xmm13, 24(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm6, %ymm13, %ymm6
-	cmpl		$8, %r11d
-	jl			0f // ret
-	vperm2f128	$0x11, %ymm6, %ymm6, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm6, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm7, %ymm7
-
-
-	vextractf128	$0x1, %ymm7, %xmm13
-	vpermilps	$0xff, %xmm13, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_77 > 0.0 ?
-	jbe			15f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-16:
-	vmovss		%xmm13, 28(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm7, %ymm13, %ymm7
-
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-9:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		10b
-
-11:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		12b
-
-13:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		14b
-
-15:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		16b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x8_vs_lib8, .-inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
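-// computes ymm0..ymm7 <- alpha*ymm0..ymm7 + beta*C, one register per column
-// of C; the loads from C are skipped entirely when beta compares equal to 0.0.
-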
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x8_lib8, @function
-inner_scale_ab_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x8_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vmovaps		128(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		160(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		192(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		224(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x8_lib8, .-inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x8_gen_lib8, @function
-inner_scale_ab_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-	vmovaps		128(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm4, %ymm12, %ymm4
-	vmovaps		160(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm5, %ymm12, %ymm5
-	vmovaps		192(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm6, %ymm12, %ymm6
-	vmovaps		224(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm7, %ymm12, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x8_gen_lib8, .-inner_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
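-// the nt kernels above leave each accumulator with its elements in a rotated
-// order (a side effect of the vshufps broadcast trick); the vblendps ladder
-// below appears to undo that rotation in two stages (odd/even lanes, then
-// 64-bit pairs) before the usual alpha/beta scaling against C.
-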
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x8_lib8, @function
-inner_blend_scale_ab_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x8_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vmovaps		128(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		160(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		192(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		224(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x8_lib8, .-inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x8_gen_lib8, @function
-inner_blend_scale_ab_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-	vmovaps		128(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm4, %ymm12, %ymm4
-	vmovaps		160(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm5, %ymm12, %ymm5
-	vmovaps		192(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm6, %ymm12, %ymm6
-	vmovaps		224(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm7, %ymm12, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x8_gen_lib8, .-inner_blend_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x8_lib8, @function
-inner_blend_scale_11_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x8_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x8_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vmovaps		128(%r10), %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		160(%r10), %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		192(%r10), %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		224(%r10), %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x8_lib8, .-inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x8_gen_lib8, @function
-inner_blend_scale_11_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x8_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-	vmovaps		128(%r11), %ymm12
-	vaddps		%ymm4, %ymm12, %ymm4
-	vmovaps		160(%r11), %ymm12
-	vaddps		%ymm5, %ymm12, %ymm5
-	vmovaps		192(%r11), %ymm12
-	vaddps		%ymm6, %ymm12, %ymm6
-	vmovaps		224(%r11), %ymm12
-	vaddps		%ymm7, %ymm12, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %r15 // C0
-	addq	%r12, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x8_gen_lib8, .-inner_blend_scale_11_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_lib8, @function
-inner_store_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	vmovaps 	%ymm4, 128(%r10)
-	vmovaps 	%ymm5, 160(%r10)
-	vmovaps 	%ymm6, 192(%r10)
-	vmovaps 	%ymm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_lib8, .-inner_store_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
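-// row-mask sketch (assuming .LC00 holds the ascending float thresholds
-// 0.5 .. 7.5): km is broadcast and LC00-km is used as the vmaskmovps mask,
-// so only rows ii with ii < km are written; kn then decides how many of the
-// trailing columns are stored at all.
-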
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_vs_lib8, @function
-inner_store_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	vmaskmovps	%ymm1, %ymm15,  32(%r10)
-	vmaskmovps	%ymm2, %ymm15,  64(%r10)
-	vmaskmovps	%ymm3, %ymm15,  96(%r10)
-	vmaskmovps	%ymm4, %ymm15,  128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmaskmovps	%ymm7, %ymm15, 224(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_vs_lib8, .-inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
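-// sketch of the generalized store (an assumption drawn from the code below):
-// a two-sided row mask is built from m0 and m1 via the .LC00 thresholds, the
-// accumulators are shifted left by n0 columns (advancing D accordingly), n1
-// is clamped to 8, and n1-n0 columns are written with vmaskmovps; the
-// offset>0 cases are still TODO stubs.
-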
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_gen_lib8, @function
-inner_store_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	vmaskmovps	%ymm1, %ymm15,  32(%r11)
-	vmaskmovps	%ymm2, %ymm15,  64(%r11)
-	vmaskmovps	%ymm3, %ymm15,  96(%r11)
-	vmaskmovps	%ymm4, %ymm15,  128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm5, %ymm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm6, %ymm15, 192(%r11)
-	je			7f // end
-	vmaskmovps	%ymm7, %ymm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_gen_lib8, .-inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
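-// lower-triangular store: for column jj the old contents of D are reloaded
-// and the jj leading (above-diagonal) elements are kept via vblendps, so only
-// the diagonal and the entries below it are overwritten.
-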
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_lib8, @function
-inner_store_l_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps		32(%r10), %ymm14
-	vblendps	$0x01, %ymm14, %ymm1, %ymm1
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps		64(%r10), %ymm14
-	vblendps	$0x03, %ymm14, %ymm2, %ymm2
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		96(%r10), %ymm14
-	vblendps	$0x07, %ymm14, %ymm3, %ymm3
-	vmovaps 	%ymm3, 96(%r10)
-	vmovaps		128(%r10), %ymm14
-	vblendps	$0x0f, %ymm14, %ymm4, %ymm4
-	vmovaps 	%ymm4, 128(%r10)
-	vmovaps		160(%r10), %ymm14
-	vblendps	$0x1f, %ymm14, %ymm5, %ymm5
-	vmovaps 	%ymm5, 160(%r10)
-	vmovaps		192(%r10), %ymm14
-	vblendps	$0x3f, %ymm14, %ymm6, %ymm6
-	vmovaps 	%ymm6, 192(%r10)
-	vmovaps		224(%r10), %ymm14
-	vblendps	$0x7f, %ymm14, %ymm7, %ymm7
-	vmovaps 	%ymm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_lib8, .-inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_vs_lib8, @function
-inner_store_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	vmovaps 	32(%r10), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15,  32(%r10)
-	vmovaps 	64(%r10), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15,  64(%r10)
-	vmovaps 	96(%r10), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15,  96(%r10)
-	vmovaps 	128(%r10), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm4, %ymm4
-	vmaskmovps	%ymm4, %ymm15,  128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmovaps 	160(%r10), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm5, %ymm5
-	vmaskmovps	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmovaps 	192(%r10), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm6, %ymm6
-	vmaskmovps	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmovaps 	224(%r10), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm7, %ymm7
-	vmaskmovps	%ymm7, %ymm15, 224(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_vs_lib8, .-inner_store_l_8x8_vs_lib8
-#endif
-#endif
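-
-// note: the row mask above uses the usual lib8 trick: lane i of .LC00 holds
-// i+0.5, so (i+0.5)-km is negative exactly for the first km lanes, and
-// vmaskmovps stores a lane iff its sign bit is set.  Equivalent intrinsics
-// (a sketch; km, D and d0 are illustrative names):
-//
-//	__m256  idx = _mm256_setr_ps(0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f);
-//	__m256i msk = _mm256_castps_si256(_mm256_sub_ps(idx, _mm256_set1_ps((float) km)));
-//	_mm256_maskstore_ps(D, msk, d0); // writes only rows 0..km-1 of the column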
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_gen_lib8, @function
-inner_store_l_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	vmovaps 	32(%r11), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15,  32(%r11)
-	vmovaps 	64(%r11), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15,  64(%r11)
-	vmovaps 	96(%r11), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15,  96(%r11)
-	vmovaps 	128(%r11), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm4, %ymm4
-	vmaskmovps	%ymm4, %ymm15,  128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmovaps 	160(%r11), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm5, %ymm5
-	vmaskmovps	%ymm5, %ymm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmovaps 	192(%r11), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm6, %ymm6
-	vmaskmovps	%ymm6, %ymm15, 192(%r11)
-	je			7f // end
-	vmovaps 	224(%r11), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm7, %ymm7
-	vmaskmovps	%ymm7, %ymm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_gen_lib8, .-inner_store_l_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_sgemm_nt_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_lib8
-	.type kernel_sgemm_nt_8x8_lib8, @function
-kernel_sgemm_nt_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_lib8
-_kernel_sgemm_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_lib8
-	.def kernel_sgemm_nt_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_lib8, .-kernel_sgemm_nt_8x8_lib8
-#endif
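-
-// note: a plain-C reference of what this kernel computes, assuming the lib8
-// panel layout (element (i,l) of an 8-row panel at offset i+8*l); the function
-// name is illustrative:
-//
-//	void ref_sgemm_nt_8x8_lib8(int k, float alpha, const float *A,
-//			const float *B, float beta, const float *C, float *D)
-//		{
-//		for(int j=0; j<8; j++)
-//			for(int i=0; i<8; i++)
-//				{
-//				float acc = 0.0f;
-//				for(int l=0; l<k; l++)
-//					acc += A[i+8*l]*B[j+8*l]; // B accessed transposed (nt)
-//				D[i+8*j] = alpha*acc + beta*C[i+8*j];
-//				}
-//		}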
-
-
-
-
-
-//                                  1      2             3         4         5            6         7         8       9
-// void kernel_sgemm_nt_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_vs_lib8
-	.type kernel_sgemm_nt_8x8_vs_lib8, @function
-kernel_sgemm_nt_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_vs_lib8
-_kernel_sgemm_nt_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_vs_lib8
-	.def kernel_sgemm_nt_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_vs_lib8, .-kernel_sgemm_nt_8x8_vs_lib8
-#endif
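-
-// note: the _vs ("variable size") variant only changes the store: km (ARG8)
-// and kn (ARG9) bound the number of rows and columns written, so border
-// blocks smaller than 8x8 never write past the end of D.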
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_8x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_gen_lib8
-	.type kernel_sgemm_nt_8x8_gen_lib8, @function
-kernel_sgemm_nt_8x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_gen_lib8
-_kernel_sgemm_nt_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_gen_lib8
-	.def kernel_sgemm_nt_8x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_gen_lib8, .-kernel_sgemm_nt_8x8_gen_lib8
-#endif
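-
-// note: the _gen variant additionally takes offsetC/offsetD with the panel
-// strides sdc/sdd and a row window [m0,m1) / column window [n0,n1), so C and D
-// may be sub-matrices that are not aligned to the 8-row panel boundary.  Only
-// the offset==0 path of the generalized stores is implemented in this version;
-// the offset>0 cases are still marked TODO above.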
-
-
-
-
-
-//                               rdi    rsi           rdx        rcx         r8         r9      rsp+8        rsp+16    rsp+24
-// void kernel_sgemm_nn_8x8_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_lib8
-	.type kernel_sgemm_nn_8x8_lib8, @function
-kernel_sgemm_nn_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_lib8
-_kernel_sgemm_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_lib8
-	.def kernel_sgemm_nn_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_lib8, .-kernel_sgemm_nn_8x8_lib8
-#endif
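-
-// note: in the nn kernels B is read non-transposed from its packed panels:
-// offsetB is the row offset inside the first 8-row panel and sdb is the panel
-// stride (the shift by 5 turns it into the byte stride 8*sdb*sizeof(float)).
-// As a sketch, element (l,j) of B then sits roughly at
-// B[((l+offsetB)/8)*8*sdb + (l+offsetB)%8 + 8*j], with the unaligned leading
-// rows handled by the inner_edge_gemm_add_nn_8x8_lib8 routine.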
-
-
-
-
-
-//                               1      2             3         4            5         6        7            8         9         10      11
-// void kernel_sgemm_nn_8x8_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_vs_lib8
-	.type kernel_sgemm_nn_8x8_vs_lib8, @function
-kernel_sgemm_nn_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_vs_lib8
-_kernel_sgemm_nn_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_vs_lib8
-	.def kernel_sgemm_nn_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // km
-	movq	ARG11, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_vs_lib8, .-kernel_sgemm_nn_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8        r9       rsp+8        rsp+16    rsp+24    rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_sgemm_nn_8x8_gen_lib8(int k, float *alpha, float *A, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_gen_lib8
-	.type kernel_sgemm_nn_8x8_gen_lib8, @function
-kernel_sgemm_nn_8x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_gen_lib8
-_kernel_sgemm_nn_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_gen_lib8
-	.def kernel_sgemm_nn_8x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_gen_lib8, .-kernel_sgemm_nn_8x8_gen_lib8
-#endif
-
-
-
-
-
-//                                 rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_ssyrk_nt_l_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x8_lib8
-	.type kernel_ssyrk_nt_l_8x8_lib8, @function
-kernel_ssyrk_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x8_lib8
-_kernel_ssyrk_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x8_lib8
-	.def kernel_ssyrk_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x8_lib8, .-kernel_ssyrk_nt_l_8x8_lib8
-#endif
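-
-// note: ssyrk_nt_l reuses the 8x8 gemm-nt accumulation and differs only in the
-// store: inner_store_l_8x8_lib8 writes the lower triangle of
-// D = alpha*A*B^T + beta*C and leaves the strictly upper part of D untouched
-// (for a rank-k update the caller typically passes the same panel for A and B).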
-
-
-
-
-
-//                                    1      2             3         4         5            6         7         8       9
-// void kernel_ssyrk_nt_l_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x8_vs_lib8
-	.type kernel_ssyrk_nt_l_8x8_vs_lib8, @function
-kernel_ssyrk_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x8_vs_lib8
-_kernel_ssyrk_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x8_vs_lib8
-	.def kernel_ssyrk_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x8_vs_lib8, .-kernel_ssyrk_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                      edi    rsi       rdx       ecx       r8        r9        rsp+8     
-// void kernel_strsm_nt_rl_inv_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x8_lib8
-	.type kernel_strsm_nt_rl_inv_8x8_lib8, @function
-kernel_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x8_lib8
-_kernel_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x8_lib8
-	.def kernel_strsm_nt_rl_inv_8x8_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movl	$8, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x8_lib8, .-kernel_strsm_nt_rl_inv_8x8_lib8
-#endif
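-
-// note: reference recurrence for the rl_inv solve (a sketch; W stands for the
-// accumulated C - A*B^T, E is 8x8 lower triangular in lib8 layout and
-// inv_diag_E[j] = 1.0f/E(j,j)):
-//
-//	for(int j=0; j<8; j++)
-//		for(int i=0; i<8; i++)
-//			{
-//			float w = W[i+8*j];
-//			for(int l=0; l<j; l++)
-//				w -= D[i+8*l]*E[j+8*l];
-//			D[i+8*j] = w*inv_diag_E[j];
-//			}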
-
-
-
-
-
-//                                         edi    rsi       rdx       ecx       r8        r9        rsp+8               rsp+16  rsp+24  
-// void kernel_strsm_nt_rl_inv_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x8_vs_lib8
-	.type kernel_strsm_nt_rl_inv_8x8_vs_lib8, @function
-kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x8_vs_lib8
-_kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x8_vs_lib8
-	.def kernel_strsm_nt_rl_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // m1 
-	movq	ARG9, %r12 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x8_vs_lib8, .-kernel_strsm_nt_rl_inv_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                            1       2          3          4       5          6          7         8         9         10
-// void kernel_sgemm_strsm_nt_rl_inv_8x8_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x8_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	$8, %r12 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x8_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-#endif
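-
-// note: this fused kernel chains an add-gemm over the kp columns of Ap/Bp, a
-// sub-gemm over the km columns of Am/Bm and the same rl_inv solve as above,
-// i.e. roughly D = (C + Ap*Bp^T - Am*Bm^T) * E^(-T) in a single pass over the
-// accumulators.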
-
-
-
-
-
-//                                               1       2          3          4       5          6          7         8         9         10                 11      12
-// void kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                  edi    rsi       rdx       rcx       r8        r9
-// void kernel_spotrf_nt_l_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x8_lib8
-	.type kernel_spotrf_nt_l_8x8_lib8, @function
-kernel_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x8_lib8
-_kernel_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x8_lib8
-	.def kernel_spotrf_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movl	$8, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x8_lib8, .-kernel_spotrf_nt_l_8x8_lib8
-#endif
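-
-// note: reference recurrence for the factorization step (a sketch; W stands
-// for the accumulated C - A*B^T, D receives its lower Cholesky factor and
-// inv_diag_D the reciprocals of the diagonal):
-//
-//	for(int j=0; j<8; j++)
-//		{
-//		float d = W[j+8*j];
-//		for(int l=0; l<j; l++)
-//			d -= D[j+8*l]*D[j+8*l];
-//		d = sqrtf(d);
-//		D[j+8*j] = d;
-//		inv_diag_D[j] = 1.0f/d;
-//		for(int i=j+1; i<8; i++)
-//			{
-//			float w = W[i+8*j];
-//			for(int l=0; l<j; l++)
-//				w -= D[i+8*l]*D[j+8*l];
-//			D[i+8*j] = w*inv_diag_D[j];
-//			}
-//		}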
-
-
-
-
-
-//                                     edi    rsi       rdx       rcx       r8        r9                  rsp+8   rsp+16
-// void kernel_spotrf_nt_l_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x8_vs_lib8
-	.type kernel_spotrf_nt_l_8x8_vs_lib8, @function
-kernel_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x8_vs_lib8
-_kernel_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x8_vs_lib8
-	.def kernel_spotrf_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // m1 
-	movq	ARG8, %r12 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x8_vs_lib8, .-kernel_spotrf_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                        1       2          3          4       5          6          7         8         9
-// void kernel_ssyrk_spotrf_nt_l_8x8_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x8_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x8_lib8
-_kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$8, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x8_lib8, .-kernel_ssyrk_spotrf_nt_l_8x8_lib8
-#endif
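-
-// note: the fused syrk+potrf kernel accumulates C + Ap*Bp^T - Am*Bm^T exactly
-// like the gemm+strsm kernel above, then runs the 8x8 Cholesky edge routine,
-// so a single call produces both the factor block D and inv_diag_D.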
-
-
-
-
-
-//                                           1       2          3          4       5          6          7         8         9                  10      11
-// void kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
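-
-// note: the .long values above are IEEE-754 single-precision bit patterns:
-// 1056964608 = 0x3f000000 = 0.5f, 1069547520 = 0x3fc00000 = 1.5f, ...,
-// 1065353216 = 0x3f800000 = 1.0f and 3212836864 = 0xbf800000 = -1.0f.
-// .LC00/.LC01/.LC02 hold {i+0.5} index ramps (the first one is what the
-// masked stores above compare against); .LC03 is all ones and .LC09 is all
-// ones except -1.0f in the two highest lanes.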
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemm_diag_lib8.c b/third_party/blasfeo/kernel/avx/kernel_sgemm_diag_lib8.c
deleted file mode 100644
index 63183b2..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemm_diag_lib8.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-
-
-// B is the diagonal of a matrix, beta==0.0 case
-void kernel_sgemm_diag_right_4_a0_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 8;
-
-	int k;
-
-	__m256
-		alpha0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22, b_33,
-		d_00, d_01, d_02, d_03;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_ss( alpha );
-	
-	b_00 = _mm256_broadcast_ss( &B[0] );
-	b_00 = _mm256_mul_ps( b_00, alpha0 );
-	b_11 = _mm256_broadcast_ss( &B[1] );
-	b_11 = _mm256_mul_ps( b_11, alpha0 );
-	b_22 = _mm256_broadcast_ss( &B[2] );
-	b_22 = _mm256_mul_ps( b_22, alpha0 );
-	b_33 = _mm256_broadcast_ss( &B[3] );
-	b_33 = _mm256_mul_ps( b_33, alpha0 );
-	
-	for(k=0; k<kmax-7; k+=8)
-		{
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-		a_00 = _mm256_load_ps( &A[24] );
-		d_03 = _mm256_mul_ps( a_00, b_33 );
-
-		_mm256_store_ps( &D[0], d_00 );
-		_mm256_store_ps( &D[8], d_01 );
-		_mm256_store_ps( &D[16], d_02 );
-		_mm256_store_ps( &D[24], d_03 );
-
-		A += 8*sda;
-		D += 8*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const float mask_f[] = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
-		float m_f = kmax-k;
-
-		mask_i = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( mask_f ), _mm256_broadcast_ss( &m_f ) ) );
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-		a_00 = _mm256_load_ps( &A[24] );
-		d_03 = _mm256_mul_ps( a_00, b_33 );
-
-		_mm256_maskstore_ps( &D[0], mask_i, d_00 );
-		_mm256_maskstore_ps( &D[8], mask_i, d_01 );
-		_mm256_maskstore_ps( &D[16], mask_i, d_02 );
-		_mm256_maskstore_ps( &D[24], mask_i, d_03 );
-
-		}
-	
-	}
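-
-
-// Reference semantics of the *_diag_right_* kernels in this file (a plain-C
-// sketch): B holds the diagonal of the right factor, so
-// D(i,j) = alpha*A(i,j)*B(j) (+ beta*C(i,j) in the kernels below), over kmax
-// rows and, here, 4 columns; element (i,j) of a panel-major operand sits at
-// offset (i/8)*8*sd + i%8 + 8*j:
-//
-//	for(int i=0; i<kmax; i++)
-//		for(int j=0; j<4; j++)
-//			D[(i/8)*8*sdd + i%8 + 8*j] = alpha[0]*A[(i/8)*8*sda + i%8 + 8*j]*B[j];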
-
-
-
-// B is the diagonal of a matrix
-void kernel_sgemm_diag_right_4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 8;
-
-	int k;
-
-	__m256
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22, b_33,
-		c_00,
-		d_00, d_01, d_02, d_03;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_ss( alpha );
-	beta0  = _mm256_broadcast_ss( beta );
-	
-	b_00 = _mm256_broadcast_ss( &B[0] );
-	b_00 = _mm256_mul_ps( b_00, alpha0 );
-	b_11 = _mm256_broadcast_ss( &B[1] );
-	b_11 = _mm256_mul_ps( b_11, alpha0 );
-	b_22 = _mm256_broadcast_ss( &B[2] );
-	b_22 = _mm256_mul_ps( b_22, alpha0 );
-	b_33 = _mm256_broadcast_ss( &B[3] );
-	b_33 = _mm256_mul_ps( b_33, alpha0 );
-	
-	for(k=0; k<kmax-7; k+=8)
-		{
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-		a_00 = _mm256_load_ps( &A[24] );
-		d_03 = _mm256_mul_ps( a_00, b_33 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-		c_00 = _mm256_load_ps( &C[16] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_02 = _mm256_add_ps( c_00, d_02 );
-		c_00 = _mm256_load_ps( &C[24] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_03 = _mm256_add_ps( c_00, d_03 );
-
-		_mm256_store_ps( &D[0], d_00 );
-		_mm256_store_ps( &D[8], d_01 );
-		_mm256_store_ps( &D[16], d_02 );
-		_mm256_store_ps( &D[24], d_03 );
-
-		A += 8*sda;
-		C += 8*sdc;
-		D += 8*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const float mask_f[] = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
-		float m_f = kmax-k;
-
-		mask_i = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( mask_f ), _mm256_broadcast_ss( &m_f ) ) );
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-		a_00 = _mm256_load_ps( &A[24] );
-		d_03 = _mm256_mul_ps( a_00, b_33 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-		c_00 = _mm256_load_ps( &C[16] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_02 = _mm256_add_ps( c_00, d_02 );
-		c_00 = _mm256_load_ps( &C[24] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_03 = _mm256_add_ps( c_00, d_03 );
-
-		_mm256_maskstore_ps( &D[0], mask_i, d_00 );
-		_mm256_maskstore_ps( &D[8], mask_i, d_01 );
-		_mm256_maskstore_ps( &D[16], mask_i, d_02 );
-		_mm256_maskstore_ps( &D[24], mask_i, d_03 );
-
-		}
-	
-	}
-
-
-
-// B is the diagonal of a matrix
-void kernel_sgemm_diag_right_3_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 8;
-
-	int k;
-
-	__m256
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11, b_22,
-		c_00,
-		d_00, d_01, d_02;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_ss( alpha );
-	beta0  = _mm256_broadcast_ss( beta );
-	
-	b_00 = _mm256_broadcast_ss( &B[0] );
-	b_00 = _mm256_mul_ps( b_00, alpha0 );
-	b_11 = _mm256_broadcast_ss( &B[1] );
-	b_11 = _mm256_mul_ps( b_11, alpha0 );
-	b_22 = _mm256_broadcast_ss( &B[2] );
-	b_22 = _mm256_mul_ps( b_22, alpha0 );
-	
-	for(k=0; k<kmax-7; k+=8)
-		{
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-		c_00 = _mm256_load_ps( &C[16] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_02 = _mm256_add_ps( c_00, d_02 );
-
-		_mm256_store_ps( &D[0], d_00 );
-		_mm256_store_ps( &D[8], d_01 );
-		_mm256_store_ps( &D[16], d_02 );
-
-		A += 8*sda;
-		C += 8*sdc;
-		D += 8*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const float mask_f[] = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
-		float m_f = kmax-k;
-
-		mask_i = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( mask_f ), _mm256_broadcast_ss( &m_f ) ) );
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-		a_00 = _mm256_load_ps( &A[16] );
-		d_02 = _mm256_mul_ps( a_00, b_22 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-		c_00 = _mm256_load_ps( &C[16] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_02 = _mm256_add_ps( c_00, d_02 );
-
-		_mm256_maskstore_ps( &D[0], mask_i, d_00 );
-		_mm256_maskstore_ps( &D[8], mask_i, d_01 );
-		_mm256_maskstore_ps( &D[16], mask_i, d_02 );
-
-		}
-	
-	}
-
-
-
-// B is the diagonal of a matrix
-void kernel_sgemm_diag_right_2_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00, b_11,
-		c_00,
-		d_00, d_01;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_ss( alpha );
-	beta0  = _mm256_broadcast_ss( beta );
-	
-	b_00 = _mm256_broadcast_ss( &B[0] );
-	b_00 = _mm256_mul_ps( b_00, alpha0 );
-	b_11 = _mm256_broadcast_ss( &B[1] );
-	b_11 = _mm256_mul_ps( b_11, alpha0 );
-	
-	for(k=0; k<kmax-7; k+=8)
-		{
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-
-		_mm256_store_ps( &D[0], d_00 );
-		_mm256_store_ps( &D[8], d_01 );
-
-		A += 8*sda;
-		C += 8*sdc;
-		D += 8*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const float mask_f[] = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
-		float m_f = kmax-k;
-
-		mask_i = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( mask_f ), _mm256_broadcast_ss( &m_f ) ) );
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-		a_00 = _mm256_load_ps( &A[8] );
-		d_01 = _mm256_mul_ps( a_00, b_11 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-		c_00 = _mm256_load_ps( &C[8] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_01 = _mm256_add_ps( c_00, d_01 );
-
-		_mm256_maskstore_ps( &D[0], mask_i, d_00 );
-		_mm256_maskstore_ps( &D[8], mask_i, d_01 );
-
-		}
-	
-	}
-
-
-
-// B is the diagonal of a matrix
-void kernel_sgemm_diag_right_1_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	__m256
-		alpha0, beta0,
-		mask_f,
-		sign,
-		a_00,
-		b_00,
-		c_00,
-		d_00;
-	
-	__m256i
-		mask_i;
-	
-	alpha0 = _mm256_broadcast_ss( alpha );
-	beta0  = _mm256_broadcast_ss( beta );
-	
-	b_00 = _mm256_broadcast_ss( &B[0] );
-	b_00 = _mm256_mul_ps( b_00, alpha0 );
-	
-	for(k=0; k<kmax-7; k+=8)
-		{
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-
-		_mm256_store_ps( &D[0], d_00 );
-
-		A += 8*sda;
-		C += 8*sdc;
-		D += 8*sdd;
-
-		}
-	if(k<kmax)
-		{
-
-		const float mask_f[] = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
-		float m_f = kmax-k;
-
-		mask_i = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( mask_f ), _mm256_broadcast_ss( &m_f ) ) );
-
-		a_00 = _mm256_load_ps( &A[0] );
-		d_00 = _mm256_mul_ps( a_00, b_00 );
-
-		c_00 = _mm256_load_ps( &C[0] );
-		c_00 = _mm256_mul_ps( c_00, beta0 );
-		d_00 = _mm256_add_ps( c_00, d_00 );
-
-		_mm256_maskstore_ps( &D[0], mask_i, d_00 );
-
-		}
-	
-	}
-
-
-
-
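A note on the loop tails in the diag kernels above: the mask is built by subtracting the remaining count m = kmax-k from the constant vector {0.5, 1.5, ..., 7.5}, so exactly the lanes with index i < m come out negative, and _mm256_maskstore_ps writes only lanes whose sign bit is set. A minimal standalone sketch of that idea (the helper name store_first_n is hypothetical, not part of BLASFEO):

#include <immintrin.h>

// Store only the first n (0 < n <= 8) floats of v to dst, leaving the
// remaining lanes of dst untouched; mirrors the kmax-k tail handling above.
static void store_first_n(float *dst, __m256 v, int n)
	{
	const float idx[8] = {0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f};
	float m = (float) n;
	// idx[i] - m is negative exactly for i < n, which sets the sign bit used as the store mask
	__m256i mask = _mm256_castps_si256( _mm256_sub_ps( _mm256_loadu_ps( idx ), _mm256_broadcast_ss( &m ) ) );
	_mm256_maskstore_ps( dst, mask, v );
	}
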
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemv_4_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgemv_4_lib8.S
deleted file mode 100644
index 1508ebe..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemv_4_lib8.S
+++ /dev/null
@@ -1,2935 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMV_ADD_T_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemv_add_t_4_lib8, @function
-inner_kernel_gemv_add_t_4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemv_add_t_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemv_add_t_4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemv_add_t_4_lib8:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovups		0(%r13), %ymm12
-
-	vmovaps		0(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	subl	$8, %r10d
-
-	vmovaps		32(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmovaps		64(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmovaps		96(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	
-	addq	%r12, %r11
-	addq	$32, %r13
-	
-	cmpl	$7, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2ss	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm13, %ymm14
-
-	vmaskmovps	0(%r13), %ymm14, %ymm12
-
-	vmaskmovps	0(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	vmaskmovps	32(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmaskmovps	64(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmaskmovps	96(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-		
-	sall	$2, %r10d // *sizeof(float)
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemv_add_t_4_lib8, .-inner_kernel_gemv_add_t_4_lib8
-#endif
-#endif
-
-
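For orientation, a plain-C sketch of what inner_kernel_gemv_add_t_4_lib8 accumulates, assuming BLASFEO's panel-major layout with bs = 8 (rows grouped in panels of 8, each panel column-contiguous); this reference is illustrative only and not taken from the deleted file:

// z[j] += sum_i A(i, j) * x[i] for the 4 columns j = 0..3, i.e. the values
// the routine above keeps in ymm0..ymm3 before the blend/reduction step.
static void gemv_add_t_4_ref(int kmax, const float *A, int sda, const float *x, float z[4])
	{
	const int bs = 8;
	int i, j;
	for(i = 0; i < kmax; i++)
		{
		// row i lives in panel i/bs; column j of that panel starts j*bs floats in
		const float *pA = A + (i / bs) * bs * sda + i % bs;
		for(j = 0; j < 4; j++)
			z[j] += pA[j * bs] * x[i];
		}
	}
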
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemv_add_nt_4_lib8, @function
-inner_kernel_gemv_add_nt_4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemv_add_nt_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemv_add_nt_4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemv_add_nt_4_lib8:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovups	0(%r13), %ymm12
-	vmovups	0(%r14), %ymm13
-
-	vmovaps	0(%r11), %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm0, %ymm15, %ymm0
-	vmulps	%ymm14, %ymm6, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-	
-	subl	$8, %r10d
-
-	vmovaps	32(%r11), %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm1, %ymm15, %ymm1
-	vmulps	%ymm14, %ymm7, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-	
-	vmovaps	64(%r11), %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm2, %ymm15, %ymm2
-	vmulps	%ymm14, %ymm8, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-
-	vmovaps	96(%r11), %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm3, %ymm15, %ymm3
-	vmulps	%ymm14, %ymm9, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-	
-	vmovups	%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	cmpl	$7, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2ss	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x0, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm13, %ymm11
-
-	vmaskmovps	0(%r13), %ymm11, %ymm12
-	vmaskmovps	0(%r14), %ymm11, %ymm13
-
-//	vmovups	%ymm14, -32(%rsp) // spill mask to stack
-
-//	vmovups	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovps	0(%r11), %ymm11, %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm0, %ymm15, %ymm0
-	vmulps	%ymm14, %ymm6, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-	
-//	vmovups	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovps	32(%r11), %ymm11, %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm1, %ymm15, %ymm1
-	vmulps	%ymm14, %ymm7, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-	
-//	vmovups	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovps	64(%r11), %ymm11, %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm2, %ymm15, %ymm2
-	vmulps	%ymm14, %ymm8, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-
-//	vmovups	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovps	96(%r11), %ymm11, %ymm14
-	vmulps	%ymm14, %ymm12, %ymm15
-	vaddps	%ymm3, %ymm15, %ymm3
-	vmulps	%ymm14, %ymm9, %ymm15
-	vaddps	%ymm13, %ymm15, %ymm13
-		
-//	vmovups	-32(%rsp), %ymm14 // load mask from stack
-	vmaskmovps	%ymm13, %ymm11, 0(%r14)
-
-	sall	$2, %r10d // *sizeof(float)
-	addq	%r10, %r11
-	addq	%r10, %r13
-	addq	%r10, %r14
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemv_add_nt_4_lib8, .-inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 
-// r11   <- 
-// r12   <- 
-// r13   <- 
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_GEMV_ADD_T_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemv_add_t_4_lib8, @function
-inner_edge_gemv_add_t_4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemv_add_t_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemv_add_t_4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemv_add_t_4_lib8:
-#endif
-#endif
-
-	cmpl	$0, %r14d
-	jle		0f // return
-
-	movl	%r14d, %r15d
-	sall	$2, %r15d // offA*sizeof(float)
-
-	subq	%r15, %r11 // A - offA
-	subq	%r15, %r13 // x - offA
-
-	movl	%r10d, %r15d // kmax
-	addl	%r14d, %r15d // kmax + offA
-
-	vcvtsi2ss	%r14d, %xmm14, %xmm14 // offA
-	vcvtsi2ss	%r15d, %xmm15, %xmm15 // offA + kmax
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm13, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-	vandps		%ymm15, %ymm14, %ymm14
-
-	vmaskmovps	0(%r13), %ymm14, %ymm12
-
-	vmovaps		0(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	vmovaps		32(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmovaps		64(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmovaps		96(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-	addq	$32, %r13 // x + 4
-	addq	%r12, %r11 // A + bs*sda
-		
-	addl	%r14d, %r10d
-	subl	$8, %r10d // kmax - (8-offA)
-	
-0: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemv_add_t_4_lib8, .-inner_edge_gemv_add_t_4_lib8
-#endif
-#endif
-
-
-
-
-
-#if 0
-// TODO
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LT_INV_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_lt_inv_8_lib8, @function
-inner_edge_trsv_lt_inv_8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_lt_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_lt_inv_8_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_lt_inv_8_lib8:
-#endif
-#endif
-	
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vmovaps			0(%r10), %ymm12
-	vblendps		$0x01, %ymm14, %ymm12, %ymm12
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-	vmovaps			64(%r10), %ymm12
-	vblendps		$0x07, %ymm14, %ymm12, %ymm12
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	vmovaps			144(%r10), %xmm12
-	vblendps		$0x01, %xmm14, %xmm12, %xmm12
-	vmovaps			176(%r10), %xmm13
-	vblendps		$0x03, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-	vmovaps			208(%r10), %xmm12
-	vblendps		$0x07, %xmm14, %xmm12, %xmm12
-	vmovaps			240(%r10), %xmm13
-	vblendps		$0x0f, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	vshufps			$0xff, %xmm1, %xmm1, %xmm2
-	vbroadcastss	28(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm10, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm13, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0xaa, %xmm1, %xmm1, %xmm2
-	vbroadcastss	24(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm9, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm12, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0x55, %xmm1, %xmm1, %xmm2
-	vbroadcastss	20(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm8, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm11, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0x00, %xmm1, %xmm1, %xmm2
-	vbroadcastss	16(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm7, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0xff, %xmm0, %xmm0, %xmm2
-	vbroadcastss	12(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm6, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0xaa, %xmm0, %xmm0, %xmm2
-	vbroadcastss	8(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm5, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0x55, %xmm0, %xmm0, %xmm2
-	vbroadcastss	4(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm4, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0x00, %xmm0, %xmm0, %xmm2
-	vbroadcastss	0(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm0, %xmm0
-
-	vinsertf128		$0x1, %xmm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_lt_inv_8_lib8, .-inner_edge_trsv_lt_inv_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// r13  <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// r13  <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LT_INV_8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_lt_inv_8_vs_lib8, @function
-inner_edge_trsv_lt_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_lt_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_lt_inv_8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_lt_inv_8_vs_lib8:
-#endif
-#endif
-	
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vmovaps			0(%r10), %ymm12
-	vblendps		$0x01, %ymm14, %ymm12, %ymm12
-	cmpl	$2, %r13d
-	jl		1f
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-	cmpl	$3, %r13d
-	jl		2f
-	vmovaps			64(%r10), %ymm12
-	vblendps		$0x07, %ymm14, %ymm12, %ymm12
-	cmpl	$4, %r13d
-	jl		3f
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	cmpl	$5, %r13d
-	jl		4f
-	vmovaps			144(%r10), %xmm12
-	vblendps		$0x01, %xmm14, %xmm12, %xmm12
-	cmpl	$6, %r13d
-	jl		5f
-	vmovaps			176(%r10), %xmm13
-	vblendps		$0x03, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-	cmpl	$7, %r13d
-	jl		6f
-	vmovaps			208(%r10), %xmm12
-	vblendps		$0x07, %xmm14, %xmm12, %xmm12
-	cmpl	$8, %r13d
-	jl		7f
-	vmovaps			240(%r10), %xmm13
-	vblendps		$0x0f, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-	jmp		0f
-
-
-
-	vmovaps			%ymm14, %ymm12
-1:
-	vmovaps			%ymm14, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-2:
-	vmovaps			%ymm14, %ymm12
-3:
-	vmovaps			%ymm14, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	jmp		8f
-
-4:
-	vmovaps			%xmm14, %xmm12
-5:
-	vmovaps			%xmm14, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-6:
-	vmovaps			%xmm14, %xmm12
-7:
-	vmovaps			%xmm14, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-8:
-	
-	vmovaps			%xmm14, %xmm11
-	vmovaps			%xmm14, %xmm12
-	vmovaps			%xmm14, %xmm13
-
-0:
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	cmpl	$8, %r12d
-	jl		0f
-
-	vshufps			$0xff, %xmm1, %xmm1, %xmm2
-	vbroadcastss	28(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm10, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm13, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$7, %r12d
-	jl		0f
-
-	vshufps			$0xaa, %xmm1, %xmm1, %xmm2
-	vbroadcastss	24(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm9, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm12, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$6, %r12d
-	jl		0f
-
-	vshufps			$0x55, %xmm1, %xmm1, %xmm2
-	vbroadcastss	20(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm8, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm11, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$5, %r12d
-	jl		0f
-
-	vshufps			$0x00, %xmm1, %xmm1, %xmm2
-	vbroadcastss	16(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm7, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$4, %r12d
-	jl		0f
-
-	vshufps			$0xff, %xmm0, %xmm0, %xmm2
-	vbroadcastss	12(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm6, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$3, %r12d
-	jl		0f
-
-	vshufps			$0xaa, %xmm0, %xmm0, %xmm2
-	vbroadcastss	8(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm5, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$2, %r12d
-	jl		0f
-
-	vshufps			$0x55, %xmm0, %xmm0, %xmm2
-	vbroadcastss	4(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm4, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$1, %r12d
-	jl		0f
-
-	vshufps			$0x00, %xmm0, %xmm0, %xmm2
-	vbroadcastss	0(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm0, %xmm0
-
-0:
-
-	vinsertf128		$0x1, %xmm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_lt_inv_8_vs_lib8, .-inner_edge_trsv_lt_inv_8_vs_lib8
-#endif
-#endif
-
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10   <- kmax
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// r15   <- offA
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- kmax-4
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// r15   <- offA
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_SYMV_ADD_NT_4L_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_symv_add_nt_4l_lib8, @function
-inner_edge_symv_add_nt_4l_lib8:
-#elif defined(OS_MAC)
-_inner_edge_symv_add_nt_4l_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_symv_add_nt_4l_lib8; .scl 2; .type 32; .endef
-inner_edge_symv_add_nt_4l_lib8:
-#endif
-#endif
-
-	movl	$8, %eax
-	cmpl	%eax, %r10d
-	jge		0f
-	movl	%r10d, %eax
-0:
-	subl	%r15d, %eax
-
-	vcvtsi2ss	%eax, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x0, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm13, %ymm11
-
-	vmaskmovps	0(%r13), %ymm11, %ymm12
-	vmaskmovps	0(%r14), %ymm11, %ymm13
-
-	vmaskmovps	0(%r11), %ymm11, %ymm14
-	vmulps		%ymm14, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x01, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm6, %ymm15
-	vaddps		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovps	32(%r11), %ymm11, %ymm14
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x01, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x03, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm7, %ymm15
-	vaddps		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovps	64(%r11), %ymm11, %ymm14
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x03, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x07, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm8, %ymm15
-	vaddps		%ymm13, %ymm15, %ymm13
-
-	vmaskmovps	96(%r11), %ymm11, %ymm14
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x07, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vxorps		%ymm15, %ymm15, %ymm15
-	vblendps	$0x0f, %ymm15, %ymm14, %ymm14
-	vmulps		%ymm14, %ymm9, %ymm15
-	vaddps		%ymm13, %ymm15, %ymm13
-	
-	vmaskmovps	%ymm13, %ymm11, 0(%r14)
-
-	subl	%eax, %r10d
-
-	salq	$2, %rax // *sizeof(float)
-	addq	%rax, %r11
-	subq	$32, %r11
-	addq	%r12, %r11
-	addq	%rax, %r13
-	addq	%rax, %r14
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_symv_add_nt_4l_lib8, .-inner_edge_symv_add_nt_4l_lib8
-#endif
-#endif
-
-
-
-
-
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_SYMV_ADD_NT_4R_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_symv_add_nt_4r_lib8, @function
-inner_edge_symv_add_nt_4r_lib8:
-#elif defined(OS_MAC)
-_inner_edge_symv_add_nt_4r_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_symv_add_nt_4r_lib8; .scl 2; .type 32; .endef
-inner_edge_symv_add_nt_4r_lib8:
-#endif
-#endif
-
-	movl	$4, %eax
-	cmpl	%eax, %r10d
-	jge		0f
-	movl	%r10d, %eax
-0:
-	subl	%r15d, %eax
-
-	vcvtsi2ss	%eax, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm13
-#endif
-	vshufps		$0x0, %xmm14, %xmm14, %xmm14
-//	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%xmm14, %xmm13, %xmm11
-
-	vmaskmovps	0(%r13), %xmm11, %xmm12
-	vmaskmovps	0(%r14), %xmm11, %xmm13
-
-	vmaskmovps	0(%r11), %xmm11, %xmm14
-	vmulps		%xmm14, %xmm12, %xmm15
-	vaddps		%xmm0, %xmm15, %xmm0
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x01, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm6, %xmm15
-	vaddps		%xmm13, %xmm15, %xmm13
-	
-	vmaskmovps	32(%r11), %xmm11, %xmm14
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x01, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm12, %xmm15
-	vaddps		%xmm1, %xmm15, %xmm1
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x03, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm7, %xmm15
-	vaddps		%xmm13, %xmm15, %xmm13
-	
-	vmaskmovps	64(%r11), %xmm11, %xmm14
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x03, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm12, %xmm15
-	vaddps		%xmm2, %xmm15, %xmm2
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x07, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm8, %xmm15
-	vaddps		%xmm13, %xmm15, %xmm13
-
-	vmaskmovps	96(%r11), %xmm11, %xmm14
-	vxorps		%xmm15, %xmm15, %xmm15
-	vblendps	$0x07, %xmm15, %xmm14, %xmm14
-	vmulps		%xmm14, %xmm12, %xmm15
-	vaddps		%xmm3, %xmm15, %xmm3
-//	vxorps		%xmm15, %xmm15, %xmm15
-//	vblendps	$0x0f, %xmm15, %xmm14, %xmm14
-//	vmulps		%xmm14, %xmm9, %xmm15
-//	vaddps		%xmm13, %xmm15, %xmm13
-	
-	vmaskmovps	%xmm13, %xmm11, 0(%r14)
-
-	subl	%eax, %r10d
-
-	salq	$2, %rax // *sizeof(float)
-	addq	%rax, %r11
-	subq	$32, %r11
-	addq	%r12, %r11
-	addq	%rax, %r13
-	addq	%rax, %r14
-	
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_symv_add_nt_4r_lib8, .-inner_edge_symv_add_nt_4r_lib8
-#endif
-#endif
-
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_4_lib8, @function
-inner_blend_t_scale_ab_4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_4_lib8; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_4_lib8:
-#endif
-#endif
-
-	// reduction
-	vhaddps			%ymm1, %ymm0, %ymm0
-	vhaddps			%ymm3, %ymm2, %ymm2
-
-	vhaddps			%ymm2, %ymm0, %ymm0
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	vaddps			%xmm0, %xmm1, %xmm0
-
-	// alpha
-	vbroadcastss	0(%r10), %xmm15
-	vmulps			%xmm0, %xmm15, %xmm0
-
-	// beta
-	vbroadcastss	0(%r11), %xmm15
-	vmovups			0(%r12), %xmm14
-	vmulps			%xmm15, %xmm14, %xmm14
-	vaddps			%xmm0, %xmm14, %xmm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_4_lib8, .-inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta=1.0
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_A1_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_a1_4_lib8, @function
-inner_blend_t_scale_a1_4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_a1_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_a1_4_lib8; .scl 2; .type 32; .endef
-inner_blend_t_scale_a1_4_lib8:
-#endif
-#endif
-
-	// reduction
-	vhaddps			%ymm1, %ymm0, %ymm0
-	vhaddps			%ymm3, %ymm2, %ymm2
-
-	vhaddps			%ymm2, %ymm0, %ymm0
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	vaddps			%xmm0, %xmm1, %xmm0
-
-	// alpha
-	vbroadcastss	0(%r10), %xmm15
-	vmulps			%xmm0, %xmm15, %xmm0
-
-	// beta
-	vmovups			0(%r11), %xmm14
-	vaddps			%xmm0, %xmm14, %xmm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_a1_4_lib8, .-inner_blend_t_scale_a1_4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for alpha=-1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_M11_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_m11_4_lib8, @function
-inner_blend_t_scale_m11_4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_m11_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_m11_4_lib8; .scl 2; .type 32; .endef
-inner_blend_t_scale_m11_4_lib8:
-#endif
-#endif
-
-	// reduction
-	vhaddps			%ymm1, %ymm0, %ymm0
-	vhaddps			%ymm3, %ymm2, %ymm2
-
-	vhaddps			%ymm2, %ymm0, %ymm0
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	vaddps			%xmm0, %xmm1, %xmm0
-
-	// beta
-	vmovups			0(%r10), %xmm14
-	vsubps			%xmm0, %xmm14, %xmm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_m11_4_lib8, .-inner_blend_t_scale_m11_4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_lib8, @function
-inner_store_4_lib8:
-#elif defined(OS_MAC)
-_inner_store_4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_lib8; .scl 2; .type 32; .endef
-inner_store_4_lib8:
-#endif
-#endif
-	
-	vmovups %xmm0,  0(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_lib8, .-inner_store_4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_vs_lib8, @function
-inner_store_4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_4_vs_lib8:
-#endif
-#endif
-	
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm14
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm14
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-//	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%xmm15, %xmm14, %xmm15
-
-	vmaskmovps	%xmm0, %xmm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_vs_lib8, .-inner_store_4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store gen
-//
-// input arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4_gen_lib8, @function
-inner_store_4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-//	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-//	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%xmm12, %xmm14, %xmm14
-	vsubps		%xmm15, %xmm12, %xmm15
-	vandps		%xmm14, %xmm15, %xmm15
-
-	vmaskmovps	%xmm0, %xmm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4_gen_lib8, .-inner_store_4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                            1      2              3          4        5          6             7         8
-// void kernel_sgemv_t_4_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_4_lib8
-	.type kernel_sgemv_t_4_lib8, @function
-kernel_sgemv_t_4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_4_lib8
-_kernel_sgemv_t_4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_4_lib8
-	.def kernel_sgemv_t_4_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_4_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_4_lib8, .-kernel_sgemv_t_4_lib8
-#endif
-
-
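Putting the pieces together, the exported kernel computes z = alpha*A^T*x + beta*y restricted to 4 columns; a scalar reference sketch under the same bs = 8 panel-major assumption (the name sgemv_t_4_ref is invented for illustration):

static void sgemv_t_4_ref(int k, float alpha, const float *A, int sda,
		const float *x, float beta, const float *y, float *z)
	{
	int i, j;
	for(j = 0; j < 4; j++)
		{
		float acc = 0.0f;
		for(i = 0; i < k; i++)
			acc += A[(i / 8) * 8 * sda + j * 8 + i % 8] * x[i]; // A(i, j) in panel-major storage
		z[j] = alpha * acc + beta * y[j];
		}
	}
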
-
-
-
-//                               1      2              3          4        5          6             7         8           9
-// void kernel_sgemv_t_4_vs_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_4_vs_lib8
-	.type kernel_sgemv_t_4_vs_lib8, @function
-kernel_sgemv_t_4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_4_vs_lib8
-_kernel_sgemv_t_4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_4_vs_lib8
-	.def kernel_sgemv_t_4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_4_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-	movq	ARG9, %r11 // k1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_4_vs_lib8, .-kernel_sgemv_t_4_vs_lib8
-#endif
-
-
-
-
-
-//                                1      2              3         4          5        6          7             8          9          10
-// void kernel_sgemv_t_4_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *y, float *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_4_gen_lib8
-	.type kernel_sgemv_t_4_gen_lib8, @function
-kernel_sgemv_t_4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_4_gen_lib8
-_kernel_sgemv_t_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_4_gen_lib8
-	.def kernel_sgemv_t_4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // x
-	movq	ARG3, %r14 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_GEMV_ADD_T_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemv_add_t_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemv_add_t_4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_4_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // beta
-	movq	ARG8, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG9, %r10 // z 
-	movq	ARG10, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_4_gen_lib8, .-kernel_sgemv_t_4_gen_lib8
-#endif
-
-
-
-
-
-#if 0
-// TODO
-
-//                                 1      2          3        4                   5          6          7
-// void kernel_strsv_lt_inv_8_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_lt_inv_8_lib8
-	.type kernel_strsv_lt_inv_8_lib8, @function
-kernel_strsv_lt_inv_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_lt_inv_8_lib8
-_kernel_strsv_lt_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_lt_inv_8_lib8
-	.def kernel_strsv_lt_inv_8_lib8; .scl 2; .type 32; .endef
-kernel_strsv_lt_inv_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	subl	$8, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	addq	%r12, %r11 // A+8*sda*sizeof(float)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+8 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LT_INV_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_lt_inv_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_lt_inv_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_lt_inv_8_lib8, .-kernel_strsv_lt_inv_8_lib8
-#endif
-
-
-
-
-
-//                                    1      2          3        4                   5          6          7          8      9
-// void kernel_strsv_lt_inv_8_vs_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_lt_inv_8_vs_lib8
-	.type kernel_strsv_lt_inv_8_vs_lib8, @function
-kernel_strsv_lt_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_lt_inv_8_vs_lib8
-_kernel_strsv_lt_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_lt_inv_8_vs_lib8
-	.def kernel_strsv_lt_inv_8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsv_lt_inv_8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	subl	$8, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	addq	%r12, %r11 // A+8*sda*sizeof(float)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+8 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-	movq	ARG8, %r12 // km
-	movq	ARG9, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LT_INV_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_lt_inv_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_lt_inv_8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG8, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_lt_inv_8_vs_lib8, .-kernel_strsv_lt_inv_8_vs_lib8
-#endif
-
-#endif
-
-
-
-
-
-//                             1      2                3                4          5        6            7            8               9            10           11
-// void kernel_sgemv_nt_4_lib8(int k, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_nt_4_lib8
-	.type kernel_sgemv_nt_4_lib8, @function
-kernel_sgemv_nt_4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_nt_4_lib8
-_kernel_sgemv_nt_4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_nt_4_lib8
-	.def kernel_sgemv_nt_4_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_nt_4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-
-
-	// inner kernel sgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// inner blend n scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_nt_4_lib8, .-kernel_sgemv_nt_4_lib8
-#endif
-
-
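The nt kernel fuses the transposed and non-transposed products into one pass over the same 4 columns of A; a scalar sketch of the intended semantics (same bs = 8 panel-major assumption, names invented for illustration): z_t receives alpha_t*A^T*x_t + beta_t*y_t, while z_n is read, updated with alpha_n*A*x_n, and written back in place.

static void sgemv_nt_4_ref(int k, float alpha_n, float alpha_t, const float *A, int sda,
		const float *x_n, const float *x_t, float beta_t, const float *y_t,
		float *z_n, float *z_t)
	{
	int i, j;
	for(j = 0; j < 4; j++)
		z_t[j] = beta_t * y_t[j];
	for(i = 0; i < k; i++)
		{
		const float *pA = A + (i / 8) * 8 * sda + i % 8; // row i of the 4-column block
		for(j = 0; j < 4; j++)
			{
			z_t[j] += alpha_t * pA[j * 8] * x_t[i]; // transposed ('t') accumulation
			z_n[i] += alpha_n * pA[j * 8] * x_n[j]; // non-transposed ('n') update, in place
			}
		}
	}
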
-
-
-
-//                                1      2                3                4          5        6            7            8               9            10           11           12
-// void kernel_sgemv_nt_4_vs_lib8(int k, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_nt_4_vs_lib8
-	.type kernel_sgemv_nt_4_vs_lib8, @function
-kernel_sgemv_nt_4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_nt_4_vs_lib8
-_kernel_sgemv_nt_4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_nt_4_vs_lib8
-	.def kernel_sgemv_nt_4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_nt_4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-	movq	ARG12, %r11 // km
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	cmpl	$2, %r11d
-	jl		0f
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	cmpl	$3, %r11d
-	jl		0f
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	je		0f
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-0:
-
-	// inner kernel sgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// inner blend n scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-	movq	ARG12, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_nt_4_vs_lib8, .-kernel_sgemv_nt_4_vs_lib8
-#endif
-
-
-
-
-
-//                             1      2              3          4        5          6
-// void kernel_ssymv_l_4l_lib8(int k, float *alpha, float *A, int sda, float *x, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssymv_l_4l_lib8
-	.type kernel_ssymv_l_4l_lib8, @function
-kernel_ssymv_l_4l_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssymv_l_4l_lib8
-_kernel_ssymv_l_4l_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssymv_l_4l_lib8
-	.def kernel_ssymv_l_4l_lib8; .scl 2; .type 32; .endef
-kernel_ssymv_l_4l_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG5, %r10 // x_n
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge symv & kernel gemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // x_t
-	movq	ARG6, %r14  // z_n
-	movq	$0, %r15 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_SYMV_ADD_NT_4L_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_symv_add_nt_4l_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_symv_add_nt_4l_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssymv_l_4l_lib8, .-kernel_ssymv_l_4l_lib8
-#endif
-
-
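The ssymv_l kernels in this block implement a symmetric matrix-vector product that reads only the stored lower triangle: each stored element feeds both a "normal" and a "transposed" contribution, which is why they reuse the fused gemv nt machinery. A scalar model of that idea follows; it is illustrative only, the names are mine, and plain row-major storage is used for readability.

static void ref_ssymv_lower(int n, float alpha, const float *L, int ldl,
                            const float *x, float *z)
{
	for (int j = 0; j < n; j++) {
		z[j] += alpha * L[j * ldl + j] * x[j];   // diagonal counted once
		for (int i = j + 1; i < n; i++) {
			float a = L[i * ldl + j];            // stored lower element (i > j)
			z[i] += alpha * a * x[j];            // contribution of L
			z[j] += alpha * a * x[i];            // contribution of the implicit L'
		}
	}
}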
-
-
-
-//                             1      2              3          4        5          6
-// void kernel_ssymv_l_4r_lib8(int k, float *alpha, float *A, int sda, float *x, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssymv_l_4r_lib8
-	.type kernel_ssymv_l_4r_lib8, @function
-kernel_ssymv_l_4r_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssymv_l_4r_lib8
-_kernel_ssymv_l_4r_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssymv_l_4r_lib8
-	.def kernel_ssymv_l_4r_lib8; .scl 2; .type 32; .endef
-kernel_ssymv_l_4r_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG5, %r10 // x_n
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge symv & kernel gemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // x_t
-	movq	ARG6, %r14  // z_n
-	movq	$0, %r15 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_SYMV_ADD_NT_4R_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_symv_add_nt_4r_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_symv_add_nt_4r_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssymv_l_4r_lib8, .-kernel_ssymv_l_4r_lib8
-#endif
-
-
-
-
-
-//                                1      2              3          4          5        6          7          8
-// void kernel_ssymv_l_4l_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssymv_l_4l_gen_lib8
-	.type kernel_ssymv_l_4l_gen_lib8, @function
-kernel_ssymv_l_4l_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssymv_l_4l_gen_lib8
-_kernel_ssymv_l_4l_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssymv_l_4l_gen_lib8
-	.def kernel_ssymv_l_4l_gen_lib8; .scl 2; .type 32; .endef
-kernel_ssymv_l_4l_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-	movq	ARG8, %r11 // km
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	cmpl	$2, %r11d
-	jl		0f
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	cmpl	$3, %r11d
-	jl		0f
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	je		0f
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-0:
-
-	// inner edge symv & kernel gemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // x_t
-	movq	ARG7, %r14  // z_n
-	movq	ARG3, %r15 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_SYMV_ADD_NT_4L_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_symv_add_nt_4l_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_symv_add_nt_4l_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z_t 
-	movq	ARG8, %r11 // km
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssymv_l_4l_gen_lib8, .-kernel_ssymv_l_4l_gen_lib8
-#endif
-
-
-
-
-
-//                                1      2              3          4          5        6          7          8
-// void kernel_ssymv_l_4r_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssymv_l_4r_gen_lib8
-	.type kernel_ssymv_l_4r_gen_lib8, @function
-kernel_ssymv_l_4r_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssymv_l_4r_gen_lib8
-_kernel_ssymv_l_4r_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssymv_l_4r_gen_lib8
-	.def kernel_ssymv_l_4r_gen_lib8; .scl 2; .type 32; .endef
-kernel_ssymv_l_4r_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastss 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-	movq	ARG8, %r11 // km
-
-	vbroadcastss 0(%r10), %ymm6
-	vmulps		%ymm15, %ymm6, %ymm6
-	cmpl	$2, %r11d
-	jl		0f
-	vbroadcastss 4(%r10), %ymm7
-	vmulps		%ymm15, %ymm7, %ymm7
-	cmpl	$3, %r11d
-	jl		0f
-	vbroadcastss 8(%r10), %ymm8
-	vmulps		%ymm15, %ymm8, %ymm8
-	je		0f
-	vbroadcastss 12(%r10), %ymm9
-	vmulps		%ymm15, %ymm9, %ymm9
-0:
-
-	// inner edge symv & kernel gemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // x_t
-	movq	ARG7, %r14  // z_n
-	movq	ARG3, %r15 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_SYMV_ADD_NT_4R_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_symv_add_nt_4r_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_symv_add_nt_4r_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_NT_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_nt_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_nt_4_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z_t 
-	movq	ARG8, %r11 // km
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssymv_l_4r_gen_lib8, .-kernel_ssymv_l_4r_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.float	0.5
-	.float	1.5
-	.float	2.5
-	.float	3.5
-	.float	4.5
-	.float	5.5
-	.float	6.5
-	.float	7.5
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgemv_8_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgemv_8_lib8.S
deleted file mode 100644
index aafd8cb..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgemv_8_lib8.S
+++ /dev/null
@@ -1,2837 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- x
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+8*k*sizeof(float)
-// r12   <- x+k*sizeof(float)
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z0 z1 z2 z3]_b
-// ymm2  <- [z0 z1 z2 z3]_c
-// ymm3  <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemv_add_n_8_lib8, @function
-inner_kernel_gemv_add_n_8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemv_add_n_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemv_add_n_8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemv_add_n_8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm0, %ymm15, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovaps			32(%r11), %ymm8
-	vbroadcastss	4(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm1, %ymm15, %ymm1
-	
-	vmovaps			64(%r11), %ymm8
-	vbroadcastss	8(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm2, %ymm15, %ymm2
-
-	vmovaps			96(%r11), %ymm8
-	vbroadcastss	12(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm3, %ymm15, %ymm3
-	
-	addq	$128, %r11
-	addq	$16, %r12
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm0, %ymm15, %ymm0
-	
-	addq	$32, %r11
-	addq	$4, %r12
-	
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-
-	jg		0b // clean
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemv_add_n_8_lib8, .-inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-
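As a plain-C model of the unrolled loop above: the matrix argument is a single 8-row panel in which column j starts at A + 8*j, and four 8-wide accumulators (ymm0-ymm3) each take every fourth column so the multiply/add chains can overlap; the blend/scale routine later sums the four accumulators lane by lane. Sketch only, names mine; the real clean-up loop puts all leftover columns into the first accumulator, which leaves the final sum unchanged.

static void ref_inner_gemv_n_8(int k, const float *A, const float *x,
                               float acc[4][8])
{
	for (int j = 0; j < k; j++)
		for (int i = 0; i < 8; i++)
			acc[j % 4][i] += A[8 * j + i] * x[j];
	// z[i] is later formed from acc[0][i] + acc[1][i] + acc[2][i] + acc[3][i]
}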
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(float) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemv_add_t_8_lib8, @function
-inner_kernel_gemv_add_t_8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemv_add_t_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemv_add_t_8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemv_add_t_8_lib8:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovups		0(%r13), %ymm12
-
-	vmovaps		0(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	subl	$8, %r10d
-
-	vmovaps		32(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmovaps		64(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmovaps		96(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	
-	vmovaps		128(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	
-	vmovaps		160(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	
-	vmovaps		192(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	
-	vmovaps		224(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-	
-	addq	%r12, %r11
-	addq	$32, %r13
-	
-	cmpl	$7, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2ss	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm13, %ymm14
-
-	vmaskmovps	0(%r13), %ymm14, %ymm12
-
-	vmaskmovps	0(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	vmaskmovps	32(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmaskmovps	64(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmaskmovps	96(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-		
-	vmaskmovps	128(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-		
-	vmaskmovps	160(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-		
-	vmaskmovps	192(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-		
-	vmaskmovps	224(%r11), %ymm14, %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-		
-	sall	$2, %r10d
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemv_add_t_8_lib8, .-inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
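The transposed counterpart above walks A one 8-row panel at a time (panel stride bs*sda floats) and keeps one accumulator register per output column; in scalar form it amounts to the following sketch, with illustrative names and the panel-major indexing inferred from the 32-byte column offsets in the code.

static void ref_inner_gemv_t_8(int k, const float *A, int sda, const float *x,
                               float acc[8][8])
{
	for (int i = 0; i < k; i++)
		for (int j = 0; j < 8; j++)
			// column j of the panel containing row i, lane i%8
			acc[j][i % 8] += A[(i / 8) * 8 * sda + (i % 8) + 8 * j] * x[i];
	// each acc[j][*] is horizontally summed later to give z[j]
}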
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(float) = 32*sda
-// r13   <- x
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 
-// r11   <- 
-// r12   <- 
-// r13   <- 
-// r14d  <- offA
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_GEMV_ADD_T_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemv_add_t_8_lib8, @function
-inner_edge_gemv_add_t_8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemv_add_t_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemv_add_t_8_lib8; .scl 2; .type 32; .endef
-inner_edge_gemv_add_t_8_lib8:
-#endif
-#endif
-
-	cmpl	$0, %r14d
-	jle		0f // return
-
-	movl	%r14d, %r15d
-	sall	$2, %r15d // offA*sizeof(float)
-
-	subq	%r15, %r11 // A - offA
-	subq	%r15, %r13 // x - offA
-
-	movl	%r10d, %r15d // kmax
-	addl	%r14d, %r15d // kmax + offA
-
-	vcvtsi2ss	%r14d, %xmm14, %xmm14 // offA
-	vcvtsi2ss	%r15d, %xmm15, %xmm15 // offA + kmax
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm13, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-	vandps		%ymm15, %ymm14, %ymm14
-
-	vmaskmovps	0(%r13), %ymm14, %ymm12
-
-	vmovaps		0(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	
-	vmovaps		32(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	
-	vmovaps		64(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-
-	vmovaps		96(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-	vmovaps		128(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-
-	vmovaps		160(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-
-	vmovaps		192(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-
-	vmovaps		224(%r11), %ymm8
-	vmulps		%ymm8, %ymm12, %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-	addq	$32, %r13 // x + 4
-	addq	%r12, %r11 // A + bs*sda
-		
-	addl	%r14d, %r10d
-	subl	$8, %r10d // kmax - (8-offA)
-	
-0: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemv_add_t_8_lib8, .-inner_edge_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LN_INV_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_ln_inv_8_lib8, @function
-inner_edge_trsv_ln_inv_8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_ln_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_ln_inv_8_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_ln_inv_8_lib8:
-#endif
-#endif
-	
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vbroadcastss	0(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x01, %ymm1, %ymm0, %ymm0
-
-	vmovaps			0(%r10), %ymm13
-	vblendps		$0x01, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x00, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	4(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x02, %ymm1, %ymm0, %ymm0
-
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x55, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	8(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x04, %ymm1, %ymm0, %ymm0
-
-	vmovaps			64(%r10), %ymm13
-	vblendps		$0x07, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xaa, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	12(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x08, %ymm1, %ymm0, %ymm0
-
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xff, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	16(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x10, %ymm1, %ymm0, %ymm0
-
-	vmovaps			128(%r10), %ymm13
-	vblendps		$0x1f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x00, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	20(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x20, %ymm1, %ymm0, %ymm0
-
-	vmovaps			160(%r10), %ymm13
-	vblendps		$0x3f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x55, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	24(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x40, %ymm1, %ymm0, %ymm0
-
-	vmovaps			192(%r10), %ymm13
-	vblendps		$0x7f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xaa, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	28(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x80, %ymm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_ln_inv_8_lib8, .-inner_edge_trsv_ln_inv_8_lib8
-#endif
-#endif
-
-
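What the fully unrolled sequence above performs is an ordinary forward substitution for an 8x8 lower-triangular block, with the division replaced by a multiplication with a precomputed reciprocal of each diagonal entry (inv_diag_E). A scalar sketch of the same recurrence follows, using row-major E only for readability (the kernel reads the block in its panel-major layout); z holds the right-hand side on entry and the solution on return.

static void ref_trsv_ln_inv_8(const float E[8][8], const float inv_diag[8],
                              float z[8])
{
	for (int i = 0; i < 8; i++) {
		float t = z[i];
		for (int j = 0; j < i; j++)
			t -= E[i][j] * z[j];   // subtract already-solved components
		z[i] = t * inv_diag[i];    // multiply by the stored reciprocal
	}
}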
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12d <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12d <- kn
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LN_INV_8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_ln_inv_8_vs_lib8, @function
-inner_edge_trsv_ln_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_ln_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_ln_inv_8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_ln_inv_8_vs_lib8:
-#endif
-#endif
-	
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vbroadcastss	0(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x01, %ymm1, %ymm0, %ymm0
-	vmovaps			0(%r10), %ymm13
-	vblendps		$0x01, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x00, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$2, %r12d
-	jl				0f // ret
-
-	vbroadcastss	4(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x02, %ymm1, %ymm0, %ymm0
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x55, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$3, %r12d
-	jl				0f // ret
-
-	vbroadcastss	8(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x04, %ymm1, %ymm0, %ymm0
-	vmovaps			64(%r10), %ymm13
-	vblendps		$0x07, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xaa, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$4, %r12d
-	jl				0f // ret
-
-	vbroadcastss	12(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x08, %ymm1, %ymm0, %ymm0
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xff, %ymm0, %ymm12
-	vperm2f128		$0x00, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$5, %r12d
-	jl				0f // ret
-
-	vbroadcastss	16(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x10, %ymm1, %ymm0, %ymm0
-	vmovaps			128(%r10), %ymm13
-	vblendps		$0x1f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x00, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$6, %r12d
-	jl				0f // ret
-
-	vbroadcastss	20(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x20, %ymm1, %ymm0, %ymm0
-	vmovaps			160(%r10), %ymm13
-	vblendps		$0x3f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0x55, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$7, %r12d
-	jl				0f // ret
-
-	vbroadcastss	24(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x40, %ymm1, %ymm0, %ymm0
-	vmovaps			192(%r10), %ymm13
-	vblendps		$0x7f, %ymm14, %ymm13, %ymm13
-	vpermilps		$0xaa, %ymm0, %ymm12
-	vperm2f128		$0x11, %ymm12, %ymm12, %ymm12
-	vmulps			%ymm13, %ymm12, %ymm15
-	vsubps			%ymm15, %ymm0, %ymm0
-
-	cmpl			$8, %r12d
-	jl				0f // ret
-
-	vbroadcastss	28(%r11), %ymm12
-	vmulps			%ymm0, %ymm12, %ymm1
-	vblendps		$0x80, %ymm1, %ymm0, %ymm0
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_ln_inv_8_vs_lib8, .-inner_edge_trsv_ln_inv_8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LT_INV_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_lt_inv_8_lib8, @function
-inner_edge_trsv_lt_inv_8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_lt_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_lt_inv_8_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_lt_inv_8_lib8:
-#endif
-#endif
-	
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vmovaps			0(%r10), %ymm12
-	vblendps		$0x01, %ymm14, %ymm12, %ymm12
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-	vmovaps			64(%r10), %ymm12
-	vblendps		$0x07, %ymm14, %ymm12, %ymm12
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	vmovaps			144(%r10), %xmm12
-	vblendps		$0x01, %xmm14, %xmm12, %xmm12
-	vmovaps			176(%r10), %xmm13
-	vblendps		$0x03, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-	vmovaps			208(%r10), %xmm12
-	vblendps		$0x07, %xmm14, %xmm12, %xmm12
-	vmovaps			240(%r10), %xmm13
-	vblendps		$0x0f, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	vshufps			$0xff, %xmm1, %xmm1, %xmm2
-	vbroadcastss	28(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm10, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm13, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0xaa, %xmm1, %xmm1, %xmm2
-	vbroadcastss	24(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm9, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm12, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0x55, %xmm1, %xmm1, %xmm2
-	vbroadcastss	20(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm8, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm11, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-	vshufps			$0x00, %xmm1, %xmm1, %xmm2
-	vbroadcastss	16(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm1, %xmm1
-	vmulps			%xmm7, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0xff, %xmm0, %xmm0, %xmm2
-	vbroadcastss	12(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm6, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0xaa, %xmm0, %xmm0, %xmm2
-	vbroadcastss	8(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm5, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0x55, %xmm0, %xmm0, %xmm2
-	vbroadcastss	4(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm0, %xmm0
-	vmulps			%xmm4, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-	vshufps			$0x00, %xmm0, %xmm0, %xmm2
-	vbroadcastss	0(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm0, %xmm0
-
-	vinsertf128		$0x1, %xmm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_lt_inv_8_lib8, .-inner_edge_trsv_lt_inv_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution with vector RHS
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// r13  <- kn
-// r14  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// r13  <- kn
-// r14  <- x
-// ymm0 <- [z0 z1 z2 z3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSV_LT_INV_8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsv_lt_inv_8_vs_lib8, @function
-inner_edge_trsv_lt_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsv_lt_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsv_lt_inv_8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsv_lt_inv_8_vs_lib8:
-#endif
-#endif
-	
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm13, %ymm14
-
-	vmovups		0(%r14), %ymm15
-	vblendvps	%ymm14, %ymm0, %ymm15, %ymm0
-
-
-
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vmovaps			0(%r10), %ymm12
-	vblendps		$0x01, %ymm14, %ymm12, %ymm12
-	cmpl	$2, %r13d
-	jl		1f
-	vmovaps			32(%r10), %ymm13
-	vblendps		$0x03, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-	cmpl	$3, %r13d
-	jl		2f
-	vmovaps			64(%r10), %ymm12
-	vblendps		$0x07, %ymm14, %ymm12, %ymm12
-	cmpl	$4, %r13d
-	jl		3f
-	vmovaps			96(%r10), %ymm13
-	vblendps		$0x0f, %ymm14, %ymm13, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	cmpl	$5, %r13d
-	jl		4f
-	vmovaps			144(%r10), %xmm12
-	vblendps		$0x01, %xmm14, %xmm12, %xmm12
-	cmpl	$6, %r13d
-	jl		5f
-	vmovaps			176(%r10), %xmm13
-	vblendps		$0x03, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-	cmpl	$7, %r13d
-	jl		6f
-	vmovaps			208(%r10), %xmm12
-	vblendps		$0x07, %xmm14, %xmm12, %xmm12
-	cmpl	$8, %r13d
-	jl		7f
-	vmovaps			240(%r10), %xmm13
-	vblendps		$0x0f, %xmm14, %xmm13, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-	jmp		0f
-
-
-
-	vmovaps			%ymm14, %ymm12
-1:
-	vmovaps			%ymm14, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm8
-	vunpckhps		%ymm13, %ymm12, %ymm9
-
-2:
-	vmovaps			%ymm14, %ymm12
-3:
-	vmovaps			%ymm14, %ymm13
-	vunpcklps		%ymm13, %ymm12, %ymm10
-	vunpckhps		%ymm13, %ymm12, %ymm11
-
-	vshufps			$0x44, %ymm10, %ymm8, %ymm7
-	vshufps			$0xee, %ymm10, %ymm8, %ymm4
-	vshufps			$0x44, %ymm11, %ymm9, %ymm5
-	vshufps			$0xee, %ymm11, %ymm9, %ymm6
-	vextractf128	$0x1, %ymm7, %xmm7
-	vextractf128	$0x1, %ymm4, %xmm8
-	vextractf128	$0x1, %ymm5, %xmm9
-	vextractf128	$0x1, %ymm6, %xmm10
-
-	jmp		8f
-
-4:
-	vmovaps			%xmm14, %xmm12
-5:
-	vmovaps			%xmm14, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm1
-	vunpckhps		%xmm13, %xmm12, %xmm2
-
-6:
-	vmovaps			%xmm14, %xmm12
-7:
-	vmovaps			%xmm14, %xmm13
-	vunpcklps		%xmm13, %xmm12, %xmm3
-	vunpckhps		%xmm13, %xmm12, %xmm15
-
-	vshufps			$0xee, %xmm3, %xmm1, %xmm11
-	vshufps			$0x44, %xmm15, %xmm2, %xmm12
-	vshufps			$0xee, %xmm15, %xmm2, %xmm13
-
-8:
-	
-	vmovaps			%xmm14, %xmm11
-	vmovaps			%xmm14, %xmm12
-	vmovaps			%xmm14, %xmm13
-
-0:
-	vxorps			%ymm14, %ymm14, %ymm14
-
-	vextractf128	$0x1, %ymm0, %xmm1
-
-	cmpl	$8, %r12d
-	jl		0f
-
-	vshufps			$0xff, %xmm1, %xmm1, %xmm2
-	cmpl	$8, %r13d
-	jl		1f
-	vbroadcastss	28(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm1, %xmm1
-1:
-	vmulps			%xmm10, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm13, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$7, %r12d
-	jl		0f
-
-	vshufps			$0xaa, %xmm1, %xmm1, %xmm2
-	cmpl	$7, %r13d
-	jl		1f
-	vbroadcastss	24(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm1, %xmm1
-1:
-	vmulps			%xmm9, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm12, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$6, %r12d
-	jl		0f
-
-	vshufps			$0x55, %xmm1, %xmm1, %xmm2
-	cmpl	$6, %r13d
-	jl		1f
-	vbroadcastss	20(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm1, %xmm1
-1:
-	vmulps			%xmm8, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-	vmulps			%xmm11, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm1, %xmm1
-
-0:
-	cmpl	$5, %r12d
-	jl		0f
-
-	vshufps			$0x00, %xmm1, %xmm1, %xmm2
-	cmpl	$5, %r13d
-	jl		1f
-	vbroadcastss	16(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm1, %xmm1
-1:
-	vmulps			%xmm7, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$4, %r12d
-	jl		0f
-
-	vshufps			$0xff, %xmm0, %xmm0, %xmm2
-	cmpl	$4, %r13d
-	jl		1f
-	vbroadcastss	12(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x08, %xmm2, %xmm0, %xmm0
-1:
-	vmulps			%xmm6, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$3, %r12d
-	jl		0f
-
-	vshufps			$0xaa, %xmm0, %xmm0, %xmm2
-	cmpl	$3, %r13d
-	jl		1f
-	vbroadcastss	8(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x04, %xmm2, %xmm0, %xmm0
-1:
-	vmulps			%xmm5, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$2, %r12d
-	jl		0f
-
-	vshufps			$0x55, %xmm0, %xmm0, %xmm2
-	cmpl	$2, %r13d
-	jl		1f
-	vbroadcastss	4(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x02, %xmm2, %xmm0, %xmm0
-1:
-	vmulps			%xmm4, %xmm2, %xmm15
-	vsubps			%xmm15, %xmm0, %xmm0
-
-0:
-	cmpl	$1, %r12d
-	jl		0f
-
-	vshufps			$0x00, %xmm0, %xmm0, %xmm2
-	cmpl	$1, %r13d
-	jl		1f
-	vbroadcastss	0(%r11), %xmm15
-	vmulps			%xmm2, %xmm15, %xmm2
-	vblendps		$0x01, %xmm2, %xmm0, %xmm0
-1:
-
-0:
-
-	vinsertf128		$0x1, %xmm1, %ymm0, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsv_lt_inv_8_vs_lib8, .-inner_edge_trsv_lt_inv_8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z0 z1 z2 z3]_b
-// ymm2 <- [z0 z1 z2 z3]_c
-// ymm3 <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_AB_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_ab_8_lib8, @function
-inner_blend_n_scale_ab_8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_ab_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_ab_8_lib8; .scl 2; .type 32; .endef
-inner_blend_n_scale_ab_8_lib8:
-#endif
-#endif
-
-	// reduction
-	vaddps			%ymm0, %ymm1, %ymm0
-	vaddps			%ymm2, %ymm3, %ymm2
-	vaddps			%ymm0, %ymm2, %ymm0
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-	vmulps			%ymm0, %ymm15, %ymm0
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-	vmovups			0(%r12), %ymm14
-	vmulps			%ymm15, %ymm14, %ymm14
-	vaddps			%ymm0, %ymm14, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_ab_8_lib8, .-inner_blend_n_scale_ab_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for alpha=-1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z0 z1 z2 z3]_b
-// ymm2 <- [z0 z1 z2 z3]_c
-// ymm3 <- [z0 z1 z2 z3]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_M11_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_m11_8_lib8, @function
-inner_blend_n_scale_m11_8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_m11_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_m11_8_lib8; .scl 2; .type 32; .endef
-inner_blend_n_scale_m11_8_lib8:
-#endif
-#endif
-
-	// reduction
-	vaddps	%ymm0, %ymm1, %ymm0
-	vaddps	%ymm2, %ymm3, %ymm2
-	vaddps	%ymm0, %ymm2, %ymm0
-
-	// beta
-	vmovups		0(%r10), %ymm14
-	vsubps		%ymm0, %ymm14, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_m11_8_lib8, .-inner_blend_n_scale_m11_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_8_lib8, @function
-inner_blend_t_scale_ab_8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_8_lib8; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_8_lib8:
-#endif
-#endif
-
-	// reduction
-	vhaddps			%ymm1, %ymm0, %ymm0
-	vhaddps			%ymm3, %ymm2, %ymm2
-	vhaddps			%ymm5, %ymm4, %ymm4
-	vhaddps			%ymm7, %ymm6, %ymm6
-
-	vhaddps			%ymm2, %ymm0, %ymm0
-	vhaddps			%ymm6, %ymm4, %ymm4
-
-	vperm2f128		$0x20, %ymm4, %ymm0, %ymm1
-	vperm2f128		$0x13, %ymm0, %ymm4, %ymm0
-
-	vaddps			%ymm0, %ymm1, %ymm0
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-	vmulps			%ymm0, %ymm15, %ymm0
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-	vmovups			0(%r12), %ymm14
-	vmulps			%ymm15, %ymm14, %ymm14
-	vaddps			%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_8_lib8, .-inner_blend_t_scale_ab_8_lib8
-#endif
-#endif
-
-
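After the transposed inner kernel, each of the eight ymm accumulators holds eight partial sums belonging to one output element; the vhaddps/vperm2f128 sequence above is simply a fast horizontal reduction of each register, followed by the usual alpha/beta scaling. In plain C the net effect is the following (illustrative sketch, names mine):

static void ref_blend_t_scale_ab_8(const float acc[8][8], float alpha,
                                   float beta, const float *y, float *z)
{
	for (int j = 0; j < 8; j++) {
		float s = 0.0f;
		for (int i = 0; i < 8; i++)
			s += acc[j][i];            // horizontal sum of one accumulator
		z[j] = alpha * s + beta * y[j];
	}
}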
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for alpha=-1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_M11_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_m11_8_lib8, @function
-inner_blend_t_scale_m11_8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_m11_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_m11_8_lib8; .scl 2; .type 32; .endef
-inner_blend_t_scale_m11_8_lib8:
-#endif
-#endif
-
-	// reduction
-	vhaddps			%ymm1, %ymm0, %ymm0
-	vhaddps			%ymm3, %ymm2, %ymm2
-	vhaddps			%ymm5, %ymm4, %ymm4
-	vhaddps			%ymm7, %ymm6, %ymm6
-
-	vhaddps			%ymm2, %ymm0, %ymm0
-	vhaddps			%ymm6, %ymm4, %ymm4
-
-	vperm2f128		$0x20, %ymm4, %ymm0, %ymm1
-	vperm2f128		$0x13, %ymm0, %ymm4, %ymm0
-
-	vaddps			%ymm0, %ymm1, %ymm0
-
-	// beta
-	vmovups			0(%r10), %ymm14
-	vsubps			%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_m11_8_lib8, .-inner_blend_t_scale_m11_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8_lib8, @function
-inner_store_8_lib8:
-#elif defined(OS_MAC)
-_inner_store_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8_lib8; .scl 2; .type 32; .endef
-inner_store_8_lib8:
-#endif
-#endif
-	
-	vmovups %ymm0,  0(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8_lib8, .-inner_store_8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8_vs_lib8, @function
-inner_store_8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_8_vs_lib8:
-#endif
-#endif
-	
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm14
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm14, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8_vs_lib8, .-inner_store_8_vs_lib8
-#endif
-#endif
-
-
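The partial-store routine above, like the other *_vs_* code in this file, derives its lane mask from the LC00 constant {0.5, 1.5, ..., 7.5}: subtracting k1 leaves a negative value, and therefore a set sign bit, exactly in the lanes that must be written, which is all vmaskmovps inspects. A small AVX-intrinsics sketch of the same trick (my own illustration, compile with AVX enabled):

#include <immintrin.h>

// Store only the first k1 (0 <= k1 <= 8) lanes of v to z.
static void store_8_vs(float *z, __m256 v, int k1)
{
	const __m256 idx = _mm256_setr_ps(0.5f, 1.5f, 2.5f, 3.5f,
	                                  4.5f, 5.5f, 6.5f, 7.5f); // LC00
	__m256 kk   = _mm256_set1_ps((float)k1);
	__m256i msk = _mm256_castps_si256(_mm256_sub_ps(idx, kk)); // sign bit set where i+0.5 < k1
	_mm256_maskstore_ps(z, msk, v);
}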
-
-
-
-// common inner routine with file scope
-//
-// store gen
-//
-// input arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d  <- k0 : start from (inc)
-// r12d  <- k1 : up to (exc)
-// ymm0  <- [z0 z1 z2 z3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8_gen_lib8, @function
-inner_store_8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8_gen_lib8, .-inner_store_8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                            1      2              3          4          5             6          7
-// void kernel_sgemv_n_8_lib8(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_n_8_lib8
-	.type kernel_sgemv_n_8_lib8, @function
-kernel_sgemv_n_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_n_8_lib8
-_kernel_sgemv_n_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_n_8_lib8
-	.def kernel_sgemv_n_8_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_n_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_n_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_n_8_lib8, .-kernel_sgemv_n_8_lib8
-#endif
-
-
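Assuming the single-precision form of the prototype in the comment above (the 's' prefix and the 4-byte strides in the code imply float arguments), a hypothetical call site for the kernel just deleted could look like this; the repacking loop exists only to show the 8-row panel-major layout the kernel expects, and the example makes no other claims about BLASFEO's public API.

#include <stdlib.h>

// Single-precision reading of the prototype documented above.
void kernel_sgemv_n_8_lib8(int k, float *alpha, float *A, float *x,
                           float *beta, float *y, float *z);

// z = alpha*A*x + beta*y for one 8-row block, with A repacked so that column
// j occupies 8 consecutive floats starting at A_panel + 8*j (32-byte aligned,
// since the kernel uses vmovaps).
void sgemv_n_8_example(int K, const float *A_rowmajor, float *x, float *y,
                       float *z)
{
	float alpha = 1.0f, beta = 1.0f;
	float *A_panel = aligned_alloc(32, (size_t)(8 * K) * sizeof(float));
	for (int j = 0; j < K; j++)
		for (int i = 0; i < 8; i++)
			A_panel[8 * j + i] = A_rowmajor[i * K + j];
	kernel_sgemv_n_8_lib8(K, &alpha, A_panel, x, &beta, y, z);
	free(A_panel);
}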
-
-
-
-//                               1      2              3          4          5             6          7          8
-// void kernel_sgemv_n_8_vs_lib8(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_n_8_vs_lib8
-	.type kernel_sgemv_n_8_vs_lib8, @function
-kernel_sgemv_n_8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_n_8_vs_lib8
-_kernel_sgemv_n_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_n_8_vs_lib8
-	.def kernel_sgemv_n_8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_n_8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_n_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG8, %r11 // k1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_n_8_vs_lib8, .-kernel_sgemv_n_8_vs_lib8
-#endif
-
-
-
-
-
-//                                1      2              3          4          5             6          7          8       9
-// void kernel_sgemv_n_8_gen_lib8(int k, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k0, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_n_8_gen_lib8
-	.type kernel_sgemv_n_8_gen_lib8, @function
-kernel_sgemv_n_8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_n_8_gen_lib8
-_kernel_sgemv_n_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_n_8_gen_lib8
-	.def kernel_sgemv_n_8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_n_8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_n_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11   // beta
-	movq	ARG6, %r12   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG8, %r11 // k0 
-	movq	ARG9, %r12 // k1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_n_8_gen_lib8, .-kernel_sgemv_n_8_gen_lib8
-#endif
-
-
-
-
-
-//                            1      2              3          4        5          6             7         8
-// void kernel_sgemv_t_8_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_8_lib8
-	.type kernel_sgemv_t_8_lib8, @function
-kernel_sgemv_t_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_8_lib8
-_kernel_sgemv_t_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_8_lib8
-	.def kernel_sgemv_t_8_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
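-	// note (added): sda appears to be the panel leading dimension in columns;
-	// shifting it left by 5 multiplies by 32 = 8 floats * 4 bytes, i.e. the
-	// byte distance from one 8-row panel of A to the next.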
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_8_lib8, .-kernel_sgemv_t_8_lib8
-#endif
-
-
-
-
-
-//                               1      2              3          4        5          6             7         8           9
-// void kernel_sgemv_t_8_vs_lib8(int k, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_8_vs_lib8
-	.type kernel_sgemv_t_8_vs_lib8, @function
-kernel_sgemv_t_8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_8_vs_lib8
-_kernel_sgemv_t_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_8_vs_lib8
-	.def kernel_sgemv_t_8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-	movq	ARG9, %r11 // k1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_8_vs_lib8, .-kernel_sgemv_t_8_vs_lib8
-#endif
-
-
-
-
-
-//                                1      2              3         4          5        6          7             8          9          10
-// void kernel_sgemv_t_8_gen_lib8(int k, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *y, float *z, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemv_t_8_gen_lib8
-	.type kernel_sgemv_t_8_gen_lib8, @function
-kernel_sgemv_t_8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemv_t_8_gen_lib8
-_kernel_sgemv_t_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemv_t_8_gen_lib8
-	.def kernel_sgemv_t_8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemv_t_8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // x
-	movq	ARG3, %r14 // offA
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemv_add_t_8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11   // beta
-	movq	ARG8, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG9, %r10 // z 
-	movq	ARG10, %r11 // km 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemv_t_8_gen_lib8, .-kernel_sgemv_t_8_gen_lib8
-#endif
-
-
-
-
-
-//                                 1      2          3                   4          5          6
-// void kernel_strsv_ln_inv_8_lib8(int k, float *A, float *inv_diag_A, float *x, float *y, float *z);
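-//
-// note (added): from the call sequence below (gemv_add_n, blend_n_scale_m11,
-// edge_trsv_ln_inv), this kernel appears to subtract the already-factored part
-// A[:,0:k]*x from y and then forward-substitute through the 8x8 lower triangle
-// L that starts at column k, using inv_diag_A[i] = 1/L[i][i].  A rough scalar
-// sketch (names and indexing are illustrative only):
-//
-//   const float *L = A + 8*k;        // 8x8 block after the first k columns
-//   for (int i = 0; i < 8; i++) {
-//       float t = y[i];
-//       for (int j = 0; j < k; j++) t -= A[i + 8*j] * x[j];
-//       for (int j = 0; j < i; j++) t -= L[i + 8*j] * z[j];
-//       z[i] = t * inv_diag_A[i];
-//   }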
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_ln_inv_8_lib8
-	.type kernel_strsv_ln_inv_8_lib8, @function
-kernel_strsv_ln_inv_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_ln_inv_8_lib8
-_kernel_strsv_ln_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_ln_inv_8_lib8
-	.def kernel_strsv_ln_inv_8_lib8; .scl 2; .type 32; .endef
-kernel_strsv_ln_inv_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_n_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-	movq	%r11, %r13 // A+k*bs*sizeof(float)
-
-
-	// call inner blender n
-
-	movq	ARG5, %r10   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	%r13, %r10 // A+k*bs*sizeof(float)
-	movq	ARG3, %r11 // inv_diag_A
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LN_INV_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_ln_inv_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_ln_inv_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_ln_inv_8_lib8, .-kernel_strsv_ln_inv_8_lib8
-#endif
-
-
-
-
-
-//                                    1      2          3                   4          5          6          7       8
-// void kernel_strsv_ln_inv_8_vs_lib8(int k, float *A, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_ln_inv_8_vs_lib8
-	.type kernel_strsv_ln_inv_8_vs_lib8, @function
-kernel_strsv_ln_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_ln_inv_8_vs_lib8
-_kernel_strsv_ln_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_ln_inv_8_vs_lib8
-	.def kernel_strsv_ln_inv_8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsv_ln_inv_8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG4, %r12  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_N_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_n_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_n_8_lib8
-#endif
-#endif
-
-	movq	%r11, %r13 // A+k*bs*sizeof(float)
-
-
-	// call inner blender n
-
-	movq	ARG5, %r10   // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	%r13, %r10 // A+k*bs*sizeof(float)
-	movq	ARG3, %r11 // inv_diag_A
-	movq	ARG8, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LN_INV_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_ln_inv_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_ln_inv_8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // z 
-	movq	ARG7, %r11 // km
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_ln_inv_8_vs_lib8, .-kernel_strsv_ln_inv_8_vs_lib8
-#endif
-
-
-
-
-
-//                                 1      2          3        4                   5          6          7
-// void kernel_strsv_lt_inv_8_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_lt_inv_8_lib8
-	.type kernel_strsv_lt_inv_8_lib8, @function
-kernel_strsv_lt_inv_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_lt_inv_8_lib8
-_kernel_strsv_lt_inv_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_lt_inv_8_lib8
-	.def kernel_strsv_lt_inv_8_lib8; .scl 2; .type 32; .endef
-kernel_strsv_lt_inv_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$8, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	addq	%r12, %r11 // A+8*sda*sizeof(float)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+8 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LT_INV_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_lt_inv_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_lt_inv_8_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_lt_inv_8_lib8, .-kernel_strsv_lt_inv_8_lib8
-#endif
-
-
-
-
-
-//                                    1      2          3        4                   5          6          7          8      9
-// void kernel_strsv_lt_inv_8_vs_lib8(int k, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsv_lt_inv_8_vs_lib8
-	.type kernel_strsv_lt_inv_8_vs_lib8, @function
-kernel_strsv_lt_inv_8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsv_lt_inv_8_vs_lib8
-_kernel_strsv_lt_inv_8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsv_lt_inv_8_vs_lib8
-	.def kernel_strsv_lt_inv_8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsv_lt_inv_8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemv kernel t
-
-	movq	ARG1, %r10 // k
-	subl	$8, %r10d
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	addq	%r12, %r11 // A+8*sda*sizeof(float)
-	movq	ARG5, %r13 // x
-	addq	$32, %r13 // x+8 
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMV_ADD_T_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemv_add_t_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemv_add_t_8_lib8
-#endif
-#endif
-
-
-	// call inner blender t
-
-	movq	ARG6, %r10 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_M11_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_m11_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_m11_8_lib8
-#endif
-#endif
-
-
-	// solution
-
-	movq	ARG2, %r10 // A
-	movq	ARG4, %r11 // inv_diag_A
-	movq	ARG8, %r12 // km
-	movq	ARG9, %r13 // kn
-	movq	ARG5, %r14 // x
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSV_LT_INV_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsv_lt_inv_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsv_lt_inv_8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // z 
-	movq	ARG9, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsv_lt_inv_8_vs_lib8, .-kernel_strsv_lt_inv_8_vs_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.float	0.5
-	.float	1.5
-	.float	2.5
-	.float	3.5
-	.float	4.5
-	.float	5.5
-	.float	6.5
-	.float	7.5
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgesc_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgesc_lib8.S
deleted file mode 100644
index 43ff708..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgesc_lib8.S
+++ /dev/null
@@ -1,506 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
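-
-// note (added): the PROLOGUE/EPILOGUE macros above save and restore the
-// callee-saved registers of each ABI: rbx, rbp and r12-r15 under the System V
-// convention (OS_LINUX/OS_MAC), plus rdi, rsi and xmm6-xmm15 under the
-// Windows x64 convention, which is why the Windows STACKSIZE is larger.
-// vzeroupper is issued on entry and exit to avoid AVX/SSE transition
-// penalties in callers that still run legacy SSE code.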
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- alpha
-// r12    <- A
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGESC_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgesc_8_lib8, @function
-inner_kernel_sgesc_8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgesc_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgesc_8_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgesc_8_lib8:
-#endif
-#endif
-	
-	vbroadcastss	0(%r11), %ymm15
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmulps		%ymm15, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r12)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmulps		%ymm15, %ymm0, %ymm0
-	vmovaps		%ymm0, 32(%r12)
-
-	vmovaps		64(%r12), %ymm0
-	vmulps		%ymm15, %ymm0, %ymm0
-	vmovaps		%ymm0, 64(%r12)
-	addq		$128, %r12
-
-	vmovaps		-32(%r12), %ymm0
-	vmulps		%ymm15, %ymm0, %ymm0
-	vmovaps		%ymm0, -32(%r12)
-
-	cmpl		$4, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmulps		%ymm15, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r12)
-	subl		$1, %r10d
-	addq		$32, %r12
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgesc_8_lib8, .-inner_kernel_sgesc_8_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- alpha
-// r12    <- A
-// r13d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGESC_8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgesc_8_gen_lib8, @function
-inner_kernel_sgesc_8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgesc_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgesc_8_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgesc_8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
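-	// note (added): the instructions above turn the row count m1 into a store
-	// mask: ymm15 = {0.5,1.5,...,7.5} - (float)m1, so lane i is negative (sign
-	// bit set) exactly when i < m1, and vmaskmovps below then writes only
-	// those lanes.  e.g. m1 = 3 gives {-2.5,-1.5,-0.5,0.5,...}: lanes 0..2 stored.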
-
-	vbroadcastss	0(%r11), %ymm14
-
-	cmpl	$3, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r12), %ymm0
-	vmulps		%ymm14, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15,  0(%r12)
-	subl		$4, %r10d
-
-	vmovaps		32(%r12), %ymm0
-	vmulps		%ymm14, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15,  32(%r12)
-
-	vmovaps		64(%r12), %ymm0
-	vmulps		%ymm14, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15,  64(%r12)
-	addq		$128, %r12
-
-	vmovaps		-32(%r12), %ymm0
-	vmulps		%ymm14, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15,  -32(%r12)
-
-	cmpl		$4, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean-up loop
-
-	vmovaps		0(%r12), %ymm0
-	vmulps		%ymm14, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm15,  0(%r12)
-	subl		$1, %r10d
-	addq		$32, %r12
-
-	cmpl		$0, %r10d
-	jg			3b // clean-up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgesc_8_gen_lib8, .-inner_kernel_sgesc_8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                          rdi    rsi           rdx
-// void kernel_sgesc_8_lib8(int k, float *alpha, float *A);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgesc_8_lib8
-	.type kernel_sgesc_8_lib8, @function
-kernel_sgesc_8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgesc_8_lib8
-_kernel_sgesc_8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgesc_8_lib8
-	.def kernel_sgesc_8_lib8; .scl 2; .type 32; .endef
-kernel_sgesc_8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgesc kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGESC_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgesc_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgesc_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgesc_8_lib8, .-kernel_sgesc_8_lib8
-#endif
-
-
-
-
-
-//                              rdi    rsi           rdx       rcx
-// void kernel_sgesc_8_gen_lib8(int k, float *alpha, float *A, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgesc_8_gen_lib8
-	.type kernel_sgesc_8_gen_lib8, @function
-kernel_sgesc_8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgesc_8_gen_lib8
-_kernel_sgesc_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgesc_8_gen_lib8
-	.def kernel_sgesc_8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgesc_8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgesc kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // alpha
-	movq	ARG3, %r12  // A
-	movq	ARG4, %r13 // m1
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGESC_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgesc_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgesc_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgesc_8_gen_lib8, .-kernel_sgesc_8_gen_lib8
-#endif
-
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
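-	// note (added): these .long values (and those of .LC01/.LC02 below) are
-	// the IEEE-754 single-precision bit patterns of 0.5, 1.5, ..., e.g.
-	// 1056964608 = 0x3F000000 = 0.5f and 1069547520 = 0x3FC00000 = 1.5f; they
-	// are loaded as a float vector to build the row masks of the _gen_ kernels.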
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx/kernel_sgetr_lib8.S b/third_party/blasfeo/kernel/avx/kernel_sgetr_lib8.S
deleted file mode 100644
index 745c42e..0000000
--- a/third_party/blasfeo/kernel/avx/kernel_sgetr_lib8.S
+++ /dev/null
@@ -1,2476 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-
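-// note (added): this subroutine transposes the 8-row panel A into B one 8x8
-// block per iteration, using unpcklps/unpckhps, shufps and vperm2f128.
-// A rough scalar equivalent for a single 8x8 block, assuming the lib8 panel
-// layout with element (i,j) at offset i + 8*j (illustrative only):
-//
-//   for (int j = 0; j < 8; j++)
-//       for (int i = 0; i < 8; i++)
-//           B[j + 8*i] = A[i + 8*j];
-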
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGETR_8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgetr_8_lib8, @function
-inner_kernel_sgetr_8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgetr_8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgetr_8_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgetr_8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$7, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	subl		$8, %r10d
-	addq		%r12, %r11
-
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmovaps		%ymm2, 0(%r13)
-	vmovaps		%ymm3, 128(%r13)
-	vshufps		$0xee, %ymm10, %ymm8, %ymm0
-	vshufps		$0xee, %ymm14, %ymm12, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmovaps		%ymm2, 32(%r13)
-	vmovaps		%ymm3, 160(%r13)
-	vshufps		$0x44, %ymm11, %ymm9, %ymm0
-	vshufps		$0x44, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmovaps		%ymm2, 64(%r13)
-	vmovaps		%ymm3, 192(%r13)
-	vshufps		$0xee, %ymm11, %ymm9, %ymm0
-	vshufps		$0xee, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmovaps		%ymm2, 96(%r13)
-	vmovaps		%ymm3, 224(%r13)
-
-	addq		$256, %r13
-
-	cmpl		$7, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	// 0
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm8
-	vmovaps		%ymm8, 0(%r13)
-	cmpl	$1, %r10d
-	jle		3f
-	// 1
-	vperm2f128	$0x20, %ymm3, %ymm2, %ymm8
-	vmovaps		%ymm8, 32(%r13)
-	cmpl	$2, %r10d
-	jle		3f
-	// 2
-	vperm2f128	$0x20, %ymm5, %ymm4, %ymm8
-	vmovaps		%ymm8, 64(%r13)
-	cmpl	$3, %r10d
-	jle		3f
-	// 3
-	vperm2f128	$0x20, %ymm7, %ymm6, %ymm8
-	vmovaps		%ymm8, 96(%r13)
-	cmpl	$4, %r10d
-	jle		3f
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmovaps		%ymm8, 128(%r13)
-	cmpl	$5, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmovaps		%ymm8, 160(%r13)
-	cmpl	$6, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmovaps		%ymm8, 192(%r13)
-//	cmpl	$7, %r10d
-//	jle		3f
-	// 7
-//	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-//	vmovaps		%ymm8, 224(%r13)
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d // kleft*sizeof(float)
-	addq	%r14, %r11 // A+kleft
-	movl	%r10d, %r14d
-	sall	$5, %r14d // kleft*bs*sizeof(float)
-	addq	%r14, %r13
-	movl	$0, %r10d
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgetr_8_lib8, .-inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_sgetr_8_gen_lib8, @function
-inner_kernel_sgetr_8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_sgetr_8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_sgetr_8_gen_lib8; .scl 2; .type 32; .endef
-inner_kernel_sgetr_8_gen_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$7, %r10d
-	jle		0f // consider clean-up
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	subl		$8, %r10d
-	addq		%r12, %r11
-
-	vmovupd		-32(%rsp), %ymm4
-
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmaskmovps	%ymm2, %ymm4, 0(%r13)
-	vmaskmovps	%ymm3, %ymm4, 128(%r13)
-	vshufps		$0xee, %ymm10, %ymm8, %ymm0
-	vshufps		$0xee, %ymm14, %ymm12, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmaskmovps	%ymm2, %ymm4, 32(%r13)
-	vmaskmovps	%ymm3, %ymm4, 160(%r13)
-	vshufps		$0x44, %ymm11, %ymm9, %ymm0
-	vshufps		$0x44, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmaskmovps	%ymm2, %ymm4, 64(%r13)
-	vmaskmovps	%ymm3, %ymm4, 192(%r13)
-	vshufps		$0xee, %ymm11, %ymm9, %ymm0
-	vshufps		$0xee, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm2
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm3
-	vmaskmovps	%ymm2, %ymm4, 96(%r13)
-	vmaskmovps	%ymm3, %ymm4, 224(%r13)
-
-	addq		$256, %r13
-
-	cmpl		$7, %r10d
-	jg			1b // main loop 
-
-0: // consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	vperm2f128	$0x20, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	cmpl	$1, %r10d
-	jle		3f
-	// 1
-	vperm2f128	$0x20, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	cmpl	$2, %r10d
-	jle		3f
-	// 2
-	vperm2f128	$0x20, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	cmpl	$3, %r10d
-	jle		3f
-	// 3
-	vperm2f128	$0x20, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 96(%r13)
-	cmpl	$4, %r10d
-	jle		3f
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 128(%r13)
-	cmpl	$5, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 160(%r13)
-	cmpl	$6, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 192(%r13)
-//	cmpl	$7, %r10d
-//	jle		3f
-	// 7
-//	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-//	vmaskmovps	%ymm8, %ymm9, 224(%r13)
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d // kleft*sizeof(float)
-	addq	%r14, %r11 // A+kleft
-	movl	%r10d, %r14d
-	sall	$5, %r14d // kleft*bs*sizeof(float)
-	addq	%r14, %r13
-	movl	$0, %r10d
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_sgetr_8_gen_lib8, .-inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_0_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_0_gen_lib8, @function
-inner_edge_sgetr_8_0_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_0_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_0_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_0_gen_lib8, .-inner_edge_sgetr_8_0_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_1_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_1_gen_lib8, @function
-inner_edge_sgetr_8_1_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_1_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_1_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	vperm2f128	$0x20, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 2
-	vperm2f128	$0x20, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 3
-	vperm2f128	$0x20, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 96(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 128(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 160(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 192(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$224, %r13 // B+7*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_1_gen_lib8, .-inner_edge_sgetr_8_1_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_2_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_2_gen_lib8, @function
-inner_edge_sgetr_8_2_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_2_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_2_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	vperm2f128	$0x20, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 3
-	vperm2f128	$0x20, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 96(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 128(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 160(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$192, %r13 // B+6*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_2_gen_lib8, .-inner_edge_sgetr_8_2_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_3_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_3_gen_lib8, @function
-inner_edge_sgetr_8_3_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_3_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_3_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	// 3
-	vperm2f128	$0x20, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 96(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 128(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$160, %r13 // B+5*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_3_gen_lib8, .-inner_edge_sgetr_8_3_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
-// r14d   <- m1
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_4_gen_lib8, @function
-inner_edge_sgetr_8_4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_4_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	// 3
-	// 4
-	vperm2f128	$0x31, %ymm1, %ymm0, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 96(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$128, %r13 // B+4*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_4_gen_lib8, .-inner_edge_sgetr_8_4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
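-// r14d   <- m1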
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_5_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_5_gen_lib8, @function
-inner_edge_sgetr_8_5_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_5_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_5_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	// 3
-	// 4
-	// 5
-	vperm2f128	$0x31, %ymm3, %ymm2, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 64(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$96, %r13 // B+3*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_5_gen_lib8, .-inner_edge_sgetr_8_5_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
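-// r14d   <- m1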
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_6_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_6_gen_lib8, @function
-inner_edge_sgetr_8_6_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_6_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_6_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	// 3
-	// 4
-	// 5
-	// 6
-	vperm2f128	$0x31, %ymm5, %ymm4, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jle		3f
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 32(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$64, %r13 // B+2*bs*sizeof(float)
-
-	jmp		2f
-
-3:
-	movl	%r10d, %r14d
-	sall	$2, %r14d
-	addq	%r14, %r11 // A+k*sizeof(float)
-	movl	%r10d, %r14d
-	sall	$5, %r14d
-	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_6_gen_lib8, .-inner_edge_sgetr_8_6_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// subroutine
-//
-// input arguments:
-// r10d   <- k
-// r11    <- A
-// r12    <- 8*sda*sizeof(float)
-// r13    <- B
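-// r14d   <- m1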
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_SGETR_8_7_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_sgetr_8_7_gen_lib8, @function
-inner_edge_sgetr_8_7_gen_lib8:
-#elif defined(OS_MAC)
-_inner_edge_sgetr_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_sgetr_8_7_gen_lib8; .scl 2; .type 32; .endef
-inner_edge_sgetr_8_7_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-	vmovupd		%ymm15, -32(%rsp) // spill mask to stack
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// common
-	vmovaps		0(%r11), %ymm0
-	vmovaps		32(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm8
-	vunpckhps	%ymm1, %ymm0, %ymm9
-	vmovaps		64(%r11), %ymm0
-	vmovaps		96(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm10
-	vunpckhps	%ymm1, %ymm0, %ymm11
-	vmovaps		128(%r11), %ymm0
-	vmovaps		160(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm12
-	vunpckhps	%ymm1, %ymm0, %ymm13
-	vmovaps		192(%r11), %ymm0
-	vmovaps		224(%r11), %ymm1
-	vunpcklps	%ymm1, %ymm0, %ymm14
-	vunpckhps	%ymm1, %ymm0, %ymm15
-	vshufps		$0x44, %ymm10, %ymm8, %ymm0
-	vshufps		$0x44, %ymm14, %ymm12, %ymm1
-	vshufps		$0xee, %ymm10, %ymm8, %ymm2
-	vshufps		$0xee, %ymm14, %ymm12, %ymm3
-	vshufps		$0x44, %ymm11, %ymm9, %ymm4
-	vshufps		$0x44, %ymm15, %ymm13, %ymm5
-	vshufps		$0xee, %ymm11, %ymm9, %ymm6
-	vshufps		$0xee, %ymm15, %ymm13, %ymm7
-
-	vmovupd		-32(%rsp), %ymm9
-
-	// 0
-	// 1
-	// 2
-	// 3
-	// 4
-	// 5
-	// 6
-	// 7
-	vperm2f128	$0x31, %ymm7, %ymm6, %ymm8
-	vmaskmovps	%ymm8, %ymm9, 0(%r13)
-	subl	$1, %r10d
-
-	addq	%r12, %r11 // A+bs*sda*sizeof(float)
-	addq	$32, %r13 // B+1*bs*sizeof(float)
-
-//	jmp		2f
-//
-//3:
-//	movl	%r10d, %r14d
-//	sall	$2, %r14d
-//	addq	%r14, %r11 // A+k*sizeof(float)
-//	movl	%r10d, %r14d
-//	sall	$5, %r14d
-//	addq	%r14, %r13 // B+k*bs*sizeof(float)
-
-2: // return
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_sgetr_8_7_gen_lib8, .-inner_edge_sgetr_8_7_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_0_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_0_lib8
-	.type kernel_sgetr_8_0_lib8, @function
-kernel_sgetr_8_0_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_0_lib8
-_kernel_sgetr_8_0_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_0_lib8
-	.def kernel_sgetr_8_0_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_0_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-
-	// offsetA==0: no edge
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_0_lib8, .-kernel_sgetr_8_0_lib8
-#endif
-
-
-
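For reference, kernel_sgetr_8_0_lib8 is a panel-major transpose: it writes the 8 x k transpose of a k x 8 block of A into a single 8-row panel of B. A rough scalar model, assuming the lib8 layout (bs = 8, sda = panel stride of A in columns); this is my reading of the assembly, not part of the original source:

/* Sketch only: scalar equivalent of kernel_sgetr_8_0_lib8. */
void sgetr_8_0_ref(int k, const float *A, int sda, float *B)
{
	const int bs = 8;
	for (int j = 0; j < k; j++)          /* row j of A becomes column j of B */
		for (int i = 0; i < 8; i++)      /* column i of A becomes row i of B */
			B[i + j * bs] = A[(j / bs) * bs * sda + i * bs + (j % bs)];
}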
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_0_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_0_gen_lib8
-	.type kernel_sgetr_8_0_gen_lib8, @function
-kernel_sgetr_8_0_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_0_gen_lib8
-_kernel_sgetr_8_0_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_0_gen_lib8
-	.def kernel_sgetr_8_0_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_0_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==0: edge to compute mask
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_0_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_0_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_0_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_0_gen_lib8, .-kernel_sgetr_8_0_gen_lib8
-#endif
-
-
-
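The _gen variant computes the same transpose but masks the stores, so only the first m1 rows of B are written; a hypothetical scalar equivalent is the sketch above with the inner store guarded by i < m1:

/* Sketch only: scalar equivalent of kernel_sgetr_8_0_gen_lib8. */
void sgetr_8_0_gen_ref(int k, const float *A, int sda, float *B, int m1)
{
	const int bs = 8;
	for (int j = 0; j < k; j++)
		for (int i = 0; i < 8 && i < m1; i++)
			B[i + j * bs] = A[(j / bs) * bs * sda + i * bs + (j % bs)];
}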
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_1_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_1_lib8
-	.type kernel_sgetr_8_1_lib8, @function
-kernel_sgetr_8_1_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_1_lib8
-_kernel_sgetr_8_1_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_1_lib8
-	.def kernel_sgetr_8_1_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_1_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_1_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_1_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_1_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_1_lib8, .-kernel_sgetr_8_1_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_1_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_1_gen_lib8
-	.type kernel_sgetr_8_1_gen_lib8, @function
-kernel_sgetr_8_1_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_1_gen_lib8
-_kernel_sgetr_8_1_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_1_gen_lib8
-	.def kernel_sgetr_8_1_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_1_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_1_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_1_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_1_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_1_gen_lib8, .-kernel_sgetr_8_1_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_2_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_2_lib8
-	.type kernel_sgetr_8_2_lib8, @function
-kernel_sgetr_8_2_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_2_lib8
-_kernel_sgetr_8_2_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_2_lib8
-	.def kernel_sgetr_8_2_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_2_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==2
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_2_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_2_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_2_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_2_lib8, .-kernel_sgetr_8_2_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_2_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_2_gen_lib8
-	.type kernel_sgetr_8_2_gen_lib8, @function
-kernel_sgetr_8_2_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_2_gen_lib8
-_kernel_sgetr_8_2_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_2_gen_lib8
-	.def kernel_sgetr_8_2_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_2_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==2
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_2_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_2_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_2_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_2_gen_lib8, .-kernel_sgetr_8_2_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_3_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_3_lib8
-	.type kernel_sgetr_8_3_lib8, @function
-kernel_sgetr_8_3_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_3_lib8
-_kernel_sgetr_8_3_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_3_lib8
-	.def kernel_sgetr_8_3_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_3_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==3
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_3_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_3_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_3_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_3_lib8, .-kernel_sgetr_8_3_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_3_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_3_gen_lib8
-	.type kernel_sgetr_8_3_gen_lib8, @function
-kernel_sgetr_8_3_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_3_gen_lib8
-_kernel_sgetr_8_3_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_3_gen_lib8
-	.def kernel_sgetr_8_3_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_3_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==3
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_3_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_3_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_3_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_3_gen_lib8, .-kernel_sgetr_8_3_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_4_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_4_lib8
-	.type kernel_sgetr_8_4_lib8, @function
-kernel_sgetr_8_4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_4_lib8
-_kernel_sgetr_8_4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_4_lib8
-	.def kernel_sgetr_8_4_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==4
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_4_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_4_lib8, .-kernel_sgetr_8_4_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_4_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_4_gen_lib8
-	.type kernel_sgetr_8_4_gen_lib8, @function
-kernel_sgetr_8_4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_4_gen_lib8
-_kernel_sgetr_8_4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_4_gen_lib8
-	.def kernel_sgetr_8_4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==4
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_4_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_4_gen_lib8, .-kernel_sgetr_8_4_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_5_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_5_lib8
-	.type kernel_sgetr_8_5_lib8, @function
-kernel_sgetr_8_5_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_5_lib8
-_kernel_sgetr_8_5_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_5_lib8
-	.def kernel_sgetr_8_5_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_5_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==5
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_5_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_5_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_5_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_5_lib8, .-kernel_sgetr_8_5_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_5_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_5_gen_lib8
-	.type kernel_sgetr_8_5_gen_lib8, @function
-kernel_sgetr_8_5_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_5_gen_lib8
-_kernel_sgetr_8_5_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_5_gen_lib8
-	.def kernel_sgetr_8_5_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_5_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==5
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_5_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_5_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_5_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_5_gen_lib8, .-kernel_sgetr_8_5_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_6_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_6_lib8
-	.type kernel_sgetr_8_6_lib8, @function
-kernel_sgetr_8_6_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_6_lib8
-_kernel_sgetr_8_6_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_6_lib8
-	.def kernel_sgetr_8_6_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_6_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==6
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_6_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_6_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_6_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_6_lib8, .-kernel_sgetr_8_6_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_6_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_6_gen_lib8
-	.type kernel_sgetr_8_6_gen_lib8, @function
-kernel_sgetr_8_6_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_6_gen_lib8
-_kernel_sgetr_8_6_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_6_gen_lib8
-	.def kernel_sgetr_8_6_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_6_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==6
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_6_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_6_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_6_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_6_gen_lib8, .-kernel_sgetr_8_6_gen_lib8
-#endif
-
-
-
-
-
-//                            rdi    rsi       rdx      rcx
-// void kernel_sgetr_8_7_lib8(int k, float *A, int sda, float *B);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_7_lib8
-	.type kernel_sgetr_8_7_lib8, @function
-kernel_sgetr_8_7_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_7_lib8
-_kernel_sgetr_8_7_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_7_lib8
-	.def kernel_sgetr_8_7_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_7_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	$8, %r14  // m1
-
-	// offsetA==7
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_7_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_7_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_7_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_7_lib8, .-kernel_sgetr_8_7_lib8
-#endif
-
-
-
-
-
-//                                rdi    rsi       rdx      rcx       r8
-// void kernel_sgetr_8_7_gen_lib8(int k, float *A, int sda, float *B, int m1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgetr_8_7_gen_lib8
-	.type kernel_sgetr_8_7_gen_lib8, @function
-kernel_sgetr_8_7_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgetr_8_7_gen_lib8
-_kernel_sgetr_8_7_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgetr_8_7_gen_lib8
-	.def kernel_sgetr_8_7_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgetr_8_7_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// call inner sgetr kernel
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13  // B
-	movq	ARG5, %r14  // m1
-
-	// offsetA==7
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_SGETR_8_7_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_sgetr_8_7_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_sgetr_8_7_gen_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_KERNEL_SGETR_8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_sgetr_8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_sgetr_8_gen_lib8
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgetr_8_7_gen_lib8, .-kernel_sgetr_8_7_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
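The .long values above are bit patterns of single-precision floats: .LC00 is { 0.5, 1.5, ..., 7.5 } in memory order (1056964608 == 0x3F000000 == 0.5f), .LC01 and .LC02 continue the sequence up to 23.5, and .LC03 is six 1.0f followed by two -1.0f. The gen edge kernels compute .LC00 - m1, so the sign bit ends up set exactly for rows i < m1, which is what vmaskmovps keys on. A small stand-alone demo of that mask computation (illustration only, not part of the original source):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const float lc00[8] = {0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f};
	int m1 = 5;                               /* example: keep the first 5 rows */
	for (int i = 0; i < 8; i++) {
		float d = lc00[i] - (float)m1;        /* what vsubps leaves in the mask register */
		uint32_t bits;
		memcpy(&bits, &d, sizeof bits);
		printf("row %d: %+4.1f -> %s\n", i, d, (bits >> 31) ? "store" : "skip");
	}
	return 0;
}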
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/Makefile b/third_party/blasfeo/kernel/avx2/Makefile
deleted file mode 100644
index adb91c4..0000000
--- a/third_party/blasfeo/kernel/avx2/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_8x4_lib4.o kernel_dgemm_8x8_lib4.o kernel_dgemm_12x4_lib4.o kernel_dgemv_8_lib4.o kernel_dsymv_6_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgebp_lib4.o kernel_dgelqf_4_lib4.o
-OBJS += kernel_sgemm_24x4_lib8.o kernel_sgemm_16x4_lib8.o kernel_sgemm_8x8_lib8.o kernel_sgemm_8x4_lib8.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgebp_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgebp_lib4.S
deleted file mode 100644
index 4093b23..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgebp_lib4.S
+++ /dev/null
@@ -1,2741 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-//                                1      2          3        4          5          6
-// void kernel_dger4_sub_12r_lib4(int k, double *A, int sda, double *B, double *C, int sdc)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_12r_lib4
-	.type kernel_dger4_sub_12r_lib4, @function
-kernel_dger4_sub_12r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_12r_lib4
-_kernel_dger4_sub_12r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_12r_lib4
-	.def kernel_dger4_sub_12r_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_12r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmovapd	0(%r11, %r12, 1), %ymm4
-	vmovapd	32(%r11, %r12, 1), %ymm5
-	vmovapd	64(%r11, %r12, 1), %ymm6
-	vmovapd	96(%r11, %r12, 1), %ymm7
-
-	vmovapd	0(%r11, %r12, 2), %ymm8
-	vmovapd	32(%r11, %r12, 2), %ymm9
-	vmovapd	64(%r11, %r12, 2), %ymm10
-	vmovapd	96(%r11, %r12, 2), %ymm11
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm12
-	vmovapd			0(%r14, %r15, 1), %ymm13
-	vmovapd			0(%r14, %r15, 2), %ymm14
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 0(%r14)
-	vmovapd			%ymm13, 0(%r14, %r15, 1)
-	vmovapd			%ymm14, 0(%r14, %r15, 2)
-
-	vmovapd			32(%r14), %ymm12
-	vmovapd			32(%r14, %r15, 1), %ymm13
-	vmovapd			32(%r14, %r15, 2), %ymm14
-	vbroadcastsd	32(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	40(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	48(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	56(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 32(%r14)
-	vmovapd			%ymm13, 32(%r14, %r15, 1)
-	vmovapd			%ymm14, 32(%r14, %r15, 2)
-
-	vmovapd			64(%r14), %ymm12
-	vmovapd			64(%r14, %r15, 1), %ymm13
-	vmovapd			64(%r14, %r15, 2), %ymm14
-	vbroadcastsd	64(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	72(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	80(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	88(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 64(%r14)
-	vmovapd			%ymm13, 64(%r14, %r15, 1)
-	vmovapd			%ymm14, 64(%r14, %r15, 2)
-
-	vmovapd			96(%r14), %ymm12
-	vmovapd			96(%r14, %r15, 1), %ymm13
-	vmovapd			96(%r14, %r15, 2), %ymm14
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	-24(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	-16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, -32(%r14)
-	vmovapd			%ymm13, -32(%r14, %r15, 1)
-	vmovapd			%ymm14, -32(%r14, %r15, 2)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm12
-	vmovapd			0(%r14, %r15, 1), %ymm13
-	vmovapd			0(%r14, %r15, 2), %ymm14
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	8(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 0(%r14)
-	vmovapd			%ymm13, 0(%r14, %r15, 1)
-	vmovapd			%ymm14, 0(%r14, %r15, 2)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // cleanup loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_12r_lib4, .-kernel_dger4_sub_12r_lib4
-#endif
-
-
-
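kernel_dger4_sub_12r_lib4 subtracts a rank-4 update from a 12 x k block held in three 4-row panels: C[0:12, 0:k] -= A[0:12, 0:4] * B[0:4, 0:k]. A rough scalar model, assuming the lib4 layout (bs = 4, panel strides sda and sdc in columns, B a 4 x k block in one panel); my reading of the assembly, not part of the original source:

/* Sketch only: scalar equivalent of kernel_dger4_sub_12r_lib4. */
void dger4_sub_12r_ref(int k, const double *A, int sda,
                       const double *B, double *C, int sdc)
{
	const int bs = 4;
	for (int j = 0; j < k; j++)
		for (int i = 0; i < 12; i++)
			for (int l = 0; l < 4; l++)
				C[(i / bs) * bs * sdc + j * bs + (i % bs)] -=
					A[(i / bs) * bs * sda + l * bs + (i % bs)] * B[l + j * bs];
}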
-
-
-//                                  1      2          3        4          5          6        7
-// void kernel_dger4_sub_12r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_12r_vs_lib4
-	.type kernel_dger4_sub_12r_vs_lib4, @function
-kernel_dger4_sub_12r_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_12r_vs_lib4
-_kernel_dger4_sub_12r_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_12r_vs_lib4
-	.def kernel_dger4_sub_12r_vs_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_12r_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-	movq	ARG7, %rax // km
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	vcvtsi2sd	%eax, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmovapd	0(%r11, %r12, 1), %ymm4
-	vmovapd	32(%r11, %r12, 1), %ymm5
-	vmovapd	64(%r11, %r12, 1), %ymm6
-	vmovapd	96(%r11, %r12, 1), %ymm7
-
-	vmaskmovpd	0(%r11, %r12, 2), %ymm15, %ymm8
-	vmaskmovpd	32(%r11, %r12, 2), %ymm15, %ymm9
-	vmaskmovpd	64(%r11, %r12, 2), %ymm15, %ymm10
-	vmaskmovpd	96(%r11, %r12, 2), %ymm15, %ymm11
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm12
-	vmovapd			0(%r14, %r15, 1), %ymm13
-	vmovapd			0(%r14, %r15, 2), %ymm14
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 0(%r14)
-	vmovapd			%ymm13, 0(%r14, %r15, 1)
-	vmovapd			%ymm14, 0(%r14, %r15, 2)
-
-	vmovapd			32(%r14), %ymm12
-	vmovapd			32(%r14, %r15, 1), %ymm13
-	vmovapd			32(%r14, %r15, 2), %ymm14
-	vbroadcastsd	32(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	40(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	48(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	56(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 32(%r14)
-	vmovapd			%ymm13, 32(%r14, %r15, 1)
-	vmovapd			%ymm14, 32(%r14, %r15, 2)
-
-	vmovapd			64(%r14), %ymm12
-	vmovapd			64(%r14, %r15, 1), %ymm13
-	vmovapd			64(%r14, %r15, 2), %ymm14
-	vbroadcastsd	64(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	72(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	80(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	88(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 64(%r14)
-	vmovapd			%ymm13, 64(%r14, %r15, 1)
-	vmovapd			%ymm14, 64(%r14, %r15, 2)
-
-	vmovapd			96(%r14), %ymm12
-	vmovapd			96(%r14, %r15, 1), %ymm13
-	vmovapd			96(%r14, %r15, 2), %ymm14
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	-24(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	-16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, -32(%r14)
-	vmovapd			%ymm13, -32(%r14, %r15, 1)
-	vmovapd			%ymm14, -32(%r14, %r15, 2)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm12
-	vmovapd			0(%r14, %r15, 1), %ymm13
-	vmovapd			0(%r14, %r15, 2), %ymm14
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm12
-	vfnmadd231pd	%ymm4, %ymm15, %ymm13
-	vfnmadd231pd	%ymm8, %ymm15, %ymm14
-	vbroadcastsd	8(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm12
-	vfnmadd231pd	%ymm5, %ymm15, %ymm13
-	vfnmadd231pd	%ymm9, %ymm15, %ymm14
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm12
-	vfnmadd231pd	%ymm6, %ymm15, %ymm13
-	vfnmadd231pd	%ymm10, %ymm15, %ymm14
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm12
-	vfnmadd231pd	%ymm7, %ymm15, %ymm13
-	vfnmadd231pd	%ymm11, %ymm15, %ymm14
-	vmovapd			%ymm12, 0(%r14)
-	vmovapd			%ymm13, 0(%r14, %r15, 1)
-	vmovapd			%ymm14, 0(%r14, %r15, 2)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // cleanup loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_12r_vs_lib4, .-kernel_dger4_sub_12r_vs_lib4
-#endif
-
-
-
-
-
-//                               1      2          3        4          5          6
-// void kernel_dger4_sub_8r_lib4(int k, double *A, int sda, double *B, double *C, int sdc)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_8r_lib4
-	.type kernel_dger4_sub_8r_lib4, @function
-kernel_dger4_sub_8r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_8r_lib4
-_kernel_dger4_sub_8r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_8r_lib4
-	.def kernel_dger4_sub_8r_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_8r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmovapd	0(%r11, %r12, 1), %ymm4
-	vmovapd	32(%r11, %r12, 1), %ymm5
-	vmovapd	64(%r11, %r12, 1), %ymm6
-	vmovapd	96(%r11, %r12, 1), %ymm7
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	vmovapd			32(%r14), %ymm8
-	vmovapd			32(%r14, %r15, 1), %ymm9
-	vbroadcastsd	32(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	40(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	48(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	56(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 32(%r14)
-	vmovapd			%ymm9, 32(%r14, %r15, 1)
-
-	vmovapd			64(%r14), %ymm8
-	vmovapd			64(%r14, %r15, 1), %ymm9
-	vbroadcastsd	64(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	72(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	80(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	88(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 64(%r14)
-	vmovapd			%ymm9, 64(%r14, %r15, 1)
-
-	vmovapd			96(%r14), %ymm8
-	vmovapd			96(%r14, %r15, 1), %ymm9
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	-24(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	-16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, -32(%r14)
-	vmovapd			%ymm9, -32(%r14, %r15, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // cleanup loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_8r_lib4, .-kernel_dger4_sub_8r_lib4
-#endif
-
-
-
-
-
-//                                 1      2          3        4          5          6        7
-// void kernel_dger4_sub_8r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_8r_vs_lib4
-	.type kernel_dger4_sub_8r_vs_lib4, @function
-kernel_dger4_sub_8r_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_8r_vs_lib4
-_kernel_dger4_sub_8r_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_8r_vs_lib4
-	.def kernel_dger4_sub_8r_vs_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_8r_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // C
-	movq	ARG6, %r15 // sdc
-	sall	$5, %r15d // 4*sdc*sizeof(double)
-	movq	ARG7, %rax // km
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	vcvtsi2sd	%eax, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC01(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC01(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	vmaskmovpd	0(%r11, %r12, 1), %ymm15, %ymm4
-	vmaskmovpd	32(%r11, %r12, 1), %ymm15, %ymm5
-	vmaskmovpd	64(%r11, %r12, 1), %ymm15, %ymm6
-	vmaskmovpd	96(%r11, %r12, 1), %ymm15, %ymm7
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	vmovapd			32(%r14), %ymm8
-	vmovapd			32(%r14, %r15, 1), %ymm9
-	vbroadcastsd	32(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	40(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	48(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	56(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 32(%r14)
-	vmovapd			%ymm9, 32(%r14, %r15, 1)
-
-	vmovapd			64(%r14), %ymm8
-	vmovapd			64(%r14, %r15, 1), %ymm9
-	vbroadcastsd	64(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	72(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	80(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	88(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 64(%r14)
-	vmovapd			%ymm9, 64(%r14, %r15, 1)
-
-	vmovapd			96(%r14), %ymm8
-	vmovapd			96(%r14, %r15, 1), %ymm9
-	vbroadcastsd	96(%r13), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	-24(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	-16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	-8(%r13), %ymm15
-	addq	$128, %r14
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, -32(%r14)
-	vmovapd			%ymm9, -32(%r14, %r15, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm8
-	vmovapd			0(%r14, %r15, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm8
-	vfnmadd231pd	%ymm4, %ymm15, %ymm9
-	vbroadcastsd	8(%r13), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm8
-	vfnmadd231pd	%ymm5, %ymm15, %ymm9
-	vbroadcastsd	16(%r13), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm8
-	vfnmadd231pd	%ymm6, %ymm15, %ymm9
-	vbroadcastsd	24(%r13), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm8
-	vfnmadd231pd	%ymm7, %ymm15, %ymm9
-	vmovapd			%ymm8, 0(%r14)
-	vmovapd			%ymm9, 0(%r14, %r15, 1)
-
-	addq	$32, %r13
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_8r_vs_lib4, .-kernel_dger4_sub_8r_vs_lib4
-#endif
-
-
-
-
-
-//                               1      2          3          4        5
-// void kernel_dger12_add_4r_lib4(int n, double *A, double *B, int sdb, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger12_add_4r_lib4
-	.type kernel_dger12_add_4r_lib4, @function
-kernel_dger12_add_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger12_add_4r_lib4
-_kernel_dger12_add_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger12_add_4r_lib4
-	.def kernel_dger12_add_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger12_add_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // n
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d
-	movq	ARG5, %r14 // C
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$11, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-	vmovapd	128(%r14), %ymm4
-	vmovapd	160(%r14), %ymm5
-	vmovapd	192(%r14), %ymm6
-	vmovapd	224(%r14), %ymm7
-	vmovapd	256(%r14), %ymm8
-	vmovapd	288(%r14), %ymm9
-	vmovapd	320(%r14), %ymm10
-	vmovapd	352(%r14), %ymm11
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-	vmovapd	%ymm4, 128(%r14)
-	vmovapd	%ymm5, 160(%r14)
-	vmovapd	%ymm6, 192(%r14)
-	vmovapd	%ymm7, 224(%r14)
-	vmovapd	%ymm8, 256(%r14)
-	vmovapd	%ymm9, 288(%r14)
-	vmovapd	%ymm10, 320(%r14)
-	vmovapd	%ymm11, 352(%r14)
-
-	addq	$384, %r12
-	addq	$384, %r14
-	subl	$12, %r10d
-
-	cmpl	$11, %r10d
-	jg		1b // main loop
-
-2:
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-
-	addq	$128, %r12
-	addq	$128, %r14
-	subl	$4, %r10d
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-2:
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-
-	addq	$32, %r12
-	addq	$32, %r14
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		1b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger12_add_4r_lib4, .-kernel_dger12_add_4r_lib4
-#endif
-
-
-
-
-
-//                               1      2          3          4        5
-// void kernel_dger8_add_4r_lib4(int n, double *A, double *B, int sdb, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger8_add_4r_lib4
-	.type kernel_dger8_add_4r_lib4, @function
-kernel_dger8_add_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger8_add_4r_lib4
-_kernel_dger8_add_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger8_add_4r_lib4
-	.def kernel_dger8_add_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger8_add_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // n
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d
-	movq	ARG5, %r14 // C
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$11, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-	vmovapd	128(%r14), %ymm4
-	vmovapd	160(%r14), %ymm5
-	vmovapd	192(%r14), %ymm6
-	vmovapd	224(%r14), %ymm7
-	vmovapd	256(%r14), %ymm8
-	vmovapd	288(%r14), %ymm9
-	vmovapd	320(%r14), %ymm10
-	vmovapd	352(%r14), %ymm11
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-	vmovapd	%ymm4, 128(%r14)
-	vmovapd	%ymm5, 160(%r14)
-	vmovapd	%ymm6, 192(%r14)
-	vmovapd	%ymm7, 224(%r14)
-	vmovapd	%ymm8, 256(%r14)
-	vmovapd	%ymm9, 288(%r14)
-	vmovapd	%ymm10, 320(%r14)
-	vmovapd	%ymm11, 352(%r14)
-
-	addq	$384, %r12
-	addq	$384, %r14
-	subl	$12, %r10d
-
-	cmpl	$11, %r10d
-	jg		1b // main loop
-
-2:
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-
-	addq	$128, %r12
-	addq	$128, %r14
-	subl	$4, %r10d
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-2:
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-
-	addq	$32, %r12
-	addq	$32, %r14
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		1b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger8_add_4r_lib4, .-kernel_dger8_add_4r_lib4
-#endif
-
-
-
-
-
-#if 0
-//                               1      2          3          4        5
-// void kernel_dger8_sub_4r_lib4(int n, double *A, double *B, int sdb, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger8_add_4r_lib4
-	.type kernel_dger8_add_4r_lib4, @function
-kernel_dger8_add_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger8_add_4r_lib4
-_kernel_dger8_add_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger8_add_4r_lib4
-	.def kernel_dger8_add_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger8_add_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-	sall	$5, %r13d
-	movq	ARG5, %r14
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-	vmovapd	128(%r11), %ymm4
-	vmovapd	160(%r11), %ymm5
-	vmovapd	192(%r11), %ymm6
-	vmovapd	224(%r11), %ymm7
-
-	cmpl	$7, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	// 04
-	vmovapd			0(%r14), %ymm12
-	vbroadcastsd	0(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	8(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	16(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	24(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 0(%r14)
-
-	// 14
-	vmovapd			32(%r14), %ymm12
-	vbroadcastsd	32(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	40(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	48(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	56(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 32(%r14)
-
-	// 24
-	vmovapd			64(%r14), %ymm12
-	vbroadcastsd	64(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	72(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	80(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	88(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 64(%r14)
-
-	// 34
-	vmovapd			96(%r14), %ymm12
-	vbroadcastsd	96(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	104(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	112(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	120(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 96(%r14)
-
-	// 44
-	vmovapd			128(%r14), %ymm12
-	vbroadcastsd	128(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	136(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	144(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	152(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 128(%r14)
-
-	// 54
-	vmovapd			160(%r14), %ymm12
-	vbroadcastsd	160(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	168(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	176(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	184(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 160(%r14)
-
-	// 64
-	vmovapd			192(%r14), %ymm12
-	vbroadcastsd	192(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	200(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	208(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	216(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 192(%r14)
-
-	// 74
-	vmovapd			224(%r14), %ymm12
-	vbroadcastsd	224(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	232(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	240(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	248(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vmovapd			%ymm12, 224(%r14)
-
-	// 08
-	vmovapd			0(%r14), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 0(%r14)
-
-	// 18
-	vmovapd			32(%r14), %ymm12
-	vbroadcastsd	32(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	40(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	48(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	56(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 32(%r14)
-
-	// 28
-	vmovapd			64(%r14), %ymm12
-	vbroadcastsd	64(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	72(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	80(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	88(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 64(%r14)
-
-	// 38
-	vmovapd			96(%r14), %ymm12
-	vbroadcastsd	96(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	104(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	112(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	120(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 96(%r14)
-
-	// 48
-	vmovapd			128(%r14), %ymm12
-	vbroadcastsd	128(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	136(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	144(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	152(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 128(%r14)
-
-	// 58
-	vmovapd			160(%r14), %ymm12
-	vbroadcastsd	160(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	168(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	176(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	184(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 160(%r14)
-
-	// 68
-	vmovapd			192(%r14), %ymm12
-	vbroadcastsd	192(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	200(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	208(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	216(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 192(%r14)
-
-	// 78
-	vmovapd			224(%r14), %ymm12
-	vbroadcastsd	224(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	232(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	240(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	248(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 224(%r14)
-
-	addq	$256, %r12
-	addq	$256, %r14
-	subl	$8, %r10d
-
-	cmpl	$7, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r14), %ymm12
-	vbroadcastsd	0(%r12), %ymm15
-	vfmadd231pd		%ymm0, %ymm15, %ymm12
-	vbroadcastsd	8(%r12), %ymm15
-	vfmadd231pd		%ymm1, %ymm15, %ymm12
-	vbroadcastsd	16(%r12), %ymm15
-	vfmadd231pd		%ymm2, %ymm15, %ymm12
-	vbroadcastsd	24(%r12), %ymm15
-	vfmadd231pd		%ymm3, %ymm15, %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm4, %ymm15, %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm5, %ymm15, %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm6, %ymm15, %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm15
-	vfmadd231pd		%ymm7, %ymm15, %ymm12
-	vmovapd			%ymm12, 0(%r14)
-
-	addq	$32, %r12
-	addq	$32, %r14
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger8_add_4r_lib4, .-kernel_dger8_add_4r_lib4
-#endif
-#endif
-
-
-
-
-
-//                              1      2          3          4
-// void kernel_dger4_sub_4r_lib4(int n, double *A, double *B, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_4r_lib4
-	.type kernel_dger4_sub_4r_lib4, @function
-kernel_dger4_sub_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_4r_lib4
-_kernel_dger4_sub_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_4r_lib4
-	.def kernel_dger4_sub_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-	vmovapd	64(%r11), %ymm2
-	vmovapd	96(%r11), %ymm3
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	vmovapd			32(%r13), %ymm4
-	vbroadcastsd	32(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	40(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	48(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	56(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 32(%r13)
-
-	vmovapd			64(%r13), %ymm4
-	vbroadcastsd	64(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	72(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	80(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	88(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 64(%r13)
-
-	vmovapd			96(%r13), %ymm4
-	vbroadcastsd	96(%r12), %ymm15
-	addq	$128, %r12
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	-24(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	-16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	-8(%r12), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, -32(%r13)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	addq	$32, %r12
-	addq	$32, %r13
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_4r_lib4, .-kernel_dger4_sub_4r_lib4
-#endif
-
-
-
-
-
-//                              1      2          3          4
-// void kernel_dger2_sub_4r_lib4(int n, double *A, double *B, double *C)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger2_sub_4r_lib4
-	.type kernel_dger2_sub_4r_lib4, @function
-kernel_dger2_sub_4r_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger2_sub_4r_lib4
-_kernel_dger2_sub_4r_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger2_sub_4r_lib4
-	.def kernel_dger2_sub_4r_lib4; .scl 2; .type 32; .endef
-kernel_dger2_sub_4r_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// load block from A
-	vmovapd	0(%r11), %ymm0
-	vmovapd	32(%r11), %ymm1
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	vmovapd			32(%r13), %ymm4
-	vbroadcastsd	32(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	40(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vmovapd			%ymm4, 32(%r13)
-
-	vmovapd			64(%r13), %ymm4
-	vbroadcastsd	64(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	72(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vmovapd			%ymm4, 64(%r13)
-
-	vmovapd			96(%r13), %ymm4
-	vbroadcastsd	96(%r12), %ymm15
-	addq	$128, %r12
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	-24(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	addq	$128, %r13
-	vmovapd			%ymm4, -32(%r13)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	addq	$32, %r12
-	addq	$32, %r13
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger2_sub_4r_lib4, .-kernel_dger2_sub_4r_lib4
-#endif
-
-
-
-
-
-//                                 1      2          3          4          5
-// void kernel_dger4_sub_4r_vs_lib4(int n, double *A, double *B, double *C, int km)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dger4_sub_4r_vs_lib4
-	.type kernel_dger4_sub_4r_vs_lib4, @function
-kernel_dger4_sub_4r_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dger4_sub_4r_vs_lib4
-_kernel_dger4_sub_4r_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dger4_sub_4r_vs_lib4
-	.def kernel_dger4_sub_4r_vs_lib4; .scl 2; .type 32; .endef
-kernel_dger4_sub_4r_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	movq	ARG4, %r13
-	movq	ARG5, %r14
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC00(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC00(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	// load block from A
-	vmaskmovpd	0(%r11), %ymm15, %ymm0
-	vmaskmovpd	32(%r11), %ymm15, %ymm1
-	vmaskmovpd	64(%r11), %ymm15, %ymm2
-	vmaskmovpd	96(%r11), %ymm15, %ymm3
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	vmovapd			32(%r13), %ymm4
-	vbroadcastsd	32(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	40(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	48(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	56(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 32(%r13)
-
-	vmovapd			64(%r13), %ymm4
-	vbroadcastsd	64(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	72(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	80(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	88(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 64(%r13)
-
-	vmovapd			96(%r13), %ymm4
-	vbroadcastsd	96(%r12), %ymm15
-	addq	$128, %r12
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	-24(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	-16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	-8(%r12), %ymm15
-	addq	$128, %r13
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, -32(%r13)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r13), %ymm4
-	vbroadcastsd	0(%r12), %ymm15
-	vfnmadd231pd	%ymm0, %ymm15, %ymm4
-	vbroadcastsd	8(%r12), %ymm15
-	vfnmadd231pd	%ymm1, %ymm15, %ymm4
-	vbroadcastsd	16(%r12), %ymm15
-	vfnmadd231pd	%ymm2, %ymm15, %ymm4
-	vbroadcastsd	24(%r12), %ymm15
-	vfnmadd231pd	%ymm3, %ymm15, %ymm4
-	vmovapd			%ymm4, 0(%r13)
-
-	addq	$32, %r12
-	addq	$32, %r13
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dger4_sub_4r_vs_lib4, .-kernel_dger4_sub_4r_vs_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00:
-#elif defined(OS_MAC)
-LC00:
-	.align 5
-#endif
-	.double 0.5
-	.double 1.5
-	.double 2.5
-	.double 3.5
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01:
-#elif defined(OS_MAC)
-LC01:
-	.align 5
-#endif
-	.double 4.5
-	.double 5.5
-	.double 6.5
-	.double 7.5
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02:
-#elif defined(OS_MAC)
-LC02:
-	.align 5
-#endif
-	.double 8.5
-	.double 9.5
-	.double 10.5
-	.double 11.5
-
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
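As a rough reference for the kernels removed above: kernel_dger4_sub_4r_lib4 subtracts a rank-4 update from a 4-row panel, with columns stored 4 doubles apart (32 bytes, matching the vmovapd/vbroadcastsd/vfnmadd231pd pattern in the assembly). A minimal C sketch, reconstructed from the assembly rather than taken from BLASFEO itself; the name dger4_sub_4r_ref and its layout comments are this sketch's own assumptions:

    /* Illustrative sketch (not BLASFEO code): same rank-4 subtraction as the
       AVX2 kernel above, on a 4-row panel with 4 doubles per column. */
    void dger4_sub_4r_ref(int n, const double *A, const double *B, double *C)
    {
        /* A: 4x4 block, column k at A + 4*k (the four vmovapd loads)       */
        /* B: 4xn block, column j at B + 4*j (elements broadcast one by one) */
        /* C: 4xn block, column j at C + 4*j, updated in place               */
        for (int j = 0; j < n; j++)
            for (int i = 0; i < 4; i++)
                for (int k = 0; k < 4; k++)
                    C[i + 4*j] -= A[i + 4*k] * B[k + 4*j];
    }

The _vs variants differ only in masking the rows of A (km active rows) before the same update; the dger2 variant uses two columns of A instead of four.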
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4.S
deleted file mode 100644
index 2f8b1be..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4.S
+++ /dev/null
@@ -1,5728 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-//                                   1      2           3        4           5
-// void kernel_dgelqf_dlarft12_12_lib4(int n, double *pD, int sdd, double *dD, double *pT)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgelqf_dlarft12_12_lib4
-	.type kernel_dgelqf_dlarft12_12_lib4, @function
-kernel_dgelqf_dlarft12_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgelqf_dlarft12_12_lib4
-_kernel_dgelqf_dlarft12_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgelqf_dlarft12_12_lib4
-	.def kernel_dgelqf_dlarft12_12_lib4; .scl 2; .type 32; .endef
-kernel_dgelqf_dlarft12_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero T
-
-	movq	ARG5, %r10 // T
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm15, 0(%r10)
-	vmovapd			%ymm15, 32(%r10)
-	vmovapd			%ymm15, 64(%r10)
-	vmovapd			%ymm15, 96(%r10)
-
-	// first column
-
-	movq	ARG2, %r11 // D
-	movq	ARG3, %r14 // sdd
-	sall	$5, %r14d
-	movq	ARG4, %r12 // dD
-	movq	ARG5, %r13 // T
-	movq	$384, %r15 // sdt !!!!!!!!!!!!!!!!!!!!!!!!!
-
-	vxorpd			%xmm15, %xmm15, %xmm15
-	movq	ARG1, %r10 // n
-	subl	$1, %r10d
-	addq	$32, %r11
-100:
-	vmovsd			0(%r11), %xmm14
-	vfmadd231sd		%xmm14, %xmm14, %xmm15
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		100b
-
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		101f
-	vmovsd			%xmm14, 0(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			0(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 0(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 0(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r14, 1), %ymm1
-	vmovapd			0(%r11, %r14, 2), %ymm2
-	vbroadcastsd	32(%r11), %ymm8
-	vbroadcastsd	64(%r11), %ymm9
-	vbroadcastsd	96(%r11), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm8, 32(%r11)
-	vmovsd			%xmm9, 64(%r11)
-	vmovsd			%xmm10, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11), %ymm8
-	vbroadcastsd	32(%r11), %ymm9
-	vbroadcastsd	64(%r11), %ymm10
-	vbroadcastsd	96(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 0(%r11)
-	vmovsd			%xmm9, 32(%r11)
-	vmovsd			%xmm10, 64(%r11)
-	vmovsd			%xmm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm15, %ymm2, %ymm2
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// second column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 8(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			40(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 8(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 40(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vmovapd			32(%r11, %r14, 1), %ymm1
-	vmovapd			32(%r11, %r14, 2), %ymm2
-	vbroadcastsd	72(%r11), %ymm9
-	vbroadcastsd	104(%r11), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm9, 72(%r11)
-	vmovsd			%xmm10, 104(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11), %ymm8
-	vbroadcastsd	40(%r11), %ymm9
-	vbroadcastsd	72(%r11), %ymm10
-	vbroadcastsd	104(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 8(%r11)
-	vmovsd			%xmm9, 40(%r11)
-	vmovsd			%xmm10, 72(%r11)
-	vmovsd			%xmm11, 104(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 8(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC02(%rip), %ymm12
-#else
-	vmovapd			LC02(%rip), %ymm12
-#endif
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm15, %ymm2, %ymm2
-	vmovsd			%xmm0, 32(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x3, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	40(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// third column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 16(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			80(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 16(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 80(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vmovapd			64(%r11, %r14, 1), %ymm1
-	vmovapd			64(%r11, %r14, 2), %ymm2
-	vbroadcastsd	112(%r11), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm10, 112(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11), %ymm8
-	vbroadcastsd	48(%r11), %ymm9
-	vbroadcastsd	80(%r11), %ymm10
-	vbroadcastsd	112(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 16(%r11)
-	vmovsd			%xmm9, 48(%r11)
-	vmovsd			%xmm10, 80(%r11)
-	vmovsd			%xmm11, 112(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 16(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vbroadcastsd	80(%r13), %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			%xmm0, 64(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x7, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	48(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	80(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-	// fourth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 24(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			120(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 24(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 120(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	vmovapd			96(%r11, %r14, 1), %ymm1
-	vmovapd			96(%r11, %r14, 2), %ymm2
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11), %ymm8
-	vbroadcastsd	56(%r11), %ymm9
-	vbroadcastsd	88(%r11), %ymm10
-	vbroadcastsd	120(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 24(%r11)
-	vmovsd			%xmm9, 56(%r11)
-	vmovsd			%xmm10, 88(%r11)
-	vmovsd			%xmm11, 120(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 24(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vbroadcastsd	120(%r13), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			96(%r13), %ymm0
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vmovapd			%ymm0, 96(%r13)
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	56(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	88(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	56(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	88(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-//	vpermpd	$0x00, %ymm15, %ymm15  // beta
-
-	// fifth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 32(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$128, %r11
-	vmovsd			0(%r11, %r14, 1), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11, %r14, 1) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 32(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 128(%r13, %r15, 1) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r14, 1), %ymm1
-	vmovapd			0(%r11, %r14, 2), %ymm2
-	vbroadcastsd	32(%r11, %r14, 1), %ymm8
-	vbroadcastsd	64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	96(%r11, %r14, 1), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm8, 32(%r11, %r14, 1)
-	vmovsd			%xmm9, 64(%r11, %r14, 1)
-	vmovsd			%xmm10, 96(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$8, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11, %r14, 1), %ymm8
-	vbroadcastsd	32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	64(%r11, %r14, 1), %ymm10
-	vbroadcastsd	96(%r11, %r14, 1), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 0(%r11, %r14, 1)
-	vmovsd			%xmm9, 32(%r11, %r14, 1)
-	vmovsd			%xmm10, 64(%r11, %r14, 1)
-	vmovsd			%xmm11, 96(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11, %r14, 1), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 0(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			96(%r13), %ymm14
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vbroadcastsd	128(%r13, %r15, 1), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-//	vmovapd			128(%r13), %ymm0
-//	vblendpd		$0xf, %ymm15, %ymm0, %ymm15
-	vmovapd			%ymm15, 128(%r13)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm1, %ymm1
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// sixth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$128, %r11
-	vmovsd			40(%r11, %r14, 1), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11, %r14, 1) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 40(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 168(%r13, %r15, 1) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vmovapd			32(%r11, %r14, 1), %ymm1
-	vmovapd			32(%r11, %r14, 2), %ymm2
-	vbroadcastsd	72(%r11, %r14, 1), %ymm9
-	vbroadcastsd	104(%r11, %r14, 1), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm9, 72(%r11, %r14, 1)
-	vmovsd			%xmm10, 104(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$8, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11, %r14, 1), %ymm8
-	vbroadcastsd	40(%r11, %r14, 1), %ymm9
-	vbroadcastsd	72(%r11, %r14, 1), %ymm10
-	vbroadcastsd	104(%r11, %r14, 1), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 8(%r11, %r14, 1)
-	vmovsd			%xmm9, 40(%r11, %r14, 1)
-	vmovsd			%xmm10, 72(%r11, %r14, 1)
-	vmovsd			%xmm11, 104(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11, %r14, 1), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 8(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			96(%r13), %ymm14
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vmovapd			128(%r13), %ymm14
-	vmovapd			128(%r13, %r15, 1), %ymm11
-	vblendpd		$0x1, %ymm11, %ymm12, %ymm11
-	vpermpd			$0x00, %ymm1, %ymm13 // vv
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmulpd			%ymm11, %ymm13, %ymm11
-	//
-	vbroadcastsd	168(%r13, %r15, 1), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			160(%r13, %r15, 1), %ymm0
-	vblendpd		$0x1, %ymm11, %ymm0, %ymm11
-	vmovapd			%ymm15, 160(%r13)
-	vmovapd			%ymm11, 160(%r13, %r15, 1)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x3, %ymm15, %ymm1, %ymm1
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	40(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// seventh column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$128, %r11
-	vmovsd			80(%r11, %r14, 1), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11, %r14, 1) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 48(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 208(%r13, %r15, 1) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vmovapd			64(%r11, %r14, 1), %ymm1
-	vmovapd			64(%r11, %r14, 2), %ymm2
-	vbroadcastsd	112(%r11, %r14, 1), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm10, 112(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$8, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11, %r14, 1), %ymm8
-	vbroadcastsd	48(%r11, %r14, 1), %ymm9
-	vbroadcastsd	80(%r11, %r14, 1), %ymm10
-	vbroadcastsd	112(%r11, %r14, 1), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 16(%r11, %r14, 1)
-	vmovsd			%xmm9, 48(%r11, %r14, 1)
-	vmovsd			%xmm10, 80(%r11, %r14, 1)
-	vmovsd			%xmm11, 112(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11, %r14, 1), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 16(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13 // vv
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13 // vv
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vbroadcastsd	208(%r13, %r15, 1), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			192(%r13, %r15, 1), %ymm0
-	vblendpd		$0x3, %ymm11, %ymm0, %ymm11
-	vmovapd			%ymm15, 192(%r13)
-	vmovapd			%ymm11, 192(%r13, %r15, 1)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x7, %ymm15, %ymm1, %ymm1
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	48(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	80(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-	// eighth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$128, %r11
-	vmovsd			120(%r11, %r14, 1), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11, %r14, 1) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 56(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 248(%r13, %r15, 1) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	vmovapd			96(%r11, %r14, 1), %ymm1
-	vmovapd			96(%r11, %r14, 2), %ymm2
-	movq	ARG1, %r10 // n
-	subl	$8, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11, %r14, 1), %ymm8
-	vbroadcastsd	56(%r11, %r14, 1), %ymm9
-	vbroadcastsd	88(%r11, %r14, 1), %ymm10
-	vbroadcastsd	120(%r11, %r14, 1), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 24(%r11, %r14, 1)
-	vmovsd			%xmm9, 56(%r11, %r14, 1)
-	vmovsd			%xmm10, 88(%r11, %r14, 1)
-	vmovsd			%xmm11, 120(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11, %r14, 1), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 24(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vmovapd			192(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			192(%r13, %r15, 1), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vbroadcastsd	248(%r13, %r15, 1), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-//	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			224(%r13, %r15, 1), %ymm0
-	vblendpd		$0x7, %ymm11, %ymm0, %ymm11
-	vmovapd			%ymm15, 224(%r13)
-	vmovapd			%ymm11, 224(%r13, %r15, 1)
-
-//	vxorpd			%ymm15, %ymm15, %ymm15
-//	vblendpd		$0xf, %ymm15, %ymm1, %ymm1
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	56(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	88(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	56(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	88(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11, %r14, 1), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-//	vpermpd	$0x00, %ymm15, %ymm15  // beta
-
-	// ninth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$256, %r11
-	vmovsd			0(%r11, %r14, 2), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11, %r14, 2) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 64(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 256(%r13, %r15, 2) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r14, 1), %ymm1
-	vmovapd			0(%r11, %r14, 2), %ymm2
-	vbroadcastsd	32(%r11, %r14, 2), %ymm8
-	vbroadcastsd	64(%r11, %r14, 2), %ymm9
-	vbroadcastsd	96(%r11, %r14, 2), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm8, 32(%r11, %r14, 2)
-	vmovsd			%xmm9, 64(%r11, %r14, 2)
-	vmovsd			%xmm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$12, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11, %r14, 2), %ymm8
-	vbroadcastsd	32(%r11, %r14, 2), %ymm9
-	vbroadcastsd	64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11, %r14, 2), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 0(%r11, %r14, 2)
-	vmovsd			%xmm9, 32(%r11, %r14, 2)
-	vmovsd			%xmm10, 64(%r11, %r14, 2)
-	vmovsd			%xmm11, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11, %r14, 2), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vmovapd			192(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			192(%r13, %r15, 1), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xff, %ymm1, %ymm13
-	vmovapd			224(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			224(%r13, %r15, 1), %ymm14
-//	vblendpd		$0xf, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vbroadcastsd	256(%r13, %r15, 2), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-//	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-//	vmovapd			224(%r13, %r15, 1), %ymm0
-//	vblendpd		$0xf, %ymm11, %ymm0, %ymm11
-	vmovapd			%ymm15, 256(%r13)
-	vmovapd			%ymm11, 256(%r13, %r15, 1)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm2, %ymm2
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// tenth column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$256, %r11
-	vmovsd			40(%r11, %r14, 2), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11, %r14, 2) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 72(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 296(%r13, %r15, 2) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vmovapd			32(%r11, %r14, 1), %ymm1
-	vmovapd			32(%r11, %r14, 2), %ymm2
-	vbroadcastsd	72(%r11, %r14, 2), %ymm9
-	vbroadcastsd	104(%r11, %r14, 2), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm9, 72(%r11, %r14, 2)
-	vmovsd			%xmm10, 104(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$12, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11, %r14, 2), %ymm8
-	vbroadcastsd	40(%r11, %r14, 2), %ymm9
-	vbroadcastsd	72(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11, %r14, 2), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 8(%r11, %r14, 2)
-	vmovsd			%xmm9, 40(%r11, %r14, 2)
-	vmovsd			%xmm10, 72(%r11, %r14, 2)
-	vmovsd			%xmm11, 104(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11, %r14, 2), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 8(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vmovapd			192(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			192(%r13, %r15, 1), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xff, %ymm1, %ymm13
-	vmovapd			224(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			224(%r13, %r15, 1), %ymm14
-//	vblendpd		$0xf, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x00, %ymm2, %ymm13
-	vmovapd			256(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			256(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			256(%r13, %r15, 2), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm10
-	//
-	vbroadcastsd	296(%r13, %r15, 2), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-	vmulpd			%ymm14, %ymm10, %ymm10
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			288(%r13, %r15, 2), %ymm0
-	vblendpd		$0x1, %ymm10, %ymm0, %ymm10
-	vmovapd			%ymm15, 288(%r13)
-	vmovapd			%ymm11, 288(%r13, %r15, 1)
-	vmovapd			%ymm10, 288(%r13, %r15, 2)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x3, %ymm15, %ymm2, %ymm2
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	40(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// eleventh column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$256, %r11
-	vmovsd			80(%r11, %r14, 2), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11, %r14, 2) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 80(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 336(%r13, %r15, 2) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vmovapd			64(%r11, %r14, 1), %ymm1
-	vmovapd			64(%r11, %r14, 2), %ymm2
-	vbroadcastsd	112(%r11, %r14, 2), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm10, 112(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$12, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11, %r14, 2), %ymm8
-	vbroadcastsd	48(%r11, %r14, 2), %ymm9
-	vbroadcastsd	80(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11, %r14, 2), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 16(%r11, %r14, 2)
-	vmovsd			%xmm9, 48(%r11, %r14, 2)
-	vmovsd			%xmm10, 80(%r11, %r14, 2)
-	vmovsd			%xmm11, 112(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11, %r14, 2), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 16(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vmovapd			192(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			192(%r13, %r15, 1), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xff, %ymm1, %ymm13
-	vmovapd			224(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			224(%r13, %r15, 1), %ymm14
-//	vblendpd		$0xf, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x00, %ymm2, %ymm13
-	vmovapd			256(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			256(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			256(%r13, %r15, 2), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm10
-	//
-	vpermpd			$0x55, %ymm2, %ymm13
-	vmovapd			288(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			288(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			288(%r13, %r15, 2), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm10
-	//
-	vbroadcastsd	336(%r13, %r15, 2), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-	vmulpd			%ymm14, %ymm10, %ymm10
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			320(%r13, %r15, 2), %ymm0
-	vblendpd		$0x3, %ymm10, %ymm0, %ymm10
-	vmovapd			%ymm15, 320(%r13)
-	vmovapd			%ymm11, 320(%r13, %r15, 1)
-	vmovapd			%ymm10, 320(%r13, %r15, 2)
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x7, %ymm15, %ymm2, %ymm2
-
-	movq	ARG2, %r11 // D
-	//
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	48(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	80(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11, %r14, 2), %ymm14
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-	// twelveth
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 40(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	addq	$256, %r11
-	vmovsd			120(%r11, %r14, 2), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11, %r14, 2) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 88(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 376(%r13, %r15, 2) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	vmovapd			96(%r11, %r14, 1), %ymm1
-	vmovapd			96(%r11, %r14, 2), %ymm2
-	movq	ARG1, %r10 // n
-	subl	$12, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11, %r14, 2), %ymm8
-	vbroadcastsd	56(%r11, %r14, 2), %ymm9
-	vbroadcastsd	88(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11, %r14, 2), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 24(%r11, %r14, 2)
-	vmovsd			%xmm9, 56(%r11, %r14, 2)
-	vmovsd			%xmm10, 88(%r11, %r14, 2)
-	vmovsd			%xmm11, 120(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11, %r14, 2), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 24(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	//
-//	vpermpd			$0x00, %ymm0, %ymm13
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	//
-	vpermpd			$0x55, %ymm0, %ymm13
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0xff, %ymm0, %ymm13
-	vmovapd			96(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	//
-	vpermpd			$0x00, %ymm1, %ymm13
-	vmovapd			128(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			128(%r13, %r15, 1), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x55, %ymm1, %ymm13
-	vmovapd			160(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			160(%r13, %r15, 1), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vmovapd			192(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			192(%r13, %r15, 1), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0xff, %ymm1, %ymm13
-	vmovapd			224(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			224(%r13, %r15, 1), %ymm14
-//	vblendpd		$0xf, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	//
-	vpermpd			$0x00, %ymm2, %ymm13
-	vmovapd			256(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			256(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			256(%r13, %r15, 2), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm13, %ymm10
-	//
-	vpermpd			$0x55, %ymm2, %ymm13
-	vmovapd			288(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			288(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			288(%r13, %r15, 2), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm10
-	//
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vmovapd			320(%r13), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vmovapd			320(%r13, %r15, 1), %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm11
-	vmovapd			320(%r13, %r15, 2), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vfmadd231pd		%ymm14, %ymm13, %ymm10
-	//
-	vbroadcastsd	376(%r13, %r15, 2), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm11, %ymm11
-	vmulpd			%ymm14, %ymm10, %ymm10
-//	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			352(%r13, %r15, 2), %ymm0
-	vblendpd		$0x7, %ymm10, %ymm0, %ymm10
-	vmovapd			%ymm15, 352(%r13)
-	vmovapd			%ymm11, 352(%r13, %r15, 1)
-	vmovapd			%ymm10, 352(%r13, %r15, 2)
-
-102:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgelqf_dlarft12_12_lib4, .-kernel_dgelqf_dlarft12_12_lib4
-#endif
-
-
-
-
-
-//                                   1      2           3        4           5
-// void kernel_dgelqf_dlarft4_12_lib4(int n, double *pD, int sdd, double *dD, double *pT)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgelqf_dlarft4_12_lib4
-	.type kernel_dgelqf_dlarft4_12_lib4, @function
-kernel_dgelqf_dlarft4_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgelqf_dlarft4_12_lib4
-_kernel_dgelqf_dlarft4_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgelqf_dlarft4_12_lib4
-	.def kernel_dgelqf_dlarft4_12_lib4; .scl 2; .type 32; .endef
-kernel_dgelqf_dlarft4_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero T
-
-	movq	ARG5, %r10 // T
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm15, 0(%r10)
-	vmovapd			%ymm15, 32(%r10)
-	vmovapd			%ymm15, 64(%r10)
-	vmovapd			%ymm15, 96(%r10)
-
-	// first column
-
-	movq	ARG2, %r11 // D
-	movq	ARG3, %r14 // sdd
-	sall	$5, %r14d
-	movq	ARG4, %r12 // dD
-	movq	ARG5, %r13 // T
-
-	vxorpd			%xmm15, %xmm15, %xmm15
-	movq	ARG1, %r10 // n
-	subl	$1, %r10d
-	addq	$32, %r11
-100:
-	vmovsd			0(%r11), %xmm14
-	vfmadd231sd		%xmm14, %xmm14, %xmm15
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		100b
-
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		101f
-	vmovsd			%xmm14, 0(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			0(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 0(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 0(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r14, 1), %ymm1
-	vmovapd			0(%r11, %r14, 2), %ymm2
-	vbroadcastsd	32(%r11), %ymm8
-	vbroadcastsd	64(%r11), %ymm9
-	vbroadcastsd	96(%r11), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm8, 32(%r11)
-	vmovsd			%xmm9, 64(%r11)
-	vmovsd			%xmm10, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11), %ymm8
-	vbroadcastsd	32(%r11), %ymm9
-	vbroadcastsd	64(%r11), %ymm10
-	vbroadcastsd	96(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 0(%r11)
-	vmovsd			%xmm9, 32(%r11)
-	vmovsd			%xmm10, 64(%r11)
-	vmovsd			%xmm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm15, %ymm2, %ymm2
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// second column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 8(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			40(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 8(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 40(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vmovapd			32(%r11, %r14, 1), %ymm1
-	vmovapd			32(%r11, %r14, 2), %ymm2
-	vbroadcastsd	72(%r11), %ymm9
-	vbroadcastsd	104(%r11), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm9, 72(%r11)
-	vmovsd			%xmm10, 104(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11), %ymm8
-	vbroadcastsd	40(%r11), %ymm9
-	vbroadcastsd	72(%r11), %ymm10
-	vbroadcastsd	104(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 8(%r11)
-	vmovsd			%xmm9, 40(%r11)
-	vmovsd			%xmm10, 72(%r11)
-	vmovsd			%xmm11, 104(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 8(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC02(%rip), %ymm12
-#else
-	vmovapd			LC02(%rip), %ymm12
-#endif
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmulpd			%ymm15, %ymm2, %ymm2
-	vmovsd			%xmm0, 32(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x3, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	40(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// third column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 16(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			80(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 16(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 80(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vmovapd			64(%r11, %r14, 1), %ymm1
-	vmovapd			64(%r11, %r14, 2), %ymm2
-	vbroadcastsd	112(%r11), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm10, %ymm2
-	vmovsd			%xmm10, 112(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11), %ymm8
-	vbroadcastsd	48(%r11), %ymm9
-	vbroadcastsd	80(%r11), %ymm10
-	vbroadcastsd	112(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 16(%r11)
-	vmovsd			%xmm9, 48(%r11)
-	vmovsd			%xmm10, 80(%r11)
-	vmovsd			%xmm11, 112(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 16(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vbroadcastsd	80(%r13), %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			%xmm0, 64(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x7, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	48(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	80(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 24(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			120(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 24(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 120(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	vmovapd			96(%r11, %r14, 1), %ymm1
-	vmovapd			96(%r11, %r14, 2), %ymm2
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11), %ymm8
-	vbroadcastsd	56(%r11), %ymm9
-	vbroadcastsd	88(%r11), %ymm10
-	vbroadcastsd	120(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		32(%r11, %r14, 2), %ymm9, %ymm2
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		64(%r11, %r14, 2), %ymm10, %ymm2
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vfmadd231pd		96(%r11, %r14, 2), %ymm11, %ymm2
-	vmovsd			%xmm8, 24(%r11)
-	vmovsd			%xmm9, 56(%r11)
-	vmovsd			%xmm10, 88(%r11)
-	vmovsd			%xmm11, 120(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		0(%r11, %r14, 2), %ymm8, %ymm2
-	vmovsd			%xmm8, 24(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-
-	vbroadcastsd	120(%r13), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmulpd			%ymm14, %ymm2, %ymm2
-	vmovapd			96(%r13), %ymm0
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vmovapd			%ymm0, 96(%r13)
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vaddpd			%ymm2, %ymm10, %ymm10
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vmovapd			32(%r11, %r14, 2), %ymm10
-	vbroadcastsd	56(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	vmovapd			%ymm10, 32(%r11, %r14, 2)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vmovapd			64(%r11, %r14, 2), %ymm10
-	vbroadcastsd	88(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	vmovapd			%ymm10, 64(%r11, %r14, 2)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vmovapd			96(%r11, %r14, 2), %ymm10
-	vbroadcastsd	120(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	vmovapd			%ymm10, 96(%r11, %r14, 2)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vmovapd			0(%r11, %r14, 2), %ymm10
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm2, %ymm14, %ymm10
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	vmovapd			%ymm10, 0(%r11, %r14, 2)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-
-102:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgelqf_dlarft4_12_lib4, .-kernel_dgelqf_dlarft4_12_lib4
-#endif
-
-
-
-
-
-//                                  1      2           3        4           5
-// void kernel_dgelqf_dlarft4_8_lib4(int n, double *pD, int sdd, double *dD, double *pT)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgelqf_dlarft4_8_lib4
-	.type kernel_dgelqf_dlarft4_8_lib4, @function
-kernel_dgelqf_dlarft4_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgelqf_dlarft4_8_lib4
-_kernel_dgelqf_dlarft4_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgelqf_dlarft4_8_lib4
-	.def kernel_dgelqf_dlarft4_8_lib4; .scl 2; .type 32; .endef
-kernel_dgelqf_dlarft4_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero T
-
-	movq	ARG5, %r10 // T
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm15, 0(%r10)
-	vmovapd			%ymm15, 32(%r10)
-	vmovapd			%ymm15, 64(%r10)
-	vmovapd			%ymm15, 96(%r10)
-
-	// first column
-
-	movq	ARG2, %r11 // D
-	movq	ARG3, %r14 // sdd
-	sall	$5, %r14d
-	movq	ARG4, %r12 // dD
-	movq	ARG5, %r13 // T
-
-	vxorpd			%xmm15, %xmm15, %xmm15
-	movq	ARG1, %r10 // n
-	subl	$1, %r10d
-	addq	$32, %r11
-100:
-	vmovsd			0(%r11), %xmm14
-	vfmadd231sd		%xmm14, %xmm14, %xmm15
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		100b
-
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		101f
-	vmovsd			%xmm14, 0(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			0(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 0(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 0(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r14, 1), %ymm1
-	vbroadcastsd	32(%r11), %ymm8
-	vbroadcastsd	64(%r11), %ymm9
-	vbroadcastsd	96(%r11), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vmovsd			%xmm8, 32(%r11)
-	vmovsd			%xmm9, 64(%r11)
-	vmovsd			%xmm10, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11), %ymm8
-	vbroadcastsd	32(%r11), %ymm9
-	vbroadcastsd	64(%r11), %ymm10
-	vbroadcastsd	96(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vmovsd			%xmm8, 0(%r11)
-	vmovsd			%xmm9, 32(%r11)
-	vmovsd			%xmm10, 64(%r11)
-	vmovsd			%xmm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vmovsd			%xmm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// second column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 8(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			40(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 8(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 40(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vmovapd			32(%r11, %r14, 1), %ymm1
-	vbroadcastsd	72(%r11), %ymm9
-	vbroadcastsd	104(%r11), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vmovsd			%xmm9, 72(%r11)
-	vmovsd			%xmm10, 104(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11), %ymm8
-	vbroadcastsd	40(%r11), %ymm9
-	vbroadcastsd	72(%r11), %ymm10
-	vbroadcastsd	104(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vmovsd			%xmm8, 8(%r11)
-	vmovsd			%xmm9, 40(%r11)
-	vmovsd			%xmm10, 72(%r11)
-	vmovsd			%xmm11, 104(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vmovsd			%xmm8, 8(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC02(%rip), %ymm12
-#else
-	vmovapd			LC02(%rip), %ymm12
-#endif
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmovsd			%xmm0, 32(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x3, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmulpd			%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	40(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// third column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 16(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			80(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 16(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 80(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vmovapd			64(%r11, %r14, 1), %ymm1
-	vbroadcastsd	112(%r11), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm10, %ymm1
-	vmovsd			%xmm10, 112(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11), %ymm8
-	vbroadcastsd	48(%r11), %ymm9
-	vbroadcastsd	80(%r11), %ymm10
-	vbroadcastsd	112(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vmovsd			%xmm8, 16(%r11)
-	vmovsd			%xmm9, 48(%r11)
-	vmovsd			%xmm10, 80(%r11)
-	vmovsd			%xmm11, 112(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vmovsd			%xmm8, 16(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vbroadcastsd	80(%r13), %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmovapd			%xmm0, 64(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x7, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	//
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	48(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 32(%r11)
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	80(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 64(%r11)
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 96(%r11)
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 24(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			120(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 24(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 120(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	vmovapd			96(%r11, %r14, 1), %ymm1
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11), %ymm8
-	vbroadcastsd	56(%r11), %ymm9
-	vbroadcastsd	88(%r11), %ymm10
-	vbroadcastsd	120(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		32(%r11, %r14, 1), %ymm9, %ymm1
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		64(%r11, %r14, 1), %ymm10, %ymm1
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vfmadd231pd		96(%r11, %r14, 1), %ymm11, %ymm1
-	vmovsd			%xmm8, 24(%r11)
-	vmovsd			%xmm9, 56(%r11)
-	vmovsd			%xmm10, 88(%r11)
-	vmovsd			%xmm11, 120(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		0(%r11, %r14, 1), %ymm8, %ymm1
-	vmovsd			%xmm8, 24(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm15
-
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm15
-
-	vbroadcastsd	120(%r13), %ymm14
-	vmulpd			%ymm14, %ymm15, %ymm15
-	vmulpd			%ymm14, %ymm1, %ymm1
-	vmovapd			96(%r13), %ymm0
-	vblendpd		$0x7, %ymm15, %ymm0, %ymm0
-	vmovapd			%ymm0, 96(%r13)
-
-	movq	ARG2, %r11 // D
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vaddpd			%ymm1, %ymm9, %ymm9
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	//
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	//
-	vmovapd			32(%r11, %r14, 1), %ymm9
-	vbroadcastsd	56(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm9, 32(%r11, %r14, 1)
-	//
-	vmovapd			64(%r11, %r14, 1), %ymm9
-	vbroadcastsd	88(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm9, 64(%r11, %r14, 1)
-	//
-	vmovapd			96(%r11, %r14, 1), %ymm9
-	vbroadcastsd	120(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm9, 96(%r11, %r14, 1)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11, %r14, 1), %ymm9
-	vbroadcastsd	24(%r11), %ymm14
-	vfmadd231pd		%ymm1, %ymm14, %ymm9
-	vmovapd			%ymm9, 0(%r11, %r14, 1)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-
-102:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgelqf_dlarft4_8_lib4, .-kernel_dgelqf_dlarft4_8_lib4
-#endif
-
-
-
-
-
-//                                  1      2           3           4
-// void kernel_dgelqf_dlarft4_4_lib4(int n, double *pD, double *dD, double *pT)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgelqf_dlarft4_4_lib4
-	.type kernel_dgelqf_dlarft4_4_lib4, @function
-kernel_dgelqf_dlarft4_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgelqf_dlarft4_4_lib4
-_kernel_dgelqf_dlarft4_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgelqf_dlarft4_4_lib4
-	.def kernel_dgelqf_dlarft4_4_lib4; .scl 2; .type 32; .endef
-kernel_dgelqf_dlarft4_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero T
-
-	movq	ARG4, %r10 // T
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm15, 0(%r10)
-	vmovapd			%ymm15, 32(%r10)
-	vmovapd			%ymm15, 64(%r10)
-	vmovapd			%ymm15, 96(%r10)
-
-	// first column
-
-	movq	ARG2, %r11 // D
-	movq	ARG3, %r12 // dD
-	movq	ARG4, %r13 // T
-
-	vxorpd			%xmm15, %xmm15, %xmm15
-	movq	ARG1, %r10 // n
-	subl	$1, %r10d
-	addq	$32, %r11
-100:
-	vmovsd			0(%r11), %xmm14
-	vfmadd231sd		%xmm14, %xmm14, %xmm15
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		100b
-
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		101f
-	vmovsd			%xmm14, 0(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			0(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 0(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 0(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 0(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			0(%r11), %ymm0
-	vbroadcastsd	32(%r11), %ymm8
-	vbroadcastsd	64(%r11), %ymm9
-	vbroadcastsd	96(%r11), %ymm10
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		32(%r11), %ymm8, %ymm0
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vmovsd			%xmm8, 32(%r11)
-	vmovsd			%xmm9, 64(%r11)
-	vmovsd			%xmm10, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	0(%r11), %ymm8
-	vbroadcastsd	32(%r11), %ymm9
-	vbroadcastsd	64(%r11), %ymm10
-	vbroadcastsd	96(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vmovsd			%xmm8, 0(%r11)
-	vmovsd			%xmm9, 32(%r11)
-	vmovsd			%xmm10, 64(%r11)
-	vmovsd			%xmm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	0(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vmovsd			%xmm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vbroadcastsd	0(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x1, %ymm15, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	vmovapd			0(%r11), %ymm8
-	vmovapd			32(%r11), %ymm9
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vaddpd			%ymm0, %ymm8, %ymm8
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm9
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vmulpd			%ymm10, %ymm10, %ymm15
-	vfmadd231pd		%ymm11, %ymm11, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 32(%r11)
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			32(%r11), %ymm9
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vbroadcastsd	32(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm9
-	vbroadcastsd	64(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm10
-	vbroadcastsd	96(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vfmadd231pd		%ymm11, %ymm11, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 32(%r11)
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0x55, %ymm15, %ymm15  // beta
-
-	// second column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 8(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			40(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 40(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 8(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 40(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			32(%r11), %ymm0
-	vbroadcastsd	72(%r11), %ymm9
-	vbroadcastsd	104(%r11), %ymm10
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		64(%r11), %ymm9, %ymm0
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vmovsd			%xmm9, 72(%r11)
-	vmovsd			%xmm10, 104(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	8(%r11), %ymm8
-	vbroadcastsd	40(%r11), %ymm9
-	vbroadcastsd	72(%r11), %ymm10
-	vbroadcastsd	104(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vmovsd			%xmm8, 8(%r11)
-	vmovsd			%xmm9, 40(%r11)
-	vmovsd			%xmm10, 72(%r11)
-	vmovsd			%xmm11, 104(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	8(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vmovsd			%xmm8, 8(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC02(%rip), %ymm12
-#else
-	vmovapd			LC02(%rip), %ymm12
-#endif
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm0
-	vbroadcastsd	40(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmovsd			%xmm0, 32(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x3, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	vmovapd			32(%r11), %ymm9
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vaddpd			%ymm0, %ymm9, %ymm9
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vmulpd			%ymm11, %ymm11, %ymm15
-	vmovapd			%ymm9, 32(%r11)
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			32(%r11), %ymm9
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vbroadcastsd	40(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm9
-	vbroadcastsd	72(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm10
-	vbroadcastsd	104(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vfmadd231pd		%ymm11, %ymm11, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 32(%r11)
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	8(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xaa, %ymm15, %ymm15  // beta
-
-	// third column
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 16(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			80(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 80(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 16(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 80(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			64(%r11), %ymm0
-	vbroadcastsd	112(%r11), %ymm10
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vfmadd231pd		96(%r11), %ymm10, %ymm0
-	vmovsd			%xmm10, 112(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	16(%r11), %ymm8
-	vbroadcastsd	48(%r11), %ymm9
-	vbroadcastsd	80(%r11), %ymm10
-	vbroadcastsd	112(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vmovsd			%xmm8, 16(%r11)
-	vmovsd			%xmm9, 48(%r11)
-	vmovsd			%xmm10, 80(%r11)
-	vmovsd			%xmm11, 112(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	16(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vmovsd			%xmm8, 16(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm1
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm1
-	vblendpd		$0x7, %ymm1, %ymm0, %ymm0
-	vbroadcastsd	80(%r13), %ymm15
-	vmulpd			%ymm15, %ymm0, %ymm0
-	vmovapd			%xmm0, 64(%r13)
-
-	vxorpd			%ymm12, %ymm12, %ymm12
-	vblendpd		$0x7, %ymm12, %ymm0, %ymm0
-
-	movq	ARG2, %r11 // D
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vaddpd			%ymm0, %ymm10, %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		110f
-106:
-	vmovapd			0(%r11), %ymm8
-	vmovapd			32(%r11), %ymm9
-	vmovapd			64(%r11), %ymm10
-	vmovapd			96(%r11), %ymm11
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vbroadcastsd	48(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm9
-	vbroadcastsd	80(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm10
-	vbroadcastsd	112(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm11
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vfmadd231pd		%ymm9, %ymm9, %ymm15
-	vfmadd231pd		%ymm10, %ymm10, %ymm15
-	vfmadd231pd		%ymm11, %ymm11, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	vmovapd			%ymm9, 32(%r11)
-	vmovapd			%ymm10, 64(%r11)
-	vmovapd			%ymm11, 96(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		106b
-110:
-	cmpl	$0, %r10d
-	jle		107f
-108:
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	16(%r11), %ymm14
-	vfmadd231pd		%ymm0, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm8, %ymm15
-	vmovapd			%ymm8, 0(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		108b
-107:
-	vpermpd	$0xff, %ymm15, %ymm15  // beta
-
-102:
-	vxorpd			%xmm14, %xmm14, %xmm14
-	vucomisd		%xmm14, %xmm15
-	jne		101f
-//	jp		111f
-	vmovsd			%xmm14, 24(%r12)
-	jmp		102f
-
-101:
-	movq	ARG2, %r11 // D
-	vmovsd			120(%r11), %xmm14 // alpha
-	vfmadd231sd		%xmm14, %xmm14, %xmm15 // beta
-	vsqrtsd			%xmm15, %xmm15, %xmm15 // beta
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC00(%rip), %xmm13 // mask
-#else
-	vmovsd			LC00(%rip), %xmm13 // mask
-#endif
-	vandpd			%xmm13, %xmm14, %xmm12
-	vxorpd			%xmm13, %xmm12, %xmm12
-	vxorpd			%xmm12, %xmm15, %xmm15 // beta
-	vmovsd			%xmm15, 120(%r11) // pD[0+ps*0]
-	vsubsd			%xmm14, %xmm15, %xmm14 // beta-alpha
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd			.LC01(%rip), %xmm12
-#else
-	vmovapd			LC01(%rip), %xmm12
-#endif
-	vmovsd			%xmm14, %xmm12, %xmm12
-	vmovddup		%xmm14, %xmm14
-	vmovsd			%xmm15, %xmm14, %xmm14
-	vdivpd			%xmm14, %xmm12, %xmm14
-	vmovsd			%xmm14, 24(%r12) // dD[0]
-	vxorpd			%xmm13, %xmm14, %xmm12
-	vmovsd			%xmm12, 120(%r13) // pT[0+ps*0]
-
-	vpermpd			$0x55, %ymm14, %ymm15 // tmp
-
-	vmovapd			96(%r11), %ymm0
-	movq	ARG1, %r10 // n
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jle		109f
-103:
-	vbroadcastsd	24(%r11), %ymm8
-	vbroadcastsd	56(%r11), %ymm9
-	vbroadcastsd	88(%r11), %ymm10
-	vbroadcastsd	120(%r11), %ymm11
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vmulpd			%ymm15, %ymm9, %ymm9
-	vmulpd			%ymm15, %ymm10, %ymm10
-	vmulpd			%ymm15, %ymm11, %ymm11
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vfmadd231pd		32(%r11), %ymm9, %ymm0
-	vfmadd231pd		64(%r11), %ymm10, %ymm0
-	vfmadd231pd		96(%r11), %ymm11, %ymm0
-	vmovsd			%xmm8, 24(%r11)
-	vmovsd			%xmm9, 56(%r11)
-	vmovsd			%xmm10, 88(%r11)
-	vmovsd			%xmm11, 120(%r11)
-	subl	$4, %r10d
-	addq	$128, %r11
-	cmpl	$3, %r10d
-	jg		103b
-109:
-	cmpl	$0, %r10d
-	jle		104f
-105:
-	vbroadcastsd	24(%r11), %ymm8
-	vmulpd			%ymm15, %ymm8, %ymm8
-	vfmadd231pd		0(%r11), %ymm8, %ymm0
-	vmovsd			%xmm8, 24(%r11)
-	subl	$1, %r10d
-	addq	$32, %r11
-	cmpl	$0, %r10d
-	jg		105b
-104:
-
-	vxorpd			%xmm12, %xmm12, %xmm12
-
-	vmovapd			0(%r13), %ymm14
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm14
-	vmulpd			%ymm14, %ymm0, %ymm1
-
-	vmovapd			32(%r13), %ymm14
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm1
-
-	vmovapd			64(%r13), %ymm14
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfmadd231pd		%ymm14, %ymm13, %ymm1
-
-	vbroadcastsd	120(%r13), %ymm15
-	vmulpd			%ymm15, %ymm1, %ymm1
-	vmovapd			96(%r13), %ymm0
-	vblendpd		$0x7, %ymm1, %ymm0, %ymm0
-	vmovapd			%ymm0, 96(%r13)
-
-102:
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgelqf_dlarft4_4_lib4, .-kernel_dgelqf_dlarft4_4_lib4
-#endif
-
-
-
-
-
-//                            1           2
-// void kernel_dlarfb_12_lib4(double *dK, double *pT)
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb_12_lib4
-	.type kernel_dlarfb_12_lib4, @function
-kernel_dlarfb_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb_12_lib4
-_kernel_dlarfb_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb_12_lib4
-	.def kernel_dlarfb_12_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	movq	ARG1, %r10 // K
-	movq	ARG2, %r11 // T
-	movq	$384, %r12 // sdt hard-coded to 384 !!!!!!!!!!!!!!!!!!!!!!!!!
-
-	//
-	vmovapd			352(%r10), %ymm12
-	vbroadcastsd	376(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm11
-	//
-	vmovapd			320(%r10), %ymm12
-	vbroadcastsd	368(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	336(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm10
-	//
-	vmovapd			288(%r10), %ymm12
-	vbroadcastsd	360(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	328(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	296(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm9
-	//
-	vmovapd			256(%r10), %ymm12
-	vbroadcastsd	352(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	320(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	288(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	256(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm8
-	//
-	vmovapd			224(%r10), %ymm12
-	vbroadcastsd	376(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	344(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	312(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	280(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	248(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm7
-	//
-	vmovapd			192(%r10), %ymm12
-	vbroadcastsd	368(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	336(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	304(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	272(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	240(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	208(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm6
-	//
-	vmovapd			160(%r10), %ymm12
-	vbroadcastsd	360(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	328(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	296(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	264(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	232(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	200(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	168(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm5
-	//
-	vmovapd			128(%r10), %ymm12
-	vbroadcastsd	352(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	320(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	288(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	256(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	224(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	192(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	160(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	128(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm4
-	//
-	vmovapd			96(%r10), %ymm12
-	vbroadcastsd	376(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	344(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	312(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	280(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	248(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	216(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	184(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	152(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	120(%r11), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm3
-	//
-	vmovapd			64(%r10), %ymm12
-	vbroadcastsd	368(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	336(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	304(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	272(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	240(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	208(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	176(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	144(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	112(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	80(%r11), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm2
-	//
-	vmovapd			32(%r10), %ymm12
-	vbroadcastsd	360(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	328(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	296(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	264(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	232(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	200(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	168(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	136(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	104(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	40(%r11), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm1
-	//
-	vmovapd			0(%r10), %ymm12
-	vbroadcastsd	352(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	vbroadcastsd	320(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	288(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	256(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	224(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	192(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	160(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	128(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	96(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm12, %ymm13, %ymm0
-
-	vmovapd			%ymm11, 352(%r10)
-	vmovapd			%ymm10, 320(%r10)
-	vmovapd			%ymm9, 288(%r10)
-	vmovapd			%ymm8, 256(%r10)
-	vmovapd			%ymm7, 224(%r10)
-	vmovapd			%ymm6, 192(%r10)
-	vmovapd			%ymm5, 160(%r10)
-	vmovapd			%ymm4, 128(%r10)
-	vmovapd			%ymm3, 96(%r10)
-	vmovapd			%ymm2, 64(%r10)
-	vmovapd			%ymm1, 32(%r10)
-	vmovapd			%ymm0, 0(%r10)
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb_12_lib4, .-kernel_dlarfb_12_lib4
-#endif
-
-
-
-
-
-// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 100...0 100...0 100...0 100...0 }
-#elif defined(OS_MAC)
-LC00: // { 100...0 100...0 100...0 100...0 }
-	.align 5
-#endif
-	.long	0x00000000
-	.long	0x80000000
-	.long	0x00000000
-	.long	0x80000000
-	.long	0x00000000
-	.long	0x80000000
-	.long	0x00000000
-	.long	0x80000000
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01:
-#elif defined(OS_MAC)
-LC01:
-	.align 5
-#endif
-	.double	-1.0
-	.double	-1.0
-	.double	-1.0
-	.double	-1.0
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02:
-#elif defined(OS_MAC)
-LC02:
-	.align 5
-#endif
-	.double	1.0
-	.double	1.0
-	.double	1.0
-	.double	1.0
-
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4_bkp.c b/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4_bkp.c
deleted file mode 100644
index 05c2d2e..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgelqf_4_lib4_bkp.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-#include <stdio.h>
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-#include "../../include/blasfeo_d_kernel.h"
-
-
-
-// assume n>=4
-void kernel_dgelqf_dlarft_4_lib4(int n, double *pD, double *dD, double *pT)
-	{
-	return;
-	int ii, jj, ll;
-	double alpha, beta, tmp, w0, w1, w2, w3;
-	const int ps = 4;
-	// zero tau matrix
-	for(ii=0; ii<16; ii++)
-		pT[ii] = 0.0;
-	// first column
-	beta = 0.0;
-	for(ii=1; ii<n; ii++)
-		{
-		tmp = pD[0+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		dD[0] = 0.0;
-		goto col2;
-		}
-	alpha = pD[0+ps*0];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[0] = (beta-alpha) / beta;
-	pT[0+ps*0] = - dD[0];
-	tmp = -1.0 / (beta-alpha);
-	//
-	pD[0+ps*0] = beta;
-	w1 = pD[1+ps*0];
-	w2 = pD[2+ps*0];
-	w3 = pD[3+ps*0];
-	//
-	pD[0+ps*1] *= tmp;
-	w1 += pD[1+ps*1] * pD[0+ps*1];
-	w2 += pD[2+ps*1] * pD[0+ps*1];
-	w3 += pD[3+ps*1] * pD[0+ps*1];
-	//
-	pD[0+ps*2] *= tmp;
-	w1 += pD[1+ps*2] * pD[0+ps*2];
-	w2 += pD[2+ps*2] * pD[0+ps*2];
-	w3 += pD[3+ps*2] * pD[0+ps*2];
-	//
-	pD[0+ps*3] *= tmp;
-	w1 += pD[1+ps*3] * pD[0+ps*3];
-	w2 += pD[2+ps*3] * pD[0+ps*3];
-	w3 += pD[3+ps*3] * pD[0+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[0+ps*ii] *= tmp;
-		w1 += pD[1+ps*ii] * pD[0+ps*ii];
-		w2 += pD[2+ps*ii] * pD[0+ps*ii];
-		w3 += pD[3+ps*ii] * pD[0+ps*ii];
-		}
-	//
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	//
-	pD[1+ps*0] += w1;
-	pD[2+ps*0] += w2;
-	pD[3+ps*0] += w3;
-	//
-	pD[1+ps*1] += w1 * pD[0+ps*1];
-	pD[2+ps*1] += w2 * pD[0+ps*1];
-	pD[3+ps*1] += w3 * pD[0+ps*1];
-	//
-	pD[1+ps*2] += w1 * pD[0+ps*2];
-	pD[2+ps*2] += w2 * pD[0+ps*2];
-	pD[3+ps*2] += w3 * pD[0+ps*2];
-	beta = pD[1+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] += w1 * pD[0+ps*3];
-	pD[2+ps*3] += w2 * pD[0+ps*3];
-	pD[3+ps*3] += w3 * pD[0+ps*3];
-	beta += pD[1+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] += w1 * pD[0+ps*ii];
-		pD[2+ps*ii] += w2 * pD[0+ps*ii];
-		pD[3+ps*ii] += w3 * pD[0+ps*ii];
-		beta += pD[1+ps*ii] * pD[1+ps*ii];
-		}
-	// second column
-col2:
-	if(beta==0.0)
-		{
-		dD[1] = 0.0;
-		tmp = 0.0;
-		goto col3;
-		}
-	alpha = pD[1+ps*1];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[1] = (beta-alpha) / beta;
-	pT[1+ps*1] = - dD[1];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[1+ps*1] = beta;
-	w0 = pD[0+ps*1]; //
-	w2 = pD[2+ps*1];
-	w3 = pD[3+ps*1];
-	//
-	pD[1+ps*2] *= tmp;
-	w0 += pD[0+ps*2] * pD[1+ps*2]; //
-	w2 += pD[2+ps*2] * pD[1+ps*2];
-	w3 += pD[3+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[1+ps*3]; //
-	w2 += pD[2+ps*3] * pD[1+ps*3];
-	w3 += pD[3+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[1+ps*ii]; //
-		w2 += pD[2+ps*ii] * pD[1+ps*ii];
-		w3 += pD[3+ps*ii] * pD[1+ps*ii];
-		}
-	//
-	pT[0+ps*1] = - dD[1] * (w0*pT[0+ps*0]);
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	//
-	pD[2+ps*1] += w2;
-	pD[3+ps*1] += w3;
-	//
-	pD[2+ps*2] += w2 * pD[1+ps*2];
-	pD[3+ps*2] += w3 * pD[1+ps*2];
-	//
-	pD[2+ps*3] += w2 * pD[1+ps*3];
-	pD[3+ps*3] += w3 * pD[1+ps*3];
-	beta = pD[2+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] += w2 * pD[1+ps*ii];
-		pD[3+ps*ii] += w3 * pD[1+ps*ii];
-		beta += pD[2+ps*ii] * pD[2+ps*ii];
-		}
-	// third column
-col3:
-	if(beta==0.0)
-		{
-		dD[2] = 0.0;
-		tmp = 0.0;
-		goto col4;
-		}
-	alpha = pD[2+ps*2];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[2] = (beta-alpha) / beta;
-	pT[2+ps*2] = - dD[2];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[2+ps*2] = beta;
-	w0 = pD[0+ps*2];
-	w1 = pD[1+ps*2];
-	w3 = pD[3+ps*2];
-	//
-	pD[2+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[2+ps*3];
-	w1 += pD[1+ps*3] * pD[2+ps*3];
-	w3 += pD[3+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[2+ps*ii];
-		w1 += pD[1+ps*ii] * pD[2+ps*ii];
-		w3 += pD[3+ps*ii] * pD[2+ps*ii];
-		}
-	//
-	pT[0+ps*2] = - dD[2] * (w0*pT[0+ps*0] + w1*pT[0+ps*1]);
-	pT[1+ps*2] = - dD[2] * (w1*pT[1+ps*1]);
-	w3 = - dD[2] * w3;
-//printf("\n%f %f %f\n", pT[0+ps*2], pT[1+ps*2], w3);
-//return;
-	//
-	pD[3+ps*2] += w3;
-	//
-	pD[3+ps*3] += w3 * pD[2+ps*3];
-	//
-	beta = 0.0;
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] += w3 * pD[2+ps*ii];
-		beta += pD[3+ps*ii] * pD[3+ps*ii];
-		}
-	// fourth column
-col4:
-	if(beta==0.0)
-		{
-		dD[3] = 0.0;
-		tmp = 0.0;
-		return;
-		}
-	alpha = pD[3+ps*3];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[3] = (beta-alpha) / beta;
-	pT[3+ps*3] = - dD[3];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[3+ps*3] = beta;
-	w0 =  pD[0+ps*3];
-	w1 =  pD[1+ps*3];
-	w2 =  pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[3+ps*ii];
-		w1 += pD[1+ps*ii] * pD[3+ps*ii];
-		w2 += pD[2+ps*ii] * pD[3+ps*ii];
-		}
-	//
-	pT[0+ps*3] = - dD[3] * (w0*pT[0+ps*0] + w1*pT[0+ps*1] + w2*pT[0+ps*2]);
-	pT[1+ps*3] = - dD[3] * (w1*pT[1+ps*1] + w2*pT[1+ps*2]);
-	pT[2+ps*3] = - dD[3] * (w2*pT[2+ps*2]);
-	return;
-	}
-
-
-
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgemm_12x4_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgemm_12x4_lib4.S
deleted file mode 100644
index 766cb92..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgemm_12x4_lib4.S
+++ /dev/null
@@ -1,15536 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
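
The block above only selects how C-level arguments reach the kernels on each OS; as a rough orientation (an illustrative sketch, not taken from this file), a kernel with more than six parameters reads its trailing arguments through the ARG7+ stack slots once PROLOGUE has adjusted %rsp:

/* Hypothetical prototype; the name and parameters are illustrative only.
 * On System V (Linux/Mac) the first six arguments arrive in rdi, rsi, rdx,
 * rcx, r8, r9 (ARG1..ARG6) and the rest on the caller's stack (ARG7, ...);
 * on Win64 only the first four are in registers (rcx, rdx, r8, r9) and
 * xmm6-xmm15 are callee-saved, which is why the Windows PROLOGUE/EPILOGUE
 * also spill and restore the xmm registers. */
void kernel_example(int k, double *alpha, double *A, int sda,
                    double *B, double *beta, double *C, int sdc);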
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
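
As a reference point for the interface documented above, here is a minimal C sketch of what this inner kernel accumulates, assuming BLASFEO's 4-row panel-major ("lib4") layout in which element (i,j) of A sits at A[(i/4)*4*sda + j*4 + i%4]; the function name and the plain 12x4 accumulator array are illustrative only, since the real accumulator lives in ymm0..ymm11:

/* D[i][j] += sum_k A[i][k] * B[j][k]   ("nt": B is accessed transposed) */
static void ref_dgemm_add_nt_12x4_lib4(int kmax, const double *A, int sda,
                                        const double *B, double D[12][4])
{
	for (int k = 0; k < kmax; k++)
		for (int i = 0; i < 12; i++)        /* three 4-row panels of A */
			for (int j = 0; j < 4; j++)
				D[i][j] += A[(i / 4) * 4 * sda + k * 4 + i % 4]
				         * B[k * 4 + j];
}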
-
-
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_12x4_lib4, @function
-inner_kernel_dgemm_add_nt_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_12x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_12x4_lib4:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm13 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 0(%r11, %r12, 2), %ymm15 // A2[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	$128, %r13
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	$128, %r13
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm13 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 		0(%r11, %r12, 2), %ymm15 // A2[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	addq	$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	subl	$1, %r10d
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vmovapd 0(%r11), %ymm12 // A0[0]
-	vmovapd 0(%r13), %ymm15 // B[0]
-	vmovapd 0(%r11, %r12, 1), %ymm13 // A1[0]
-	vmovapd 0(%r11, %r12, 2), %ymm14 // A2[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	subl		$4, %r10d
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd		$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd		$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd		32(%r11), %ymm12 // A0[4]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd		32(%r11, %r12, 1), %ymm13 // A1[4]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		32(%r13), %ymm15 // B[4]
-	vmovapd		32(%r11, %r12, 2), %ymm14 // A2[4]
-
-	// unroll 1
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd 	64(%r11), %ymm12 // A0[8]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd 	64(%r11, %r12, 1), %ymm13 // A1[8]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd 	64(%r13), %ymm15 // B[8]
-	vmovapd 	64(%r11, %r12, 2), %ymm14 // A2[8]
-
-
-	// unroll 2
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd 	96(%r11), %ymm12 // A0[12]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd 	96(%r11, %r12, 1), %ymm13 // A1[12]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd 	96(%r13), %ymm15 // B[12]
-	vmovapd 	96(%r11, %r12, 2), %ymm14 // A2[12]
-
-
-	// unroll 3
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	addq		$128, %r11
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	addq		$128, %r13
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 $0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd 	0(%r11), %ymm12 // A0[0]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd 	0(%r11, %r12, 1), %ymm13 // A1[0]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd 	0(%r13), %ymm15 // B[0]
-	vmovapd 	0(%r11, %r12, 2), %ymm14 // A2[0]
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	subl		$4, %r10d
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd		$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd		$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd		32(%r11), %ymm12 // A0[4]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd		32(%r11, %r12, 1), %ymm13 // A1[4]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		32(%r13), %ymm15 // B[4]
-	vmovapd		32(%r11, %r12, 2), %ymm14 // A2[4]
-
-	// unroll 1
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd 	64(%r11), %ymm12 // A0[8]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd 	64(%r11, %r12, 1), %ymm13 // A1[8]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd 	64(%r13), %ymm15 // B[8]
-	vmovapd 	64(%r11, %r12, 2), %ymm14 // A2[8]
-
-
-	// unroll 2
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd 	96(%r11), %ymm12 // A0[12]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vmovapd 	96(%r11, %r12, 1), %ymm13 // A1[12]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd 	96(%r13), %ymm15 // B[12]
-	vmovapd 	96(%r11, %r12, 2), %ymm14 // A2[12]
-
-
-	// unroll 3
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	addq		$128, %r11
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	addq		$128, %r13
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 $0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-//	cmpl		$4, %r10d
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-//	vmovapd 	0(%r11), %ymm12 // A0[0]
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-//	vmovapd 	0(%r11, %r12, 1), %ymm13 // A1[0]
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-//	vmovapd 	0(%r13), %ymm15 // B[0]
-//	vmovapd 	0(%r11, %r12, 2), %ymm14 // A2[0]
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 	0(%r11), %ymm12 // A0[4]
-	vmovapd 	0(%r11, %r12, 1), %ymm13 // A1[4]
-	vmovapd 	0(%r13), %ymm15 // B[4]
-	vmovapd 	0(%r11, %r12, 2), %ymm14 // A2[4]
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	addq		$32, %r11
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-	addq		$32, %r13
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-
-	vperm2f128 	$0x1, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	subl		$1, %r10d
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-	vshufpd 	$0x5, %ymm15, %ymm15, %ymm15
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-
-
-	cmpl		$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_12x4_lib4, .-inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
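
This routine shares the register interface of the add kernel above; the only functional difference is the accumulation sign (vfnmadd231pd instead of vfmadd231pd). A sketch under the same lib4 layout assumptions, names illustrative:

static void ref_dgemm_sub_nt_12x4_lib4(int kmax, const double *A, int sda,
                                        const double *B, double D[12][4])
{
	for (int k = 0; k < kmax; k++)
		for (int i = 0; i < 12; i++)
			for (int j = 0; j < 4; j++)   /* note the minus: D -= A*B^T */
				D[i][j] -= A[(i / 4) * 4 * sda + k * 4 + i % 4]
				         * B[k * 4 + j];
}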
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_12x4_lib4, @function
-inner_kernel_dgemm_sub_nt_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_12x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_12x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm13 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 0(%r11, %r12, 2), %ymm15 // A2[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	$128, %r13
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	$128, %r13
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm13 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 		0(%r11, %r12, 2), %ymm15 // A2[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	addq	$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-	subl	$1, %r10d
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_12x4_lib4, .-inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
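
Relative to the nt kernels above, the nn variant walks B column-wise inside its 4-row panels: consecutive columns of B are 32 bytes apart within a panel, and the B pointer advances by 4*sdb*sizeof(double) (r14) once every four steps of k. A sketch under the same lib4 assumptions (name and plain accumulator array illustrative):

/* D[i][j] += sum_k A[i][k] * B[k][j], with B also stored panel-major */
static void ref_dgemm_add_nn_12x4_lib4(int kmax, const double *A, int sda,
                                        const double *B, int sdb,
                                        double D[12][4])
{
	for (int k = 0; k < kmax; k++)
		for (int i = 0; i < 12; i++)
			for (int j = 0; j < 4; j++)
				D[i][j] += A[(i / 4) * 4 * sda + k * 4 + i % 4]
				         * B[(k / 4) * 4 * sdb + j * 4 + k % 4];
}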
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_12x4_lib4, @function
-inner_kernel_dgemm_add_nn_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_12x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_12x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm13 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 0(%r11, %r12, 2), %ymm15 // A2[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	 0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	%r14, %r13
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	%r14, %r13
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A0
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm13 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 		0(%r11, %r12, 2), %ymm15 // A2[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_12x4_lib4, .-inner_kernel_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
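
As with the nt pair, the sub_nn kernel shares the interface and B traversal of add_nn and only flips the sign of the update; in the add_nn sketch above, the single statement that changes is (illustrative fragment only):

				D[i][j] -= A[(i / 4) * 4 * sda + k * 4 + i % 4]
				         * B[(k / 4) * 4 * sdb + j * 4 + k % 4];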
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nn_12x4_lib4, @function
-inner_kernel_dgemm_sub_nn_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nn_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nn_12x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nn_12x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm13 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 0(%r11, %r12, 2), %ymm15 // A2[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	 0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	%r14, %r13
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	subl	$4, %r10d
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vmovapd			96(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vmovapd			96(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	vmovapd			96(%r11, %r12, 2), %ymm15 // A1
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	120(%r13), %ymm12
-	addq	%r14, %r13
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A1
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A1
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm13 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm14 // A1[0]
-	vmovapd 		0(%r11, %r12, 2), %ymm15 // A2[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vfnmadd231pd	%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vfnmadd231pd	%ymm15, %ymm12, %ymm9
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vfnmadd231pd	%ymm15, %ymm12, %ymm10
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	vfnmadd231pd	%ymm15, %ymm12, %ymm11
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nn_12x4_lib4, .-inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
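
This kernel is the transposed shape of the 12x4 nn kernel above: a single 4-row panel of A against 12 columns of B, with the twelve accumulator columns spread across ymm0..ymm11. A sketch under the same lib4 assumptions (name illustrative):

static void ref_dgemm_add_nn_4x12_lib4(int kmax, const double *A,
                                        const double *B, int sdb,
                                        double D[4][12])
{
	for (int k = 0; k < kmax; k++)
		for (int i = 0; i < 4; i++)
			for (int j = 0; j < 12; j++)
				D[i][j] += A[k * 4 + i]
				         * B[(k / 4) * 4 * sdb + j * 4 + k % 4];
}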
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x12_lib4, @function
-inner_kernel_dgemm_add_nn_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x12_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x12_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-	prefetcht0	128(%r12, %r13, 2) // software prefetch
-	prefetcht0	192(%r12, %r13, 2) // software prefetch
-	prefetcht0	256(%r12, %r13, 2) // software prefetch
-	prefetcht0	320(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	vbroadcastsd	256(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm8
-	vbroadcastsd	288(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm9
-	vbroadcastsd	320(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm10
-	vbroadcastsd	352(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm11
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vbroadcastsd	264(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm8
-	vbroadcastsd	296(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm9
-	vbroadcastsd	328(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm10
-	vbroadcastsd	360(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm11
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	vbroadcastsd	272(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm8
-	vbroadcastsd	304(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm9
-	vbroadcastsd	336(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm10
-	vbroadcastsd	368(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm11
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vbroadcastsd	280(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm8
-	vbroadcastsd	312(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm9
-	vbroadcastsd	344(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm10
-	vbroadcastsd	376(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm11
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	vbroadcastsd	256(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm8
-	vbroadcastsd	288(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm9
-	vbroadcastsd	320(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm10
-	vbroadcastsd	352(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm11
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vbroadcastsd	264(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm8
-	vbroadcastsd	296(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm9
-	vbroadcastsd	328(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm10
-	vbroadcastsd	360(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm11
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	vbroadcastsd	272(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm8
-	vbroadcastsd	304(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm9
-	vbroadcastsd	336(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm10
-	vbroadcastsd	368(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm11
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vbroadcastsd	280(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm8
-	vbroadcastsd	312(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm9
-	vbroadcastsd	344(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm10
-	vbroadcastsd	376(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm11
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	vbroadcastsd	256(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm8
-	vbroadcastsd	288(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm9
-	vbroadcastsd	320(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm10
-	vbroadcastsd	352(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm11
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x12_lib4, .-inner_kernel_dgemm_add_nn_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- B
-// r12   <- C
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-// ymm4  <-
-// ymm5  <-
-// ymm6  <-
-// ymm7  <-
-// ymm8  <-
-// ymm9  <-
-// ymm10 <-
-// ymm11 <-
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- ?
-// r12   <- ?
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-// ymm4  <-
-// ymm5  <-
-// ymm6  <-
-// ymm7  <-
-// ymm8  <-
-// ymm9  <-
-// ymm10 <-
-// ymm11 <-
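
Unlike the gemm inner kernels, this gebp routine keeps a 12x4 block of A resident in ymm0..ymm11 and streams columns of B and C through memory, updating C in place; r10d counts the columns processed. A sketch under the same panel-major assumptions (A is passed as a plain array here purely for illustration):

/* C[0..11][jj] += A[0..11][0..3] * B[0..3][jj]   for jj = 0 .. kmax-1 */
static void ref_dgebp_add_nn_12x4_lib4(int kmax, const double A[12][4],
                                        const double *B, double *C, int sdc)
{
	for (int jj = 0; jj < kmax; jj++)          /* one column of B and C   */
		for (int i = 0; i < 12; i++)           /* three 4-row panels of C */
			for (int l = 0; l < 4; l++)
				C[(i / 4) * 4 * sdc + jj * 4 + i % 4]
				    += A[i][l] * B[jj * 4 + l];
}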
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_12x4_lib4, @function
-inner_kernel_dgebp_add_nn_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_12x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_12x4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vmovapd			0(%r12, %r13, 2), %ymm15
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	8(%r11), %ymm13
-	subl	$4, %r10d
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vfmadd231pd		%ymm11, %ymm13, %ymm15
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-	vmovapd			%ymm15, 0(%r12, %r13, 2)
-
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vmovapd			32(%r12, %r13, 2), %ymm15
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	40(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	48(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vbroadcastsd	56(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vfmadd231pd		%ymm11, %ymm13, %ymm15
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-	vmovapd			%ymm15, 32(%r12, %r13, 2)
-
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vmovapd			64(%r12, %r13, 2), %ymm15
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	80(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vbroadcastsd	88(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vfmadd231pd		%ymm11, %ymm13, %ymm15
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-	vmovapd			%ymm15, 64(%r12, %r13, 2)
-
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vmovapd			96(%r12, %r13, 2), %ymm15
-	vbroadcastsd	96(%r11), %ymm13
-	addq	$128, %r11
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	-24(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	-16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vbroadcastsd	-8(%r11), %ymm13
-	addq	$128, %r12
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vfmadd231pd		%ymm11, %ymm13, %ymm15
-	vmovapd			%ymm12, -32(%r12)
-	vmovapd			%ymm14, -32(%r12, %r13, 1)
-	vmovapd			%ymm15, -32(%r12, %r13, 2)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vmovapd			0(%r12, %r13, 2), %ymm15
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	8(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vfmadd231pd		%ymm11, %ymm13, %ymm15
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-	vmovapd			%ymm15, 0(%r12, %r13, 2)
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // cleanup loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_12x4_lib4, .-inner_kernel_dgebp_add_nn_12x4_lib4
-#endif
-#endif
-
-
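What the removed inner_kernel_dgebp_add_nn_12x4_lib4 computes, as a hedged C sketch: a fixed 12x4 block of A (the values the kernel keeps in ymm0..ymm11, one 4-row panel per group of four registers) is applied as a rank-4 update to k panel-columns of C, whose 12 rows occupy three consecutive 4-row panels with panel stride sdc. The name and the in-memory Ablock array form are assumptions made for illustration.

/* Illustrative reference only: C(12 x k) += Ablock(12 x 4) * B(4 x k).
 * Ablock[p][l][i] = row i, column l of the p-th 4-row panel (ymm[4*p + l]);
 * B column j is the 4 doubles at B + 4*j; C column j of panel p is the
 * 4 doubles at C + p*4*sdc + 4*j. */
static void ref_dgebp_add_nn_12x4_lib4(int k, const double Ablock[3][4][4],
                                       const double *B, double *C, int sdc)
{
	for (int j = 0; j < k; j++)
		for (int p = 0; p < 3; p++)
			for (int l = 0; l < 4; l++)
				for (int i = 0; i < 4; i++)
					C[p * 4 * sdc + 4 * j + i] += Ablock[p][l][i] * B[4 * j + l];
}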
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 32*sdb
-// r14   <- C
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <-
-// ymm5  <-
-// ymm6  <-
-// ymm7  <-
-// ymm8  <-
-// ymm9  <-
-// ymm10 <-
-// ymm11 <-
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A
-// r12   <- B+?
-// r13   <- 32*sdb
-// r14   <- C+?
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm4  <-
-// ymm5  <-
-// ymm6  <-
-// ymm7  <-
-// ymm8  <-
-// ymm9  <-
-// ymm10 <-
-// ymm11 <-
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_4x12_lib4, @function
-inner_kernel_dgebp_add_nn_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_4x12_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_4x12_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$11, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-	vmovapd	128(%r14), %ymm4
-	vmovapd	160(%r14), %ymm5
-	vmovapd	192(%r14), %ymm6
-	vmovapd	224(%r14), %ymm7
-	vmovapd	256(%r14), %ymm8
-	vmovapd	288(%r14), %ymm9
-	vmovapd	320(%r14), %ymm10
-	vmovapd	352(%r14), %ymm11
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	152(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-	vmovapd	%ymm4, 128(%r14)
-	vmovapd	%ymm5, 160(%r14)
-	vmovapd	%ymm6, 192(%r14)
-	vmovapd	%ymm7, 224(%r14)
-	vmovapd	%ymm8, 256(%r14)
-	vmovapd	%ymm9, 288(%r14)
-	vmovapd	%ymm10, 320(%r14)
-	vmovapd	%ymm11, 352(%r14)
-
-	addq	$384, %r12
-	addq	$384, %r14
-	subl	$12, %r10d
-
-	cmpl	$11, %r10d
-	jg		1b // main loop
-
-2:
-	cmpl	$3, %r10d
-	jle		2f // consider clean1-up loop
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	72(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	48(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	112(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	56(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	88(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-
-	addq	$128, %r12
-	addq	$128, %r14
-	subl	$4, %r10d
-
-	cmpl	$3, %r10d
-	jg		1b // cleanup loop
-
-2:
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-1:
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vbroadcastsd	0(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vbroadcastsd	8(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vbroadcastsd	16(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vbroadcastsd	24(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-
-	addq	$32, %r12
-	addq	$32, %r14
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		1b // cleanup loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_4x12_lib4, .-inner_kernel_dgebp_add_nn_4x12_lib4
-#endif
-#endif
-
-
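The 4x12 dgebp routine above is the mirror image of the 12x4 one: it streams k panel-columns of C against a fixed packed 4x12 block of A and a 12-row B stored in three 4-row panels with stride sdb. A hedged C sketch with a hypothetical name and signature:

/* Illustrative reference only: C(4 x k) += A(4 x 12) * B(12 x k).
 * A column s is 4 doubles at A + 4*s; B element (s, j) lives at
 * B + (s/4)*4*sdb + 4*j + s%4; C column j is 4 doubles at C + 4*j. */
static void ref_dgebp_add_nn_4x12_lib4(int k, const double *A,
                                       const double *B, int sdb, double *C)
{
	for (int j = 0; j < k; j++)
		for (int s = 0; s < 12; s++)
			for (int i = 0; i < 4; i++)
				C[4 * j + i] += A[4 * s + i]
				              * B[(s / 4) * 4 * sdb + 4 * j + s % 4];
}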
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_12x4_lib4, @function
-inner_edge_dgemm_add_nn_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_12x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %ebx
-	subl			%r15d, %ebx // 4-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,4-offsetB)
-
-	movl			%r15d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r13 // B+offsetB*sizeof(double)
-
-	movq			%r11, %rax // A1 <- A0
-	addq			%r12, %rax // A1 <- A0 + 4*sda*sizeof(double)
-
-	movq			%rax, %rbp // A2 <- A1
-	addq			%r12, %rbp // A2 <- A1 + 4*sda*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12 // A0[0]
-	vmovapd			0(%rax), %ymm14 // A1[0]
-	vmovapd			0(%rbp), %ymm15 // A2[0]
-	vbroadcastsd	0(%r13), %ymm13 // B[0]
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vfmadd231pd		%ymm14, %ymm13, %ymm4
-	vfmadd231pd		%ymm15, %ymm13, %ymm8
-	vbroadcastsd	32(%r13), %ymm13 // B[1]
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vfmadd231pd		%ymm14, %ymm13, %ymm5
-	vfmadd231pd		%ymm15, %ymm13, %ymm9
-	vbroadcastsd	64(%r13), %ymm13 // B[2]
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vfmadd231pd		%ymm14, %ymm13, %ymm6
-	vfmadd231pd		%ymm15, %ymm13, %ymm10
-	vbroadcastsd	96(%r13), %ymm13 // B[3]
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vfmadd231pd		%ymm14, %ymm13, %ymm7
-	vfmadd231pd		%ymm15, %ymm13, %ymm11
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$32, %rax // A1+1*bs*sizeof(double)
-	addq			$32, %rbp // A2+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_12x4_lib4, .-inner_edge_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
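The B-realignment done by the removed inner_edge_dgemm_add_nn_12x4_lib4 (and its 4x12 sibling below) reduces to a small amount of bookkeeping. A hedged C sketch, assuming B points at the start of the current 4-row panel and using a hypothetical helper name and in/out-parameter style:

/* Illustrative sketch: with B entering offB rows into a 4-row panel, only
 * kend = min(k, 4 - offB) rows are left in that panel; consume them, then
 * (if work remains) jump to the next panel, 4*sdb doubles further on. */
static void dgemm_nn_edge_b(int *k, const double **B, int sdb, int offB)
{
	if (offB == 0 || *k <= 0)
		return;                        /* already aligned, or nothing to do */

	int kend = 4 - offB < *k ? 4 - offB : *k;

	/* ... kend rank-1 updates using rows offB .. offB + kend - 1 ... */

	*k -= kend;
	if (*k > 0)
		*B += 4 * sdb;                 /* realign to the next 4-row panel of B */
}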
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x12_lib4, @function
-inner_edge_dgemm_add_nn_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x12_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x12_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x12_lib4, .-inner_edge_dgemm_add_nn_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_12x4_lib4, @function
-inner_edge_dtrmm_nt_ru_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_12x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // A1 <- A0
-	addq	%r11, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	movq	%r15, %r14 // A2 <- A1
-	addq	%r11, %r14 // A2 <- A1 + 4*sda*sizeof(double)
-
-	vbroadcastsd	0(%r12), %ymm12
-	vmovapd			0(%r10), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			32(%r10), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vbroadcastsd	64(%r12), %ymm12
-	vmovapd			64(%r10), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			64(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	72(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vbroadcastsd	96(%r12), %ymm12
-	vmovapd			96(%r10), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			96(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			96(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	104(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	112(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	120(%r12), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	addq			$128, %r10
-	addq			$128, %r12
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_12x4_lib4, .-inner_edge_dtrmm_nt_ru_12x4_lib4
-#endif
-#endif
-
-
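The triangular corner handled by the removed inner_edge_dtrmm_nt_ru_12x4_lib4 can also be written out: over the first four packed columns of A, column l only meets the l + 1 stored entries of the upper-triangular B factor, so the update grows from one accumulator column to four. A hedged C sketch, with D standing for the 12x4 accumulator block held in ymm0..ymm11 and a hypothetical name and signature:

/* Illustrative sketch: for l = 0..3, D[:, 0..l] += A[:, l] * B[l, 0..l],
 * where B[l, j] is stored at B + 4*l + j and A's 12 rows sit in three
 * 4-row panels with stride sda (column l of panel p at A + p*4*sda + 4*l). */
static void ref_dtrmm_nt_ru_edge_12x4(const double *A, int sda,
                                      const double *B, double D[12][4])
{
	for (int l = 0; l < 4; l++)
		for (int j = 0; j <= l; j++)
			for (int i = 0; i < 12; i++)
				D[i][j] += A[(i / 4) * 4 * sda + 4 * l + i % 4] * B[4 * l + j];
}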
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*4*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_12x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_12x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	movq	%r15, %r14 // A2 <- A1
-	addq	%r12, %r14 // A2 <- A1 + 4*sda*sizeof(double)
-
-	vbroadcastsd	0(%r13), %ymm12
-	addq			$32, %r13
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm13
-	addq			$32, %r11
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm14
-	addq			$32, %r15
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r14), %ymm15
-	addq			$32, %r14
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	addq			$32, %r11
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	addq			$32, %r13
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	addq			$32, %r15
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	addq			$32, %r14
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	addq			$32, %r11
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	addq			$32, %r13
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	addq			$32, %r15
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	addq			$32, %r14
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm13
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm14
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r14), %ymm15
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	addq			$32, %r11
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	addq			$32, %r13
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq			$32, %r15
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-	addq			$32, %r14
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_12x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_12x4_lib4, @function
-inner_edge_dtrmm_nn_rl_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_12x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r15d
-	jg		0f
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vmovapd			32(%r11), %ymm13
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vmovapd			32(%r11, %r12, 2), %ymm15
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vmovapd			64(%r11), %ymm13
-	vmovapd			64(%r11, %r12, 1), %ymm14
-	vmovapd			64(%r11, %r12, 2), %ymm15
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vmovapd			96(%r11), %ymm13
-	vmovapd			96(%r11, %r12, 1), %ymm14
-	vmovapd			96(%r11, %r12, 2), %ymm15
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r15d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vmovapd			32(%r11), %ymm13
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vmovapd			32(%r11, %r12, 2), %ymm15
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vmovapd			64(%r11), %ymm13
-	vmovapd			64(%r11, %r12, 1), %ymm14
-	vmovapd			64(%r11, %r12, 2), %ymm15
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	subl			$3, %r10d // k-3
-	addq			$96, %r11 // A0+3*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$8, %r13 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r15d
-	jg		2f
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	vmovapd			32(%r11), %ymm13
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vmovapd			32(%r11, %r12, 2), %ymm15
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	subl			$2, %r10d // k-2
-	addq			$64, %r11 // A0+2*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$16, %r13 // B+bs*sdb*sizeof(double)-2
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vmovapd			32(%r11), %ymm13
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vmovapd			32(%r11, %r12, 2), %ymm15
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	vmovapd			64(%r11), %ymm13
-	vmovapd			64(%r11, %r12, 1), %ymm14
-	vmovapd			64(%r11, %r12, 2), %ymm15
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	vmovapd			96(%r11), %ymm13
-	vmovapd			96(%r11, %r12, 1), %ymm14
-	vmovapd			96(%r11, %r12, 2), %ymm15
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-3
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	vmovapd			32(%r11), %ymm13
-	vmovapd			32(%r11, %r12, 1), %ymm14
-	vmovapd			32(%r11, %r12, 2), %ymm15
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	vmovapd			64(%r11), %ymm13
-	vmovapd			64(%r11, %r12, 1), %ymm14
-	vmovapd			64(%r11, %r12, 2), %ymm15
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	vmovapd			96(%r11), %ymm13
-	vmovapd			96(%r11, %r12, 1), %ymm14
-	vmovapd			96(%r11, %r12, 2), %ymm15
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_12x4_lib4, .-inner_edge_dtrmm_nn_rl_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_12x4_vs_lib4, @function
-inner_edge_dtrmm_nn_rl_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_12x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	cmpl			$0, %r15d
-	jg				0f // offB>0
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-0:
-	cmpl			$1, %r15d
-	jg				1f // offB>1
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-1:
-	cmpl			$2, %r15d
-	jg				2f // offB>2
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm13
-	vmovapd			0(%r11, %r12, 1), %ymm14
-	vmovapd			0(%r11, %r12, 2), %ymm15
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vfmadd231pd		%ymm15, %ymm12, %ymm8
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vfmadd231pd		%ymm15, %ymm12, %ymm9
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vfmadd231pd		%ymm15, %ymm12, %ymm10
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	vfmadd231pd		%ymm15, %ymm12, %ymm11
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_12x4_vs_lib4, .-inner_edge_dtrmm_nn_rl_12x4_vs_lib4
-#endif
-#endif
-
-
-
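As a plain-C reference for what the inner_edge_dtrmm_nn_rl_12x4_vs_lib4 prologue above is computing: with a lower-triangular B on the right, row r of B only has nonzeros in columns 0..r, which is why the edge blocks broadcast 1, 2, 3 and then 4 entries of B per iteration before the main loop takes over. A minimal scalar sketch of the underlying product (dense column-major indexing; alpha and the lib4 panel bookkeeping omitted; naming is mine):

    /* Reference triangular multiply D = A * B, with B (k x n) lower
     * triangular: entry B(r, j) is zero for r < j, so early rows of B
     * touch only the leading columns, matching the widening edge blocks. */
    static void trmm_nn_rl_ref(int m, int n, int k,
                               const double *A, const double *B, double *D)
    {
        for (int j = 0; j < n; j++) {
            for (int i = 0; i < m; i++) {
                double d = 0.0;
                for (int r = j; r < k; r++)   /* skip the zero part of column j */
                    d += A[i + m * r] * B[r + k * j];
                D[i + m * j] = d;
            }
        }
    }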
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_12x4_lib4, @function
-inner_blend_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_12x4_lib4; .scl 2; .type 32; .endef
-inner_blend_12x4_lib4:
-#endif
-#endif
-	
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm12
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm13
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm14
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm0
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm2
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm1
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm3
-
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm12
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm13
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm14
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm4
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm6
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm5
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm7
-
-
-	vblendpd	$0xa, %ymm9, %ymm8, %ymm12
-	vblendpd	$0x5, %ymm9, %ymm8, %ymm13
-	vblendpd	$0xa, %ymm11, %ymm10, %ymm14
-	vblendpd	$0x5, %ymm11, %ymm10, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm8
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm10
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm9
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_12x4_lib4, .-inner_blend_12x4_lib4
-#endif
-#endif
-
-
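The blend network above only uses vblendpd, whose 4-bit immediate picks each 64-bit lane from one of the two sources; the complementary masks 0xa/0x5 exchange the odd lanes of a register pair, and 0xc/0x3 then exchange their upper 128-bit halves, which is how the staggered accumulators get re-sorted into column order. A small intrinsics illustration of the mask mechanics only (not a drop-in for the kernel; names are mine):

    #include <immintrin.h>

    /* _mm256_blend_pd takes lane i from b when bit i of the immediate is
     * set, otherwise from a.  With the complementary masks used above,
     * one pair of blends swaps the odd lanes of a and b. */
    static inline void blend_swap_odd(__m256d a, __m256d b,
                                      __m256d *lo, __m256d *hi)
    {
        *lo = _mm256_blend_pd(a, b, 0xa);  /* a0 b1 a2 b3 */
        *hi = _mm256_blend_pd(a, b, 0x5);  /* b0 a1 b2 a3 */
    }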
-
-
-
-// common inner routine with file scope
-//
-// transpose
-//
-// input arguments:
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_12x4_lib4, @function
-inner_tran_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_tran_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_12x4_lib4; .scl 2; .type 32; .endef
-inner_tran_12x4_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vunpcklpd	%ymm9, %ymm8, %ymm12
-	vunpckhpd	%ymm9, %ymm8, %ymm13
-	vunpcklpd	%ymm11, %ymm10, %ymm14
-	vunpckhpd	%ymm11, %ymm10, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm8
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm10
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm9
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_12x4_lib4, .-inner_tran_12x4_lib4
-#endif
-#endif
-
-
-
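The unpcklpd/unpckhpd + vperm2f128 sequence above is the standard in-register 4x4 transpose of doubles, applied once per 4-row block of the 12x4 tile. A C intrinsics sketch of one such tile (function name is mine, not a BLASFEO API):

    #include <immintrin.h>

    /* Transpose a 4x4 tile of doubles held in four ymm registers, using
     * the same unpack/permute pattern as the routine above. */
    static inline void tran_4x4(__m256d r0, __m256d r1, __m256d r2, __m256d r3,
                                __m256d *c0, __m256d *c1, __m256d *c2, __m256d *c3)
    {
        __m256d t0 = _mm256_unpacklo_pd(r0, r1);     /* r00 r10 r02 r12 */
        __m256d t1 = _mm256_unpackhi_pd(r0, r1);     /* r01 r11 r03 r13 */
        __m256d t2 = _mm256_unpacklo_pd(r2, r3);     /* r20 r30 r22 r32 */
        __m256d t3 = _mm256_unpackhi_pd(r2, r3);     /* r21 r31 r23 r33 */
        *c0 = _mm256_permute2f128_pd(t0, t2, 0x20);  /* r00 r10 r20 r30 */
        *c2 = _mm256_permute2f128_pd(t0, t2, 0x31);  /* r02 r12 r22 r32 */
        *c1 = _mm256_permute2f128_pd(t1, t3, 0x20);  /* r01 r11 r21 r31 */
        *c3 = _mm256_permute2f128_pd(t1, t3, 0x31);  /* r03 r13 r23 r33 */
    }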
-
-
-// common inner routine with file scope
-//
-// scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_12x4_lib4, @function
-inner_scale_11_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_12x4_lib4; .scl 2; .type 32; .endef
-inner_scale_11_12x4_lib4:
-#endif
-#endif
-	
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC05(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC05(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		0(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		32(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		64(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		96(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_12x4_lib4, .-inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_12x4_lib4, @function
-inner_scale_ab_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_12x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_12x4_lib4:
-#endif
-#endif
-		
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	movq	%r15, %r14 // C2 <- C1
-	addq	%r13, %r14 // C2 <- C1 + 4*sdc*sizeof(double)
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		0(%r14), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		32(%r14), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		64(%r14), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		96(%r14), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_12x4_lib4, .-inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-
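In scalar terms, the scale stage above computes D = alpha*AB + beta*C, with the vucomisd/je pair skipping every C load when beta == 0.0 so that C is never read in that case. A minimal scalar sketch of the same logic (dense 12x4 tile, column-major C; the lib4 panel addressing omitted; naming is mine):

    /* Scalar sketch of the alpha/beta scaling: acc is the 12x4 accumulator
     * from the FMA loop; when beta == 0.0 the C tile is never read,
     * matching the kernel's short-circuit branch. */
    static void scale_ab_12x4(double alpha, double beta,
                              const double *C, double acc[12][4])
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 12; i++)
                acc[i][j] *= alpha;

        if (beta == 0.0)
            return;

        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 12; i++)
                acc[i][j] += beta * C[i + 12 * j];
    }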
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- &alpha
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_12x4_lib4, @function
-inner_scale_a0_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_12x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_12x4_lib4:
-#endif
-#endif
-		
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_12x4_lib4, .-inner_scale_a0_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_12x4_lib4, @function
-inner_blend_scale_ab_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_12x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_12x4_lib4:
-#endif
-#endif
-		
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm12
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm13
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm14
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm0
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm2
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm1
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm3
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm12
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm13
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm14
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm4
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm6
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm5
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm7
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vblendpd	$0xa, %ymm9, %ymm8, %ymm12
-	vblendpd	$0x5, %ymm9, %ymm8, %ymm13
-	vblendpd	$0xa, %ymm11, %ymm10, %ymm14
-	vblendpd	$0x5, %ymm11, %ymm10, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm8
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm10
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm9
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm11
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		0(%r12, %r13, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		32(%r12, %r13, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		64(%r12, %r13, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		96(%r12, %r13, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_12x4_lib4, .-inner_blend_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4-ymm11 <- further accumulator columns
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4-ymm11 <- further result columns
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x12_lib4, @function
-inner_scale_ab_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x12_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x12_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-	vmovapd		128(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-	vmovapd		256(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		288(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		320(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		352(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x12_lib4, .-inner_scale_ab_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x12_lib4, @function
-inner_tran_scale_ab_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x12_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x12_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vunpcklpd	%ymm9, %ymm8, %ymm12
-	vunpckhpd	%ymm9, %ymm8, %ymm13
-	vunpcklpd	%ymm11, %ymm10, %ymm14
-	vunpckhpd	%ymm11, %ymm10, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm8
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm10
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm9
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm11
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		256(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		288(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		320(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		352(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x12_lib4, .-inner_tran_scale_ab_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_12x4_lib4, @function
-inner_blend_scale_11_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_12x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_12x4_lib4:
-#endif
-#endif
-	
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm12
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm13
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm14
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm0
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm2
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm1
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm12
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm13
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm14
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm4
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm6
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm5
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm7
-
-	vblendpd	$0xa, %ymm9, %ymm8, %ymm12
-	vblendpd	$0x5, %ymm9, %ymm8, %ymm13
-	vblendpd	$0xa, %ymm11, %ymm10, %ymm14
-	vblendpd	$0x5, %ymm11, %ymm10, %ymm15
-
-	vblendpd	$0xc, %ymm14, %ymm12, %ymm8
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm10
-	vblendpd	$0xc, %ymm15, %ymm13, %ymm9
-	vblendpd	$0x3, %ymm15, %ymm13, %ymm11
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC05(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC05(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		0(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		32(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		64(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		96(%r10, %r11, 2), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_12x4_lib4, .-inner_blend_scale_11_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_11_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_11_4x12_lib4, @function
-inner_tran_scale_11_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_11_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_11_4x12_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_11_4x12_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vunpcklpd	%ymm9, %ymm8, %ymm12
-	vunpckhpd	%ymm9, %ymm8, %ymm13
-	vunpcklpd	%ymm11, %ymm10, %ymm14
-	vunpckhpd	%ymm11, %ymm10, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm8
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm10
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm9
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm11
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC05(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC05(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		256(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		288(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		320(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		352(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_11_4x12_lib4, .-inner_tran_scale_11_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// Cholesky factorization
-//
-// input arguments:
-// r10   <- inv_diag_E
-// r11d  <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_12x4_vs_lib4, @function
-inner_edge_dpotrf_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_12x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC05(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC05(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd			%xmm0, %xmm0, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-2:
-	vmovsd			%xmm13, 0(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$2, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x00, %ymm0, %ymm0, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-4:
-	vmovsd			%xmm13, 8(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$3, %r11d
-	jl				0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-6:
-	vmovsd			%xmm13, 16(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$4, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x11, %ymm2, %ymm2, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-//	vextractf128	$0x1, %ymm3, %xmm13
-//	vpermilpd		$0x3, %xmm13, %xmm13
-	vpermpd			$0xff, %ymm3, %ymm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-8:
-	vmovsd			%xmm13, 24(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-	jmp				0f
-
-1:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				8b
-
-0:
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_12x4_vs_lib4, .-inner_edge_dpotrf_12x4_vs_lib4
-#endif
-#endif
-
-
-
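The factorization edge above does one Cholesky step per column of the 12x4 panel: the pivot is compared against 0.0, its reciprocal square root is stored to inv_diag_E, and a non-positive pivot falls through to the vxorpd labels so the whole column is scaled by zero instead of producing NaNs. A scalar sketch of the same per-column step (dense layout, the kn early-exit omitted; naming is mine):

    #include <math.h>

    /* Scalar sketch of the dpotrf edge on a 12x4 panel D: factor the
     * leading 4x4 block and update the rows below it.  inv_diag[j]
     * receives 1/sqrt(d_jj); a non-positive pivot gives a zero scale,
     * mirroring the kernel's jbe path. */
    static void potrf_panel_12x4(double D[12][4], double inv_diag[4])
    {
        for (int j = 0; j < 4; j++) {
            double djj = D[j][j];
            double s = (djj > 0.0) ? 1.0 / sqrt(djj) : 0.0;
            inv_diag[j] = s;
            for (int i = j; i < 12; i++)          /* scale column j */
                D[i][j] *= s;
            for (int k = j + 1; k < 4; k++) {     /* update trailing columns */
                double l = D[k][j];
                for (int i = k; i < 12; i++)
                    D[i][k] -= D[i][j] * l;
            }
        }
    }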
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10   <- E
-// r11   <- inv_diag_E
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10   <- E
-// r11   <- inv_diag_E
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_12x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_12x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_12x4_lib4, .-inner_edge_dtrsm_rlt_inv_12x4_lib4
-#endif
-#endif
-
-
-
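The substitution edge above solves X * E^T = B in place for the 12x4 panel held in ymm0-ymm11, with E a 4x4 lower-triangular factor and inv_diag_E its precomputed reciprocal diagonal, so no division appears in the kernel. A scalar sketch of the same recurrence (dense indexing; naming is mine):

    /* Scalar sketch of the dtrsm_rlt_inv edge: forward substitution over
     * the columns of B against the transposed lower-triangular E, using
     * inv_diag[j] = 1.0 / E[j][j] instead of dividing. */
    static void trsm_rlt_inv_12x4(const double E[4][4], const double inv_diag[4],
                                  double B[12][4])
    {
        for (int j = 0; j < 4; j++) {
            for (int i = 0; i < 12; i++)
                B[i][j] *= inv_diag[j];
            for (int k = j + 1; k < 4; k++) {   /* propagate column j into later columns */
                double e = E[k][j];
                for (int i = 0; i < 12; i++)
                    B[i][k] -= B[i][j] * e;
            }
        }
    }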
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x12_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x12_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x12_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	64(%r12), %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	72(%r12), %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	80(%r12), %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	88(%r12), %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x12_lib4, .-inner_edge_dtrsm_rlt_inv_4x12_lib4
-#endif
-#endif
-
-
-
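The 4x12 variant above walks E across three 4-row panels, which is what the 0(%r10, %r11, 1) and 0(%r10, %r11, 2) operands and the addq $128 jumps express in the panel-major ("lib4") storage format. A short C sketch of that addressing, assuming the usual BLASFEO layout of 4-row panels with column stride sda (helper name is mine):

    /* Element (i, j) of a panel-major matrix: panels of 4 rows, each
     * column packed as 4 consecutive doubles, panels spaced 4*sda apart. */
    static inline double lib4_get(const double *A, int sda, int i, int j)
    {
        return A[(i / 4) * 4 * sda + j * 4 + (i % 4)];
    }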
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10   <- D
-// r11   <- inv_diag_D
-// r12d  <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11   <- inv_diag_D
-// r12d  <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_12x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_12x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_12x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// r13d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// r13d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X12_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x12_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x12_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x12_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x12_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x12_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	64(%r12), %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$10, %r13d
-	jl				0f // ret
-	vbroadcastsd	8(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	72(%r12), %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$11, %r13d
-	jl				0f // ret
-	vbroadcastsd	48(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	80(%r12), %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$12, %r13d
-	jl				0f // ret
-	vbroadcastsd	88(%r10, %r11, 2), %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	88(%r12), %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x12_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x12_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_12x4_lib4, @function
-inner_edge_dtrsm_rlt_one_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_12x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_12x4_lib4, .-inner_edge_dtrsm_rlt_one_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_12x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_one_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_12x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$2, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-
-	cmpl			$3, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-
-	cmpl			$4, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_12x4_vs_lib4, .-inner_edge_dtrsm_rlt_one_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_12x4_lib4, @function
-inner_edge_dtrsm_rut_inv_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_12x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vmulpd			%ymm11, %ymm12, %ymm11
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vfnmadd231pd	%ymm7, %ymm12, %ymm6
-	vfnmadd231pd	%ymm11, %ymm12, %ymm10
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vfnmadd231pd	%ymm7, %ymm12, %ymm5
-	vfnmadd231pd	%ymm11, %ymm12, %ymm9
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-	vfnmadd231pd	%ymm7, %ymm12, %ymm4
-	vfnmadd231pd	%ymm11, %ymm12, %ymm8
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vmulpd			%ymm10, %ymm12, %ymm10
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vfnmadd231pd	%ymm6, %ymm12, %ymm5
-	vfnmadd231pd	%ymm10, %ymm12, %ymm9
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-	vfnmadd231pd	%ymm6, %ymm12, %ymm4
-	vfnmadd231pd	%ymm10, %ymm12, %ymm8
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vmulpd			%ymm9, %ymm12, %ymm9
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-	vfnmadd231pd	%ymm5, %ymm12, %ymm4
-	vfnmadd231pd	%ymm9, %ymm12, %ymm8
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-	vmulpd			%ymm8, %ymm12, %ymm8
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_12x4_lib4, .-inner_edge_dtrsm_rut_inv_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUN_INV_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_run_inv_12x4_lib4, @function
-inner_edge_dtrsm_run_inv_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_run_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_run_inv_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_run_inv_12x4_lib4:
-#endif
-#endif
-
-	// first column
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-	vmulpd			%ymm8, %ymm12, %ymm8
-
-	// second column
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm1
-	vfnmadd231pd	%ymm4, %ymm12, %ymm5
-	vfnmadd231pd	%ymm8, %ymm12, %ymm9
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vmulpd			%ymm9, %ymm12, %ymm9
-
-	// third column
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm2
-	vfnmadd231pd	%ymm4, %ymm12, %ymm6
-	vfnmadd231pd	%ymm8, %ymm12, %ymm10
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm2
-	vfnmadd231pd	%ymm5, %ymm12, %ymm6
-	vfnmadd231pd	%ymm9, %ymm12, %ymm10
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vmulpd			%ymm10, %ymm12, %ymm10
-
-	// fourth column
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm3
-	vfnmadd231pd	%ymm4, %ymm12, %ymm7
-	vfnmadd231pd	%ymm8, %ymm12, %ymm11
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm3
-	vfnmadd231pd	%ymm5, %ymm12, %ymm7
-	vfnmadd231pd	%ymm9, %ymm12, %ymm11
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm3
-	vfnmadd231pd	%ymm6, %ymm12, %ymm7
-	vfnmadd231pd	%ymm10, %ymm12, %ymm11
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vmulpd			%ymm11, %ymm12, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_run_inv_12x4_lib4, .-inner_edge_dtrsm_run_inv_12x4_lib4
-#endif
-#endif
-
-
-
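inner_edge_dtrsm_run_inv_12x4_lib4 is the analogous right-side solve against an upper-triangular factor in normal (non-transposed) form, B := B * U^{-1}, again with 1/U[j][j] supplied in inv_diag_E. A scalar sketch under the same illustrative conventions as before:

// Right/Upper/Normal solve with inverted diagonal: B := B * inv(U).
static void trsm_run_inv_ref(int m, int n, const double *U, int ldu,
                             const double *inv_diag, double *B, int ldb)
{
	for (int j = 0; j < n; j++) {
		for (int k = 0; k < j; k++) {
			double ukj = U[k + j * ldu];                // U[k][j]
			for (int i = 0; i < m; i++)
				B[i + j * ldb] -= B[i + k * ldb] * ukj; // subtract the already-solved columns
		}
		for (int i = 0; i < m; i++)
			B[i + j * ldb] *= inv_diag[j];              // divide by U[j][j]
	}
}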
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_12x4_vs_lib4, @function
-inner_edge_dtrsm_rut_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_12x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$3, %r12d
-	jle				0f
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vmulpd			%ymm11, %ymm12, %ymm11
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vfnmadd231pd	%ymm7, %ymm12, %ymm6
-	vfnmadd231pd	%ymm11, %ymm12, %ymm10
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vfnmadd231pd	%ymm7, %ymm12, %ymm5
-	vfnmadd231pd	%ymm11, %ymm12, %ymm9
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-	vfnmadd231pd	%ymm7, %ymm12, %ymm4
-	vfnmadd231pd	%ymm11, %ymm12, %ymm8
-
-0:
-	cmpl			$2, %r12d
-	jle				1f
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vmulpd			%ymm10, %ymm12, %ymm10
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vfnmadd231pd	%ymm6, %ymm12, %ymm5
-	vfnmadd231pd	%ymm10, %ymm12, %ymm9
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-	vfnmadd231pd	%ymm6, %ymm12, %ymm4
-	vfnmadd231pd	%ymm10, %ymm12, %ymm8
-
-1:
-	cmpl			$1, %r12d
-	jle				2f
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vmulpd			%ymm9, %ymm12, %ymm9
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-	vfnmadd231pd	%ymm5, %ymm12, %ymm4
-	vfnmadd231pd	%ymm9, %ymm12, %ymm8
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-	vmulpd			%ymm8, %ymm12, %ymm8
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_12x4_vs_lib4, .-inner_edge_dtrsm_rut_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = lower
-// tran = normal
-// unit diagonal
-//
-// input arguments:
-// r10   <- E0
-// r11   <- 4*sde*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- E0
-// r11   <- 4*sde*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LLN_ONE_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lln_one_12x4_lib4, @function
-inner_edge_dtrsm_lln_one_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lln_one_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lln_one_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lln_one_12x4_lib4:
-#endif
-#endif
-
-	movq	%r10, %r12 // E1 <- E0
-	addq	%r11, %r12 // E1 <- E0 + 4*sde*sizeof(double)
-	movq	%r12, %r13 // E2 <- E1
-	addq	%r11, %r13 // E2 <- E1 + 4*sde*sizeof(double)
-
-	// left block-column
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			0(%r10), %ymm12
-	vblendpd		$0x1, %ymm15, %ymm12, %ymm12
-	vmovapd			0(%r12), %ymm14
-	vmovapd			0(%r13), %ymm15
-	vpermpd			$0x00, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			32(%r10), %ymm12
-	vblendpd		$0x3, %ymm15, %ymm12, %ymm12
-	vmovapd			32(%r12), %ymm14
-	vmovapd			32(%r13), %ymm15
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			64(%r10), %ymm12
-	vblendpd		$0x7, %ymm15, %ymm12, %ymm12
-	vmovapd			64(%r12), %ymm14
-	vmovapd			64(%r13), %ymm15
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vmovapd			96(%r12), %ymm14
-	vmovapd			96(%r13), %ymm15
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xff, %ymm1, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xff, %ymm3, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	addq		$128, %r12
-	addq		$128, %r13
-
-
-	// middle block-column
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			0(%r12), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd			0(%r13), %ymm14
-	vpermpd			$0x00, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			32(%r12), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd			32(%r13), %ymm14
-	vpermpd			$0x55, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			64(%r12), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd			64(%r13), %ymm14
-	vpermpd			$0xaa, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vmovapd			96(%r13), %ymm14
-	vpermpd			$0xff, %ymm4, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0xff, %ymm5, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0xff, %ymm6, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0xff, %ymm7, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-
-	addq		$128, %r13
-
-
-	// right block-column
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r13), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x00, %ymm8, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm9, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm10, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm11, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-	vmovapd			32(%r13), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x55, %ymm8, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm9, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm10, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm11, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-	vmovapd			64(%r13), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vpermpd			$0xaa, %ymm8, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm9, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm10, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm11, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lln_one_12x4_lib4, .-inner_edge_dtrsm_lln_one_12x4_lib4
-#endif
-#endif
-
-
-
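inner_edge_dtrsm_lln_one_12x4_lib4 above is a left-side forward substitution with an implicit unit diagonal, B := L^{-1} * B, applied to the three stacked 4-row panels E0, E1 and E2 that the movq/addq prologue derives from r10 and the panel stride in r11. In scalar form (illustrative names, column-major):

// Left/Lower/Normal solve with unit diagonal: B := inv(L) * B, L unit lower triangular.
static void trsm_lln_one_ref(int m, int n, const double *L, int ldl,
                             double *B, int ldb)
{
	for (int j = 0; j < n; j++)
		for (int i = 0; i < m; i++) {
			double x = B[i + j * ldb];
			for (int k = 0; k < i; k++)
				x -= L[i + k * ldl] * B[k + j * ldb];   // forward substitution
			B[i + j * ldb] = x;                         // unit diagonal: no division
		}
}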
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_12x4_lib4, @function
-inner_edge_dtrsm_lun_inv_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_12x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r13 // E1 <- E0
-	addq	%r11, %r13 // E1 <- E0 + 4*sde*sizeof(double)
-	movq	%r13, %r14 // E2 <- E1
-	addq	%r11, %r14 // E2 <- E1 + 4*sde*sizeof(double)
-
-	// bottom-right
-
-	vmovapd			352(%r14), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	88(%r12), %ymm12
-	vmovapd			352(%r13), %ymm15
-//	vmovapd			352(%r10), %ymm11
-
-	vpermpd			$0xff, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	352(%r10), %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	352(%r10), %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	352(%r10), %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	352(%r10), %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 // note: likely redundant, the 128-bit vmovapd below already zeroes the upper lanes
-	vmovapd			320(%r14), %xmm13
-	vbroadcastsd	80(%r12), %ymm12
-	vmovapd			320(%r13), %ymm15
-//	vmovapd			320(%r10), %ymm11
-
-	vpermpd			$0xaa, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	320(%r10), %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	320(%r10), %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	320(%r10), %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	320(%r10), %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			288(%r14), %xmm13
-	vbroadcastsd	72(%r12), %ymm12
-	vmovapd			288(%r13), %ymm15
-//	vmovapd			288(%r10), %ymm11
-
-	vpermpd			$0x55, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	288(%r10), %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	288(%r10), %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	288(%r10), %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	288(%r10), %ymm14, %ymm3
-
-
-	vbroadcastsd	64(%r12), %ymm12
-	vmovapd			256(%r13), %ymm15
-//	vmovapd			256(%r10), %ymm11
-
-	vpermpd			$0x00, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	256(%r10), %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	256(%r10), %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	256(%r10), %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	256(%r10), %ymm14, %ymm3
-
-
-	// middle-middle
-
-	vmovapd			224(%r13), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm15
-
-	vpermpd			$0xff, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 // note: likely redundant, the 128-bit vmovapd below already zeroes the upper lanes
-	vmovapd			192(%r13), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm15
-
-	vpermpd			$0xaa, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r13), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm15
-
-	vpermpd			$0x55, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm15
-
-	vpermpd			$0x00, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vpermpd			$0xff, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vpermpd			$0xaa, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_12x4_lib4, .-inner_edge_dtrsm_lun_inv_12x4_lib4
-#endif
-#endif
-
-
-
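inner_edge_dtrsm_lun_inv_12x4_lib4 is the matching backward substitution B := U^{-1} * B, working from the bottom-right 4x4 block of E up to the top-left one and using the precomputed reciprocal diagonal in inv_diag_E. A scalar sketch (illustrative names, column-major):

// Left/Upper/Normal solve with inverted diagonal: B := inv(U) * B.
static void trsm_lun_inv_ref(int m, int n, const double *U, int ldu,
                             const double *inv_diag, double *B, int ldb)
{
	for (int j = 0; j < n; j++)
		for (int i = m - 1; i >= 0; i--) {              // last row first
			double x = B[i + j * ldb];
			for (int k = i + 1; k < m; k++)
				x -= U[i + k * ldu] * B[k + j * ldb];
			B[i + j * ldb] = x * inv_diag[i];
		}
}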
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_12x4_vs_lib4, @function
-inner_edge_dtrsm_lun_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_12x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // E1 <- E0
-	addq	%r11, %r15 // E1 <- E0 + 4*sde*sizeof(double)
-	movq	%r15, %r14 // E2 <- E1
-	addq	%r11, %r14 // E2 <- E1 + 4*sde*sizeof(double)
-
-	// bottom-right
-
-	cmpl	$11, %r13d
-	jle		0f
-
-	vmovapd			352(%r14), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	88(%r12), %ymm12
-	vmovapd			352(%r15), %ymm15
-//	vmovapd			352(%r10), %ymm11
-
-	vpermpd			$0xff, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	352(%r10), %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	352(%r10), %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	352(%r10), %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	352(%r10), %ymm14, %ymm3
-
-0:
-	cmpl	$10, %r13d
-	jle		1f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 // note: likely redundant, the 128-bit vmovapd below already zeroes the upper lanes
-	vmovapd			320(%r14), %xmm13
-	vbroadcastsd	80(%r12), %ymm12
-	vmovapd			320(%r15), %ymm15
-//	vmovapd			320(%r10), %ymm11
-
-	vpermpd			$0xaa, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	320(%r10), %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	320(%r10), %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	320(%r10), %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	320(%r10), %ymm14, %ymm3
-
-1:
-	cmpl	$9, %r13d
-	jle		2f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			288(%r14), %xmm13
-	vbroadcastsd	72(%r12), %ymm12
-	vmovapd			288(%r15), %ymm15
-//	vmovapd			288(%r10), %ymm11
-
-	vpermpd			$0x55, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm13, %ymm14, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	288(%r10), %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm13, %ymm14, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	288(%r10), %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm13, %ymm14, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	288(%r10), %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm13, %ymm14, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	288(%r10), %ymm14, %ymm3
-
-2:
-
-	vbroadcastsd	64(%r12), %ymm12
-	vmovapd			256(%r15), %ymm15
-//	vmovapd			256(%r10), %ymm11
-
-	vpermpd			$0x00, %ymm8, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm8, %ymm8
-	vfnmadd231pd	%ymm15, %ymm14, %ymm4
-	vfnmadd231pd	256(%r10), %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm9, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm9, %ymm9
-	vfnmadd231pd	%ymm15, %ymm14, %ymm5
-	vfnmadd231pd	256(%r10), %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm10, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm10, %ymm10
-	vfnmadd231pd	%ymm15, %ymm14, %ymm6
-	vfnmadd231pd	256(%r10), %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm11, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm11, %ymm11
-	vfnmadd231pd	%ymm15, %ymm14, %ymm7
-	vfnmadd231pd	256(%r10), %ymm14, %ymm3
-
-
-	// middle-middle
-
-	vmovapd			224(%r15), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm15
-
-	vpermpd			$0xff, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 // note: likely redundant, the 128-bit vmovapd below already zeroes the upper lanes
-	vmovapd			192(%r15), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm15
-
-	vpermpd			$0xaa, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r15), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm15
-
-	vpermpd			$0x55, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm15
-
-	vpermpd			$0x00, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm15, %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm15, %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm15, %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm15, %ymm14, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vpermpd			$0xff, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vpermpd			$0xaa, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_12x4_vs_lib4, .-inner_edge_dtrsm_lun_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-// left kernel
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_L_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_l_12x4_lib4, @function
-inner_edge_dgetrf_l_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_l_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_l_12x4_lib4:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC05(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC05(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm0, %ymm12, %ymm12
-	vmovapd			%ymm0, %ymm12
-	vdivsd			%xmm0, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r10)
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vblendpd		$0x1, %ymm12, %ymm0, %ymm0
-
-	// second column
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vblendpd		$0x2, %ymm1, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r10)
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vblendpd		$0x3, %ymm12, %ymm1, %ymm1
-
-	// third column
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vblendpd		$0x2, %ymm2, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vblendpd		$0x4, %ymm2, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r10)
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vblendpd		$0x7, %ymm12, %ymm2, %ymm2
-
-	// fourth column
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-	vblendpd		$0x2, %ymm3, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-	vblendpd		$0x4, %ymm3, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-	vblendpd		$0x8, %ymm3, %ymm12, %ymm12
-	
-	vpermpd			$0xff, %ymm3, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r10)
-//	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-	vblendpd		$0x7, %ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_l_12x4_lib4, .-inner_edge_dgetrf_l_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-// middle kernel
-//
-// input arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
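A hedged sketch of the solve/correct step this middle edge routine performs before factorizing the lower 8x4 block (illustrative names; column-major arrays stand in for the panel-major E and the accumulator registers): the 12x4 block is eliminated against the already-factorized panel E, whose top 4x4 block is unit lower triangular.

// Illustrative only: forward elimination of a 12x4 block B against the
// previously factorized panel E (unit lower triangular on top, plain
// multipliers below), i.e. solve L11*X1 = B1 and correct B2 -= L21*X1.
// Both arrays are column-major with leading dimension 12.
static void dgetrf_m_edge_sketch(const double E[12 * 4], double B[12 * 4])
{
	for (int j = 0; j < 4; j++)
		for (int k = 0; k < 4; k++) {
			double x = B[j * 12 + k];   // already-solved entry X1(k,j)
			for (int i = k + 1; i < 12; i++)
				B[j * 12 + i] -= E[k * 12 + i] * x;
		}
	// rows 4..11 of B are then factorized exactly like the left kernel
	// above, writing the new reciprocal diagonals into inv_diag_D.
}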
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_M_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_m_12x4_lib4, @function
-inner_edge_dgetrf_m_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_m_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_m_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_m_12x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r14 // E1 <- E0
-	addq	%r11, %r14 // E1 <- E0 + 4*sde*sizeof(double)
-	movq	%r14, %r13 // E2 <- E1
-	addq	%r11, %r13 // E2 <- E1 + 4*sde*sizeof(double)
-
-	// solve upper 4x4 & correct lower 8x4
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			0(%r10), %ymm12
-	vblendpd		$0x1, %ymm15, %ymm12, %ymm12
-	vmovapd			0(%r14), %ymm14
-	vmovapd			0(%r13), %ymm15
-	vpermpd			$0x00, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			32(%r10), %ymm12
-	vblendpd		$0x3, %ymm15, %ymm12, %ymm12
-	vmovapd			32(%r14), %ymm14
-	vmovapd			32(%r13), %ymm15
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			64(%r10), %ymm12
-	vblendpd		$0x7, %ymm15, %ymm12, %ymm12
-	vmovapd			64(%r14), %ymm14
-	vmovapd			64(%r13), %ymm15
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vmovapd			96(%r14), %ymm14
-	vmovapd			96(%r13), %ymm15
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xff, %ymm1, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xff, %ymm3, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-
-	// factorize lower 8x4
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC05(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC05(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm4, %ymm12, %ymm12
-	vmovapd			%ymm4, %ymm12
-	vdivsd			%xmm4, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r12)
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vblendpd		$0x1, %ymm12, %ymm4, %ymm4
-
-	// second column
-	vpermpd			$0x00, %ymm5, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vblendpd		$0x2, %ymm5, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm5, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r12)
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vblendpd		$0x3, %ymm12, %ymm5, %ymm5
-
-	// third column
-	vpermpd			$0x00, %ymm6, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vblendpd		$0x2, %ymm6, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm6, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vblendpd		$0x4, %ymm6, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm6, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r12)
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vblendpd		$0x7, %ymm12, %ymm6, %ymm6
-
-	// fourth column
-	vpermpd			$0x00, %ymm7, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-	vblendpd		$0x2, %ymm7, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm7, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-	vblendpd		$0x4, %ymm7, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm7, %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-	vblendpd		$0x8, %ymm7, %ymm12, %ymm12
-	
-	vpermpd			$0xff, %ymm7, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r12)
-//	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-	vblendpd		$0x7, %ymm12, %ymm7, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_m_12x4_lib4, .-inner_edge_dgetrf_m_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-// right kernel
-//
-// input arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8 <- [d80 d90 da0 db0]
-// ymm9 <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_R_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_r_12x4_lib4, @function
-inner_edge_dgetrf_r_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_r_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_r_12x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_r_12x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r14 // E1 <- E0
-	addq	%r11, %r14 // E1 <- E0 + 4*sde*sizeof(double)
-	movq	%r14, %r13 // E2 <- E1
-	addq	%r11, %r13 // E2 <- E1 + 4*sde*sizeof(double)
-
-	// solve upper 8x4 & correct lower 4x4
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			0(%r10), %ymm12
-	vblendpd		$0x1, %ymm15, %ymm12, %ymm12
-	vmovapd			0(%r14), %ymm14
-	vmovapd			0(%r13), %ymm15
-	vpermpd			$0x00, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			32(%r10), %ymm12
-	vblendpd		$0x3, %ymm15, %ymm12, %ymm12
-	vmovapd			32(%r14), %ymm14
-	vmovapd			32(%r13), %ymm15
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vmovapd			64(%r10), %ymm12
-	vblendpd		$0x7, %ymm15, %ymm12, %ymm12
-	vmovapd			64(%r14), %ymm14
-	vmovapd			64(%r13), %ymm15
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	vmovapd			96(%r14), %ymm14
-	vmovapd			96(%r13), %ymm15
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vfnmadd231pd	%ymm15, %ymm13, %ymm8
-	vpermpd			$0xff, %ymm1, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vfnmadd231pd	%ymm15, %ymm13, %ymm9
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vfnmadd231pd	%ymm15, %ymm13, %ymm10
-	vpermpd			$0xff, %ymm3, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-	vfnmadd231pd	%ymm15, %ymm13, %ymm11
-
-	addq		$128, %r14
-	addq		$128, %r13
-
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			0(%r14), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd			0(%r13), %ymm14
-	vpermpd			$0x00, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0x00, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0x00, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0x00, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			32(%r14), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd			32(%r13), %ymm14
-	vpermpd			$0x55, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0x55, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0x55, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0x55, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			64(%r14), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd			64(%r13), %ymm14
-	vpermpd			$0xaa, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0xaa, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0xaa, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0xaa, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-	vmovapd			96(%r13), %ymm14
-	vpermpd			$0xff, %ymm4, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm8
-	vpermpd			$0xff, %ymm5, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm9
-	vpermpd			$0xff, %ymm6, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm10
-	vpermpd			$0xff, %ymm7, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm11
-
-
-
-	// factorize lower 4x4
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC05(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC05(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm8, %ymm12, %ymm12
-	vmovapd			%ymm8, %ymm12
-	vdivsd			%xmm8, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r12)
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vblendpd		$0x1, %ymm12, %ymm8, %ymm8
-
-	// second column
-	vpermpd			$0x00, %ymm9, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vblendpd		$0x2, %ymm9, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm9, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r12)
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vblendpd		$0x3, %ymm12, %ymm9, %ymm9
-
-	// third column
-	vpermpd			$0x00, %ymm10, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vblendpd		$0x2, %ymm10, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm10, %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vblendpd		$0x4, %ymm10, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm10, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r12)
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vblendpd		$0x7, %ymm12, %ymm10, %ymm10
-
-	// fourth column
-	vpermpd			$0x00, %ymm11, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-	vblendpd		$0x2, %ymm11, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm11, %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-	vblendpd		$0x4, %ymm11, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm11, %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-	vblendpd		$0x8, %ymm11, %ymm12, %ymm12
-	
-	vpermpd			$0xff, %ymm11, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-//	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r12)
-//	vmulpd			%ymm11, %ymm13, %ymm11
-	vblendpd		$0x7, %ymm12, %ymm11, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_r_12x4_lib4, .-inner_edge_dgetrf_r_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
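The store below relies on the panel-major ("lib4") layout: a 12x4 block is three stacked 4-row panels, each panel holding its four columns contiguously, and r11 carries the byte stride between panels (4*sdd*sizeof(double)). A hedged sketch of the addressing and of the store itself (helper and function names are illustrative):

// Illustrative only: index of element (i, j) in a panel-major matrix with
// panel height 4; the panel stride is 4*sdd doubles.
static inline int pm_idx(int sdd, int i, int j)
{
	return (i / 4) * 4 * sdd + j * 4 + (i % 4);
}

// Scalar equivalent of the store below; acc holds the 12x4 accumulator
// (row-major, acc[i*4 + j]) that lives in ymm0..ymm11 in the assembly.
static void store_12x4_sketch(double *D, int sdd, const double *acc)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 12; i++)
			D[pm_idx(sdd, i, j)] = acc[i * 4 + j];
}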
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_12x4_lib4, @function
-inner_store_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_12x4_lib4; .scl 2; .type 32; .endef
-inner_store_12x4_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmovapd %ymm4,  0(%r10, %r11, 1)
-	vmovapd %ymm5, 32(%r10, %r11, 1)
-	vmovapd %ymm6, 64(%r10, %r11, 1)
-	vmovapd %ymm7, 96(%r10, %r11, 1)
-
-	vmovapd %ymm8,   0(%r10, %r11, 2)
-	vmovapd %ymm9,  32(%r10, %r11, 2)
-	vmovapd %ymm10, 64(%r10, %r11, 2)
-	vmovapd %ymm11, 96(%r10, %r11, 2)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_12x4_lib4, .-inner_store_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X12_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x12_lib4, @function
-inner_store_4x12_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x12_lib4; .scl 2; .type 32; .endef
-inner_store_4x12_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,   0(%r10)
-	vmovapd %ymm1,  32(%r10)
-	vmovapd %ymm2,  64(%r10)
-	vmovapd %ymm3,  96(%r10)
-
-	vmovapd %ymm4, 128(%r10)
-	vmovapd %ymm5, 160(%r10)
-	vmovapd %ymm6, 192(%r10)
-	vmovapd %ymm7, 224(%r10)
-
-	vmovapd %ymm8, 256(%r10)
-	vmovapd %ymm9, 288(%r10)
-	vmovapd %ymm10, 320(%r10)
-	vmovapd %ymm11, 352(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x12_lib4, .-inner_store_4x12_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14  <- dirty
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14  <- dirty
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
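A hedged sketch of this variable-size store (reusing pm_idx from the layout sketch above): only the first kn columns are written, and stores into the bottom panel are masked so that rows at or beyond km are left untouched; the .LC04 constant (not shown in this hunk) is assumed to hold per-lane row indices so that the vmaskmovpd mask selects exactly the bottom-panel rows below km.

// Illustrative only: clipped store of a 12x4 block writing kn columns and
// km rows; the first two panels are stored unmasked in the assembly, which
// assumes the callers only use this kernel size when km > 8.
static void store_12x4_vs_sketch(double *D, int sdd, const double *acc,
                                 int km, int kn)
{
	for (int j = 0; j < 4 && j < kn; j++)
		for (int i = 0; i < 12 && i < km; i++)
			D[pm_idx(sdd, i, j)] = acc[i * 4 + j];
}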
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_12x4_vs_lib4, @function
-inner_store_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_12x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC04(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		%ymm4, 0(%r10, %r11, 1)
-	vmaskmovpd	%ymm8, %ymm15,  0(%r10, %r11, 2)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		%ymm5, 32(%r10, %r11, 1)
-	vmaskmovpd	%ymm9, %ymm15, 32(%r10, %r11, 2)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		%ymm6, 64(%r10, %r11, 1)
-	vmaskmovpd	%ymm10, %ymm15, 64(%r10, %r11, 2)
-	je			0f // end
-	vmovapd		%ymm3, 96(%r10)
-	vmovapd		%ymm7, 96(%r10, %r11, 1)
-	vmaskmovpd	%ymm11, %ymm15, 96(%r10, %r11, 2)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_12x4_vs_lib4, .-inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d  <- km
-// r12d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11d  <- km
-// r12d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X12_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x12_vs_lib4, @function
-inner_store_4x12_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x12_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x12_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x12_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,   0(%r10)
-	vmaskmovpd	%ymm1, %ymm15,  32(%r10)
-	vmaskmovpd	%ymm2, %ymm15,  64(%r10)
-	vmaskmovpd	%ymm3, %ymm15,  96(%r10)
-
-	vmaskmovpd	%ymm4, %ymm15, 128(%r10)
-	vmaskmovpd	%ymm5, %ymm15, 160(%r10)
-	vmaskmovpd	%ymm6, %ymm15, 192(%r10)
-	vmaskmovpd	%ymm7, %ymm15, 224(%r10)
-
-	vmaskmovpd	%ymm8, %ymm15, 256(%r10)
-	cmpl		$10, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm9, %ymm15, 288(%r10)
-	cmpl		$11, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm10, %ymm15, 320(%r10)
-	je			0f // end
-	vmaskmovpd	%ymm11, %ymm15, 352(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x12_vs_lib4, .-inner_store_4x12_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
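A hedged sketch of the "store lower" variant used by the dsyrk kernels further down (pm_idx as above): in the top 4x4 panel only the lower triangle is written; the vblendpd against the freshly loaded destination keeps the existing strictly-upper entries of D intact.

// Illustrative only: store a 12x4 block but skip the strictly upper
// triangle of its top 4x4 panel, preserving what D already holds there.
static void store_l_12x4_sketch(double *D, int sdd, const double *acc)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 12; i++)
			if (i >= j)
				D[pm_idx(sdd, i, j)] = acc[i * 4 + j];
}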
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_lib4, @function
-inner_store_l_12x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_12x4_lib4:
-#endif
-#endif
-	
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-	vmovapd		%ymm4, 0(%r10, %r11, 1)
-	vmovapd		%ymm5, 32(%r10, %r11, 1)
-	vmovapd		%ymm6, 64(%r10, %r11, 1)
-	vmovapd		%ymm7, 96(%r10, %r11, 1)
-
-	vmovapd		%ymm8, 0(%r10, %r11, 2)
-	vmovapd		%ymm9, 32(%r10, %r11, 2)
-	vmovapd		%ymm10, 64(%r10, %r11, 2)
-	vmovapd		%ymm11, 96(%r10, %r11, 2)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_lib4, .-inner_store_l_12x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n vs
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_vs_lib4, @function
-inner_store_l_12x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_12x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	movq	%r15, %r14 // D2 <- D1
-	addq	%r11, %r14 // D2 <- D1 + 4*sdd*sizeof(double)
-
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC04(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	cmpl		$2, %r13d
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		%ymm4, 0(%r15)
-	vmaskmovpd	%ymm8, %ymm15,  0(%r14)
-	jl			0f // end
-	cmpl		$3, %r13d
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		%ymm5, 32(%r15)
-	vmaskmovpd	%ymm9, %ymm15, 32(%r14)
-	jl			0f // end
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		%ymm6, 64(%r15)
-	vmaskmovpd	%ymm10, %ymm15, 64(%r14)
-	je			0f // end
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-	vmovapd		%ymm7, 96(%r15)
-	vmaskmovpd	%ymm11, %ymm15, 96(%r14)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_vs_lib4, .-inner_store_l_12x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-//                                1      2              3          4        5          6             7          8        9          10
-// void kernel_dgemm_nt_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
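For orientation, a hedged scalar reference of what this kernel computes (illustrative name, reusing pm_idx from the layout sketch above): D = alpha*A*B^T + beta*C on a 12x4 block, with A a 12 x k panel-major operand and B a 4 x k operand in a single panel.

// Illustrative only: scalar reference for the 12x4 "nt" kernel.
// A: 12 x k, panel-major with stride sda; B: 4 x k in a single panel;
// C, D: 12 x 4, panel-major with strides sdc, sdd.
static void ref_dgemm_nt_12x4(int k, double alpha, const double *A, int sda,
                              const double *B, double beta, const double *C,
                              int sdc, double *D, int sdd)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 12; i++) {
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[pm_idx(sda, i, l)] * B[l * 4 + j];
			D[pm_idx(sdd, i, j)] = alpha * acc + beta * C[pm_idx(sdc, i, j)];
		}
}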
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_12x4_lib4
-	.type kernel_dgemm_nt_12x4_lib4, @function
-kernel_dgemm_nt_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_12x4_lib4
-_kernel_dgemm_nt_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_12x4_lib4
-	.def kernel_dgemm_nt_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_12x4_lib4, .-kernel_dgemm_nt_12x4_lib4
-#endif
-
-
-
-
-
-//                                1      2              3          4          5        6             7          8
-// void kernel_dgemm_nt_4x12_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D);
-
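The 4x12 variant runs the same inner product with the roles of A and B swapped and transposes while scaling (the inner_tran_scale_ab_4x12 call below); a hedged scalar reference, with pm_idx as above:

// Illustrative only: scalar reference for the 4x12 "nt" kernel.
// A: 4 x k in a single panel; B: 12 x k, panel-major with stride sdb;
// C, D: 4 x 12 in a single panel (element (i, j) at index j*4 + i).
static void ref_dgemm_nt_4x12(int k, double alpha, const double *A,
                              const double *B, int sdb, double beta,
                              const double *C, double *D)
{
	for (int j = 0; j < 12; j++)
		for (int i = 0; i < 4; i++) {
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[l * 4 + i] * B[pm_idx(sdb, j, l)];
			D[j * 4 + i] = alpha * acc + beta * C[j * 4 + i];
		}
}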
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x12_lib4
-	.type kernel_dgemm_nt_4x12_lib4, @function
-kernel_dgemm_nt_4x12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x12_lib4
-_kernel_dgemm_nt_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x12_lib4
-	.def kernel_dgemm_nt_4x12_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x12_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x12_lib4, .-kernel_dgemm_nt_4x12_lib4
-#endif
-
-
-
-
-
-//                                   rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dgemm_nt_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_12x4_vs_lib4
-	.type kernel_dgemm_nt_12x4_vs_lib4, @function
-kernel_dgemm_nt_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_12x4_vs_lib4
-_kernel_dgemm_nt_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_12x4_vs_lib4
-	.def kernel_dgemm_nt_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_12x4_vs_lib4, .-kernel_dgemm_nt_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   1      2              3          4          5        6             7          8          9       10
-// void kernel_dgemm_nt_4x12_vs_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x12_vs_lib4
-	.type kernel_dgemm_nt_4x12_vs_lib4, @function
-kernel_dgemm_nt_4x12_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x12_vs_lib4
-_kernel_dgemm_nt_4x12_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x12_vs_lib4
-	.def kernel_dgemm_nt_4x12_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x12_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x12_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // km
-	movq	ARG10, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x12_vs_lib4, .-kernel_dgemm_nt_4x12_vs_lib4
-#endif
-
-
-
-
-
-//                                rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16        rsp+24     rsp+32   rsp+40     rsp+48
-// void kernel_dgemm_nn_12x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
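In the "nn" kernels B is not transposed: it is a k x 4 panel-major operand with stride sdb, and offsetB (assumed to be in 0..3 here) is the row inside B's first panel where the data starts, which is what the edge routine consumes before the main loop. A hedged scalar reference, with pm_idx as above:

// Illustrative only: scalar reference for the 12x4 "nn" kernel.
// A: 12 x k panel-major (stride sda); B: k x 4 panel-major (stride sdb),
// starting offsetB rows into its first panel; C, D: 12 x 4 panel-major.
static void ref_dgemm_nn_12x4(int k, double alpha, const double *A, int sda,
                              int offsetB, const double *B, int sdb,
                              double beta, const double *C, int sdc,
                              double *D, int sdd)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 12; i++) {
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[pm_idx(sda, i, l)] * B[pm_idx(sdb, offsetB + l, j)];
			D[pm_idx(sdd, i, j)] = alpha * acc + beta * C[pm_idx(sdc, i, j)];
		}
}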
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_12x4_lib4
-	.type kernel_dgemm_nn_12x4_lib4, @function
-kernel_dgemm_nn_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_12x4_lib4
-_kernel_dgemm_nn_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_12x4_lib4
-	.def kernel_dgemm_nn_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // C
-	movq	ARG10, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_12x4_lib4, .-kernel_dgemm_nn_12x4_lib4
-#endif
-
-
-
-
-
-//                                rdi    rsi            rdx        rcx          r8         r9       rsp+8         rsp+16     rsp+24
-// void kernel_dgemm_nn_4x12_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x12_lib4
-	.type kernel_dgemm_nn_4x12_lib4, @function
-kernel_dgemm_nn_4x12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x12_lib4
-_kernel_dgemm_nn_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x12_lib4
-	.def kernel_dgemm_nn_4x12_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x12_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x12_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x12_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x12_lib4, .-kernel_dgemm_nn_4x12_lib4
-#endif
-
-
-
-
-
-//                                   rdi     rsi            rdx        rcx      r8         r9       rsp+8         rsp+16     rsp+24   rsp+32     rsp+40   rsp+48  rsp+56
-// void kernel_dgemm_nn_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_12x4_vs_lib4
-	.type kernel_dgemm_nn_12x4_vs_lib4, @function
-kernel_dgemm_nn_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_12x4_vs_lib4
-_kernel_dgemm_nn_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_12x4_vs_lib4
-	.def kernel_dgemm_nn_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // store address D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km 
-	movq	ARG13, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_12x4_vs_lib4, .-kernel_dgemm_nn_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dsyrk_nt_l_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_12x4_lib4
-	.type kernel_dsyrk_nt_l_12x4_lib4, @function
-kernel_dsyrk_nt_l_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_12x4_lib4
-_kernel_dsyrk_nt_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_12x4_lib4
-	.def kernel_dsyrk_nt_l_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_12x4_lib4, .-kernel_dsyrk_nt_l_12x4_lib4
-#endif
-
-
-
-
-
-//                                     rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dsyrk_nt_l_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_12x4_vs_lib4
-	.type kernel_dsyrk_nt_l_12x4_vs_lib4, @function
-kernel_dsyrk_nt_l_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_12x4_vs_lib4
-_kernel_dsyrk_nt_l_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_12x4_vs_lib4
-	.def kernel_dsyrk_nt_l_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_12x4_vs_lib4, .-kernel_dsyrk_nt_l_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16     rsp+24
-// void kernel_dtrmm_nn_rl_12x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_12x4_lib4
-	.type kernel_dtrmm_nn_rl_12x4_lib4, @function
-kernel_dtrmm_nn_rl_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_12x4_lib4
-_kernel_dtrmm_nn_rl_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_12x4_lib4
-	.def kernel_dtrmm_nn_rl_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_12x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_12x4_lib4, .-kernel_dtrmm_nn_rl_12x4_lib4
-#endif
-
-
-
-
-
-//                                      1      2              3          4        5            6          7        8          9        10      11
-// void kernel_dtrmm_nn_rl_12x4_vs_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_12x4_vs_lib4
-	.type kernel_dtrmm_nn_rl_12x4_vs_lib4, @function
-kernel_dtrmm_nn_rl_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_12x4_vs_lib4
-_kernel_dtrmm_nn_rl_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_12x4_vs_lib4
-	.def kernel_dtrmm_nn_rl_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_12x4_vs_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // km
-	movq	ARG11, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_12x4_vs_lib4, .-kernel_dtrmm_nn_rl_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dtrmm_nt_ru_12x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_12x4_lib4
-	.type kernel_dtrmm_nt_ru_12x4_lib4, @function
-kernel_dtrmm_nt_ru_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_12x4_lib4
-_kernel_dtrmm_nt_ru_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_12x4_lib4
-	.def kernel_dtrmm_nt_ru_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-// call inner blend
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_12x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_12x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10 // A
-	movq	ARG4, %r11 // sda
-	sall	$5, %r11d // 4*sda*sizeof(double)
-	movq	ARG5, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_12x4_lib4, .-kernel_dtrmm_nt_ru_12x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dtrmm_nt_ru_12x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_12x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_12x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_12x4_vs_lib4
-_kernel_dtrmm_nt_ru_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_12x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-// call inner blend
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_12x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_12x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10 // A
-	movq	ARG4, %r11 // sda
-	sall	$5, %r11d // 4*sda*sizeof(double)
-	movq	ARG5, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_12x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_12x4_vs_lib4, .-kernel_dtrmm_nt_ru_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dpotrf_nt_l_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_12x4_lib4
-	.type kernel_dpotrf_nt_l_12x4_lib4, @function
-kernel_dpotrf_nt_l_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_12x4_lib4
-_kernel_dpotrf_nt_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_12x4_lib4
-	.def kernel_dpotrf_nt_l_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
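-	// (the fixed-size kernel reuses the variable-size factorization edge with kn forced to 4)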
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_12x4_lib4, .-kernel_dpotrf_nt_l_12x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24              rsp+32  rsp+40 
-// void kernel_dpotrf_nt_l_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_12x4_vs_lib4
-	.type kernel_dpotrf_nt_l_12x4_vs_lib4, @function
-kernel_dpotrf_nt_l_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_12x4_vs_lib4
-_kernel_dpotrf_nt_l_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_12x4_vs_lib4
-	.def kernel_dpotrf_nt_l_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_12x4_vs_lib4, .-kernel_dpotrf_nt_l_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                         rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56
-// void kernel_dsyrk_dpotrf_nt_l_12x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_12x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_12x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_12x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_12x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
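-	// (fused dsyrk+dpotrf: accumulate Ap*Bp^T over kp columns here, subtract Am*Bm^T over km columns below, then factorize the result)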
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG6, %r11 // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG8, %r13 // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_12x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_12x4_lib4
-#endif
-
-
-
-
-
-//                                            rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56              rsp+64  rsp+72
-// void kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG6, %r11 // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG8, %r13 // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                          rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48 
-// void kernel_dtrsm_nt_rl_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_12x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_12x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_12x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_12x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_12x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                          1      2          3          4        5          6          7          8        9                   10      11
-// void kernel_dtrsm_nt_rl_inv_4x12_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x12_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x12_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x12_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x12_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x12_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // B
-	movq	ARG4, %r12 // sdb
-	sall	$5, %r12d // 32*sdb
-	movq	ARG2, %r13 // A
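-	// (the 4x12 kernel feeds B and A to the 12x4 inner kernel in swapped order; the tran_scale step below transposes the 12x4 result into the 4x12 layout)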
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x12_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG9, %r12  // inv_diag_E 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X12_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x12_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x12_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG6, %r10 // store address D
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x12_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x12_vs_lib4
-#endif
-
-
-
-
-
-//                                                1       2           3         4           5       6           7         8           9          10       11         12       13         14                  15      16
-// void kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG6, %r11 // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG8, %r13 // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                                1       2           3           4         5       6           7           8         9          10         11         12       13                 14       15
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4(int kp, double *Ap, double *Bp, int sdbp, int km, double *Am, double *Bm, int sdbm, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG3, %r11  // Bp
-	movq	ARG4, %r12 // sdbp
-	sall	$5, %r12d   // 32*sdbp
-	movq	ARG2, %r13  // Ap
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG7, %r11 // Bm
-	movq	ARG8, %r12 // sdbm
-	sall	$5, %r12d // 32*sdbm
-	movq	ARG6, %r13 // Am
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x12_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG11, %r10  // E 
-	movq	ARG12, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG13, %r12  // inv_diag_E 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X12_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x12_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x12_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // store address D
-	movq	ARG14, %r11 // km 
-	movq	ARG15, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x12_vs_lib4
-#endif
-
-
-
-
-
-//                                       rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32  
-// void kernel_dtrsm_nt_rl_inv_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_12x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_12x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_12x4_lib4
-_kernel_dtrsm_nt_rl_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_12x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_12x4_lib4, .-kernel_dtrsm_nt_rl_inv_12x4_lib4
-#endif
-
-
-
-
-
-//                                       1      2          3          4        5          6          7          8        9
-// void kernel_dtrsm_nt_rl_inv_4x12_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x12_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x12_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x12_lib4
-_kernel_dtrsm_nt_rl_inv_4x12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x12_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x12_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG3, %r11
-	movq	ARG4, %r12
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG2, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x12_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG9, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x12_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG6, %r10 // store address D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x12_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x12_lib4, .-kernel_dtrsm_nt_rl_inv_4x12_lib4
-#endif
-
-
-
-
-
-//                                             rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56     rsp+64
-// void kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG6, %r11 // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG8, %r13 // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_12x4_lib4
-#endif
-
-
-
-
-
-//                                       rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dtrsm_nt_rl_one_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_12x4_lib4
-	.type kernel_dtrsm_nt_rl_one_12x4_lib4, @function
-kernel_dtrsm_nt_rl_one_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_12x4_lib4
-_kernel_dtrsm_nt_rl_one_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_12x4_lib4
-	.def kernel_dtrsm_nt_rl_one_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_12x4_lib4, .-kernel_dtrsm_nt_rl_one_12x4_lib4
-#endif
-
-
-
-
-
-//                                          rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32  rsp+40
-// void kernel_dtrsm_nt_rl_one_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_12x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_one_12x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_one_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_12x4_vs_lib4
-_kernel_dtrsm_nt_rl_one_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_12x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_one_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_12x4_vs_lib4, .-kernel_dtrsm_nt_rl_one_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                       rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32 
-// void kernel_dtrsm_nt_ru_inv_12x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_12x4_lib4
-	.type kernel_dtrsm_nt_ru_inv_12x4_lib4, @function
-kernel_dtrsm_nt_ru_inv_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_12x4_lib4
-_kernel_dtrsm_nt_ru_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_12x4_lib4
-	.def kernel_dtrsm_nt_ru_inv_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_12x4_lib4, .-kernel_dtrsm_nt_ru_inv_12x4_lib4
-#endif
-
-
-
-
-
-//                                          rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dtrsm_nt_ru_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_12x4_vs_lib4
-	.type kernel_dtrsm_nt_ru_inv_12x4_vs_lib4, @function
-kernel_dtrsm_nt_ru_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_12x4_vs_lib4
-_kernel_dtrsm_nt_ru_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_12x4_vs_lib4
-	.def kernel_dtrsm_nt_ru_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_12x4_vs_lib4, .-kernel_dtrsm_nt_ru_inv_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                       edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ru_inv_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_12x4_lib4
-	.type kernel_dtrsm_nn_ru_inv_12x4_lib4, @function
-kernel_dtrsm_nn_ru_inv_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_12x4_lib4
-_kernel_dtrsm_nn_ru_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_12x4_lib4
-	.def kernel_dtrsm_nn_ru_inv_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_12x4_lib4, .-kernel_dtrsm_nn_ru_inv_12x4_lib4
-#endif
-
-
-
-
-
-//                                          edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40              rsp+48  rsp+56
-// void kernel_dtrsm_nn_ru_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_12x4_vs_lib4
-	.type kernel_dtrsm_nn_ru_inv_12x4_vs_lib4, @function
-kernel_dtrsm_nn_ru_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_12x4_vs_lib4
-_kernel_dtrsm_nn_ru_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_12x4_vs_lib4
-	.def kernel_dtrsm_nn_ru_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_12x4_vs_lib4, .-kernel_dtrsm_nn_ru_inv_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                       edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ll_one_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_12x4_lib4
-	.type kernel_dtrsm_nn_ll_one_12x4_lib4, @function
-kernel_dtrsm_nn_ll_one_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_12x4_lib4
-_kernel_dtrsm_nn_ll_one_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_12x4_lib4
-	.def kernel_dtrsm_nn_ll_one_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_12x4_lib4, .-kernel_dtrsm_nn_ll_one_12x4_lib4
-#endif
-
-
-
-
-
-//                                          edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48  rsp+56
-// void kernel_dtrsm_nn_ll_one_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_12x4_vs_lib4
-	.type kernel_dtrsm_nn_ll_one_12x4_vs_lib4, @function
-kernel_dtrsm_nn_ll_one_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_12x4_vs_lib4
-_kernel_dtrsm_nn_ll_one_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_12x4_vs_lib4
-	.def kernel_dtrsm_nn_ll_one_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_12x4_vs_lib4, .-kernel_dtrsm_nn_ll_one_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                       edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48
-// void kernel_dtrsm_nn_lu_inv_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_12x4_lib4
-	.type kernel_dtrsm_nn_lu_inv_12x4_lib4, @function
-kernel_dtrsm_nn_lu_inv_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_12x4_lib4
-_kernel_dtrsm_nn_lu_inv_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_12x4_lib4
-	.def kernel_dtrsm_nn_lu_inv_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_12x4_lib4, .-kernel_dtrsm_nn_lu_inv_12x4_lib4
-#endif
-
-
-
-
-
-//                                          edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48               rsp+56  rsp+64
-// void kernel_dtrsm_nn_lu_inv_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_12x4_vs_lib4
-	.type kernel_dtrsm_nn_lu_inv_12x4_vs_lib4, @function
-kernel_dtrsm_nn_lu_inv_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_12x4_vs_lib4
-_kernel_dtrsm_nn_lu_inv_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_12x4_vs_lib4
-	.def kernel_dtrsm_nn_lu_inv_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG13, %r13  // km 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_12x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG13, %r12  // km 
-	movq	ARG14, %r13  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_12x4_vs_lib4, .-kernel_dtrsm_nn_lu_inv_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32
-// void kernel_dgetrf_nn_l_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_12x4_lib4
-	.type kernel_dgetrf_nn_l_12x4_lib4, @function
-kernel_dgetrf_nn_l_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_12x4_lib4
-_kernel_dgetrf_nn_l_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_12x4_lib4
-	.def kernel_dgetrf_nn_l_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_12x4_lib4, .-kernel_dgetrf_nn_l_12x4_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32              rsp+40  rsp+48
-// void kernel_dgetrf_nn_l_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_12x4_vs_lib4
-	.type kernel_dgetrf_nn_l_12x4_vs_lib4, @function
-kernel_dgetrf_nn_l_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_12x4_vs_lib4
-_kernel_dgetrf_nn_l_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_12x4_vs_lib4
-	.def kernel_dgetrf_nn_l_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12  // km
-	movq	ARG12, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_12x4_vs_lib4, .-kernel_dgetrf_nn_l_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32
-// void kernel_dgetrf_nn_m_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_m_12x4_lib4
-	.type kernel_dgetrf_nn_m_12x4_lib4, @function
-kernel_dgetrf_nn_m_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_m_12x4_lib4
-_kernel_dgetrf_nn_m_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_m_12x4_lib4
-	.def kernel_dgetrf_nn_m_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_m_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG8, %r10 // D
-	subq	$128, %r10 // E
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_M_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_m_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_m_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_m_12x4_lib4, .-kernel_dgetrf_nn_m_12x4_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32              rsp+40  rsp+48
-// void kernel_dgetrf_nn_m_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_m_12x4_vs_lib4
-	.type kernel_dgetrf_nn_m_12x4_vs_lib4, @function
-kernel_dgetrf_nn_m_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_m_12x4_vs_lib4
-_kernel_dgetrf_nn_m_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_m_12x4_vs_lib4
-	.def kernel_dgetrf_nn_m_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_m_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG8, %r10 // D
-	subq	$128, %r10 // E
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_M_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_m_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_m_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12  // km
-	movq	ARG12, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_m_12x4_vs_lib4, .-kernel_dgetrf_nn_m_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32
-// void kernel_dgetrf_nn_r_12x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_r_12x4_lib4
-	.type kernel_dgetrf_nn_r_12x4_lib4, @function
-kernel_dgetrf_nn_r_12x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_r_12x4_lib4
-_kernel_dgetrf_nn_r_12x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_r_12x4_lib4
-	.def kernel_dgetrf_nn_r_12x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_r_12x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG8, %r10 // D
-	subq	$256, %r10 // E
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_R_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_r_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_r_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_r_12x4_lib4, .-kernel_dgetrf_nn_r_12x4_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32              rsp+40  rsp+48
-// void kernel_dgetrf_nn_r_12x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_r_12x4_vs_lib4
-	.type kernel_dgetrf_nn_r_12x4_vs_lib4, @function
-kernel_dgetrf_nn_r_12x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_r_12x4_vs_lib4
-_kernel_dgetrf_nn_r_12x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_r_12x4_vs_lib4
-	.def kernel_dgetrf_nn_r_12x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_r_12x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_12x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_12x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG8, %r10 // D
-	subq	$256, %r10 // E
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_R_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_r_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_r_12x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12  // km
-	movq	ARG12, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_12X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_12x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_12x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_r_12x4_vs_lib4, .-kernel_dgetrf_nn_r_12x4_vs_lib4
-#endif
-
-
-
-
-
-//                               1         2           3         4           5          6           7
-// void kernel_dlarfb12_r_4_lib4(int kmax, double *pV, int sdd, double *pT, double *pD, double *pK, int km);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb12_r_4_lib4
-	.type kernel_dlarfb12_r_4_lib4, @function
-kernel_dlarfb12_r_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb12_r_4_lib4
-_kernel_dlarfb12_r_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb12_r_4_lib4
-	.def kernel_dlarfb12_r_4_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb12_r_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-//	vxorpd	%ymm0, %ymm0, %ymm0
-//	vmovapd	%ymm0, %ymm1
-//	vmovapd	%ymm0, %ymm2
-//	vmovapd	%ymm0, %ymm3
-//	vmovapd	%ymm0, %ymm4
-//	vmovapd	%ymm0, %ymm5
-//	vmovapd	%ymm0, %ymm6
-//	vmovapd	%ymm0, %ymm7
-//	vmovapd	%ymm0, %ymm8
-//	vmovapd	%ymm0, %ymm9
-//	vmovapd	%ymm0, %ymm10
-//	vmovapd	%ymm0, %ymm11
-	
-	movq	ARG1, %r10 // k
-	movq	ARG5, %r11 // D
-	movq	ARG2, %r12 // V
-	movq	ARG3, %r13 // sdd
-	sall	$5, %r13d
-
-	//
-	vmovapd			0(%r11), %ymm12
-	vmovapd			%ymm12, %ymm0
-	//
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vmovapd			%ymm12, %ymm1
-	//
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vmovapd			%ymm12, %ymm2
-	//
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vmovapd			%ymm12, %ymm3
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-	//
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vmovapd			%ymm12, %ymm4
-	//
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vmovapd			%ymm12, %ymm5
-	//
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vmovapd			%ymm12, %ymm6
-	//
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vmovapd			%ymm12, %ymm7
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-	//
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	8(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	16(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	24(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	0(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	8(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	16(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	24(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vmovapd			%ymm12, %ymm8
-	//
-	vmovapd			32(%r11), %ymm12
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	40(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	48(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	56(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	32(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	40(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	48(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	56(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	32(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vmovapd			%ymm12, %ymm9
-	//
-	vmovapd			64(%r11), %ymm12
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	80(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	88(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	64(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	72(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	80(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	88(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	64(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	72(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vmovapd			%ymm12, %ymm10
-	//
-	vmovapd			96(%r11), %ymm12
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	120(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	96(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	104(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	112(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	120(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	96(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	104(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	112(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vmovapd			%ymm12, %ymm11
-	//
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	movq	%r11, %r14
-	movq	%r12, %r11
-	movq	%r13, %r12
-	movq	%r14, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_TRAN_12X4_LIB4
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-	INNER_TRAN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_12x4_lib4
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-	call inner_tran_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_12x4_lib4
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-	callq _inner_tran_12x4_lib4
-#endif
-#endif
-
-	movq	ARG4, %r11 // T
-	movq	$384, %r12 // sdt !!!!!!!!!!!!!!!!!!!!!!!!!
-
-	//
-	vbroadcastsd	376(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-	//
-	vbroadcastsd	368(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm10, %ymm13, %ymm11
-	vbroadcastsd	336(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	//
-	vbroadcastsd	360(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm9, %ymm13, %ymm11
-	vbroadcastsd	328(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm9, %ymm13, %ymm10
-	vbroadcastsd	296(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	//
-	vbroadcastsd	352(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm8, %ymm13, %ymm11
-	vbroadcastsd	320(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm8, %ymm13, %ymm10
-	vbroadcastsd	288(%r11, %r12, 2), %ymm13
-	vfmadd231pd		%ymm8, %ymm13, %ymm9
-	vbroadcastsd	256(%r11, %r12, 2), %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	//
-	vbroadcastsd	376(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm7, %ymm13, %ymm11
-	vbroadcastsd	344(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm7, %ymm13, %ymm10
-	vbroadcastsd	312(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm7, %ymm13, %ymm9
-	vbroadcastsd	280(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm7, %ymm13, %ymm8
-	vbroadcastsd	248(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-	//
-	vbroadcastsd	368(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm6, %ymm13, %ymm11
-	vbroadcastsd	336(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm6, %ymm13, %ymm10
-	vbroadcastsd	304(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm6, %ymm13, %ymm9
-	vbroadcastsd	272(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm6, %ymm13, %ymm8
-	vbroadcastsd	240(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm6, %ymm13, %ymm7
-	vbroadcastsd	208(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	//
-	vbroadcastsd	360(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm11
-	vbroadcastsd	328(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm10
-	vbroadcastsd	296(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm9
-	vbroadcastsd	264(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm8
-	vbroadcastsd	232(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm7
-	vbroadcastsd	200(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm5, %ymm13, %ymm6
-	vbroadcastsd	168(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	//
-	vbroadcastsd	352(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm11
-	vbroadcastsd	320(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm10
-	vbroadcastsd	288(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm9
-	vbroadcastsd	256(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm8
-	vbroadcastsd	224(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm7
-	vbroadcastsd	192(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm6
-	vbroadcastsd	160(%r11, %r12, 1), %ymm13
-	vfmadd231pd		%ymm4, %ymm13, %ymm5
-	vbroadcastsd	128(%r11, %r12, 1), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	//
-	vbroadcastsd	376(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm11
-	vbroadcastsd	344(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm10
-	vbroadcastsd	312(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm9
-	vbroadcastsd	280(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm8
-	vbroadcastsd	248(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm7
-	vbroadcastsd	216(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm6
-	vbroadcastsd	184(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm5
-	vbroadcastsd	152(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm4
-	vbroadcastsd	120(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	//
-	vbroadcastsd	368(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm11
-	vbroadcastsd	336(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm10
-	vbroadcastsd	304(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm9
-	vbroadcastsd	272(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm8
-	vbroadcastsd	240(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm7
-	vbroadcastsd	208(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm6
-	vbroadcastsd	176(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm5
-	vbroadcastsd	144(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm4
-	vbroadcastsd	112(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm3
-	vbroadcastsd	80(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	//
-	vbroadcastsd	360(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm11
-	vbroadcastsd	328(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm10
-	vbroadcastsd	296(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm9
-	vbroadcastsd	264(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm8
-	vbroadcastsd	232(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm7
-	vbroadcastsd	200(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm6
-	vbroadcastsd	168(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm5
-	vbroadcastsd	136(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm4
-	vbroadcastsd	104(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm3
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm2
-	vbroadcastsd	40(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	//
-	vbroadcastsd	352(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm11
-	vbroadcastsd	320(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm10
-	vbroadcastsd	288(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm9
-	vbroadcastsd	256(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm8
-	vbroadcastsd	224(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm7
-	vbroadcastsd	192(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm6
-	vbroadcastsd	160(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm5
-	vbroadcastsd	128(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm4
-	vbroadcastsd	96(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm3
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm2
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm1
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-
-	movq	ARG6, %r10 // K
-	movq	ARG7, %r11 // km
-
-	cmpl	$4, %r11d
-	jge		0f
-
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14
-	vblendvpd	%ymm15, %ymm11, %ymm14, %ymm11
-	vblendvpd	%ymm15, %ymm10, %ymm14, %ymm10
-	vblendvpd	%ymm15, %ymm9, %ymm14, %ymm9
-	vblendvpd	%ymm15, %ymm8, %ymm14, %ymm8
-	vblendvpd	%ymm15, %ymm7, %ymm14, %ymm7
-	vblendvpd	%ymm15, %ymm6, %ymm14, %ymm6
-	vblendvpd	%ymm15, %ymm5, %ymm14, %ymm5
-	vblendvpd	%ymm15, %ymm4, %ymm14, %ymm4
-	vblendvpd	%ymm15, %ymm3, %ymm14, %ymm3
-	vblendvpd	%ymm15, %ymm2, %ymm14, %ymm2
-	vblendvpd	%ymm15, %ymm1, %ymm14, %ymm1
-	vblendvpd	%ymm15, %ymm0, %ymm14, %ymm0
-
-0:
-	vmovapd			%ymm11, 352(%r10)
-	vmovapd			%ymm10, 320(%r10)
-	vmovapd			%ymm9, 288(%r10)
-	vmovapd			%ymm8, 256(%r10)
-	vmovapd			%ymm7, 224(%r10)
-	vmovapd			%ymm6, 192(%r10)
-	vmovapd			%ymm5, 160(%r10)
-	vmovapd			%ymm4, 128(%r10)
-	vmovapd			%ymm3, 96(%r10)
-	vmovapd			%ymm2, 64(%r10)
-	vmovapd			%ymm1, 32(%r10)
-	vmovapd			%ymm0, 0(%r10)
-
-	movq	ARG1, %r10 // n
-	movq	ARG6, %r11 // K
-	movq	ARG2, %r12 // V
-	movq	ARG3, %r13 // sdd
-	sall	$5, %r13d
-	movq	ARG5, %r14 // D
-
-	// load block from C
-	vmovapd	0(%r14), %ymm0
-	vmovapd	32(%r14), %ymm1
-	vmovapd	64(%r14), %ymm2
-	vmovapd	96(%r14), %ymm3
-	vmovapd	128(%r14), %ymm4
-	vmovapd	160(%r14), %ymm5
-	vmovapd	192(%r14), %ymm6
-	vmovapd	224(%r14), %ymm7
-	vmovapd	256(%r14), %ymm8
-	vmovapd	288(%r14), %ymm9
-	vmovapd	320(%r14), %ymm10
-	vmovapd	352(%r14), %ymm11
-
-	// 0
-	vmovapd			0(%r11), %ymm12
-	vaddpd			%ymm12, %ymm0, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 1
-	vmovapd			32(%r11), %ymm12
-	vaddpd			%ymm12, %ymm1, %ymm1
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	136(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	168(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	200(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 2
-	vmovapd			64(%r11), %ymm12
-	vaddpd			%ymm12, %ymm2, %ymm2
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	144(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	176(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	208(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	240(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 3
-	vmovapd			96(%r11), %ymm12
-	vaddpd			%ymm12, %ymm3, %ymm3
-	vbroadcastsd	152(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	184(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	216(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	248(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	280(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 4
-	vmovapd			128(%r11), %ymm12
-	vaddpd			%ymm12, %ymm4, %ymm4
-	vbroadcastsd	160(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	256(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	288(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 5
-	vmovapd			160(%r11), %ymm12
-	vaddpd			%ymm12, %ymm5, %ymm5
-	vbroadcastsd	200(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	232(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	264(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	296(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	328(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 6
-	vmovapd			192(%r11), %ymm12
-	vaddpd			%ymm12, %ymm6, %ymm6
-	vbroadcastsd	240(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-	vbroadcastsd	272(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	304(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	336(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	368(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 7
-	vmovapd			224(%r11), %ymm12
-	vaddpd			%ymm12, %ymm7, %ymm7
-	vbroadcastsd	280(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm8
-	vbroadcastsd	312(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	344(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	376(%r12, %r13, 1), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 8
-	vmovapd			256(%r11), %ymm12
-	vaddpd			%ymm12, %ymm8, %ymm8
-	vbroadcastsd	288(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm9
-	vbroadcastsd	320(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	352(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 9
-	vmovapd			288(%r11), %ymm12
-	vaddpd			%ymm12, %ymm9, %ymm9
-	vbroadcastsd	328(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm10
-	vbroadcastsd	360(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 10
-	vmovapd			320(%r11), %ymm12
-	vaddpd			%ymm12, %ymm10, %ymm10
-	vbroadcastsd	368(%r12, %r13, 2), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm11
-	// 11
-	vmovapd			352(%r11), %ymm12
-	vaddpd			%ymm12, %ymm11, %ymm11
-
-	// store block to C
-	vmovapd	%ymm0, 0(%r14)
-	vmovapd	%ymm1, 32(%r14)
-	vmovapd	%ymm2, 64(%r14)
-	vmovapd	%ymm3, 96(%r14)
-	vmovapd	%ymm4, 128(%r14)
-	vmovapd	%ymm5, 160(%r14)
-	vmovapd	%ymm6, 192(%r14)
-	vmovapd	%ymm7, 224(%r14)
-	vmovapd	%ymm8, 256(%r14)
-	vmovapd	%ymm9, 288(%r14)
-	vmovapd	%ymm10, 320(%r14)
-	vmovapd	%ymm11, 352(%r14)
-
-	subl	$12, %r10d
-	addq	$384, %r12
-	addq	$384, %r14
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_4X12_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_4x12_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_4x12_lib4
-#endif
-#endif
-
-100:
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb12_r_4_lib4, .-kernel_dlarfb12_r_4_lib4
-#endif
-
-
-
-
-
-//                             1         2           3           4           5
-// void kernel_dlarfb4_r_12_lib4(int kmax, double *pV, double *pT, double *pD, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb4_r_12_lib4
-	.type kernel_dlarfb4_r_12_lib4, @function
-kernel_dlarfb4_r_12_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb4_r_12_lib4
-_kernel_dlarfb4_r_12_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb4_r_12_lib4
-	.def kernel_dlarfb4_r_12_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb4_r_12_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-//	vxorpd	%ymm0, %ymm0, %ymm0
-//	vmovapd	%ymm0, %ymm1
-//	vmovapd	%ymm0, %ymm2
-//	vmovapd	%ymm0, %ymm3
-//	vmovapd	%ymm0, %ymm4
-//	vmovapd	%ymm0, %ymm5
-//	vmovapd	%ymm0, %ymm6
-//	vmovapd	%ymm0, %ymm7
-//	vmovapd	%ymm0, %ymm8
-//	vmovapd	%ymm0, %ymm9
-//	vmovapd	%ymm0, %ymm10
-//	vmovapd	%ymm0, %ymm11
-	
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG5, %r12 // sdd
-	sall	$5, %r12d
-	movq	ARG2, %r13 // V
-
-	//
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r12, 1), %ymm4
-	vmovapd			0(%r11, %r12, 2), %ymm8
-	//
-	vmovapd			32(%r11), %ymm1
-	vmovapd			32(%r11, %r12, 1), %ymm5
-	vmovapd			32(%r11, %r12, 2), %ymm9
-	vbroadcastsd	32(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm1, %ymm0
-	vfmadd231pd		%ymm13, %ymm5, %ymm4
-	vfmadd231pd		%ymm13, %ymm9, %ymm8
-	//
-	vmovapd			64(%r11), %ymm2
-	vmovapd			64(%r11, %r12, 1), %ymm6
-	vmovapd			64(%r11, %r12, 2), %ymm10
-	vbroadcastsd	64(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm0
-	vfmadd231pd		%ymm13, %ymm6, %ymm4
-	vfmadd231pd		%ymm13, %ymm10, %ymm8
-	vbroadcastsd	72(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm1
-	vfmadd231pd		%ymm13, %ymm6, %ymm5
-	vfmadd231pd		%ymm13, %ymm10, %ymm9
-	//
-	vmovapd			96(%r11), %ymm3
-	vmovapd			96(%r11, %r12, 1), %ymm7
-	vmovapd			96(%r11, %r12, 2), %ymm11
-	vbroadcastsd	96(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm0
-	vfmadd231pd		%ymm13, %ymm7, %ymm4
-	vfmadd231pd		%ymm13, %ymm11, %ymm8
-	vbroadcastsd	104(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm1
-	vfmadd231pd		%ymm13, %ymm7, %ymm5
-	vfmadd231pd		%ymm13, %ymm11, %ymm9
-	vbroadcastsd	112(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm2
-	vfmadd231pd		%ymm13, %ymm7, %ymm6
-	vfmadd231pd		%ymm13, %ymm11, %ymm10
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_12x4_lib4
-#endif
-#endif
-
-	movq	ARG3, %r10 // T
-
-	//
-	vbroadcastsd	120(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vmulpd			%ymm11, %ymm12, %ymm11
-	//
-	vbroadcastsd	112(%r10), %ymm12
-	vfmadd231pd		%ymm2, %ymm12, %ymm3
-	vfmadd231pd		%ymm6, %ymm12, %ymm7
-	vfmadd231pd		%ymm10, %ymm12, %ymm11
-	vbroadcastsd	80(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vmulpd			%ymm10, %ymm12, %ymm10
-	//
-	vbroadcastsd	104(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm3
-	vfmadd231pd		%ymm5, %ymm12, %ymm7
-	vfmadd231pd		%ymm9, %ymm12, %ymm11
-	vbroadcastsd	72(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm2
-	vfmadd231pd		%ymm5, %ymm12, %ymm6
-	vfmadd231pd		%ymm9, %ymm12, %ymm10
-	vbroadcastsd	40(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vmulpd			%ymm9, %ymm12, %ymm9
-	//
-	vbroadcastsd	96(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm3
-	vfmadd231pd		%ymm4, %ymm12, %ymm7
-	vfmadd231pd		%ymm8, %ymm12, %ymm11
-	vbroadcastsd	64(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm2
-	vfmadd231pd		%ymm4, %ymm12, %ymm6
-	vfmadd231pd		%ymm8, %ymm12, %ymm10
-	vbroadcastsd	32(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm1
-	vfmadd231pd		%ymm4, %ymm12, %ymm5
-	vfmadd231pd		%ymm8, %ymm12, %ymm9
-	vbroadcastsd	0(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-	vmulpd			%ymm8, %ymm12, %ymm8
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // V
-	movq	ARG4, %r12 // D
-	movq	ARG5, %r13 // sdd
-	sall	$5, %r13d
-
-	//
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vmovapd			0(%r12, %r13, 2), %ymm15
-	vaddpd			%ymm12, %ymm0, %ymm12
-	vaddpd			%ymm14, %ymm4, %ymm14
-	vaddpd			%ymm15, %ymm8, %ymm15
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-	vmovapd			%ymm15, 0(%r12, %r13, 2)
-	//
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vmovapd			32(%r12, %r13, 2), %ymm15
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vaddpd			%ymm12, %ymm1, %ymm12
-	vaddpd			%ymm14, %ymm5, %ymm14
-	vaddpd			%ymm15, %ymm9, %ymm15
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-	vmovapd			%ymm15, 32(%r12, %r13, 2)
-	//
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vmovapd			64(%r12, %r13, 2), %ymm15
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vaddpd			%ymm12, %ymm2, %ymm12
-	vaddpd			%ymm14, %ymm6, %ymm14
-	vaddpd			%ymm15, %ymm10, %ymm15
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-	vmovapd			%ymm15, 64(%r12, %r13, 2)
-	//
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vmovapd			96(%r12, %r13, 2), %ymm15
-	vbroadcastsd	96(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vfmadd231pd		%ymm8, %ymm13, %ymm15
-	vbroadcastsd	104(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vfmadd231pd		%ymm9, %ymm13, %ymm15
-	vbroadcastsd	112(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vfmadd231pd		%ymm10, %ymm13, %ymm15
-	vaddpd			%ymm12, %ymm3, %ymm12
-	vaddpd			%ymm14, %ymm7, %ymm14
-	vaddpd			%ymm15, %ymm11, %ymm15
-	vmovapd			%ymm12, 96(%r12)
-	vmovapd			%ymm14, 96(%r12, %r13, 1)
-	vmovapd			%ymm15, 96(%r12, %r13, 2)
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_12X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_12x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_12x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb4_r_12_lib4, .-kernel_dlarfb4_r_12_lib4
-#endif
-
-
-
-
-
-// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-LC04: // { 11.5 10.5 9.5 8.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1075904512
-	.long	0
-	.long	1076035584
-	.long	0
-	.long	1076166656
-	.long	0
-	.long	1076297728
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC05: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index c9bf696..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,9433 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, @function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	vxorpd			%ymm4, %ymm4, %ymm4
-	vmovapd			%ymm4, %ymm5
-	vmovapd			%ymm4, %ymm6
-	vmovapd			%ymm4, %ymm7
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastsd	-32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	-24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	-16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	-8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastsd	-32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	-24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	-16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	-8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-
-	addq	$32, %r11
-	addq	$32, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddpd			%ymm4, %ymm0, %ymm0
-	vaddpd			%ymm5, %ymm1, %ymm1
-	vaddpd			%ymm6, %ymm2, %ymm2
-	vaddpd			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	vxorpd		%ymm4, %ymm4, %ymm4
-	vmovapd		%ymm4, %ymm5
-	vmovapd		%ymm4, %ymm6
-	vmovapd		%ymm4, %ymm7
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r12), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 	32(%r12), %ymm13 // B[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 	32(%r11), %ymm10 // A0[4]
-
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-
-	// unroll 1
-	vmovapd 	64(%r12), %ymm12 // B[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm4
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm5
-	vmovapd 	64(%r11), %ymm8 // A0[8]
-
-	vfmadd231pd	%ymm10, %ymm13, %ymm7
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm6
-
-	// unroll 2
-	vmovapd 	96(%r12), %ymm13 // B[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 	96(%r11), %ymm10 // A0[12]
-
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-	vmovapd 	0(%r12), %ymm12 // B[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm4
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm5
-	vmovapd 	0(%r11), %ymm8 // A0[0]
-
-	vfmadd231pd	%ymm10, %ymm13, %ymm7
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm6
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 	32(%r12), %ymm13 // B[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 	32(%r11), %ymm10 // A0[4]
-
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-
-	// unroll 1
-	vmovapd 	64(%r12), %ymm12 // B[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm4
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm5
-	vmovapd 	64(%r11), %ymm8 // A0[8]
-
-	vfmadd231pd	%ymm10, %ymm13, %ymm7
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm6
-
-	// unroll 2
-	vmovapd 	96(%r12), %ymm13 // B[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 	96(%r11), %ymm10 // A0[12]
-
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd 	$0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-//	vmovapd 	0(%r12), %ymm12 // B[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm4
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128	$0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm5
-//	vmovapd 	0(%r11), %ymm8 // A0[0]
-
-	vfmadd231pd	%ymm10, %ymm13, %ymm7
-	vshufpd 	$0x5, %ymm13, %ymm13, %ymm14
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm6
-
-
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	addq	$32, %r11
-
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	addq	$32, %r12
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vfmadd231pd	%ymm8, %ymm14, %ymm3
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-2: // return
-
-	vaddpd			%ymm4, %ymm0, %ymm0
-	vaddpd			%ymm5, %ymm1, %ymm1
-	vaddpd			%ymm6, %ymm2, %ymm2
-	vaddpd			%ymm7, %ymm3, %ymm3
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
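The _sub kernel below is the same computation as the _add kernel sketched above with the accumulation sign flipped: every vfmadd231pd becomes vfnmadd231pd, i.e. the reference update would read D[i][j] -= A[i + 4*l] * B[j + 4*l].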
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_4x4_lib4, @function
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	vxorpd			%ymm4, %ymm4, %ymm4
-	vmovapd			%ymm4, %ymm5
-	vmovapd			%ymm4, %ymm6
-	vmovapd			%ymm4, %ymm7
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastsd	-32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	-24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	-16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	-8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastsd	-32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	-24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	-16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	-8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-
-	addq	$32, %r11
-	addq	$32, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddpd			%ymm4, %ymm0, %ymm0
-	vaddpd			%ymm5, %ymm1, %ymm1
-	vaddpd			%ymm6, %ymm2, %ymm2
-	vaddpd			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_4x4_lib4, .-inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
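For the NN kernel, B is consumed in place from its panel-major storage; r13 carries the byte stride 4*sdb*sizeof(double) from one 4-row panel of B to the next. A hedged reference sketch (illustrative names, not BLASFEO API):

    // D += A * B, with A in a 4-high panel and B panel-major with panel
    // stride sdb: element B(l, j) lives at B[(l/4)*4*sdb + (l%4) + 4*j].
    static void ref_dgemm_add_nn_4x4(int k, const double *A, const double *B,
                                     int sdb, double D[4][4])
    {
        for (int l = 0; l < k; l++) {
            const double *Brow = B + (l / 4) * 4 * sdb + (l % 4);
            for (int j = 0; j < 4; j++)
                for (int i = 0; i < 4; i++)
                    D[i][j] += A[i + 4*l] * Brow[4*j];
        }
    }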
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x4_lib4, @function
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	vxorpd			%ymm4, %ymm4, %ymm4
-	vmovapd			%ymm4, %ymm5
-	vmovapd			%ymm4, %ymm6
-	vmovapd			%ymm4, %ymm7
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddpd			%ymm4, %ymm0, %ymm0
-	vaddpd			%ymm5, %ymm1, %ymm1
-	vaddpd			%ymm6, %ymm2, %ymm2
-	vaddpd			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x4_lib4, .-inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
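As with the NT pair above, the NN _sub kernel below performs the same traversal of A and B as the NN _add kernel but accumulates with vfnmadd231pd, so the reference update becomes D[i][j] -= A[i + 4*l] * Brow[4*j]; the software prefetch and the sdb panel stride are handled identically.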
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nn_4x4_lib4, @function
-inner_kernel_dgemm_sub_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	vxorpd			%ymm4, %ymm4, %ymm4
-	vmovapd			%ymm4, %ymm5
-	vmovapd			%ymm4, %ymm6
-	vmovapd			%ymm4, %ymm7
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-
-	// unroll 0
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm5
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm6
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfnmadd231pd	%ymm13, %ymm12, %ymm3
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddpd			%ymm4, %ymm0, %ymm0
-	vaddpd			%ymm5, %ymm1, %ymm1
-	vaddpd			%ymm6, %ymm2, %ymm2
-	vaddpd			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nn_4x4_lib4, .-inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- B
-// r12   <- C
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- ?
-// r12   <- ?
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-
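Here ymm0..ymm3 hold the four columns of a fixed 4x4 block of A and C is updated in place, one panel column at a time. A rough C equivalent under those assumptions (names illustrative):

    // C(4 x k) += A(4 x 4) * B(4 x k); B and C are 4-high panels, so column j
    // of either starts at offset 4*j doubles.
    static void ref_dgebp_add_nn_4x4(int k, const double *A, const double *B,
                                     double *C)
    {
        for (int j = 0; j < k; j++)
            for (int l = 0; l < 4; l++)
                for (int i = 0; i < 4; i++)
                    C[i + 4*j] += A[i + 4*l] * B[l + 4*j];
    }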
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_4x4_lib4, @function
-inner_kernel_dgebp_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_4x4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r12), %ymm12
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	8(%r11), %ymm13
-	subl	$4, %r10d
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-
-	vmovapd			32(%r12), %ymm12
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	40(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	48(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vbroadcastsd	56(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vmovapd			%ymm12, 32(%r12)
-
-	vmovapd			64(%r12), %ymm12
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	80(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vbroadcastsd	88(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vmovapd			%ymm12, 64(%r12)
-
-	vmovapd			96(%r12), %ymm12
-	vbroadcastsd	96(%r11), %ymm13
-	addq	$128, %r11
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	-24(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	-16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vbroadcastsd	-8(%r11), %ymm13
-	addq	$128, %r12
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vmovapd			%ymm12, -32(%r12)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r12), %ymm12
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	8(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_4x4_lib4, .-inner_kernel_dgebp_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
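This edge peels at most 4 - offB single steps so that the main NN kernel can resume at a panel boundary of B. A simplified sketch of just the peeled arithmetic (the caller-visible pointer updates listed above are left out; names are illustrative):

    // Peel min(k, 4 - offB) steps, reading B from row offB of its first panel;
    // returns the number of iterations left for the aligned main kernel.
    static int ref_edge_dgemm_add_nn_4x4(int k, int offB, const double *A,
                                         const double *B, double D[4][4])
    {
        int kend = (4 - offB < k) ? 4 - offB : k;
        for (int l = 0; l < kend; l++)
            for (int j = 0; j < 4; j++)
                for (int i = 0; i < 4; i++)
                    D[i][j] += A[i + 4*l] * B[offB + l + 4*j];
        return k - kend;
    }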
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x4_lib4, @function
-inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x4_lib4, .-inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
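Because B is upper triangular and stored transposed (NT), only the columns j <= l can pick up a nonzero contribution in the first four steps. A hedged reference of what this edge accumulates (illustrative name):

    // Four NT steps over the leading 4x4 triangle of B, with B(j, l) read at B[j + 4*l].
    static void ref_edge_dtrmm_nt_ru_4x4(const double *A, const double *B,
                                         double D[4][4])
    {
        for (int l = 0; l < 4; l++)
            for (int j = 0; j <= l; j++)
                for (int i = 0; i < 4; i++)
                    D[i][j] += A[i + 4*l] * B[j + 4*l];
    }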
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r10), %ymm8
-	vbroadcastsd	0(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	vmovapd			32(%r10), %ymm8
-	vbroadcastsd	32(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	vmovapd			64(%r10), %ymm8
-	vbroadcastsd	64(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	72(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	vmovapd			96(%r10), %ymm8
-	vbroadcastsd	96(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	104(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	112(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	120(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_lib4, .-inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
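The _vs (variable-size) variant below performs the same triangular edge as the routine above but tests k after every step, so it also handles k < 4 without reading past the ends of A and B.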
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	addq			$32, %r11
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	addq			$32, %r11
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	addq			$32, %r11
-	vbroadcastsd	24(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
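Four branches follow, one per possible offB (the row offset of B inside its first 4-row panel); they all accumulate the same leading lower triangle of B, the later ones just start mid-panel and hop B to its next panel partway through. Illustrative C for the offB == 0 branch only:

    // offB == 0: four NN steps over the leading 4x4 lower triangle of B,
    // where B(l, j) is read at B[l + 4*j] and is nonzero only for j <= l.
    static void ref_edge_dtrmm_nn_rl_4x4(const double *A, const double *B,
                                         double D[4][4])
    {
        for (int l = 0; l < 4; l++)
            for (int j = 0; j <= l; j++)
                for (int i = 0; i < 4; i++)
                    D[i][j] += A[i + 4*l] * B[l + 4*j];
    }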
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_4x4_lib4, @function
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r14d
-	jg		0f
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r14d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	subl			$3, %r10d // k-3
-	addq			$96, %r11 // A+3*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$8, %r12 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r14d
-	jg		2f
-
-	// offB==2
-
-	addq			$16, %r12 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	subl			$2, %r10d // k-2
-	addq			$64, %r11 // A+2*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$16, %r12 // B+bs*sdb*sizeof(double)-2
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	104(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq			$24, %r12 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-3
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	vmovapd			32(%r11), %ymm8
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	vmovapd			64(%r11), %ymm8
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	48(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	56(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A+4*bs*sizeof(double)
-	addq			%r13, %r12 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_4x4_lib4, .-inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
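The _gen variant below repeats the offB branches of the previous routine but re-checks k after every single step, so it is safe for any k >= 0; the per-step arithmetic is unchanged.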
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_4x4_gen_lib4, @function
-inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_4x4_gen_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	cmpl			$0, %r14d
-	jg				0f // offB>0
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-0:
-	cmpl			$1, %r14d
-	jg				1f // offB>1
-
-	// offB==1
-
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-1:
-	cmpl			$2, %r14d
-	jg				2f // offB>2
-
-	// offB==2
-
-	addq			$16, %r12 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f
-
-2:
-	// offB==3
-
-	addq			$24, %r12 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			%r13, %r12
-	subq			$24, %r12 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_4x4_gen_lib4, .-inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for dlauum
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
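The dlauum edge is an NT step over the leading 4x4 blocks in which column l of A is masked down to its first l+1 entries (the vblendpd against a zeroed ymm14) and only the first l+1 entries of B's step are broadcast. A hedged reference under that reading (illustrative name):

    // Four masked NT steps matching the vblendpd masks 0x1, 0x3, 0x7
    // (column 3 of A is used in full).
    static void ref_edge_dlauum_nt_4x4(const double *A, const double *B,
                                       double D[4][4])
    {
        for (int l = 0; l < 4; l++)
            for (int j = 0; j <= l; j++)
                for (int i = 0; i <= l; i++)
                    D[i][j] += A[i + 4*l] * B[j + 4*l];
    }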
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DLAUUM_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dlauum_nt_4x4_lib4, @function
-inner_edge_dlauum_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dlauum_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dlauum_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dlauum_nt_4x4_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r10), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-
-	vmovapd			32(%r10), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	32(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	40(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-
-	vmovapd			64(%r10), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	64(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	72(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	80(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-
-	vmovapd			96(%r10), %ymm8
-	vbroadcastsd	96(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	104(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	112(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	120(%r11), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dlauum_nt_4x4_lib4, .-inner_edge_dlauum_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for dlauum
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
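As with the other _vs edges, the variant below performs the same masked accumulation as the routine above but decrements and tests k after every step, exiting early when fewer than four iterations are available.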
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DLAUUM_NT_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dlauum_nt_4x4_vs_lib4, @function
-inner_edge_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dlauum_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dlauum_nt_4x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl			$0, %r10d
-	jle				0f
-
-	vmovapd			0(%r11), %ymm8
-	subl			$1, %r10d
-	vbroadcastsd	0(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vbroadcastsd	8(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vbroadcastsd	16(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vbroadcastsd	24(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	addq			$32, %r11
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dlauum_nt_4x4_vs_lib4, .-inner_edge_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_4x4_lib4, @function
-inner_blend_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_4x4_lib4:
-#endif
-#endif	
-	
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_4x4_lib4, .-inner_blend_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, @function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
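For orientation while reviewing the removal: a minimal C sketch of the arithmetic inner_scale_ab_4x4_lib4 performs, assuming the flat 4x4 column-major tile layout described in its header comment (the function name and signature are illustrative, not BLASFEO API):

// Reference sketch (not BLASFEO source): D = alpha*acc + beta*C on a 4x4 tile
// stored column-major with a stride of 4 doubles; the load of C is skipped
// when beta == 0.0, mirroring the vucomisd/je short-circuit in the assembly.
static void scale_ab_4x4_ref(const double *alpha, const double *beta,
                             const double *C, double acc[16])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++)
		{
			double d = alpha[0] * acc[j*4 + i];
			if (beta[0] != 0.0)
				d += beta[0] * C[j*4 + i];
			acc[j*4 + i] = d;
		}
}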
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_gen_lib4, @function
-inner_scale_ab_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_gen_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vmovapd		32(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd		96(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-
-	jmp		3f
-
-0:
-
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_gen_lib4, .-inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_4x4_lib4, @function
-inner_scale_a0_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_4x4_lib4, .-inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_4x4_lib4, @function
-inner_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_11_4x4_lib4:
-#endif
-#endif	
-	
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_4x4_lib4, .-inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_lib4, @function
-inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_lib4, .-inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_gen_lib4, @function
-inner_blend_scale_ab_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_gen_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vmovapd		32(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vmovapd		96(%r13), %ymm12
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-
-	jmp		3f
-
-0:
-
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		32(%r13), %ymm13
-	vmovapd		32(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd		96(%r13), %ymm13
-	vmovapd		96(%r15), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm3
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_gen_lib4, .-inner_blend_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_4x4_lib4, @function
-inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_4x4_lib4:
-#endif
-#endif	
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_4x4_lib4, .-inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_vs_lib4, @function
-inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_4x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd	.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd	LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd			%xmm0, %xmm0, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-2:
-	vmovsd			%xmm13, 0(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	cmpl			$2, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x00, %ymm0, %ymm0, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-4:
-	vmovsd			%xmm13, 8(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	cmpl			$3, %r11d
-	jl				0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-6:
-	vmovsd			%xmm13, 16(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	cmpl			$4, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x11, %ymm2, %ymm2, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-//	vextractf128	$0x1, %ymm3, %xmm13
-//	vpermilpd		$0x3, %xmm13, %xmm13
-	vpermpd			$0xff, %ymm3, %ymm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-8:
-	vmovsd			%xmm13, 24(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-	jmp				0f
-
-1:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				8b
-
-0:
-	#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_vs_lib4, .-inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
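A rough C equivalent of the removed Cholesky edge, under the same illustrative 4x4 column-major layout assumption (not BLASFEO source); note how a non-positive pivot produces a zero scaling factor, matching the jbe/vxorpd fallback paths above:

#include <math.h>

// Reference sketch: unblocked lower Cholesky of a 4x4 tile d[], factoring only
// the first kn columns and storing the reciprocal of each accepted pivot in
// inv_diag[]; a non-positive pivot zeroes the column instead of faulting.
static void potrf_4x4_vs_ref(double d[16], double inv_diag[4], int kn)
{
	for (int j = 0; j < 4 && j < kn; j++)
	{
		double pivot = d[j*4 + j];
		double inv = pivot > 0.0 ? 1.0 / sqrt(pivot) : 0.0;
		inv_diag[j] = inv;
		for (int i = 0; i < 4; i++)
			d[j*4 + i] *= inv;              // scale column j
		for (int k = j + 1; k < 4; k++)     // update the trailing columns
			for (int i = 0; i < 4; i++)
				d[k*4 + i] -= d[j*4 + i] * d[j*4 + k];
	}
}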
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
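The same right/lower/transposed substitution expressed as a plain C sketch (illustrative names and layout, not the library's API): solve X * L^T = B with the four columns of B/X held column-major in d[], L stored column-major in E with a stride of 4 doubles, and 1/L[j][j] precomputed in inv_diag[].

// Reference sketch (not BLASFEO source) of inner_edge_dtrsm_rlt_inv_4x4_lib4.
static void trsm_rlt_inv_4x4_ref(const double *E, const double *inv_diag,
                                 double d[16])
{
	for (int j = 0; j < 4; j++)
	{
		for (int i = 0; i < 4; i++)
			d[j*4 + i] *= inv_diag[j];          // scale column j by 1/L[j][j]
		for (int k = j + 1; k < 4; k++)         // eliminate it from later columns
			for (int i = 0; i < 4; i++)
				d[k*4 + i] -= d[j*4 + i] * E[j*4 + k];
	}
}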
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	cmpl			$2, %r12d
-	vmulpd			%ymm0, %ymm13, %ymm0
-
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	cmpl			$3, %r12d
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	cmpl			$4, %r12d
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_4x4_lib4, @function
-inner_edge_dtrsm_rlt_one_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_4x4_lib4, .-inner_edge_dtrsm_rlt_one_4x4_lib4
-#endif
-#endif
-
-
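For the unit-diagonal variant just above, a C sketch under the same illustrative layout assumptions (not BLASFEO source): the only difference from the inv version is that columns are never scaled, since the diagonal of L is implicitly 1.

// Reference sketch: X * L^T = B with unit lower triangular L (stride 4).
static void trsm_rlt_one_4x4_ref(const double *E, double d[16])
{
	for (int j = 0; j < 4; j++)
		for (int k = j + 1; k < 4; k++)
			for (int i = 0; i < 4; i++)
				d[k*4 + i] -= d[j*4 + i] * E[j*4 + k];
}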
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$2, %r11d
-
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	cmpl			$3, %r11d
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	cmpl			$4, %r11d
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_4x4_lib4, @function
-inner_edge_dtrsm_rut_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_4x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_4x4_lib4, .-inner_edge_dtrsm_rut_inv_4x4_lib4
-#endif
-#endif
-
-
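A C sketch of the removed right/upper/transposed solve, with illustrative names and the same assumed 4x4 column-major layout (not BLASFEO source): X * U^T = B is solved from the last column backwards.

// Reference sketch of inner_edge_dtrsm_rut_inv_4x4_lib4: E holds the upper
// triangular U column-major with a stride of 4 doubles, inv_diag[] its
// reciprocal diagonal.
static void trsm_rut_inv_4x4_ref(const double *E, const double *inv_diag,
                                 double d[16])
{
	for (int j = 3; j >= 0; j--)
	{
		for (int i = 0; i < 4; i++)
			d[j*4 + i] *= inv_diag[j];          // scale column j by 1/U[j][j]
		for (int k = 0; k < j; k++)             // eliminate it from earlier columns
			for (int i = 0; i < 4; i++)
				d[k*4 + i] -= d[j*4 + i] * E[j*4 + k];
	}
}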
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$3, %r12d
-	jle				0f
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-
-0:
-	cmpl			$2, %r12d
-	jle				1f
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-
-1:
-	cmpl			$1, %r12d
-	jle				2f
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_run_inv_4x4_lib4, @function
-inner_edge_dtrsm_run_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_run_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_run_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_run_inv_4x4_lib4:
-#endif
-#endif
-
-	// first column
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-	// second column
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm1
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-
-	// third column
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm2
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm2
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-
-	// fourth column
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm3
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm3
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm3
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_run_inv_4x4_lib4, .-inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
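The non-transposed right/upper case in plain C, again as an illustrative sketch rather than BLASFEO source: X * U = B is solved column by column from the first one, exactly the "first column ... fourth column" sequence in the assembly above.

// Reference sketch of inner_edge_dtrsm_run_inv_4x4_lib4 (U column-major,
// stride 4 doubles, reciprocal diagonal in inv_diag[]).
static void trsm_run_inv_4x4_ref(const double *E, const double *inv_diag,
                                 double d[16])
{
	for (int j = 0; j < 4; j++)
	{
		for (int k = 0; k < j; k++)             // subtract already solved columns
			for (int i = 0; i < 4; i++)
				d[j*4 + i] -= d[k*4 + i] * E[j*4 + k];
		for (int i = 0; i < 4; i++)
			d[j*4 + i] *= inv_diag[j];          // then scale by 1/U[j][j]
	}
}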
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = lower
-// tran = normal
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lln_one_4x4_lib4, @function
-inner_edge_dtrsm_lln_one_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lln_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lln_one_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lln_one_4x4_lib4:
-#endif
-#endif
-
-	vxorpd		%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r10), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x00, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-
-	vmovapd			32(%r10), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vpermpd			$0x55, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-
-	vmovapd			64(%r10), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lln_one_4x4_lib4, .-inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
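For the left/lower/normal unit-diagonal edge, a C sketch of the forward substitution it performs on each of the four right-hand-side columns (illustrative, not BLASFEO source); the vblendpd against a zero register above is what masks out the unit diagonal and the upper part of each E column.

// Reference sketch: L * X = B with unit lower triangular L (stride 4 doubles).
static void trsm_lln_one_4x4_ref(const double *E, double d[16])
{
	for (int j = 0; j < 4; j++)                  // each right-hand-side column
		for (int k = 0; k < 3; k++)              // each already solved row k
			for (int i = k + 1; i < 4; i++)      // update the rows below it
				d[j*4 + i] -= E[k*4 + i] * d[j*4 + k];
}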
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_4x4_lib4, @function
-inner_edge_dtrsm_lun_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r11), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vbroadcastsd	0(%r11), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_4x4_lib4, .-inner_edge_dtrsm_lun_inv_4x4_lib4
-#endif
-#endif
-
-
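The left/upper/normal solve above is backward substitution per right-hand-side column; a C sketch under the same illustrative layout assumptions (not BLASFEO source):

// Reference sketch of inner_edge_dtrsm_lun_inv_4x4_lib4: U * X = B, with U
// column-major (stride 4 doubles) and its reciprocal diagonal in inv_diag[].
static void trsm_lun_inv_4x4_ref(const double *E, const double *inv_diag,
                                 double d[16])
{
	for (int j = 0; j < 4; j++)                  // each right-hand-side column
		for (int k = 3; k >= 0; k--)             // rows from the bottom up
		{
			d[j*4 + k] *= inv_diag[k];           // x_k = rhs_k / U[k][k]
			for (int i = 0; i < k; i++)          // eliminate x_k from rows above
				d[j*4 + i] -= E[k*4 + i] * d[j*4 + k];
		}
}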
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// r12  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl	$3, %r12d
-	jle		0f
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0xf, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-0:
-	cmpl	$2, %r12d
-	jle		1f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r11), %ymm12
-
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm14
-	vpermilpd		$0x0, %ymm14, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-1:
-	cmpl	$1, %r12d
-	jle		2f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r11), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_4x4_vs_lib4, .-inner_edge_dtrsm_lun_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_4x4_lib4, @function
-inner_edge_dgetrf_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_4x4_lib4:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm0, %ymm12, %ymm12
-	vmovapd			%ymm0, %ymm12
-	vdivsd			%xmm0, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r10)
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vblendpd		$0x1, %ymm12, %ymm0, %ymm0
-
-	// second column
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vblendpd		$0x2, %ymm1, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r10)
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vblendpd		$0x3, %ymm12, %ymm1, %ymm1
-
-	// third column
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vblendpd		$0x2, %ymm2, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vblendpd		$0x4, %ymm2, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r10)
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vblendpd		$0x7, %ymm12, %ymm2, %ymm2
-
-	// fourth column
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vblendpd		$0x2, %ymm3, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vblendpd		$0x4, %ymm3, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vblendpd		$0x8, %ymm3, %ymm12, %ymm12
-	
-	vpermpd			$0xff, %ymm3, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r10)
-//	vmulpd			%ymm3, %ymm13, %ymm3
-	vblendpd		$0x7, %ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_4x4_lib4, .-inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
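A C sketch of the removed LU edge (illustrative names, not BLASFEO source): right-looking LU without pivoting on a 4x4 column-major tile, overwriting d[] with the unit lower factor below the diagonal and U on and above it, and storing the reciprocal pivots that the assembly writes to inv_diag_E.

// Reference sketch of inner_edge_dgetrf_4x4_lib4.
static void getrf_4x4_ref(double d[16], double inv_diag[4])
{
	for (int j = 0; j < 4; j++)
	{
		// eliminate column j with the already factored columns
		for (int k = 0; k < j; k++)
			for (int i = k + 1; i < 4; i++)
				d[j*4 + i] -= d[k*4 + i] * d[j*4 + k];
		// invert the pivot and scale the subdiagonal part of column j
		inv_diag[j] = 1.0 / d[j*4 + j];
		for (int i = j + 1; i < 4; i++)
			d[j*4 + i] *= inv_diag[j];
	}
}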
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, @function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_vs_lib4, @function
-inner_store_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm1, %ymm15, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmaskmovpd	%ymm3, %ymm15, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_vs_lib4, .-inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
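The masked store above amounts to writing only the top-left km x kn corner of the tile; a C sketch of that effect, assuming the usual column-major 4x4 tile layout (illustrative, not BLASFEO source):

// Reference sketch of inner_store_4x4_vs_lib4: partial store of the first km
// rows and kn columns of the accumulator into the destination panel D.
static void store_4x4_vs_ref(double *D, const double acc[16], int km, int kn)
{
	for (int j = 0; j < 4 && j < kn; j++)
		for (int i = 0; i < 4 && i < km; i++)
			D[j*4 + i] = acc[j*4 + i];
}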
-
-
-
-// common inner routine with file scope
-//
-// store n lower triangular
-//
-// input arguments:
-// r10   <- D
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_lib4, @function
-inner_store_l_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_lib4, .-inner_store_l_4x4_lib4
-#endif
-#endif
-
-
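The lower-triangular store reads each destination column back and blends it with the accumulator so that entries strictly above the diagonal are left untouched; in C terms (illustrative sketch, not BLASFEO source):

// Reference sketch of inner_store_l_4x4_lib4: write only the lower triangle
// of the 4x4 tile, keeping whatever D already holds above the diagonal.
static void store_l_4x4_ref(double *D, const double acc[16])
{
	for (int j = 0; j < 4; j++)
		for (int i = j; i < 4; i++)
			D[j*4 + i] = acc[j*4 + i];
}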
-
-
-
-// common inner routine with file scope
-//
-// store n vs lower triangular
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_vs_lib4, @function
-inner_store_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmaskmovpd	%ymm1, %ymm15, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmaskmovpd	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmaskmovpd	%ymm3, %ymm15, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_vs_lib4, .-inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_gen_lib4, @function
-inner_store_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm12, %ymm15
-	vandpd		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm15, 64(%r11)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm15, 96(%r11)
-
-	jmp		4f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-3:
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_gen_lib4, .-inner_store_4x4_gen_lib4
-#endif
-#endif
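
A short aside on the generalized store that ends here (illustrative, not from the diff): the .LC02 constant it loads is defined elsewhere in this file, and I am assuming it holds per-row index values, so that a panel row i is written iff m0 <= i < m1, while the column window is shifted by n0 and clamped to min(n1, 4). In scalar form:

    /* Hedged reading of the row/column windowing in the _gen store routines;
     * names follow the register comments above (m0, m1, n0, n1). */
    static void gen_store_window(int m0, int m1, int n0, int n1,
                                 int row_mask[4], int *ncols)
    {
        for (int i = 0; i < 4; i++)
            row_mask[i] = (i >= m0) && (i < m1);  /* sign-bit mask for vmaskmovpd */
        *ncols = (n1 < 4 ? n1 : 4) - n0;          /* columns left after the shift */
    }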
-
-
-
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_gen_lib4, @function
-inner_store_l_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm12, %ymm15
-	vandpd		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm2, %ymm15, 64(%r11)
-	je			3f // end
-	vblendpd	$0x4, %ymm14, %ymm15, %ymm15
-	vmaskmovpd	%ymm3, %ymm15, 96(%r11)
-
-	jmp		3f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vperm2f128	$0x01, %ymm0, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm0, %ymm0
-
-	vperm2f128	$0x01, %ymm1, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm1, %ymm1
-
-	vperm2f128	$0x01, %ymm2, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm2, %ymm2
-
-	vperm2f128	$0x01, %ymm3, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm3, %ymm3
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm15, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm14
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm14, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x2, %ymm14, %ymm13, %ymm13
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 1)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_gen_lib4, .-inner_store_l_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.type kernel_dgemm_nt_4x4_lib4, @function
-kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_lib4
-_kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.def kernel_dgemm_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4, .-kernel_dgemm_nt_4x4_lib4
-#endif
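
For orientation only (again not in the removed file): the kernel above produces a full 4x4 block of D = alpha * A * B^T + beta * C. The packed layout in this reference, with A and B as 4-row panels and element (i, l) at offset i + 4*l, is my assumption based on the 32-byte strides used throughout this file.

    /* Hedged reference of what kernel_dgemm_nt_4x4_lib4 computes. */
    static void dgemm_nt_4x4_ref(int k, double alpha, const double *A,
                                 const double *B, double beta,
                                 const double *C, double *D)
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 4; i++) {
                double acc = 0.0;
                for (int l = 0; l < k; l++)
                    acc += A[i + 4 * l] * B[j + 4 * l];  /* B used transposed */
                D[i + 4 * j] = alpha * acc + beta * C[i + 4 * j];
            }
    }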
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dgemm_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.type kernel_dgemm_nt_4x4_vs_lib4, @function
-kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_vs_lib4
-_kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.def kernel_dgemm_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_vs_lib4, .-kernel_dgemm_nt_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx        r8            r9           rsp+8      rsp+16   rsp+24       rsp+32     rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_dgemm_nt_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.type kernel_dgemm_nt_4x4_gen_lib4, @function
-kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_gen_lib4
-_kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.def kernel_dgemm_nt_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_gen_lib4, .-kernel_dgemm_nt_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx          r8         r9       rsp+8         rsp+16     rsp+24
-// void kernel_dgemm_nn_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.type kernel_dgemm_nn_4x4_lib4, @function
-kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_lib4
-_kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.def kernel_dgemm_nn_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_lib4, .-kernel_dgemm_nn_4x4_lib4
-#endif
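
A small note on the recurring sall $5 idiom in these kernels (the sdb stride above, and the sdc/sdd strides in the _gen kernels): the shift simply evaluates the product written in its comment, 4*sd?*sizeof(double), i.e. a multiply by 32, which as far as I can tell is the byte offset between consecutive 4-row panels of the packed matrix.

    #include <stddef.h>

    /* What the 'sall $5' lines compute, in C (sdb/sdc/sdd passed as 'sd'). */
    static size_t panel_stride_bytes(int sd)
    {
        return 4u * (size_t)sd * sizeof(double);  /* == (size_t)sd << 5 */
    }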
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx       r8         r9       rsp+8         rsp+16    rsp+24     rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_dgemm_nn_4x4_gen_lib4(int k, double *alpha, double *A, int offB, double *B, int sdb, double *beta, int offC, double *C, int sdc, int offD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_gen_lib4
-	.type kernel_dgemm_nn_4x4_gen_lib4, @function
-kernel_dgemm_nn_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_gen_lib4
-_kernel_dgemm_nn_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_gen_lib4
-	.def kernel_dgemm_nn_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_gen_lib4, .-kernel_dgemm_nn_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dsyrk_nt_l_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.type kernel_dsyrk_nt_l_4x4_lib4, @function
-kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_lib4
-_kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.def kernel_dsyrk_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_lib4, .-kernel_dsyrk_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dsyrk_nt_l_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_vs_lib4
-_kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_vs_lib4, .-kernel_dsyrk_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9           rsp+8      rsp+16   rsp+24       rsp+32     rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_dsyrk_nt_l_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_gen_lib4
-	.type kernel_dsyrk_nt_l_4x4_gen_lib4, @function
-kernel_dsyrk_nt_l_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_gen_lib4
-_kernel_dsyrk_nt_l_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_gen_lib4
-	.def kernel_dsyrk_nt_l_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_gen_lib4, .-kernel_dsyrk_nt_l_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx          r8         r9       rsp+8
-// void kernel_dtrmm_nn_rl_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.type kernel_dtrmm_nn_rl_4x4_lib4, @function
-kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_4x4_lib4
-_kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.def kernel_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_4x4_lib4, .-kernel_dtrmm_nn_rl_4x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi            rdx        rcx          r8         r9       rsp+8        rsp+16     rsp+24   rsp+32  rsp+40  rsp+48  rsp+56
-// void kernel_dtrmm_nn_rl_4x4_gen_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_4x4_gen_lib4
-	.type kernel_dtrmm_nn_rl_4x4_gen_lib4, @function
-kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_4x4_gen_lib4
-_kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_4x4_gen_lib4
-	.def kernel_dtrmm_nn_rl_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // offsetD
-	movq	ARG8, %r11 // D
-	movq	ARG9, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG10, %r13 // m0
-	movq	ARG11, %r14 // m1
-	movq	ARG12, %r15 // n0
-	movq	ARG13, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_4x4_gen_lib4, .-kernel_dtrmm_nn_rl_4x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.type kernel_dtrmm_nt_ru_4x4_lib4, @function
-kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_lib4
-_kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.def kernel_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10
-	movq	ARG4, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_lib4, .-kernel_dtrmm_nt_ru_4x4_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dtrmm_nt_ru_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_4x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_vs_lib4
-_kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_vs_lib4, .-kernel_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  edi    rsi        rdx        rcx        r8         r9
-// void kernel_dpotrf_nt_l_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.type kernel_dpotrf_nt_l_4x4_lib4, @function
-kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.def kernel_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_lib4, .-kernel_dpotrf_nt_l_4x4_lib4
-#endif
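
The edge routine called by the dpotrf kernel above (inner_edge_dpotrf_4x4_vs_lib4) is defined elsewhere in this file. Purely as a reading aid, and as an assumption rather than the library's code, a scalar 4x4 lower Cholesky that also records the reciprocal diagonal, matching the inv_diag_D output named in the signature, looks roughly like this:

    #include <math.h>

    /* Hedged sketch: factor the 4x4 block M (lower part) in place as L*L^T,
     * leaving L in the lower triangle and 1/L[j][j] in inv_diag_D[j].
     * No zero-pivot handling in this sketch. */
    static void potrf_4x4_ref(double M[4][4], double inv_diag_D[4])
    {
        for (int j = 0; j < 4; j++) {
            double d = M[j][j];
            for (int l = 0; l < j; l++)
                d -= M[j][l] * M[j][l];
            d = sqrt(d);
            M[j][j] = d;
            inv_diag_D[j] = 1.0 / d;
            for (int i = j + 1; i < 4; i++) {
                double v = M[i][j];
                for (int l = 0; l < j; l++)
                    v -= M[i][l] * M[j][l];
                M[i][j] = v * inv_diag_D[j];
            }
        }
    }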
-
-
-
-
-
-//                                     edi    rsi        rdx        rcx        r8         r9                  rsp+8   rsp+16
-// void kernel_dpotrf_nt_l_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        1       2           3           4       5           6           7          8          9
-// void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                           edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24             rsp+32   rsp+40
-// void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8     
-// void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_lib4
-#endif
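
Again only as a reading aid (the real inner_edge_dtrsm_rlt_inv_4x4_lib4 lives elsewhere in the file, and this rendering is my assumption): after the update C -= A*B^T, the kernel above solves a right-hand triangular system with a lower factor E, using the precomputed reciprocals in inv_diag_E.

    /* Hedged sketch: solve D * E^T = C for the 4x4 block D, with E lower
     * triangular and inv_diag_E[j] = 1 / E[j][j]. */
    static void trsm_rlt_inv_4x4_ref(double C[4][4], double D[4][4],
                                     double E[4][4], double inv_diag_E[4])
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 4; i++) {
                double v = C[i][j];
                for (int l = 0; l < j; l++)
                    v -= D[i][l] * E[j][l];
                D[i][j] = v * inv_diag_E[j];
            }
    }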
-
-
-
-
-
-//                                            edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8               rsp+16  rsp+24  
-// void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                               edi     rsi         rdx         ecx     r8          r9          rsp+8    rsp+16     rsp+24     rsp+32                rsp+40 rsp+48
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9
-// void kernel_dtrsm_nt_rl_one_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_4x4_lib4
-	.type kernel_dtrsm_nt_rl_one_4x4_lib4, @function
-kernel_dtrsm_nt_rl_one_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_4x4_lib4
-_kernel_dtrsm_nt_rl_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_4x4_lib4
-	.def kernel_dtrsm_nt_rl_one_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_4x4_lib4, .-kernel_dtrsm_nt_rl_one_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8   rsp+16
-// void kernel_dtrsm_nt_rl_one_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_one_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_one_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8
-// void kernel_dtrsm_nt_ru_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_lib4
-	.type kernel_dtrsm_nt_ru_inv_4x4_lib4, @function
-kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_4x4_lib4
-_kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_lib4
-	.def kernel_dtrsm_nt_ru_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11 // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_4x4_lib4, .-kernel_dtrsm_nt_ru_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8         r9         rsp+8                rsp+16  rsp+24
-// void kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double  *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_ru_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_ru_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11 // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_ru_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16
-// void kernel_dtrsm_nn_ru_inv_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_lib4
-	.type kernel_dtrsm_nn_ru_inv_4x4_lib4, @function
-kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_4x4_lib4
-_kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_lib4
-	.def kernel_dtrsm_nn_ru_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_4x4_lib4, .-kernel_dtrsm_nn_ru_inv_4x4_lib4
-#endif
-
-
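A note on the stride setup that the nn kernels above and below all repeat: B is walked across 4-row panels, so the inner routine needs a byte stride rather than the raw sdb value, and

	sdb << 5  =  sdb * 32  =  sdb * 4 rows * 8 bytes per double  =  4 * sdb * sizeof(double)

which is exactly what the comment next to sall $5, %r13d claims.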
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16              rsp+24  rsp+32
-// void kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nn_ru_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-_kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nn_ru_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG9, %r11  // km 
-	movq	ARG10, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_4x4_vs_lib4, .-kernel_dtrsm_nn_ru_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8
-// void kernel_dtrsm_nn_ll_one_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_4x4_lib4
-	.type kernel_dtrsm_nn_ll_one_4x4_lib4, @function
-kernel_dtrsm_nn_ll_one_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_4x4_lib4
-_kernel_dtrsm_nn_ll_one_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_4x4_lib4
-	.def kernel_dtrsm_nn_ll_one_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_4x4_lib4, .-kernel_dtrsm_nn_ll_one_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16  rsp+24
-// void kernel_dtrsm_nn_ll_one_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-	.type kernel_dtrsm_nn_ll_one_4x4_vs_lib4, @function
-kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-_kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-	.def kernel_dtrsm_nn_ll_one_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG8, %r11  // km 
-	movq	ARG9, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_4x4_vs_lib4, .-kernel_dtrsm_nn_ll_one_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16
-// void kernel_dtrsm_nn_lu_inv_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_lib4
-	.type kernel_dtrsm_nn_lu_inv_4x4_lib4, @function
-kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_4x4_lib4
-_kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_lib4
-	.def kernel_dtrsm_nn_lu_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_4x4_lib4, .-kernel_dtrsm_nn_lu_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx      r8         r9         rsp+8      rsp+16              rsp+24  rsp+32
-// void kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nn_lu_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-_kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nn_lu_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // inv_diag_E 
-	movq	ARG9, %r12  // km 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_4X4_VS_LIB4 // TODO
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_4x4_vs_lib4 // TODO
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_4x4_vs_lib4 // TODO
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG9, %r11  // km 
-	movq	ARG10, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_4x4_vs_lib4, .-kernel_dtrsm_nn_lu_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                edi    rsi        rdx        rcx      r8         r9         rsp+8
-// void kernel_dgetrf_nn_4x4_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_4x4_lib4
-	.type kernel_dgetrf_nn_4x4_lib4, @function
-kernel_dgetrf_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_4x4_lib4
-_kernel_dgetrf_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_4x4_lib4
-	.def kernel_dgetrf_nn_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG7, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_4x4_lib4, .-kernel_dgetrf_nn_4x4_lib4
-#endif
-
-
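kernel_dgetrf_nn_4x4_lib4 above keeps the same skeleton but ends in a factorization edge instead of a solve. Below is a minimal sketch of a right-looking, unpivoted 4x4 LU step on the accumulated block, which is how I read inner_edge_dgetrf_4x4_lib4 (unit-lower L and U stored in place, inv_diag_D receiving the reciprocal pivots; treat the storage details as an assumption):

	/* Hedged sketch, not BLASFEO's implementation. W[col][row] holds the 4x4
	   block after the dgemm-sub and the blend with C. */
	static void dgetrf_4x4_nopivot_ref(double W[4][4], double *inv_diag_D)
	{
		for (int j = 0; j < 4; j++) {
			double d = 1.0 / W[j][j];                 /* reciprocal pivot */
			inv_diag_D[j] = d;
			for (int i = j + 1; i < 4; i++)
				W[j][i] *= d;                         /* L(i,j) = W(i,j)/U(j,j) */
			for (int jj = j + 1; jj < 4; jj++)
				for (int i = j + 1; i < 4; i++)
					W[jj][i] -= W[j][i] * W[jj][j];   /* trailing update */
		}
	}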
-
-
-
-//                                   edi    rsi        rdx        rcx      r8         r9         rsp+8               rsp+16  rsp+24
-// void kernel_dgetrf_nn_4x4_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_4x4_vs_lib4
-	.type kernel_dgetrf_nn_4x4_vs_lib4, @function
-kernel_dgetrf_nn_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_4x4_vs_lib4
-_kernel_dgetrf_nn_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_4x4_vs_lib4
-	.def kernel_dgetrf_nn_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12  // B
-	movq	ARG4, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG7, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG6, %r10 // D
-
-	movq	ARG8, %r11  // km 
-	movq	ARG9, %r12  // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_4x4_vs_lib4, .-kernel_dgetrf_nn_4x4_vs_lib4
-#endif
-
-
-
-
-
-#if 0
-//                                   rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dlauum_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlauum_nt_4x4_lib4
-	.type kernel_dlauum_nt_4x4_lib4, @function
-kernel_dlauum_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlauum_nt_4x4_lib4
-_kernel_dlauum_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlauum_nt_4x4_lib4
-	.def kernel_dlauum_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dlauum_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DLAUUM_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dlauum_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dlauum_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlauum_nt_4x4_lib4, .-kernel_dlauum_nt_4x4_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx        r8            r9         rsp+8      rsp+16  rsp+24
-// void kernel_dlauum_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlauum_nt_4x4_vs_lib4
-	.type kernel_dlauum_nt_4x4_vs_lib4, @function
-kernel_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlauum_nt_4x4_vs_lib4
-_kernel_dlauum_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlauum_nt_4x4_vs_lib4
-	.def kernel_dlauum_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dlauum_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DLAUUM_NT_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dlauum_nt_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlauum_nt_4x4_vs_lib4, .-kernel_dlauum_nt_4x4_vs_lib4
-#endif
-#endif
-
-
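The two dlauum kernels above sit in an #if 0 block, so they are compiled out. For context, LAPACK's dlauum computes the in-place product of a triangular factor with its transpose,

	U <- U * U^T   (upper)    or    L <- L^T * L   (lower)

and these 4x4 kernels look like the nt building block for that product: an initial-triangle edge plus a dgemm tail with alpha/beta scaling. That reading is inferred from the routine names, not stated in the file.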
-
-
-
-//                             1         2           3           4
-// void kernel_dlarfb4_r_4_lib4(int kmax, double *pV, double *pT, double *pD);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb4_r_4_lib4
-	.type kernel_dlarfb4_r_4_lib4, @function
-kernel_dlarfb4_r_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb4_r_4_lib4
-_kernel_dlarfb4_r_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb4_r_4_lib4
-	.def kernel_dlarfb4_r_4_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb4_r_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-//	vxorpd	%ymm0, %ymm0, %ymm0
-//	vmovapd	%ymm0, %ymm1
-//	vmovapd	%ymm0, %ymm2
-//	vmovapd	%ymm0, %ymm3
-	
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG2, %r12 // V
-
-	//
-	vmovapd			0(%r11), %ymm0
-	//
-	vmovapd			32(%r11), %ymm1
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm1, %ymm0
-	//
-	vmovapd			64(%r11), %ymm2
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm0
-	vbroadcastsd	72(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm1
-	//
-	vmovapd			96(%r11), %ymm3
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm0
-	vbroadcastsd	104(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm1
-	vbroadcastsd	112(%r12), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm2
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-	movq	ARG3, %r10 // T
-
-	//
-	vbroadcastsd	120(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	//
-	vbroadcastsd	112(%r10), %ymm12
-	vfmadd231pd		%ymm2, %ymm12, %ymm3
-	vbroadcastsd	80(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	//
-	vbroadcastsd	104(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm3
-	vbroadcastsd	72(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm2
-	vbroadcastsd	40(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	//
-	vbroadcastsd	96(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm3
-	vbroadcastsd	64(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm2
-	vbroadcastsd	32(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm1
-	vbroadcastsd	0(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // V
-	movq	ARG4, %r12 // D
-
-	//
-	vmovapd			0(%r12), %ymm12
-	vaddpd			%ymm12, %ymm0, %ymm12
-	vmovapd			%ymm12, 0(%r12)
-	//
-	vmovapd			32(%r12), %ymm12
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vaddpd			%ymm12, %ymm1, %ymm12
-	vmovapd			%ymm12, 32(%r12)
-	//
-	vmovapd			64(%r12), %ymm12
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vaddpd			%ymm12, %ymm2, %ymm12
-	vmovapd			%ymm12, 64(%r12)
-	//
-	vmovapd			96(%r12), %ymm12
-	vbroadcastsd	96(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vbroadcastsd	104(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vbroadcastsd	112(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vaddpd			%ymm12, %ymm3, %ymm12
-	vmovapd			%ymm12, 96(%r12)
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_4x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb4_r_4_lib4, .-kernel_dlarfb4_r_4_lib4
-#endif
-
-
-
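kernel_dlarfb4_r_4_lib4 above applies a 4-reflector block transform to a 4-wide stripe of D from the right. In LAPACK terms a block reflector is H = I - V T V^T, with V unit lower triangular in its leading block and T upper triangular. Reading the code: the explicit triangle plus the dgemm_add tail form W = D V, the broadcast sequence from ARG3 multiplies W by the triangular T, and the final section plus dgebp_add accumulate back into D, so the net update has the shape

	D <- D + (D V) T V^T

which matches D <- D H only if the stored T already carries the minus sign. That sign convention is an inference from the adds in the code, not something the file states.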
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { -1 -1 -1 1 }
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { -1 -1 -1 -1 }
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 3.5 2.5 1.5 0.5 }
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 7.5 6.5 5.5 4.5 }
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC04: // { 1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC05: // { 1.0 1.0 1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC06: // { 1.0 1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC06: // { 1.0 1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC08: // { -1.0 -1.0 -1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC08: // { -1.0 -1.0 -1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC10: // { -1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC10: // { -1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-
-
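The .LCxx tables above store packed doubles as pairs of 32-bit .long words, low word first; for example 0, 1071644672 is the bit pattern 0x3FE00000_00000000 = 0.5, and 0, -1074790400 is 0xBFF00000_00000000 = -1.0 (the high word of the negative constants is simply written as a signed literal). A small, self-contained C check of that encoding (my own snippet, not part of BLASFEO):

	#include <inttypes.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const double vals[] = { 0.5, 1.5, 2.5, 3.5, 1.0, -1.0 };
		for (size_t i = 0; i < sizeof vals / sizeof vals[0]; i++) {
			uint64_t bits;
			memcpy(&bits, &vals[i], sizeof bits);
			/* low word first, then high word, matching the .long pairs above */
			printf("%5.1f -> .long %" PRIu32 ", %" PRIu32 "\n", vals[i],
			       (uint32_t)(bits & 0xffffffffu), (uint32_t)(bits >> 32));
		}
		return 0;
	}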
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x4_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x4_lib4.S
deleted file mode 100644
index 82a5a86..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x4_lib4.S
+++ /dev/null
@@ -1,12995 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
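The macros above encode the two calling conventions this file supports. Under the SysV ABI (OS_LINUX, OS_MAC) the first six integer or pointer arguments arrive in rdi, rsi, rdx, rcx, r8, r9 and the seventh onward live on the caller's stack; under Win64 only the first four arrive in rcx, rdx, r8, r9, and the caller additionally reserves a 32-byte shadow area before its stack arguments. After the prologue's subq $STACKSIZE, %rsp the offsets fall out as a simple layout (a summary, not code from the file):

	SysV, STACKSIZE = 64:
		0(%rsp) .. 47(%rsp)     saved rbx, rbp, r12-r15
		STACKSIZE(%rsp)         return address
		STACKSIZE+8(%rsp)       ARG7, then ARG8 at +16, and so on

	Win64, STACKSIZE = 256:
		0(%rsp) .. 223(%rsp)    saved GPRs, rdi, rsi and xmm6-xmm15
		STACKSIZE(%rsp)         return address
		STACKSIZE+8 .. +39      32-byte shadow space for rcx, rdx, r8, r9
		STACKSIZE+40(%rsp)      ARG5, then ARG6 at +48, and so on

which is exactly the pattern the ARGn defines follow.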
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_8x4_lib4, @function
-inner_kernel_dgemm_add_nt_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_8x4_lib4:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	addq	$128, %r13
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	addq	$128, %r13
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vmovapd 0(%r13), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vfmadd231pd	%ymm9, %ymm14, %ymm5
-
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm7
-
-	subl	$4, %r10d
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	vfmadd231pd	%ymm9, %ymm14, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vfmadd231pd	%ymm11, %ymm14, %ymm5
-
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm7
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm2
-	vfmadd231pd	%ymm11, %ymm14, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vfmadd231pd	%ymm9, %ymm14, %ymm5
-
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vfmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	vfmadd231pd	%ymm9, %ymm14, %ymm6
-
-
-	// unroll 3
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vfmadd231pd	%ymm11, %ymm14, %ymm5
-
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm7
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm2
-	vfmadd231pd	%ymm11, %ymm14, %ymm6
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r13), %ymm13 // B[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-	vfmadd231pd	%ymm9, %ymm14, %ymm5
-
-	vmovapd 32(%r11, %r12, 1), %ymm11 // A1[4]
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm7
-
-	subl	$4, %r10d
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	vfmadd231pd	%ymm9, %ymm14, %ymm6
-
-	// unroll 1
-	vmovapd 64(%r13), %ymm12 // B[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-	vfmadd231pd	%ymm11, %ymm14, %ymm5
-
-	vmovapd 64(%r11, %r12, 1), %ymm9 // A1[8]
-	vfmadd231pd	%ymm10, %ymm13, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm7
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm2
-	vfmadd231pd	%ymm11, %ymm14, %ymm6
-
-	// unroll 2
-	vmovapd 96(%r13), %ymm13 // B[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-	vfmadd231pd	%ymm9, %ymm14, %ymm5
-
-	vmovapd 96(%r11, %r12, 1), %ymm11 // A1[12]
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r13
-	vfmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	vfmadd231pd	%ymm9, %ymm14, %ymm6
-
-
-	// unroll 3
-//	vmovapd 0(%r13), %ymm12 // B[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm4
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vfmadd231pd	%ymm10, %ymm14, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-	vfmadd231pd	%ymm11, %ymm14, %ymm5
-//	cmpl	$3, %r10d
-
-//	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vfmadd231pd	%ymm10, %ymm13, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-	vfmadd231pd	%ymm11, %ymm13, %ymm7
-
-	vfmadd231pd	%ymm10, %ymm14, %ymm2
-	vfmadd231pd	%ymm11, %ymm14, %ymm6
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r13), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vfmadd231pd	%ymm9, %ymm12, %ymm4
-	addq	$32, %r11
-
-	vfmadd231pd	%ymm8, %ymm14, %ymm1
-	addq	$32, %r13
-	vfmadd231pd	%ymm9, %ymm14, %ymm5
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vfmadd231pd	%ymm8, %ymm14, %ymm3
-	vfmadd231pd	%ymm9, %ymm14, %ymm7
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vfmadd231pd	%ymm8, %ymm14, %ymm2
-	subl	$1, %r10d
-	vfmadd231pd	%ymm9, %ymm14, %ymm6
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_8x4_lib4, .-inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
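inner_kernel_dgemm_add_nt_8x4_lib4 above carries two register schedules behind the #if 1 switch: the broadcast scheme (one B element broadcast per pair of fmas, accumulators holding plain columns) and the shuffle scheme (whole B columns loaded once and rotated with vshufpd/vperm2f128, producing the permuted accumulator layouts the register comments describe, which a blend routine undoes later). Both are unrolled four deep with a scalar clean-up loop. Functionally they compute the same 8x4 product; here is a plain-C reference of that product on the lib4 panel layout (my own sketch, not the BLASFEO API; sda and sdd are panel leading dimensions in elements, so the byte stride in r12 is 4*sda*sizeof(double)):

	/* D[0..7][0..3] += A[0..7][:] * B[0..3][:]^T, everything lib4 panel-major:
	   element (i,k) of a 4-row panel sits at p[k*4 + i]; rows 4..7 of A and D
	   start 4*sda (resp. 4*sdd) doubles after rows 0..3. */
	static void dgemm_add_nt_8x4_ref(int kmax, const double *A, int sda,
	                                 const double *B, double *D, int sdd)
	{
		for (int i = 0; i < 8; i++) {
			const double *Ap = (i < 4) ? A : A + 4*sda;
			double       *Dp = (i < 4) ? D : D + 4*sdd;
			int ii = i & 3;
			for (int j = 0; j < 4; j++) {
				double acc = 0.0;
				for (int k = 0; k < kmax; k++)
					acc += Ap[k*4 + ii] * B[k*4 + j];   /* A(i,k) * B(j,k) */
				Dp[j*4 + ii] += acc;
			}
		}
	}

The broadcast variant issues a load for every B element but keeps the accumulators in natural order; the shuffle variant loads each B column once and reuses it through register permutes, at the price of de-permuting the results afterwards, which is presumably why both schedules are kept behind a compile-time switch.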
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_8x4_lib4, @function
-inner_kernel_dgemm_sub_nt_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	addq	$128, %r13
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	addq	$128, %r13
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_8x4_lib4, .-inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-
-
-
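As an editorial aid while reading this deleted kernel source: the two NT inner routines above accumulate an 8x4 block of D from two 4-row panels of A and one 4-row panel of B, all in BLASFEO's packed "lib4" layout (each panel column is 4 contiguous doubles). A minimal C restatement, assuming that layout and ignoring the in-register permutation the assembly uses for scheduling; the function and parameter names below are illustrative, not BLASFEO API:

    /* Reference for what the 8x4 NT inner kernels accumulate.
     * alpha is +1.0 for the "add" kernel and -1.0 for the "sub" kernel.
     * A0/A1 point at the two 4-row panels of A, B at one 4-row panel. */
    static void ref_dgemm_nt_8x4_lib4(int k, double alpha,
                                      const double *A0, const double *A1,
                                      const double *B, double D[8][4])
    {
        for (int l = 0; l < k; l++) {                /* one rank-1 update per step */
            for (int j = 0; j < 4; j++) {
                double b = alpha * B[j + 4 * l];     /* element (j, l) of B panel  */
                for (int i = 0; i < 4; i++) {
                    D[i][j]     += A0[i + 4 * l] * b;  /* rows 0..3 */
                    D[4 + i][j] += A1[i + 4 * l] * b;  /* rows 4..7 */
                }
            }
        }
    }

The unrolled assembly performs exactly four such l-steps per main-loop iteration, then falls back to one step per iteration in the clean-up loop.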
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// rbx   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_8x4_lib4, @function
-inner_kernel_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	addq	%r14, %r13
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm0
-	vfmadd231pd		%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm1
-	vfmadd231pd		%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm2
-	vfmadd231pd		%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm10, %ymm12, %ymm3
-	addq	%r14, %r13
-	vfmadd231pd		%ymm11, %ymm12, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_8x4_lib4, .-inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
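The NN kernel above differs from the NT one only in how B is addressed: B stays in its packed 4-row panels, so the broadcasts at byte offsets 0/32/64/96 pick one row of the current panel, and after four rows B jumps ahead by a whole panel (r14 = 4*sdb*sizeof(double)). A hedged C sketch of that addressing, with illustrative names (not BLASFEO API); the subtract twin that follows is the same with alpha = -1.0:

    /* Reference for the 8x4 NN accumulation; Bp locates row l of B inside
     * its 4-row panel, so element (l, j) sits at Bp[4 * j]. */
    static void ref_dgemm_nn_8x4_lib4(int k, double alpha,
                                      const double *A0, const double *A1,
                                      const double *B, int sdb, double D[8][4])
    {
        for (int l = 0; l < k; l++) {
            const double *Bp = B + (l / 4) * 4 * sdb + (l % 4); /* panel, then row */
            for (int j = 0; j < 4; j++) {
                double b = alpha * Bp[4 * j];
                for (int i = 0; i < 4; i++) {
                    D[i][j]     += A0[i + 4 * l] * b;
                    D[4 + i][j] += A1[i + 4 * l] * b;
                }
            }
        }
    }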
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k
-// r11   <- A+4*sda*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nn_8x4_lib4, @function
-inner_kernel_dgemm_sub_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r11, %r12, 1), %ymm9 // A1[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r13, %r14, 2) // software prefetch
-	prefetcht0	64(%r13, %r14, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	addq	%r14, %r13
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A0
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm8 // A0
-
-	vbroadcastsd	40(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	72(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	104(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastsd	16(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A0
-
-	vbroadcastsd	48(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A1
-
-	vbroadcastsd	80(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	112(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 3
-	vbroadcastsd	24(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm0
-	vfnmadd231pd	%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm8 // A0
-
-	vbroadcastsd	56(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm1
-	vfnmadd231pd	%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm9 // A1
-
-	vbroadcastsd	88(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm2
-	vfnmadd231pd	%ymm11, %ymm12, %ymm6
-
-	vbroadcastsd	120(%r13), %ymm12
-	vfnmadd231pd	%ymm10, %ymm12, %ymm3
-	addq	%r14, %r13
-	vfnmadd231pd	%ymm11, %ymm12, %ymm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd			0(%r11), %ymm8 // A0[0]
-	vmovapd 		0(%r11, %r12, 1), %ymm9 // A1[0]
-	vbroadcastsd	0(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm0
-	vfnmadd231pd	%ymm9, %ymm12, %ymm4
-
-	vbroadcastsd	32(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm1
-	vfnmadd231pd	%ymm9, %ymm12, %ymm5
-	addq	$32, %r11
-
-	vbroadcastsd	64(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm2
-	vfnmadd231pd	%ymm9, %ymm12, %ymm6
-	subl	$1, %r10d
-
-	vbroadcastsd	96(%r13), %ymm12
-	vfnmadd231pd	%ymm8, %ymm12, %ymm3
-	vfnmadd231pd	%ymm9, %ymm12, %ymm7
-	addq	$8, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nn_8x4_lib4, .-inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x8_lib4, @function
-inner_kernel_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd 		0(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-	prefetcht0	128(%r12, %r13, 2) // software prefetch
-	prefetcht0	192(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-
-	// unroll 3
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			32(%r11), %ymm14 // A
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vbroadcastsd	8(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastsd	40(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	72(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	104(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	136(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	168(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	200(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	232(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vbroadcastsd	16(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vmovapd			-32(%r11), %ymm14 // A
-	vbroadcastsd	48(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	80(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	112(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	144(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	176(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	208(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	240(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-
-	// unroll 3
-	vbroadcastsd	24(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm0
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	56(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm1
-	vbroadcastsd	88(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm2
-	vbroadcastsd	120(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm3
-	vbroadcastsd	152(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm4
-	vbroadcastsd	184(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm5
-	vbroadcastsd	216(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm6
-	vbroadcastsd	248(%r12), %ymm12 // B
-	vfmadd231pd		%ymm14, %ymm12, %ymm7
-	addq	%r13, %r12
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastsd	0(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm0
-	vbroadcastsd	32(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm1
-	vbroadcastsd	64(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm2
-	vbroadcastsd	96(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm3
-	vbroadcastsd	128(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm4
-	vbroadcastsd	160(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm5
-	vbroadcastsd	192(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm6
-	vbroadcastsd	224(%r12), %ymm12 // B
-	vfmadd231pd		%ymm13, %ymm12, %ymm7
-
-	addq	$32, %r11
-	addq	$8, %r12
-	subl	$1, %r10d
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x8_lib4, .-inner_kernel_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-
-
-
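The 4x8 routine above is the transpose-shaped sibling of the 8x4 NN kernel: a single 4-row panel of A against eight B columns, one ymm accumulator per column. A sketch under the same layout assumptions (names illustrative, not BLASFEO API):

    static void ref_dgemm_nn_4x8_lib4(int k, const double *A,
                                      const double *B, int sdb, double D[4][8])
    {
        for (int l = 0; l < k; l++) {
            const double *Bp = B + (l / 4) * 4 * sdb + (l % 4); /* row l of its panel */
            for (int j = 0; j < 8; j++)          /* eight accumulator columns */
                for (int i = 0; i < 4; i++)
                    D[i][j] += A[i + 4 * l] * Bp[4 * j];
        }
    }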
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- B
-// r12   <- C
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-// ymm4  <- [a40 a50 a60 a70]
-// ymm5  <- [a41 a51 a61 a71]
-// ymm6  <- [a42 a52 a62 a72]
-// ymm7  <- [a43 a53 a63 a73]
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- ?
-// r12   <- ?
-// r13   <- 32*sdc
-// ymm0  <- [a00 a10 a20 a30]
-// ymm1  <- [a01 a11 a21 a31]
-// ymm2  <- [a02 a12 a22 a32]
-// ymm3  <- [a03 a13 a23 a33]
-// ymm4  <- [a40 a50 a60 a70]
-// ymm5  <- [a41 a51 a61 a71]
-// ymm6  <- [a42 a52 a62 a72]
-// ymm7  <- [a43 a53 a63 a73]
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEBP_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgebp_add_nn_8x4_lib4, @function
-inner_kernel_dgebp_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgebp_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgebp_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgebp_add_nn_8x4_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	cmpl	$3, %r10d
-	jle		2f // cleanup loop
-
-	// main loop
-	.p2align 3
-1:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	8(%r11), %ymm13
-	subl	$4, %r10d
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	40(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	48(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vbroadcastsd	56(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	80(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vbroadcastsd	88(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vbroadcastsd	96(%r11), %ymm13
-	addq	$128, %r11
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	-24(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	-16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vbroadcastsd	-8(%r11), %ymm13
-	addq	$128, %r12
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vmovapd			%ymm12, -32(%r12)
-	vmovapd			%ymm14, -32(%r12, %r13, 1)
-
-	cmpl	$3, %r10d
-	jg		1b // main loop
-
-	cmpl	$0, %r10d
-	jle		0f // return
-
-	// cleanup loop
-2:
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vbroadcastsd	0(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	8(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	16(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vbroadcastsd	24(%r11), %ymm13
-	vfmadd231pd		%ymm3, %ymm13, %ymm12
-	vfmadd231pd		%ymm7, %ymm13, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-	jg		2b // main loop
-
-	// return
-0:
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgebp_add_nn_8x4_lib4, .-inner_kernel_dgebp_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
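The dgebp routine above inverts the usual roles: the 8x4 block of A is held in registers on entry, while C is read, updated and written back column by column. A C restatement of that update, under the same panel-layout reading (names illustrative, not BLASFEO API):

    /* C(0:7, n) += A(0:7, 0:3) * B(0:3, n); the second 4-row panel of C lies
     * 4*sdc doubles after the first, matching the r13 = 32*sdc byte stride. */
    static void ref_dgebp_add_nn_8x4_lib4(int k, const double A[8][4],
                                          const double *B, double *C, int sdc)
    {
        for (int n = 0; n < k; n++) {
            for (int i = 0; i < 4; i++) {
                double c0 = C[i + 4 * n];
                double c1 = C[i + 4 * n + 4 * sdc];
                for (int l = 0; l < 4; l++) {
                    c0 += A[i][l]     * B[l + 4 * n];
                    c1 += A[4 + i][l] * B[l + 4 * n];
                }
                C[i + 4 * n]           = c0;
                C[i + 4 * n + 4 * sdc] = c1;
            }
        }
    }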
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_8x4_lib4, @function
-inner_edge_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_8x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %ebx
-	subl			%r15d, %ebx // 4-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,4-offsetB)
-
-	movl			%r15d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r13 // B+offsetB*sizeof(double)
-
-	movq			%r11, %rax // A1 <- A0
-	addq			%r12, %rax // A1 <- A0 + 4*sda*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12 // A0[0]
-	vmovapd			0(%rax), %ymm14 // A1[0]
-	vbroadcastsd	0(%r13), %ymm13 // B[0]
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vfmadd231pd		%ymm14, %ymm13, %ymm4
-	vbroadcastsd	32(%r13), %ymm13 // B[1]
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vfmadd231pd		%ymm14, %ymm13, %ymm5
-	vbroadcastsd	64(%r13), %ymm13 // B[2]
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vfmadd231pd		%ymm14, %ymm13, %ymm6
-	vbroadcastsd	96(%r13), %ymm13 // B[3]
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vfmadd231pd		%ymm14, %ymm13, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$32, %rax // A1+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_8x4_lib4, .-inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-
-
-
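The edge routine above mostly has to get the bookkeeping right: with B entering offB rows into a 4-row panel it peels kend = min(k, 4-offB) single rows, then leaves B at the start of the next panel for the aligned kernel. A sketch of that bookkeeping (an illustrative helper, not BLASFEO API); the per-row accumulation it elides is the same rank-1 update as in the plain kernel:

    static void dgemm_nn_edge_advance(int *k, const double **B, int sdb, int offB)
    {
        if (offB == 0 || *k == 0)
            return;
        int kend = 4 - offB;
        if (kend > *k)
            kend = *k;              /* the cmovgl min() in the assembly        */
        *B += offB;                 /* step to row offB of the current panel   */
        /* ... kend single-row rank-1 updates happen here ... */
        *B += kend;
        *k -= kend;
        if (*k > 0)
            *B += 4 * sdb - 4;      /* jump from row 4 to the next 4-row panel */
    }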
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x8_lib4, @function
-inner_edge_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x8_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x8_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	vmovapd			0(%r11), %ymm12
-	vbroadcastsd	0(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm0
-	vbroadcastsd	32(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm1
-	vbroadcastsd	64(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm2
-	vbroadcastsd	96(%r12), %ymm13
-	vfmadd231pd		%ymm12, %ymm13, %ymm3
-	vbroadcastsd	128(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm4
-	vbroadcastsd	160(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm5
-	vbroadcastsd	192(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm6
-	vbroadcastsd	224(%r12), %ymm13 // B
-	vfmadd231pd		%ymm12, %ymm13, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x8_lib4, .-inner_edge_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-
-
-
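One peeled row of the 4x8 edge above multiplies a single column of the A panel by eight freshly broadcast B entries, one per accumulator column. In C terms (names illustrative, not BLASFEO API), with A pointing at the current 4-double column and B at the current row inside its panel:

    static void ref_dgemm_nn_4x8_edge_step(const double *A, const double *B,
                                           double D[4][8])
    {
        for (int j = 0; j < 8; j++)
            for (int i = 0; i < 4; i++)
                D[i][j] += A[i] * B[4 * j];   /* B[4*j] is the entry for column j */
    }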
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- 4*sda*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_8x4_lib4, @function
-inner_edge_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_8x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // A1 <- A0
-	addq	%r11, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	vbroadcastsd	0(%r12), %ymm12
-	vmovapd			0(%r10), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			32(%r10), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			32(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	vbroadcastsd	64(%r12), %ymm12
-	vmovapd			64(%r10), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			64(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	72(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	80(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vbroadcastsd	96(%r12), %ymm12
-	vmovapd			96(%r10), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			96(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	104(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	112(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	120(%r12), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	addq			$128, %r10
-	addq			$128, %r12
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_8x4_lib4, .-inner_edge_dtrmm_nt_ru_8x4_lib4
-#endif
-#endif
-
-
-
-
-
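The triangular edge above exploits the structure of B: for the first four columns l of A only the B entries on or above the diagonal contribute, so step l touches accumulator columns 0..l only (this plain routine assumes k >= 4; the _vs variant that follows re-checks k after every step). A C restatement under the same layout reading (names illustrative, not BLASFEO API):

    static void ref_dtrmm_nt_ru_edge_8x4_lib4(const double *A0, const double *A1,
                                              const double *B, double D[8][4])
    {
        for (int l = 0; l < 4; l++)
            for (int j = 0; j <= l; j++) {       /* upper-triangular factor */
                double b = B[j + 4 * l];
                for (int i = 0; i < 4; i++) {
                    D[i][j]     += A0[i + 4 * l] * b;
                    D[4 + i][j] += A1[i + 4 * l] * b;
                }
            }
    }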
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*4*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_8x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_8x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	addq			$32, %r11
-	vmovapd			0(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	addq			$32, %r13
-	addq			$32, %r15
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	addq			$32, %r11
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	addq			$32, %r13
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	addq			$32, %r15
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	addq			$32, %r11
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	addq			$32, %r13
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	addq			$32, %r15
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vbroadcastsd	0(%r13), %ymm12
-	subl			$1, %r10d
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm9
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	addq			$32, %r11
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	addq			$32, %r13
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-	addq			$32, %r15
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_8x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_8x4_lib4, @function
-inner_edge_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_8x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r15d
-	jg		0f
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r15d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	subl			$3, %r10d // k-3
-	addq			$96, %r11 // A0+3*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$8, %r13 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r15d
-	jg		2f
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	subl			$2, %r10d // k-2
-	addq			$64, %r11 // A0+2*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$16, %r13 // B+bs*sdb*sizeof(double)-2
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	104(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-3
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	vmovapd			32(%r11), %ymm8
-	vmovapd			32(%r11, %r12, 1), %ymm9
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	40(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	72(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	vmovapd			64(%r11), %ymm8
-	vmovapd			64(%r11, %r12, 1), %ymm9
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	48(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	80(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	112(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	vmovapd			96(%r11), %ymm8
-	vmovapd			96(%r11, %r12, 1), %ymm9
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	56(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	88(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	120(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$4, %r10d // k-4
-	addq			$128, %r11 // A0+4*bs*sizeof(double)
-	addq			%r14, %r13 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_8x4_lib4, .-inner_edge_dtrmm_nn_rl_8x4_lib4
-#endif
-#endif
-
-
-
-
-
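For the lower-triangular NN edge above, the aligned (offB == 0) branch is the easiest to read: row l of the first B panel only reaches accumulator columns 0..l. The offB > 0 branches shift where that triangle starts and fix up the panel pointer, and the _vs variant that follows does the same one row at a time. An aligned-case sketch (names illustrative, not BLASFEO API):

    static void ref_dtrmm_nn_rl_edge_8x4_lib4(const double *A0, const double *A1,
                                              const double *B, double D[8][4])
    {
        for (int l = 0; l < 4; l++)
            for (int j = 0; j <= l; j++) {       /* lower-triangular factor */
                double b = B[l + 4 * j];
                for (int i = 0; i < 4; i++) {
                    D[i][j]     += A0[i + 4 * l] * b;
                    D[4 + i][j] += A1[i + 4 * l] * b;
                }
            }
    }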
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A0
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// rax   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_8x4_vs_lib4, @function
-inner_edge_dtrmm_nn_rl_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_8x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	cmpl			$0, %r15d
-	jg				0f // offB>0
-
-	// offB==0
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-0:
-	cmpl			$1, %r15d
-	jg				1f // offB>1
-
-	// offB==1
-
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f // end
-
-1:
-	cmpl			$2, %r15d
-	jg				2f // offB>2
-
-	// offB==2
-
-	addq			$16, %r13 // B+2*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	jmp				3f
-
-2:
-	// offB==3
-
-	addq			$24, %r13 // B+3*sizeof(double)
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			$8, %r13 // B+1*sizeof(double)
-
-	cmpl			$0, %r10d
-	jle				3f // end
-
-	vmovapd			0(%r11), %ymm8
-	vmovapd			0(%r11, %r12, 1), %ymm9
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vfmadd231pd		%ymm9, %ymm12, %ymm4
-	vbroadcastsd	32(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	vfmadd231pd		%ymm9, %ymm12, %ymm5
-	vbroadcastsd	64(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vfmadd231pd		%ymm9, %ymm12, %ymm6
-	vbroadcastsd	96(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	vfmadd231pd		%ymm9, %ymm12, %ymm7
-
-	subl			$1, %r10d // k-1
-	addq			$32, %r11 // A0+1*bs*sizeof(double)
-	addq			%r14, %r13
-	subq			$24, %r13 // B+bs*sdb*sizeof(double)-(bs-1)*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_8x4_vs_lib4, .-inner_edge_dtrmm_nn_rl_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
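-// per the layout comments above, every d_ij already sits in lane i of one of
-// the four accumulators; the columns are only rotated across registers, so no
-// shuffles are needed: two rounds of vblendpd (first pairing ymm0/ymm1 and
-// ymm2/ymm3, then selecting 128-bit halves) regroup the elements into plain
-// columns for both the upper and the lower 4x4 block.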
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_8x4_lib4, @function
-inner_blend_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_8x4_lib4:
-#endif
-#endif
-	
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_8x4_lib4, .-inner_blend_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x4_lib4, @function
-inner_scale_11_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_11_8x4_lib4:
-#endif
-#endif
-	
-
-	movq	%r10, %r12 // C1 <- C0
-	addq	%r11, %r12 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r12), %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r12), %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r12), %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r12), %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x4_lib4, .-inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
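-// computes acc <- alpha*acc and, unless beta compares equal to 0.0, adds
-// beta*C with vfmadd231pd; C is addressed as two 4-row panels, C0 in r12 and
-// C1 = C0 + 4*sdc*sizeof(double) in r15, and when beta is zero C is not read
-// at all.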
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_lib4, @function
-inner_scale_ab_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	// alg==1
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_lib4, .-inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta (generalized, with row offset for C)
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
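-// the generalized variant additionally supports C stored with a row offset
-// (r12d in 0..3): each 8-row column is then gathered from up to three
-// consecutive row panels (r13, rax = r13 + 4*sdc*sizeof(double), rbx one
-// panel further), using vblendpd to pick the rows that spill into the
-// neighbouring panel and vperm2f128/vshufpd to rotate them into register
-// order before the beta multiply-add.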
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_gen_lib4, @function
-inner_scale_ab_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_gen_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	movq	%r13, %rax // C1 <- C0
-	addq	%r14, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	jmp		3f
-
-0:
-
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_gen_lib4, .-inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_8x4_lib4, @function
-inner_scale_a0_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_8x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_8x4_lib4, .-inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_lib4, @function
-inner_blend_scale_ab_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	// alg==1
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r15), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_lib4, .-inner_blend_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x8_lib4, @function
-inner_scale_ab_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x8_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x8_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-	vmovapd		128(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x8_lib4, .-inner_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
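-// the transpose is the usual 4x4 double-precision pattern: vunpcklpd /
-// vunpckhpd interleave element pairs within each 128-bit lane, then
-// vperm2f128 with 0x20/0x31 recombines the low/high lanes of the two
-// intermediate pairs; it is applied once per 4x4 half of the 4x8 tile, and
-// the result is scaled by alpha and (if beta != 0.0) accumulated against C
-// stored as a single 4-row panel of 8 columns.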
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_lib4, @function
-inner_tran_scale_ab_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_lib4, .-inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta (generalized, with row offset for C)
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- offset
-// r13   <- C
-// r14   <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_gen_lib4, @function
-inner_blend_scale_ab_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_gen_lib4:
-#endif
-#endif
-	
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	movq	%r13, %rax // C1 <- C0
-	addq	%r14, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-
-	vxorpd		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovapd		0(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r13), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%rax), %ymm14
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	jmp		3f
-
-0:
-
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$1, %r12d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x1, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x1, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm14, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r12d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x3, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x3, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		0(%r13), %ymm12
-	vmovapd		0(%rax), %ymm13
-	vmovapd		0(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm0
-	vfmadd231pd	%ymm13, %ymm15, %ymm4
-
-	vmovapd		32(%r13), %ymm12
-	vmovapd		32(%rax), %ymm13
-	vmovapd		32(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm1
-	vfmadd231pd	%ymm13, %ymm15, %ymm5
-
-	vmovapd		64(%r13), %ymm12
-	vmovapd		64(%rax), %ymm13
-	vmovapd		64(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm2
-	vfmadd231pd	%ymm13, %ymm15, %ymm6
-
-	vmovapd		96(%r13), %ymm12
-	vmovapd		96(%rax), %ymm13
-	vmovapd		96(%rbx), %ymm14
-	vblendpd	$0x7, %ymm13, %ymm12, %ymm12
-	vblendpd	$0x7, %ymm14, %ymm13, %ymm13
-	vperm2f128	$0x01, %ymm12, %ymm12, %ymm14
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm12
-	vperm2f128	$0x01, %ymm13, %ymm13, %ymm14
-	vshufpd		$0x5, %ymm13, %ymm14, %ymm13
-	vfmadd231pd	%ymm12, %ymm15, %ymm3
-	vfmadd231pd	%ymm13, %ymm15, %ymm7
-
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_gen_lib4, .-inner_blend_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_lib4, @function
-inner_blend_scale_11_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_lib4:
-#endif
-#endif
-
-	// tc==n
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vblendpd	$0xa, %ymm5, %ymm4, %ymm8
-	vblendpd	$0x5, %ymm5, %ymm4, %ymm9
-	vblendpd	$0xa, %ymm7, %ymm6, %ymm10
-	vblendpd	$0x5, %ymm7, %ymm6, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm4
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm6
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm5
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm7
-
-	movq	%r10, %r15 // C1 <- C0
-	addq	%r11, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovapd		0(%r10), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-	vmovapd		0(%r15), %ymm15
-	vaddpd		%ymm4, %ymm15, %ymm4
-	vmovapd		32(%r15), %ymm15
-	vaddpd		%ymm5, %ymm15, %ymm5
-	vmovapd		64(%r15), %ymm15
-	vaddpd		%ymm6, %ymm15, %ymm6
-	vmovapd		96(%r15), %ymm15
-	vaddpd		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_lib4, .-inner_blend_scale_11_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_11_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_11_4x8_lib4, @function
-inner_tran_scale_11_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_11_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_11_4x8_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_11_4x8_lib4:
-#endif
-#endif
-		
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC04(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_11_4x8_lib4, .-inner_tran_scale_11_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// Cholesky factorization
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
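-// factorizes the 4x4 diagonal block column by column: if the pivot d_jj is
-// positive, 1.0/sqrt(d_jj) is computed (the 1.0 comes from LC04), stored to
-// inv_diag_E[j] and broadcast to scale column j of both the upper (ymm0-3)
-// and lower (ymm4-7) 4x4 blocks, after which the remaining columns are
-// updated with vfnmadd231pd; a non-positive pivot branches to the vxorpd
-// stubs below, so the scaling factor (and hence the column) is zeroed
-// instead; kn in r11d stops the factorization early for partial blocks.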
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_8x4_vs_lib4, @function
-inner_edge_dpotrf_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_8x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd			%xmm0, %xmm0, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-2:
-	vmovsd			%xmm13, 0(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	cmpl			$2, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x00, %ymm0, %ymm0, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-4:
-	vmovsd			%xmm13, 8(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	cmpl			$3, %r11d
-	jl				0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-6:
-	vmovsd			%xmm13, 16(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	cmpl			$4, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x11, %ymm2, %ymm2, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-//	vextractf128	$0x1, %ymm3, %xmm13
-//	vpermilpd		$0x3, %xmm13, %xmm13
-	vpermpd			$0xff, %ymm3, %ymm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-8:
-	vmovsd			%xmm13, 24(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-	jmp				0f
-
-1:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				8b
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_8x4_vs_lib4, .-inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
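-// column-by-column substitution against the lower-triangular factor E:
-// column j (of both 4-row halves) is scaled by the precomputed reciprocal
-// diagonal inv_diag_E[j], then its contribution is removed from the later
-// columns with vfnmadd231pd using the sub-diagonal entries of E (offsets
-// 8/16/24, 48/56 and 88); since the reciprocals are precomputed, the kernel
-// itself performs no divisions.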
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x4_lib4, .-inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
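-// same substitution as above, but D has 8 columns and E spans two row panels:
-// the entries coupling columns 0-3 into columns 4-7 are read from the second
-// panel via (%r10,%r11,1), and addq $128, %r10 then steps to the next
-// 4-column block of E before columns 4-7 are solved among themselves; the
-// eight reciprocal diagonal entries come from inv_diag_D at offsets 0..56.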
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x8_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x8_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x8_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x8_lib4, .-inner_edge_dtrsm_rlt_inv_4x8_lib4
-#endif
-#endif
-
-
-
-
-
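A minimal C sketch of the operation these RLT_INV edge routines implement (right side, lower triangular, transposed, with a precomputed reciprocal diagonal), kept here only for orientation. The signature and the plain column-major layout are illustrative assumptions, not the library API; the kernel itself works on a fixed register tile in panel-major storage.

    /* Reference only: X (m x n) := X * L^{-T}, L lower triangular,
       inv_diag[j] assumed to hold 1.0 / L[j][j]. */
    void dtrsm_rlt_inv_ref(int m, int n, double *X, int ldx,
                           const double *L, int ldl, const double *inv_diag) {
        for (int j = 0; j < n; j++) {
            for (int i = 0; i < m; i++)
                X[i + j * ldx] *= inv_diag[j];              /* scale by 1/L[j][j] */
            for (int k = j + 1; k < n; k++)                 /* eliminate from later columns */
                for (int i = 0; i < m; i++)
                    X[i + k * ldx] -= L[k + j * ldl] * X[i + j * ldx];
        }
    }

The broadcast/fnmadd pairs above correspond to this column-by-column forward substitution, unrolled with the 8x8 factor split across two 4-row panels (hence the (%r10, %r11, 1) addressing for its lower half).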
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r11), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-
-
-	vbroadcastsd	8(%r11), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-
-	vbroadcastsd	16(%r11), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-	vbroadcastsd	24(%r11), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
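The _vs ("variable size") variants take an extra count (kn here; assumed to be the number of columns actually present in this tile) and return as soon as no further columns exist, so edge tiles are never computed past their logical width. A hedged C sketch of that control flow, under the same illustrative layout as the reference code above:

    /* Reference only: same solve as before, but clipped to kn columns. */
    void dtrsm_rlt_inv_vs_ref(int m, int kn, double *X, int ldx,
                              const double *L, int ldl, const double *inv_diag) {
        int n = kn < 4 ? kn : 4;                    /* this kernel covers at most 4 columns */
        for (int j = 0; j < n; j++) {
            for (int i = 0; i < m; i++)
                X[i + j * ldx] *= inv_diag[j];
            for (int k = j + 1; k < n; k++)
                for (int i = 0; i < m; i++)
                    X[i + k * ldx] -= L[k + j * ldl] * X[i + j * ldx];
        }
    }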
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// r13d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- sdd
-// r12  <- inv_diag_D
-// r13d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X8_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x8_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x8_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x8_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x8_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	cmpl			$6, %r13d
-	jl				0f // ret
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	cmpl			$7, %r13d
-	jl				0f // ret
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	cmpl			$8, %r13d
-	jl				0f // ret
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x8_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x8_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_8x4_lib4, @function
-inner_edge_dtrsm_rlt_one_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_8x4_lib4, .-inner_edge_dtrsm_rlt_one_8x4_lib4
-#endif
-#endif
-
-
-
-
-
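With a unit diagonal (the _ONE variants) the per-column scaling disappears and only the eliminations remain, which is why the routine above never touches an inv_diag pointer. A sketch under the same assumptions as the earlier reference code:

    /* Reference only: X (m x n) := X * E^{-T}, E unit lower triangular. */
    void dtrsm_rlt_one_ref(int m, int n, double *X, int ldx,
                           const double *E, int lde) {
        for (int j = 0; j < n; j++)
            for (int k = j + 1; k < n; k++)                 /* no division needed */
                for (int i = 0; i < m; i++)
                    X[i + k * ldx] -= E[k + j * lde] * X[i + j * ldx];
    }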
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// unit diagonal
-//
-// input arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_ONE_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_one_8x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_one_8x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$2, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-
-	cmpl			$3, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-
-	cmpl			$4, %r11d
-	jl				0f // ret
-
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_one_8x4_vs_lib4, .-inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_8x4_lib4, @function
-inner_edge_dtrsm_rut_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_8x4_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vfnmadd231pd	%ymm7, %ymm12, %ymm6
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vfnmadd231pd	%ymm7, %ymm12, %ymm5
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-	vfnmadd231pd	%ymm7, %ymm12, %ymm4
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vfnmadd231pd	%ymm6, %ymm12, %ymm5
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-	vfnmadd231pd	%ymm6, %ymm12, %ymm4
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-	vfnmadd231pd	%ymm5, %ymm12, %ymm4
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_8x4_lib4, .-inner_edge_dtrsm_rut_inv_8x4_lib4
-#endif
-#endif
-
-
-
-
-
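For the upper-triangular transposed case the substitution runs backwards: the last column is finalized first and then eliminated from all earlier ones, which matches the routine above starting from 24(%r11) and 112(%r10). Illustrative C reference, same assumptions as before:

    /* Reference only: X (m x n) := X * U^{-T}, U upper triangular,
       inv_diag[j] assumed to hold 1.0 / U[j][j]. */
    void dtrsm_rut_inv_ref(int m, int n, double *X, int ldx,
                           const double *U, int ldu, const double *inv_diag) {
        for (int j = n - 1; j >= 0; j--) {
            for (int i = 0; i < m; i++)
                X[i + j * ldx] *= inv_diag[j];              /* finalize column j */
            for (int k = 0; k < j; k++)                     /* propagate to earlier columns */
                for (int i = 0; i < m; i++)
                    X[i + k * ldx] -= U[k + j * ldu] * X[i + j * ldx];
        }
    }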
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = lower
-// tran = transposed
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUT_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rut_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rut_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rut_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl			$3, %r12d
-	jle				0f
-
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm2
-	vfnmadd231pd	%ymm7, %ymm12, %ymm6
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm1
-	vfnmadd231pd	%ymm7, %ymm12, %ymm5
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm3, %ymm12, %ymm0
-	vfnmadd231pd	%ymm7, %ymm12, %ymm4
-
-0:
-	cmpl			$2, %r12d
-	jle				1f
-
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm1
-	vfnmadd231pd	%ymm6, %ymm12, %ymm5
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm0
-	vfnmadd231pd	%ymm6, %ymm12, %ymm4
-
-1:
-	cmpl			$1, %r12d
-	jle				2f
-
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm0
-	vfnmadd231pd	%ymm5, %ymm12, %ymm4
-
-2:
-
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rut_inv_8x4_vs_lib4, .-inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = right
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_run_inv_8x4_lib4, @function
-inner_edge_dtrsm_run_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_run_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_run_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_run_inv_8x4_lib4:
-#endif
-#endif
-
-	// first column
-	vbroadcastsd	0(%r11), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-	// second column
-	vbroadcastsd	32(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm1
-	vfnmadd231pd	%ymm4, %ymm12, %ymm5
-	vbroadcastsd	8(%r11), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-
-	// third column
-	vbroadcastsd	64(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm2
-	vfnmadd231pd	%ymm4, %ymm12, %ymm6
-	vbroadcastsd	72(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm2
-	vfnmadd231pd	%ymm5, %ymm12, %ymm6
-	vbroadcastsd	16(%r11), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-
-	// fourth column
-	vbroadcastsd	96(%r10), %ymm12
-	vfnmadd231pd	%ymm0, %ymm12, %ymm3
-	vfnmadd231pd	%ymm4, %ymm12, %ymm7
-	vbroadcastsd	104(%r10), %ymm12
-	vfnmadd231pd	%ymm1, %ymm12, %ymm3
-	vfnmadd231pd	%ymm5, %ymm12, %ymm7
-	vbroadcastsd	112(%r10), %ymm12
-	vfnmadd231pd	%ymm2, %ymm12, %ymm3
-	vfnmadd231pd	%ymm6, %ymm12, %ymm7
-	vbroadcastsd	24(%r11), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_run_inv_8x4_lib4, .-inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
-
-
-
-
-
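In the non-transposed upper case each column first absorbs the contributions of all earlier (already solved) columns and is scaled last, which is the left-looking order visible in the routine above. A hedged reference:

    /* Reference only: X (m x n) := X * E^{-1}, E upper triangular. */
    void dtrsm_run_inv_ref(int m, int n, double *X, int ldx,
                           const double *E, int lde, const double *inv_diag) {
        for (int j = 0; j < n; j++) {
            for (int k = 0; k < j; k++)                     /* subtract solved columns */
                for (int i = 0; i < m; i++)
                    X[i + j * ldx] -= E[k + j * lde] * X[i + k * ldx];
            for (int i = 0; i < m; i++)
                X[i + j * ldx] *= inv_diag[j];              /* then scale by 1/E[j][j] */
        }
    }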
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = lower
-// tran = normal
-// unit diagonal
-//
-// input arguments:
-// r10  <- E0
-// r11  <- 4*sde*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E0
-// r11  <- 4*sde*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lln_one_8x4_lib4, @function
-inner_edge_dtrsm_lln_one_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lln_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lln_one_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lln_one_8x4_lib4:
-#endif
-#endif
-
-	movq	%r10, %r12 // E1 <- E0
-	addq	%r11, %r12 // E1 <- E0 + 4*sde*sizeof(double)
-
-	// left block-column
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			0(%r10), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vmovapd			0(%r12), %ymm14
-	vpermpd			$0x00, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			32(%r10), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vmovapd			32(%r12), %ymm14
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vpermpd			$0x55, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-	vmovapd			64(%r10), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vmovapd			64(%r12), %ymm14
-	vpermpd			$0xaa, %ymm0, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm0
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vpermpd			$0xaa, %ymm1, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm1
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm2
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm3
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-
-	vmovapd			96(%r12), %ymm14
-	vpermpd			$0xff, %ymm0, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm4
-	vpermpd			$0xff, %ymm1, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm5
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm6
-	vpermpd			$0xff, %ymm3, %ymm13
-	vfnmadd231pd	%ymm14, %ymm13, %ymm7
-
-	addq		$128, %r12
-
-
-	// right block-column
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	vmovapd			0(%r12), %ymm12
-	vblendpd		$0x1, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x00, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vpermpd			$0x00, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vpermpd			$0x00, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vpermpd			$0x00, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-
-	vmovapd			32(%r12), %ymm12
-	vblendpd		$0x3, %ymm14, %ymm12, %ymm12
-	vpermpd			$0x55, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vpermpd			$0x55, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vpermpd			$0x55, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vpermpd			$0x55, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-
-	vmovapd			64(%r12), %ymm12
-	vblendpd		$0x7, %ymm14, %ymm12, %ymm12
-	vpermpd			$0xaa, %ymm4, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm4
-	vpermpd			$0xaa, %ymm5, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm5
-	vpermpd			$0xaa, %ymm6, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm6
-	vpermpd			$0xaa, %ymm7, %ymm13
-	vfnmadd231pd	%ymm12, %ymm13, %ymm7
-
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lln_one_8x4_lib4, .-inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
-
-
-
-
-
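Solving from the left with a unit lower-triangular factor is plain forward substitution over the rows of the right-hand side; the vblendpd masks above only serve to zero the diagonal-and-above part of each loaded column of E so that already finished rows are left untouched. Reference sketch (illustrative signature, column-major arrays):

    /* Reference only: X (m x n) := E^{-1} * X, E (m x m) unit lower triangular. */
    void dtrsm_lln_one_ref(int m, int n, const double *E, int lde,
                           double *X, int ldx) {
        for (int k = 0; k < m; k++)             /* row k of X is final at this point */
            for (int i = k + 1; i < m; i++)     /* eliminate it from the rows below */
                for (int j = 0; j < n; j++)
                    X[i + j * ldx] -= E[i + k * lde] * X[k + j * ldx];
    }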
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_8x4_lib4, @function
-inner_edge_dtrsm_lun_inv_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_8x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r13 // E1 <- E0
-	addq	%r11, %r13 // E1 <- E0 + 4*sde*sizeof(double)
-
-	// bottom-right
-
-	vmovapd			224(%r13), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm11
-
-	vpermpd			$0xff, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 (likely redundant: the 128-bit vmovapd below also zeroes the upper lanes)
-	vmovapd			192(%r13), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm11
-
-	vpermpd			$0xaa, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r13), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm11
-
-	vpermpd			$0x55, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm11
-
-	vpermpd			$0x00, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vpermpd			$0xff, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vpermpd			$0xaa, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_8x4_lib4, .-inner_edge_dtrsm_lun_inv_8x4_lib4
-#endif
-#endif
-
-
-
-
-
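The left upper-triangular solve is backward substitution over rows, which is why the routine above starts from the bottom-right of the 8x8 factor (offset 224 of the second panel) and works upward, using vpermpd to extract one row of each accumulator and vblendpd to write the finalized value back into its lane. Reference sketch:

    /* Reference only: X (m x n) := U^{-1} * X, U (m x m) upper triangular,
       inv_diag[k] assumed to hold 1.0 / U[k][k]. */
    void dtrsm_lun_inv_ref(int m, int n, const double *U, int ldu,
                           const double *inv_diag, double *X, int ldx) {
        for (int k = m - 1; k >= 0; k--) {
            for (int j = 0; j < n; j++)
                X[k + j * ldx] *= inv_diag[k];              /* finalize row k */
            for (int i = 0; i < k; i++)                     /* propagate it upward */
                for (int j = 0; j < n; j++)
                    X[i + j * ldx] -= U[i + k * ldu] * X[k + j * ldx];
        }
    }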
-// common inner routine with file scope
-//
-// triangular substitution:
-// side = left
-// uplo = upper
-// tran = normal
-// requires explicit inverse of diagonal
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// r13  <- km
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_LUN_INV_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_lun_inv_8x4_vs_lib4, @function
-inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_lun_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_lun_inv_8x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r14 // E1 <- E0
-	addq	%r11, %r14 // E1 <- E0 + 4*sde*sizeof(double)
-
-	// bottom-right
-
-	cmpl	$7, %r13d
-	jle		0f
-
-	vmovapd			224(%r14), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	56(%r12), %ymm12
-	vmovapd			224(%r10), %ymm11
-
-	vpermpd			$0xff, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-0:
-	cmpl	$6, %r13d
-	jle		1f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0 (likely redundant: the 128-bit vmovapd below also zeroes the upper lanes)
-	vmovapd			192(%r14), %xmm13
-	vbroadcastsd	48(%r12), %ymm12
-	vmovapd			192(%r10), %ymm11
-
-	vpermpd			$0xaa, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-1:
-	cmpl	$5, %r13d
-	jle		2f
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			160(%r14), %xmm13
-	vbroadcastsd	40(%r12), %ymm12
-	vmovapd			160(%r10), %ymm11
-
-	vpermpd			$0x55, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0x55, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0x55, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0x55, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-2:
-
-	vbroadcastsd	32(%r12), %ymm12
-	vmovapd			128(%r10), %ymm11
-
-	vpermpd			$0x00, %ymm4, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm4, %ymm4
-	vfnmadd231pd	%ymm11, %ymm14, %ymm0
-
-	vpermpd			$0x00, %ymm5, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm5, %ymm5
-	vfnmadd231pd	%ymm11, %ymm14, %ymm1
-
-	vpermpd			$0x00, %ymm6, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm6, %ymm6
-	vfnmadd231pd	%ymm11, %ymm14, %ymm2
-
-	vpermpd			$0x00, %ymm7, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm7, %ymm7
-	vfnmadd231pd	%ymm11, %ymm14, %ymm3
-
-
-	// top-left
-
-	vmovapd			96(%r10), %ymm13
-	vxorpd			%ymm14, %ymm14, %ymm14 // 0.0
-	vblendpd		$0x7, %ymm13, %ymm14, %ymm13
-	vbroadcastsd	24(%r12), %ymm12
-
-	vpermpd			$0xff, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xff, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xff, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xff, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x8, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovapd			64(%r10), %xmm13
-	vbroadcastsd	16(%r12), %ymm12
-
-	vpermpd			$0xaa, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermpd			$0xaa, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermpd			$0xaa, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermpd			$0xaa, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x4, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vxorpd			%ymm13, %ymm13, %ymm13 // 0.0
-	vmovsd			32(%r10), %xmm13
-	vbroadcastsd	8(%r12), %ymm12
-
-	vpermilpd		$0xf, %ymm0, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm0, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm0
-
-	vpermilpd		$0xf, %ymm1, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm1, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm1
-
-	vpermilpd		$0xf, %ymm2, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm2, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm2
-
-	vpermilpd		$0xf, %ymm3, %ymm14
-	vmulpd			%ymm14, %ymm12, %ymm14
-	vblendpd		$0x2, %ymm14, %ymm3, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm3
-
-
-	vbroadcastsd	0(%r12), %ymm12
-
-	vmulpd			%ymm0, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm0, %ymm0
-
-	vmulpd			%ymm1, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm1, %ymm1
-
-	vmulpd			%ymm2, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm2, %ymm2
-
-	vmulpd			%ymm3, %ymm12, %ymm14
-	vblendpd		$0x1, %ymm14, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_lun_inv_8x4_vs_lib4, .-inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// LU factorization without pivoting
-// left kernel
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgetrf_l_8x4_lib4, @function
-inner_edge_dgetrf_l_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgetrf_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgetrf_l_8x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgetrf_l_8x4_lib4:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-//	vmovddup		%xmm14, %xmm14
-
-	// first column
-//	vblendpd		$0x1, %ymm0, %ymm12, %ymm12
-	vmovapd			%ymm0, %ymm12
-	vdivsd			%xmm0, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 0(%r10)
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vblendpd		$0x1, %ymm12, %ymm0, %ymm0
-
-	// second column
-	vpermpd			$0x00, %ymm1, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vblendpd		$0x2, %ymm1, %ymm13, %ymm12
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 8(%r10)
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vblendpd		$0x3, %ymm12, %ymm1, %ymm1
-
-	// third column
-	vpermpd			$0x00, %ymm2, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vblendpd		$0x2, %ymm2, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm2, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vblendpd		$0x4, %ymm2, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm2, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 16(%r10)
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vblendpd		$0x7, %ymm12, %ymm2, %ymm2
-
-	// fourth column
-	vpermpd			$0x00, %ymm3, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vblendpd		$0x2, %ymm3, %ymm13, %ymm12
-
-	vpermpd			$0x55, %ymm3, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vblendpd		$0x4, %ymm3, %ymm12, %ymm12
-
-	vpermpd			$0xaa, %ymm3, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vblendpd		$0x8, %ymm3, %ymm12, %ymm12
-	
-	vpermpd			$0xff, %ymm3, %ymm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmovsd			%xmm13, 24(%r10)
-//	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vblendpd		$0x7, %ymm12, %ymm3, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgetrf_l_8x4_lib4, .-inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
-
-
-
-
-
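The factorization edge routine above computes an unpivoted LU of the 8x4 left block and writes the reciprocal of each pivot to inv_diag_E (the vdivsd/vmovsd pairs). A compact reference of the operation; the no-pivoting assumption is the caller's responsibility, and the in-register scheduling of the kernel is of course different:

    #include <assert.h>
    /* Reference only: unblocked LU without pivoting on an m x n panel (n <= m),
       storing 1/pivot into inv_diag[k] as the kernel does for inv_diag_E. */
    void dgetrf_nopivot_ref(int m, int n, double *A, int lda, double *inv_diag) {
        for (int k = 0; k < n; k++) {
            double pivot = A[k + k * lda];
            assert(pivot != 0.0);                   /* no pivoting: assumed nonzero */
            inv_diag[k] = 1.0 / pivot;
            for (int i = k + 1; i < m; i++)
                A[i + k * lda] *= inv_diag[k];      /* L multipliers below the pivot */
            for (int j = k + 1; j < n; j++)         /* trailing update of later columns */
                for (int i = k + 1; i < m; i++)
                    A[i + j * lda] -= A[i + k * lda] * A[k + j * lda];
        }
    }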
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_lib4, @function
-inner_store_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmovapd %ymm4,  0(%r15)
-	vmovapd %ymm5, 32(%r15)
-	vmovapd %ymm6, 64(%r15)
-	vmovapd %ymm7, 96(%r15)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_lib4, .-inner_store_8x4_lib4
-#endif
-#endif
-
-
-
-
-
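The store routines write back into the panel-major ("lib4") layout: r11 carries 4*sdd*sizeof(double), the byte distance from the top 4-row panel to the one below it, and each ymm register is one 4-element column of a panel. Assuming the usual lib4 addressing (panels of 4 rows, sdd = panel stride in columns), element (i, j) would live at:

    /* Assumed lib4 indexing; illustrative helper, not a library function. */
    static inline double *lib4_elem(double *A, int sdd, int i, int j) {
        return A + (i / 4) * 4 * sdd    /* skip whole 4-row panels */
                 + j * 4                /* column inside the panel */
                 + (i % 4);             /* row inside the panel */
    }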
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_lib4, @function
-inner_store_4x8_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_lib4; .scl 2; .type 32; .endef
-inner_store_4x8_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,   0(%r10)
-	vmovapd %ymm1,  32(%r10)
-	vmovapd %ymm2,  64(%r10)
-	vmovapd %ymm3,  96(%r10)
-
-	vmovapd %ymm4, 128(%r10)
-	vmovapd %ymm5, 160(%r10)
-	vmovapd %ymm6, 192(%r10)
-	vmovapd %ymm7, 224(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_lib4, .-inner_store_4x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_vs_lib4, @function
-inner_store_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_vs_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	cmpl		$2, %r13d
-	vmovapd		%ymm0, 0(%r10)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r15)
-	jl			0f // end
-	cmpl		$3, %r13d
-	vmovapd		%ymm1, 32(%r10)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r15)
-	jl			0f // end
-	vmovapd		%ymm2, 64(%r10)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r15)
-	je			0f // end
-	vmovapd		%ymm3, 96(%r10)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r15)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_vs_lib4, .-inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
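The _vs store clips the tile to km rows and kn columns: columns are skipped via the kn comparisons, and the second panel's rows are clipped with vmaskmovpd, whose mask is built by broadcasting km and subtracting it from the constant at .LC03 (taken here to hold the row indices of the lower panel, so a lane's sign bit is set exactly when that row lies below km). A C equivalent under those assumed semantics:

    /* Reference only: km = valid rows (4..8), kn = valid columns (1..4);
       D0 is the top panel, D1 the panel 4*sdd doubles below it. */
    void store_8x4_vs_ref(const double tile[8][4], int km, int kn,
                          double *D0, double *D1) {
        for (int j = 0; j < kn && j < 4; j++) {
            for (int i = 0; i < 4; i++)
                D0[j * 4 + i] = tile[i][j];             /* top panel: always full */
            for (int i = 4; i < km && i < 8; i++)
                D1[j * 4 + (i - 4)] = tile[i][j];       /* bottom panel: masked rows */
        }
    }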
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_vs_lib4, @function
-inner_store_4x8_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x8_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmaskmovpd	%ymm0, %ymm15,   0(%r10)
-	vmaskmovpd	%ymm1, %ymm15,  32(%r10)
-	vmaskmovpd	%ymm2, %ymm15,  64(%r10)
-	vmaskmovpd	%ymm3, %ymm15,  96(%r10)
-
-	vmaskmovpd	%ymm4, %ymm15, 128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovpd	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmaskmovpd	%ymm7, %ymm15, 224(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_vs_lib4, .-inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_lib4, @function
-inner_store_l_8x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_lib4:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-	vmovapd		%ymm4, 0(%r15)
-	vmovapd		%ymm5, 32(%r15)
-	vmovapd		%ymm6, 64(%r15)
-	vmovapd		%ymm7, 96(%r15)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_lib4, .-inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-
-
-
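The "store lower" variant writes only the lower triangle of the top 4x4 block (the strictly-upper entries are reloaded and blended back so they keep their previous values), while the lower 4-row panel is written in full, so only the triangular part of the result is committed. Sketch under the assumed lib4 panel layout:

    /* Reference only: top panel keeps its strictly-upper entries,
       lower triangle and the whole bottom panel are overwritten. */
    void store_l_8x4_ref(const double tile[8][4], double *D0, double *D1) {
        for (int j = 0; j < 4; j++) {
            for (int i = j; i < 4; i++)             /* top block: rows i >= j only */
                D0[j * 4 + i] = tile[i][j];
            for (int i = 4; i < 8; i++)             /* bottom panel: stored in full */
                D1[j * 4 + (i - 4)] = tile[i][j];
        }
    }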
-// common inner routine with file scope
-//
-// store lower n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r15  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_vs_lib4, @function
-inner_store_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmovapd		%ymm0, 0(%r10)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r10, %r11, 1)
-
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_vs_lib4, .-inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_gen_lib4, @function
-inner_store_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_8x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-	vmovupd		.LC03(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-	vmovupd		LC03(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm3, %ymm2
-	vmovapd		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmaskmovpd	%ymm0, %ymm14,  0(%r11)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm1, %ymm14, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm14, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm14, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r11, %r12, 1)
-
-	jmp		4f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm4, %ymm12, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm5, %ymm12, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm6, %ymm12, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm7, %ymm12, %ymm7
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm14
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x3, %ymm14, %ymm15, %ymm14
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x21, %ymm0, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm4, %ymm0
-	vperm2f128	$0x21, %ymm4, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x21, %ymm1, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm5, %ymm1
-	vperm2f128	$0x21, %ymm5, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x21, %ymm2, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm6, %ymm2
-	vperm2f128	$0x21, %ymm6, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x21, %ymm3, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm7, %ymm3
-	vperm2f128	$0x21, %ymm7, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x7, %ymm14, %ymm15, %ymm14
-
-3:
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			4f // end
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_gen_lib4, .-inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-
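A scalar model of the generalized store above may help when reading the masked-store code: it writes only the rows in [m0, m1) and the columns in [n0, min(n1, 4)) of the 8x4 accumulator, placed offsetD rows down inside the destination panel. This is a sketch of the intent only (the function name and plain-C form are mine), assuming BLASFEO's bs=4 panel-major layout in which the next panel starts 4*sdd doubles later; the real routine does the same thing branch-free with vmaskmovpd masks and register rotations.

	/* illustrative sketch, not part of the library */
	static void ref_store_8x4_gen_lib4(int offsetD, double *D, int sdd,
	                                   int m0, int m1, int n0, int n1,
	                                   const double acc[8][4])
	{
		int n_end = n1 < 4 ? n1 : 4;                 /* at most 4 columns per kernel */
		for (int j = n0; j < n_end; j++)
			for (int i = m0; i < m1 && i < 8; i++) {
				int r = offsetD + i;                 /* row position in the destination */
				double *panel = D + (r / 4) * 4 * sdd; /* rows wrap into following panels */
				panel[j * 4 + r % 4] = acc[i][j];
			}
	}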
-
-
-// common inner routine with file scope
-//
-// store l generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d50 d60 d70]
-// ymm5 <- [d41 d51 d61 d71]
-// ymm6 <- [d42 d52 d62 d72]
-// ymm7 <- [d43 d53 d63 d73]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_gen_lib4, @function
-inner_store_l_8x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x4_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-	vmovupd		.LC03(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-	vmovupd		LC03(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm3, %ymm2
-	vmovapd		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm13
-#endif
-
-	vmaskmovpd	%ymm0, %ymm14,  0(%r11)
-	vmaskmovpd	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm1, %ymm14, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm2, %ymm14, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			3f // end
-	vblendpd	$0x4, %ymm13, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm14, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r11, %r12, 1)
-
-	jmp		3f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm4, %ymm12, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm5, %ymm12, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm6, %ymm12, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm7, %ymm12, %ymm7
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC08(%rip), %ymm12
-	vmovupd		.LC05(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC08(%rip), %ymm12
-	vmovupd		LC05(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x2, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC09(%rip), %ymm12
-	vmovupd		.LC06(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC09(%rip), %ymm12
-	vmovupd		LC06(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x3, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x4, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x21, %ymm0, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm4, %ymm0
-	vperm2f128	$0x21, %ymm4, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x21, %ymm1, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm5, %ymm1
-	vperm2f128	$0x21, %ymm5, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x21, %ymm2, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm6, %ymm2
-	vperm2f128	$0x21, %ymm6, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x21, %ymm3, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm7, %ymm3
-	vperm2f128	$0x21, %ymm7, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm7
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC10(%rip), %ymm12
-	vmovupd		.LC07(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC10(%rip), %ymm12
-	vmovupd		LC07(%rip), %ymm13
-#endif
-	vandpd		%ymm12, %ymm14, %ymm12
-	vandpd		%ymm13, %ymm15, %ymm13
-
-	vblendpd	$0x7, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm15
-#elif defined(OS_MAC)
-	vmovapd		LC04(%rip), %ymm15
-#endif
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			3f // end
-	vblendpd	$0x8, %ymm15, %ymm12, %ymm12
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			3f // end
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	je			3f // end
-	vblendpd	$0x2, %ymm15, %ymm14, %ymm14
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_gen_lib4, .-inner_store_l_8x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dgemm_nt_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_lib4
-	.type kernel_dgemm_nt_8x4_lib4, @function
-kernel_dgemm_nt_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_lib4
-_kernel_dgemm_nt_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_lib4
-	.def kernel_dgemm_nt_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_lib4, .-kernel_dgemm_nt_8x4_lib4
-#endif
-
-
-
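As a reference for the exported kernel above, here is a scalar sketch of the operation it performs, assuming BLASFEO's bs=4 panel-major ("lib4") layout, where element (i, j) of a matrix with panel stride sd lives at p[(i/4)*4*sd + j*4 + i%4]. The function name and plain-C form are illustrative only, not part of the library.

	/* D(8x4) = alpha * A * B^T + beta * C, all blocks panel-major */
	static void ref_dgemm_nt_8x4_lib4(int k, const double *alpha, const double *A,
	                                  int sda, const double *B, const double *beta,
	                                  const double *C, int sdc, double *D, int sdd)
	{
		for (int i = 0; i < 8; i++)
			for (int j = 0; j < 4; j++) {
				double acc = 0.0;
				for (int l = 0; l < k; l++)
					acc += A[(i/4)*4*sda + l*4 + i%4] * B[l*4 + j]; /* row j of B, i.e. B^T */
				D[(i/4)*4*sdd + j*4 + i%4] =
					beta[0]*C[(i/4)*4*sdc + j*4 + i%4] + alpha[0]*acc;
			}
	}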
-
-
-//                               1      2              3          4          5        6             7          8
-// void kernel_dgemm_nt_4x8_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x8_lib4
-	.type kernel_dgemm_nt_4x8_lib4, @function
-kernel_dgemm_nt_4x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x8_lib4
-_kernel_dgemm_nt_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x8_lib4
-	.def kernel_dgemm_nt_4x8_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x8_lib4, .-kernel_dgemm_nt_4x8_lib4
-#endif
-
-
-
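The 4x8 kernel above reuses the same 8x4 inner routine with the roles of A and B swapped (ARG4 and ARG3 are loaded into r11 and r13), so the accumulator holds the transpose of the target block; inner_tran_scale_ab_4x8_lib4 transposes it back while applying alpha and beta. The net effect, in the same scalar model as the previous sketch (illustrative names, bs=4 panel-major layout assumed):

	static void ref_dgemm_nt_4x8_lib4(int k, const double *alpha, const double *A,
	                                  const double *B, int sdb, const double *beta,
	                                  const double *C, double *D)
	{
		for (int i = 0; i < 4; i++)          /* single 4-row panel of C and D */
			for (int j = 0; j < 8; j++) {    /* 8 columns; rows of B span two panels */
				double acc = 0.0;
				for (int l = 0; l < k; l++)
					acc += A[l*4 + i] * B[(j/4)*4*sdb + l*4 + j%4];
				D[j*4 + i] = beta[0]*C[j*4 + i] + alpha[0]*acc;
			}
	}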
-
-
-//                                  rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dgemm_nt_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_vs_lib4
-	.type kernel_dgemm_nt_8x4_vs_lib4, @function
-kernel_dgemm_nt_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_vs_lib4
-_kernel_dgemm_nt_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_vs_lib4
-	.def kernel_dgemm_nt_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_vs_lib4, .-kernel_dgemm_nt_8x4_vs_lib4
-#endif
-
-
-
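The _vs_ (variable-size) kernel above differs from the plain 8x4 kernel only in the store step: ARG11 and ARG12 clip the write-back to the top-left km-by-kn corner of the block. A scalar sketch of that store, under the same layout assumption as the earlier sketches (names mine):

	static void ref_store_8x4_vs_lib4(double *D, int sdd, int km, int kn,
	                                  const double acc[8][4])
	{
		for (int j = 0; j < 4 && j < kn; j++)
			for (int i = 0; i < 8 && i < km; i++)
				D[(i/4)*4*sdd + j*4 + i%4] = acc[i][j];
	}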
-
-
-//                                  1      2              3          4          5        6             7          8          9       10
-// void kernel_dgemm_nt_4x8_vs_lib4(int k, double *alpha, double *A, double *B, int sdb, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x8_vs_lib4
-	.type kernel_dgemm_nt_4x8_vs_lib4, @function
-kernel_dgemm_nt_4x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x8_vs_lib4
-_kernel_dgemm_nt_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x8_vs_lib4
-	.def kernel_dgemm_nt_4x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // B
-	movq	ARG5, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // km
-	movq	ARG10, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x8_vs_lib4, .-kernel_dgemm_nt_4x8_vs_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8         r9            rsp+8        rsp+16     rsp+24   rsp+32       rsp+40     rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_dgemm_nt_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x4_gen_lib4
-	.type kernel_dgemm_nt_8x4_gen_lib4, @function
-kernel_dgemm_nt_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x4_gen_lib4
-_kernel_dgemm_nt_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x4_gen_lib4
-	.def kernel_dgemm_nt_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x4_gen_lib4, .-kernel_dgemm_nt_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16        rsp+24     rsp+32   rsp+40     rsp+48
-// void kernel_dgemm_nn_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_8x4_lib4
-	.type kernel_dgemm_nn_8x4_lib4, @function
-kernel_dgemm_nn_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_8x4_lib4
-_kernel_dgemm_nn_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_8x4_lib4
-	.def kernel_dgemm_nn_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // C
-	movq	ARG10, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_8x4_lib4, .-kernel_dgemm_nn_8x4_lib4
-#endif
-
-
-
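For the nn kernel above, B is read in its natural (non-transposed) k-by-4 panel-major form, entered offsetB rows into its first panel; the inner_edge_* call peels the unaligned leading rows before the main loop. A scalar sketch under the same bs=4 layout assumption (names mine):

	static void ref_dgemm_nn_8x4_lib4(int k, const double *alpha, const double *A,
	                                  int sda, int offsetB, const double *B, int sdb,
	                                  const double *beta, const double *C, int sdc,
	                                  double *D, int sdd)
	{
		for (int i = 0; i < 8; i++)
			for (int j = 0; j < 4; j++) {
				double acc = 0.0;
				for (int l = 0; l < k; l++) {
					int r = offsetB + l;              /* row of B inside its panel chain */
					acc += A[(i/4)*4*sda + l*4 + i%4]
					     * B[(r/4)*4*sdb + j*4 + r%4];
				}
				D[(i/4)*4*sdd + j*4 + i%4] =
					beta[0]*C[(i/4)*4*sdc + j*4 + i%4] + alpha[0]*acc;
			}
	}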
-
-
-//                               rdi    rsi            rdx        rcx          r8         r9       rsp+8         rsp+16     rsp+24
-// void kernel_dgemm_nn_4x8_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x8_lib4
-	.type kernel_dgemm_nn_4x8_lib4, @function
-kernel_dgemm_nn_4x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x8_lib4
-_kernel_dgemm_nn_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x8_lib4
-	.def kernel_dgemm_nn_4x8_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x8_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x8_lib4, .-kernel_dgemm_nn_4x8_lib4
-#endif
-
-
-
-
-
-//                                   rdi    rsi            rdx        rcx      r8        r9         rsp+8    rsp+16        rsp+24    rsp+32     rsp+40   rsp+48    rsp+56     rsp+64   rsp+72  rsp+80  rsp+88  rsp+96
-// void kernel_dgemm_nn_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offB, double *B, int sdb, double *beta, int offC, double *C, int sdc, int offD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_8x4_gen_lib4
-	.type kernel_dgemm_nn_8x4_gen_lib4, @function
-kernel_dgemm_nn_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_8x4_gen_lib4
-_kernel_dgemm_nn_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_8x4_gen_lib4
-	.def kernel_dgemm_nn_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // offsetC
-	movq	ARG10, %r13 // C
-	movq	ARG11, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG12, %r10 // offsetD
-	movq	ARG13, %r11 // D
-	movq	ARG14, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG15, %r13 // m0
-	movq	ARG16, %r14 // m1
-	movq	ARG17, %r15 // n0
-	movq	ARG18, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_8x4_gen_lib4, .-kernel_dgemm_nn_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                 rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dsyrk_nt_l_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_lib4
-	.type kernel_dsyrk_nt_l_8x4_lib4, @function
-kernel_dsyrk_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_lib4
-_kernel_dsyrk_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_lib4
-	.def kernel_dsyrk_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_lib4, .-kernel_dsyrk_nt_l_8x4_lib4
-#endif
-
-
-
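The dsyrk kernel above shares the accumulate and scale path with the gemm kernel; only the store differs. inner_store_l_8x4_lib4 writes just the on-and-below-diagonal part of the 8x4 block, leaving the strictly upper triangle of D untouched. Sketch of that store (same layout assumption, names mine):

	static void ref_store_l_8x4_lib4(double *D, int sdd, const double acc[8][4])
	{
		for (int j = 0; j < 4; j++)
			for (int i = j; i < 8; i++)   /* i >= j: lower triangle of the block */
				D[(i/4)*4*sdd + j*4 + i%4] = acc[i][j];
	}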
-
-
-//                                    rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dsyrk_nt_l_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_vs_lib4
-	.type kernel_dsyrk_nt_l_8x4_vs_lib4, @function
-kernel_dsyrk_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_vs_lib4
-_kernel_dsyrk_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_vs_lib4
-	.def kernel_dsyrk_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_vs_lib4, .-kernel_dsyrk_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx      r8         r9            rsp+8        rsp+16     rsp+24   rsp+32       rsp+40     rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_dsyrk_nt_l_8x4_gen_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x4_gen_lib4
-	.type kernel_dsyrk_nt_l_8x4_gen_lib4, @function
-kernel_dsyrk_nt_l_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x4_gen_lib4
-_kernel_dsyrk_nt_l_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x4_gen_lib4
-	.def kernel_dsyrk_nt_l_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x4_gen_lib4, .-kernel_dsyrk_nt_l_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx      r8           r9         rsp+8    rsp+16     rsp+24
-// void kernel_dtrmm_nn_rl_8x4_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_8x4_lib4
-	.type kernel_dtrmm_nn_rl_8x4_lib4, @function
-kernel_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_8x4_lib4
-_kernel_dtrmm_nn_rl_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_8x4_lib4
-	.def kernel_dtrmm_nn_rl_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_8x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_8x4_lib4, .-kernel_dtrmm_nn_rl_8x4_lib4
-#endif
-
-
-
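The trmm kernel above has the same skeleton as the nn gemm kernel, except that an edge routine first consumes the leading triangle of B and there is no C or beta term (inner_scale_a0). A scalar sketch of the intended result, under the assumption that the leading 4x4 block of B is lower triangular (zero above the diagonal) so products with l < j contribute nothing; layout and names as in the earlier sketches:

	static void ref_dtrmm_nn_rl_8x4_lib4(int k, const double *alpha,
	                                     const double *A, int sda, int offsetB,
	                                     const double *B, int sdb,
	                                     double *D, int sdd)
	{
		for (int i = 0; i < 8; i++)
			for (int j = 0; j < 4; j++) {
				double acc = 0.0;
				for (int l = j; l < k; l++) {         /* skip the known-zero triangle */
					int r = offsetB + l;
					acc += A[(i/4)*4*sda + l*4 + i%4]
					     * B[(r/4)*4*sdb + j*4 + r%4];
				}
				D[(i/4)*4*sdd + j*4 + i%4] = alpha[0]*acc;
			}
	}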
-
-
-//                                     1      2              3          4        5            6          7        8          9        10      11
-// void kernel_dtrmm_nn_rl_8x4_vs_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_8x4_vs_lib4
-	.type kernel_dtrmm_nn_rl_8x4_vs_lib4, @function
-kernel_dtrmm_nn_rl_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_8x4_vs_lib4
-_kernel_dtrmm_nn_rl_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_8x4_vs_lib4
-	.def kernel_dtrmm_nn_rl_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_8x4_vs_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // km
-	movq	ARG11, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_8x4_vs_lib4, .-kernel_dtrmm_nn_rl_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      1      2              3          4        5            6          7        8            9          10       11      12      13      14
-// void kernel_dtrmm_nn_rl_8x4_gen_lib4(int k, double *alpha, double *A, int sda, int offsetB, double *B, int sdb, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_8x4_gen_lib4
-	.type kernel_dtrmm_nn_rl_8x4_gen_lib4, @function
-kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_8x4_gen_lib4
-_kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_8x4_gen_lib4
-	.def kernel_dtrmm_nn_rl_8x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_8x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_8x4_vs_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // offsetD
-	movq	ARG9, %r11 // D
-	movq	ARG10, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG11, %r13 // m0
-	movq	ARG12, %r14 // m1
-	movq	ARG13, %r15 // n0
-	movq	ARG14, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_8x4_gen_lib4, .-kernel_dtrmm_nn_rl_8x4_gen_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32
-// void kernel_dtrmm_nt_ru_8x4_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_8x4_lib4
-	.type kernel_dtrmm_nt_ru_8x4_lib4, @function
-kernel_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_8x4_lib4
-_kernel_dtrmm_nt_ru_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_8x4_lib4
-	.def kernel_dtrmm_nt_ru_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_8x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_8x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10 // A
-	movq	ARG4, %r11 // sda
-	sall	$5, %r11d // 4*sda*sizeof(double)
-	movq	ARG5, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_8x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_8x4_lib4, .-kernel_dtrmm_nt_ru_8x4_lib4
-#endif
-
-
-
-
-
-//                                 rdi     rsi            rdx        rcx      r8         r9            rsp+8      rsp+16   rsp+24     rsp+32   rsp+40  rsp+48
-// void kernel_dtrmm_nt_ru_8x4_vs_lib4(int k, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_8x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_8x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_8x4_vs_lib4
-_kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_8x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d //k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	addq	$128, %r13 // B+4*bs
-
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-//	INNER_BLEND_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-//	call inner_blend_8x4_lib4
-#elif defined(OS_MAC)
-//	callq _inner_blend_8x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // C
-	movq	ARG8, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // store address D
-	movq	ARG10, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_8x4_vs_lib4, .-kernel_dtrmm_nt_ru_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dpotrf_nt_l_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x4_lib4
-	.type kernel_dpotrf_nt_l_8x4_lib4, @function
-kernel_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x4_lib4
-_kernel_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x4_lib4
-	.def kernel_dpotrf_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x4_lib4, .-kernel_dpotrf_nt_l_8x4_lib4
-#endif
-
-
-
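The Cholesky kernel above first forms tmp = C - A*B^T on the 8x4 block (sub kernel plus scale_11), then factorizes: the top 4x4 gets the lower Cholesky factor, the bottom four rows are solved against that factor's transpose, and inv_diag_D receives the reciprocal diagonal. A scalar sketch of that factorization step, on a plain 8x4 array so the panel addressing stays out of the way (names mine, pivot checks omitted):

	#include <math.h>
	#include <string.h>

	static void ref_dpotrf_nt_l_8x4(const double tmp_in[8][4], double out[8][4],
	                                double *inv_diag_D)
	{
		double t[8][4];
		memcpy(t, tmp_in, sizeof t);
		for (int j = 0; j < 4; j++) {
			double d = sqrt(t[j][j]);            /* pivot of the lower factor */
			inv_diag_D[j] = 1.0 / d;
			for (int i = j; i < 8; i++)
				out[i][j] = t[i][j] / d;         /* column j of the factor / trsm rows */
			for (int c = j + 1; c < 4; c++)      /* rank-1 update of the trailing block */
				for (int i = c; i < 8; i++)
					t[i][c] -= out[i][j] * out[c][j];
		}
	}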
-
-
-//                                     rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24              rsp+32  rsp+40 
-// void kernel_dpotrf_nt_l_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x4_vs_lib4
-	.type kernel_dpotrf_nt_l_8x4_vs_lib4, @function
-kernel_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x4_vs_lib4
-_kernel_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x4_vs_lib4
-	.def kernel_dpotrf_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x4_vs_lib4, .-kernel_dpotrf_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56
-// void kernel_dsyrk_dpotrf_nt_l_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x4_lib4
-#endif
-
-
-
-
-
-//                                           rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56              rsp+64  rsp+72
-// void kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dtrsm_nt_rl_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                         1      2          3          4        5          6          7          8        9                   10      11
-// void kernel_dtrsm_nt_rl_inv_4x8_vs_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x8_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x8_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x8_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x8_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG3, %r11
-	movq	ARG4, %r12
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG2, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG9, %r12  // inv_diag_E 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG6, %r10 // store address D
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x8_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x8_vs_lib4
-#endif
-
-
-
-
-
-//                                               rdi     rsi         rdx       rcx          r8     r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56     rsp+64              rsp+72  rsp+80
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                                1       2           3           4         5       6           7           8         9          10         11         12       13                 14       15
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4(int kp, double *Ap, double *Bp, int sdbp, int km, double *Am, double *Bm, int sdbm, double *C, double *D, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG3, %r11  // Bp
-	movq	ARG4, %r12 // sdbp
-	sall	$5, %r12d   // 32*sdbp
-	movq	ARG2, %r13  // Ap
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10 // km
-	movq	ARG7, %r11 // Bm
-	movq	ARG8, %r12 // sdbm
-	sall	$5, %r12d // 32*sdbm
-	movq	ARG6, %r13 // Am
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG11, %r10  // E 
-	movq	ARG12, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG13, %r12  // inv_diag_E 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // store address D
-	movq	ARG14, %r11 // km 
-	movq	ARG15, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x8_vs_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32 
-// void kernel_dtrsm_nt_rl_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x4_lib4
-_kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x4_lib4, .-kernel_dtrsm_nt_rl_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                      1      2          3          4        5          6          7          8        9
-// void kernel_dtrsm_nt_rl_inv_4x8_lib4(int k, double *A, double *B, int sdb, double *C, double *D, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x8_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x8_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x8_lib4
-_kernel_dtrsm_nt_rl_inv_4x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x8_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x8_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG3, %r11
-	movq	ARG4, %r12
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG2, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG7, %r10  // E 
-	movq	ARG8, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG9, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG6, %r10 // store address D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x8_lib4, .-kernel_dtrsm_nt_rl_inv_4x8_lib4
-#endif
-
-
-
-
-
-//                                            rdi     rsi         rdx       rcx         r8      r9          rsp+8     rsp+16      rsp+24     rsp+32   rsp+40     rsp+48   rsp+56     rsp+64
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4(int kp, double *Ap, int sdap, double *Bp, int km, double *Am, int sdam, double *Bm, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24
-// void kernel_dtrsm_nt_rl_one_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_8x4_lib4
-	.type kernel_dtrsm_nt_rl_one_8x4_lib4, @function
-kernel_dtrsm_nt_rl_one_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_8x4_lib4
-_kernel_dtrsm_nt_rl_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_8x4_lib4
-	.def kernel_dtrsm_nt_rl_one_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_8x4_lib4, .-kernel_dtrsm_nt_rl_one_8x4_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32  rsp+40
-// void kernel_dtrsm_nt_rl_one_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_one_8x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-_kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_one_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_ONE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_one_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG10, %r12 // km 
-	movq	ARG11, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_one_8x4_vs_lib4, .-kernel_dtrsm_nt_rl_one_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32 
-// void kernel_dtrsm_nt_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_lib4
-	.type kernel_dtrsm_nt_ru_inv_8x4_lib4, @function
-kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_8x4_lib4
-_kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_lib4
-	.def kernel_dtrsm_nt_ru_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_8x4_lib4, .-kernel_dtrsm_nt_ru_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         rdi    rsi        rdx      rcx        r8         r9       rsp+8      rsp+16   rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dtrsm_nt_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nt_ru_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-_kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nt_ru_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUT_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rut_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // store address D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_ru_inv_8x4_vs_lib4, .-kernel_dtrsm_nt_ru_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ru_inv_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_lib4
-	.type kernel_dtrsm_nn_ru_inv_8x4_lib4, @function
-kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_8x4_lib4
-_kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_lib4
-	.def kernel_dtrsm_nn_ru_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_8x4_lib4, .-kernel_dtrsm_nn_ru_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40              rsp+48  rsp+56
-// void kernel_dtrsm_nn_ru_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nn_ru_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-_kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nn_ru_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // inv_diag_E
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_run_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_run_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ru_inv_8x4_vs_lib4, .-kernel_dtrsm_nn_ru_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40
-// void kernel_dtrsm_nn_ll_one_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_8x4_lib4
-	.type kernel_dtrsm_nn_ll_one_8x4_lib4, @function
-kernel_dtrsm_nn_ll_one_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_8x4_lib4
-_kernel_dtrsm_nn_ll_one_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_8x4_lib4
-	.def kernel_dtrsm_nn_ll_one_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_8x4_lib4, .-kernel_dtrsm_nn_ll_one_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48  rsp+56
-// void kernel_dtrsm_nn_ll_one_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-	.type kernel_dtrsm_nn_ll_one_8x4_vs_lib4, @function
-kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-_kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-	.def kernel_dtrsm_nn_ll_one_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_ll_one_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LLN_ONE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lln_one_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lln_one_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_ll_one_8x4_vs_lib4, .-kernel_dtrsm_nn_ll_one_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48
-// void kernel_dtrsm_nn_lu_inv_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_lib4
-	.type kernel_dtrsm_nn_lu_inv_8x4_lib4, @function
-kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_8x4_lib4
-_kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_lib4
-	.def kernel_dtrsm_nn_lu_inv_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_8x4_lib4, .-kernel_dtrsm_nn_lu_inv_8x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx      ecx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32     rsp+40   rsp+48              rsp+56  rsp+64
-// void kernel_dtrsm_nn_lu_inv_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-	.type kernel_dtrsm_nn_lu_inv_8x4_vs_lib4, @function
-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-_kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-	.def kernel_dtrsm_nn_lu_inv_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11 // sde
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG13, %r13  // km
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_LUN_INV_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_lun_inv_8x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG13, %r12  // km
-	movq	ARG14, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nn_lu_inv_8x4_vs_lib4, .-kernel_dtrsm_nn_lu_inv_8x4_vs_lib4
-#endif
-
-
-
-
-
-//                                edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32
-// void kernel_dgetrf_nn_l_8x4_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_8x4_lib4
-	.type kernel_dgetrf_nn_l_8x4_lib4, @function
-kernel_dgetrf_nn_l_8x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_8x4_lib4
-_kernel_dgetrf_nn_l_8x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_8x4_lib4
-	.def kernel_dgetrf_nn_l_8x4_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_8x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib4
-#endif
-#endif
-
-
-	// epilogue
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_8x4_lib4, .-kernel_dgetrf_nn_l_8x4_lib4
-#endif
-
-
-
-
-
-//                                   edi    rsi        rdx      rcx        r8       r9         rsp+8    rsp+16     rsp+24   rsp+32              rsp+40  rsp+48
-// void kernel_dgetrf_nn_l_8x4_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgetrf_nn_l_8x4_vs_lib4
-	.type kernel_dgetrf_nn_l_8x4_vs_lib4, @function
-kernel_dgetrf_nn_l_8x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgetrf_nn_l_8x4_vs_lib4
-_kernel_dgetrf_nn_l_8x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgetrf_nn_l_8x4_vs_lib4
-	.def kernel_dgetrf_nn_l_8x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgetrf_nn_l_8x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nn_8x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGETRF_L_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgetrf_l_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgetrf_l_8x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12  // km
-	movq	ARG12, %r13  // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgetrf_nn_l_8x4_vs_lib4, .-kernel_dgetrf_nn_l_8x4_vs_lib4
-#endif
-
-
-
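As an orientation sketch (not part of the deleted file): the kernels above convert the panel strides (sda, sdb, sdc, sdd) into byte strides with `sall $5`, i.e. a multiply by 32 = 4 rows * sizeof(double), as the `// 4*sda*sizeof(double)` comments note. A minimal C helper for the panel-major ("lib4") addressing this implies; the helper name is hypothetical and the exact in-panel ordering (4-row panels, column-interleaved) is an assumption, not taken from the deleted file.

/* Hypothetical helper (not in BLASFEO): address element (i, j) of a
 * panel-major matrix stored in 4-row panels with panel stride sda.
 * Consecutive panels are 4*sda*sizeof(double) bytes apart, which is
 * exactly the sda << 5 computed by `sall $5` in the kernels above. */
static inline double *lib4_elem(double *A, int i, int j, int sda)
{
	return &A[(i / 4) * 4 * sda + 4 * j + (i % 4)];
}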
-
-
-//                             1         2           3           4           5
-// void kernel_dlarfb4_r_8_lib4(int kmax, double *pV, double *pT, double *pD, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dlarfb4_r_8_lib4
-	.type kernel_dlarfb4_r_8_lib4, @function
-kernel_dlarfb4_r_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dlarfb4_r_8_lib4
-_kernel_dlarfb4_r_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dlarfb4_r_8_lib4
-	.def kernel_dlarfb4_r_8_lib4; .scl 2; .type 32; .endef
-kernel_dlarfb4_r_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-//	vxorpd	%ymm0, %ymm0, %ymm0
-//	vmovapd	%ymm0, %ymm1
-//	vmovapd	%ymm0, %ymm2
-//	vmovapd	%ymm0, %ymm3
-//	vmovapd	%ymm0, %ymm4
-//	vmovapd	%ymm0, %ymm5
-//	vmovapd	%ymm0, %ymm6
-//	vmovapd	%ymm0, %ymm7
-	
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11 // D
-	movq	ARG5, %r12 // sdd
-	sall	$5, %r12d
-	movq	ARG2, %r13 // V
-
-	//
-	vmovapd			0(%r11), %ymm0
-	vmovapd			0(%r11, %r12, 1), %ymm4
-	//
-	vmovapd			32(%r11), %ymm1
-	vmovapd			32(%r11, %r12, 1), %ymm5
-	vbroadcastsd	32(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm1, %ymm0
-	vfmadd231pd		%ymm13, %ymm5, %ymm4
-	//
-	vmovapd			64(%r11), %ymm2
-	vmovapd			64(%r11, %r12, 1), %ymm6
-	vbroadcastsd	64(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm0
-	vfmadd231pd		%ymm13, %ymm6, %ymm4
-	vbroadcastsd	72(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm2, %ymm1
-	vfmadd231pd		%ymm13, %ymm6, %ymm5
-	//
-	vmovapd			96(%r11), %ymm3
-	vmovapd			96(%r11, %r12, 1), %ymm7
-	vbroadcastsd	96(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm0
-	vfmadd231pd		%ymm13, %ymm7, %ymm4
-	vbroadcastsd	104(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm1
-	vfmadd231pd		%ymm13, %ymm7, %ymm5
-	vbroadcastsd	112(%r13), %ymm13
-	vfmadd231pd		%ymm13, %ymm3, %ymm2
-	vfmadd231pd		%ymm13, %ymm7, %ymm6
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x4_lib4
-#endif
-#endif
-
-	movq	ARG3, %r10 // T
-
-	//
-	vbroadcastsd	120(%r10), %ymm12
-	vmulpd			%ymm3, %ymm12, %ymm3
-	vmulpd			%ymm7, %ymm12, %ymm7
-	//
-	vbroadcastsd	112(%r10), %ymm12
-	vfmadd231pd		%ymm2, %ymm12, %ymm3
-	vfmadd231pd		%ymm6, %ymm12, %ymm7
-	vbroadcastsd	80(%r10), %ymm12
-	vmulpd			%ymm2, %ymm12, %ymm2
-	vmulpd			%ymm6, %ymm12, %ymm6
-	//
-	vbroadcastsd	104(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm3
-	vfmadd231pd		%ymm5, %ymm12, %ymm7
-	vbroadcastsd	72(%r10), %ymm12
-	vfmadd231pd		%ymm1, %ymm12, %ymm2
-	vfmadd231pd		%ymm5, %ymm12, %ymm6
-	vbroadcastsd	40(%r10), %ymm12
-	vmulpd			%ymm1, %ymm12, %ymm1
-	vmulpd			%ymm5, %ymm12, %ymm5
-	//
-	vbroadcastsd	96(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm3
-	vfmadd231pd		%ymm4, %ymm12, %ymm7
-	vbroadcastsd	64(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm2
-	vfmadd231pd		%ymm4, %ymm12, %ymm6
-	vbroadcastsd	32(%r10), %ymm12
-	vfmadd231pd		%ymm0, %ymm12, %ymm1
-	vfmadd231pd		%ymm4, %ymm12, %ymm5
-	vbroadcastsd	0(%r10), %ymm12
-	vmulpd			%ymm0, %ymm12, %ymm0
-	vmulpd			%ymm4, %ymm12, %ymm4
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // V
-	movq	ARG4, %r12 // D
-	movq	ARG5, %r13 // sdd
-	sall	$5, %r13d
-
-	//
-	vmovapd			0(%r12), %ymm12
-	vmovapd			0(%r12, %r13, 1), %ymm14
-	vaddpd			%ymm12, %ymm0, %ymm12
-	vaddpd			%ymm14, %ymm4, %ymm14
-	vmovapd			%ymm12, 0(%r12)
-	vmovapd			%ymm14, 0(%r12, %r13, 1)
-	//
-	vmovapd			32(%r12), %ymm12
-	vmovapd			32(%r12, %r13, 1), %ymm14
-	vbroadcastsd	32(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vaddpd			%ymm12, %ymm1, %ymm12
-	vaddpd			%ymm14, %ymm5, %ymm14
-	vmovapd			%ymm12, 32(%r12)
-	vmovapd			%ymm14, 32(%r12, %r13, 1)
-	//
-	vmovapd			64(%r12), %ymm12
-	vmovapd			64(%r12, %r13, 1), %ymm14
-	vbroadcastsd	64(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	72(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vaddpd			%ymm12, %ymm2, %ymm12
-	vaddpd			%ymm14, %ymm6, %ymm14
-	vmovapd			%ymm12, 64(%r12)
-	vmovapd			%ymm14, 64(%r12, %r13, 1)
-	//
-	vmovapd			96(%r12), %ymm12
-	vmovapd			96(%r12, %r13, 1), %ymm14
-	vbroadcastsd	96(%r11), %ymm13
-	vfmadd231pd		%ymm0, %ymm13, %ymm12
-	vfmadd231pd		%ymm4, %ymm13, %ymm14
-	vbroadcastsd	104(%r11), %ymm13
-	vfmadd231pd		%ymm1, %ymm13, %ymm12
-	vfmadd231pd		%ymm5, %ymm13, %ymm14
-	vbroadcastsd	112(%r11), %ymm13
-	vfmadd231pd		%ymm2, %ymm13, %ymm12
-	vfmadd231pd		%ymm6, %ymm13, %ymm14
-	vaddpd			%ymm12, %ymm3, %ymm12
-	vaddpd			%ymm14, %ymm7, %ymm14
-	vmovapd			%ymm12, 96(%r12)
-	vmovapd			%ymm14, 96(%r12, %r13, 1)
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEBP_ADD_NN_8X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgebp_add_nn_8x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgebp_add_nn_8x4_lib4
-#endif
-#endif
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dlarfb4_r_8_lib4, .-kernel_dlarfb4_r_8_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
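Each 32-byte constant in this read-only data section is emitted as (low, high) 32-bit pairs of IEEE-754 doubles; for example the high word 1072693248 is 0x3FF00000, so the pair (0, 1072693248) encodes 1.0, matching the LC04 comment. A small stand-alone C check of that encoding, added editorially and assuming nothing beyond the values shown:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Rebuild a double from the (low, high) 32-bit pair used in the
 * .rodata tables above, e.g. LC02 and LC04. */
static double from_longs(uint32_t lo, uint32_t hi)
{
	uint64_t bits = ((uint64_t)hi << 32) | lo;
	double d;
	memcpy(&d, &bits, sizeof d);
	return d;
}

int main(void)
{
	printf("%g\n", from_longs(0, 1072693248));          /* 1.0, as in LC04 */
	printf("%g\n", from_longs(0, 1071644672));          /* 0.5, the 0.5 entry of LC02 */
	printf("%g\n", from_longs(0, (uint32_t)-1074790400)); /* -1.0, high word 0xBFF00000 */
	return 0;
}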
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC05: // { 1.0 1.0 1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC06: // { 1.0 1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC06: // { 1.0 1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC08: // { -1.0 -1.0 -1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC08: // { -1.0 -1.0 -1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC10: // { -1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC10: // { -1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x8_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x8_lib4.S
deleted file mode 100644
index 954c96d..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgemm_8x8_lib4.S
+++ /dev/null
@@ -1,5625 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define ARG19 STACKSIZE + 104(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define ARG19 STACKSIZE + 152(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_8x8_lib4, @function
-inner_kernel_dgemm_add_nt_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_8x8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_8x8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd			0(%r11), %ymm12
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13), %ymm14
-	vbroadcastsd 	0(%r13, %r14, 1), %ymm15
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	subl	$4, %r10d
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	8(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	16(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	24(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	24(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			32(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	32(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			32(%r11, %r12, 1), %ymm13
-	vbroadcastsd	32(%r13, %r14, 1), %ymm15
-
-	// unroll 1
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	40(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	40(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	48(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	48(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	56(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	56(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			64(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	64(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			64(%r11, %r12, 1), %ymm13
-	vbroadcastsd	64(%r13, %r14, 1), %ymm15
-
-	// unroll 2
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	72(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	72(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	80(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	80(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	88(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	88(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			96(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	96(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			96(%r11, %r12, 1), %ymm13
-	vbroadcastsd	96(%r13, %r14, 1), %ymm15
-
-	// unroll 3
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	104(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	104(%r13, %r14, 1), %ymm15
-	addq	$128, %r11
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	112(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	112(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	120(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	120(%r13, %r14, 1), %ymm15
-	addq	$128, %r13
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			0(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	0(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	subl	$4, %r10d
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	8(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	16(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	24(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	24(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			32(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	32(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			32(%r11, %r12, 1), %ymm13
-	vbroadcastsd	32(%r13, %r14, 1), %ymm15
-
-	// unroll 1
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	40(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	40(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	48(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	48(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	56(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	56(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			64(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	64(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			64(%r11, %r12, 1), %ymm13
-	vbroadcastsd	64(%r13, %r14, 1), %ymm15
-
-	// unroll 2
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	72(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	72(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	80(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	80(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	88(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	88(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vmovapd			96(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	96(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-	vmovapd			96(%r11, %r12, 1), %ymm13
-	vbroadcastsd	96(%r13, %r14, 1), %ymm15
-
-	// unroll 3
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	104(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	vbroadcastsd	104(%r13, %r14, 1), %ymm15
-	addq	$128, %r11
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	112(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	vbroadcastsd	112(%r13, %r14, 1), %ymm15
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	120(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	vbroadcastsd	120(%r13, %r14, 1), %ymm15
-	addq	$128, %r13
-
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-//	vmovapd			0(%r11), %ymm12
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-//	vbroadcastsd	0(%r13), %ymm14
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-//	vmovapd			0(%r11, %r12, 1), %ymm13
-//	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm12
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13), %ymm14
-	vfmadd231pd		%ymm12, %ymm14, %ymm0
-	vfmadd231pd		%ymm13, %ymm14, %ymm4
-	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-	vfmadd231pd		%ymm13, %ymm15, %ymm8
-	subl	$1, %r10d
-
-	vbroadcastsd	8(%r13), %ymm14
-	vfmadd231pd		%ymm12, %ymm14, %ymm1
-	vfmadd231pd		%ymm13, %ymm14, %ymm5
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-	vfmadd231pd		%ymm13, %ymm15, %ymm9
-	addq		$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm14
-	vfmadd231pd		%ymm12, %ymm14, %ymm2
-	vfmadd231pd		%ymm13, %ymm14, %ymm6
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-	vfmadd231pd		%ymm13, %ymm15, %ymm10
-	addq		$32, %r13
-
-	vbroadcastsd	-8(%r13), %ymm14
-	vfmadd231pd		%ymm12, %ymm14, %ymm3
-	vfmadd231pd		%ymm13, %ymm14, %ymm7
-	vbroadcastsd	-8(%r13, %r14, 1), %ymm15
-	vfmadd231pd		%ymm13, %ymm15, %ymm11
-
-	cmpl		$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_8x8_lib4, .-inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
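The register contract above describes a dgemm "add NT" inner kernel: it streams two 4-row panels of A (r11 and r11 + r12) against broadcast elements from two panels of B (r13 and r13 + r14) and accumulates A times B-transposed into ymm0..ymm11. A plain-layout C reference of that arithmetic only, added editorially; it ignores the panel interleaving and register blocking, and the function name and row-major storage are illustrative:

/* Reference of the "add NT" accumulation pattern, D += A * B^T,
 * on plain row-major m x kmax and n x kmax operands. */
static void ref_dgemm_add_nt(int m, int n, int kmax,
                             const double *A, const double *B, double *D)
{
	for (int i = 0; i < m; i++)
		for (int j = 0; j < n; j++)
			for (int k = 0; k < kmax; k++)
				D[i * n + j] += A[i * kmax + k] * B[j * kmax + k];
}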
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_8x8_lib4, @function
-inner_kernel_dgemm_sub_nt_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_8x8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_8x8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovapd			0(%r11), %ymm12
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13), %ymm14
-	vbroadcastsd 	0(%r13, %r14, 1), %ymm15
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	8(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	16(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	24(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	24(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			32(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	32(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			32(%r11, %r12, 1), %ymm13
-	vbroadcastsd	32(%r13, %r14, 1), %ymm15
-
-	// unroll 1
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	40(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	40(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	48(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	48(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	56(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	56(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			64(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	64(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			64(%r11, %r12, 1), %ymm13
-	vbroadcastsd	64(%r13, %r14, 1), %ymm15
-
-	// unroll 2
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	72(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	72(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	80(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	80(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	88(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	88(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			96(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	96(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			96(%r11, %r12, 1), %ymm13
-	vbroadcastsd	96(%r13, %r14, 1), %ymm15
-
-	// unroll 3
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	104(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	104(%r13, %r14, 1), %ymm15
-	addq	$128, %r11
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	112(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	112(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	120(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	120(%r13, %r14, 1), %ymm15
-	addq	$128, %r13
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			0(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	0(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	subl	$4, %r10d
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	8(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	16(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	24(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	24(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			32(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	32(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			32(%r11, %r12, 1), %ymm13
-	vbroadcastsd	32(%r13, %r14, 1), %ymm15
-
-	// unroll 1
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	40(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	40(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	48(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	48(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	56(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	56(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			64(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	64(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			64(%r11, %r12, 1), %ymm13
-	vbroadcastsd	64(%r13, %r14, 1), %ymm15
-
-	// unroll 2
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	72(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	72(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	80(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	80(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	88(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	88(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vmovapd			96(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	96(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-	vmovapd			96(%r11, %r12, 1), %ymm13
-	vbroadcastsd	96(%r13, %r14, 1), %ymm15
-
-	// unroll 3
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	104(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	vbroadcastsd	104(%r13, %r14, 1), %ymm15
-	addq	$128, %r11
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	112(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	vbroadcastsd	112(%r13, %r14, 1), %ymm15
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	120(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	vbroadcastsd	120(%r13, %r14, 1), %ymm15
-	addq	$128, %r13
-
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-//	vmovapd			0(%r11), %ymm12
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-//	vbroadcastsd	0(%r13), %ymm14
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-//	vmovapd			0(%r11, %r12, 1), %ymm13
-//	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm12
-	vmovapd			0(%r11, %r12, 1), %ymm13
-	vbroadcastsd	0(%r13), %ymm14
-	vfnmadd231pd	%ymm12, %ymm14, %ymm0
-	vfnmadd231pd	%ymm13, %ymm14, %ymm4
-	vbroadcastsd	0(%r13, %r14, 1), %ymm15
-	vfnmadd231pd	%ymm13, %ymm15, %ymm8
-	subl	$1, %r10d
-
-	vbroadcastsd	8(%r13), %ymm14
-	vfnmadd231pd	%ymm12, %ymm14, %ymm1
-	vfnmadd231pd	%ymm13, %ymm14, %ymm5
-	vbroadcastsd	8(%r13, %r14, 1), %ymm15
-	vfnmadd231pd	%ymm13, %ymm15, %ymm9
-	addq		$32, %r11
-
-	vbroadcastsd	16(%r13), %ymm14
-	vfnmadd231pd	%ymm12, %ymm14, %ymm2
-	vfnmadd231pd	%ymm13, %ymm14, %ymm6
-	vbroadcastsd	16(%r13, %r14, 1), %ymm15
-	vfnmadd231pd	%ymm13, %ymm15, %ymm10
-	addq		$32, %r13
-
-	vbroadcastsd	-8(%r13), %ymm14
-	vfnmadd231pd	%ymm12, %ymm14, %ymm3
-	vfnmadd231pd	%ymm13, %ymm14, %ymm7
-	vbroadcastsd	-8(%r13, %r14, 1), %ymm15
-	vfnmadd231pd	%ymm13, %ymm15, %ymm11
-
-	cmpl		$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_8x8_lib4, .-inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x8_lib4, @function
-inner_scale_ab_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x8_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_8x8_lib4:
-#endif
-#endif
-		
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		128(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		160(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		192(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		224(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x8_lib4, .-inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- &alpha
-// r11   <- &beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_8x8_lib4, @function
-inner_tran_scale_ab_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_8x8_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_ab_8x8_lib4:
-#endif
-#endif
-		
-
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm4, %ymm15, %ymm4
-	vmulpd		%ymm5, %ymm15, %ymm5
-	vmulpd		%ymm6, %ymm15, %ymm6
-	vmulpd		%ymm7, %ymm15, %ymm7
-
-	vunpcklpd	%ymm9, %ymm8, %ymm12
-	vunpckhpd	%ymm9, %ymm8, %ymm13
-	vunpcklpd	%ymm11, %ymm10, %ymm14
-	vunpckhpd	%ymm11, %ymm10, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm8
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm10
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm9
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm11
-
-	vbroadcastsd 0(%r10), %ymm15 // alpha
-
-	vmulpd		%ymm8, %ymm15, %ymm8
-	vmulpd		%ymm9, %ymm15, %ymm9
-	vmulpd		%ymm10, %ymm15, %ymm10
-	vmulpd		%ymm11, %ymm15, %ymm11
-
-	vbroadcastsd 0(%r11), %ymm14 // beta
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovapd		0(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r12), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		128(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		160(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		192(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		224(%r12, %r13, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_8x8_lib4, .-inner_tran_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x8_lib4, @function
-inner_scale_11_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_scale_11_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x8_lib4; .scl 2; .type 32; .endef
-inner_scale_11_8x8_lib4:
-#endif
-#endif
-		
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC04(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		0(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		32(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		64(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		96(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		128(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		160(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		192(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		224(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x8_lib4, .-inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm4  <- [d40 d51 d62 d73]
-// ymm5  <- [d41 d50 d63 d72]
-// ymm6  <- [d43 d52 d61 d70]
-// ymm7  <- [d42 d53 d60 d71]
-// ymm8  <- [d80 d91 da2 db3]
-// ymm9  <- [d81 d90 da3 db2]
-// ymm10 <- [d83 d92 da1 db0]
-// ymm11 <- [d82 d93 da0 db1]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_11_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_11_8x8_lib4, @function
-inner_tran_scale_11_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_tran_scale_11_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_11_8x8_lib4; .scl 2; .type 32; .endef
-inner_tran_scale_11_8x8_lib4:
-#endif
-#endif
-		
-
-	vunpcklpd	%ymm1, %ymm0, %ymm12
-	vunpckhpd	%ymm1, %ymm0, %ymm13
-	vunpcklpd	%ymm3, %ymm2, %ymm14
-	vunpckhpd	%ymm3, %ymm2, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm0
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm2
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm1
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm3
-
-	vunpcklpd	%ymm5, %ymm4, %ymm12
-	vunpckhpd	%ymm5, %ymm4, %ymm13
-	vunpcklpd	%ymm7, %ymm6, %ymm14
-	vunpckhpd	%ymm7, %ymm6, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm4
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm6
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm5
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm7
-
-	vunpcklpd	%ymm9, %ymm8, %ymm12
-	vunpckhpd	%ymm9, %ymm8, %ymm13
-	vunpcklpd	%ymm11, %ymm10, %ymm14
-	vunpckhpd	%ymm11, %ymm10, %ymm15
-
-	vperm2f128	$0x20, %ymm14, %ymm12, %ymm8
-	vperm2f128	$0x31, %ymm14, %ymm12, %ymm10
-	vperm2f128	$0x20, %ymm15, %ymm13, %ymm9
-	vperm2f128	$0x31, %ymm15, %ymm13, %ymm11
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovapd		.LC04(%rip), %ymm14 // beta=1.0
-#else
-	vmovapd		LC04(%rip), %ymm14 // beta=1.0
-#endif
-
-	vmovapd		0(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm0
-	vmovapd		32(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm1
-	vmovapd		64(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm2
-	vmovapd		96(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm3
-
-	vmovapd		128(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm4
-	vmovapd		160(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm5
-	vmovapd		192(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm6
-	vmovapd		224(%r10), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm7
-
-	vmovapd		128(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm8
-	vmovapd		160(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm9
-	vmovapd		192(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm10
-	vmovapd		224(%r10, %r11, 1), %ymm15
-	vfmadd231pd	%ymm14, %ymm15, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_11_8x8_lib4, .-inner_tran_scale_11_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// Cholesky factorization
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_8X8_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_8x8_vs_lib4, @function
-inner_edge_dpotrf_8x8_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_8x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_8x8_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_8x8_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd			%xmm0, %xmm0, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-2:
-	vmovsd			%xmm13, 0(%r10)
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vpermpd			$0x55, %ymm0, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vperm2f128		$0x00, %ymm4, %ymm4, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm8
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm11
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-4:
-	vmovsd			%xmm13, 8(%r10)
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vperm2f128		$0x00, %ymm5, %ymm5, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm8
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm11
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-6:
-	vmovsd			%xmm13, 16(%r10)
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vpermpd			$0xff, %ymm2, %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vperm2f128		$0x00, %ymm6, %ymm6, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm8
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm11
-
-	vpermpd			$0xff, %ymm3, %ymm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-8:
-	vmovsd			%xmm13, 24(%r10)
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vperm2f128		$0x00, %ymm7, %ymm7, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm8
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm7, %ymm7, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm11
-
-	vmovsd			%xmm8, %xmm8, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				9f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-10:
-	vmovsd			%xmm13, 32(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$6, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x00, %ymm8, %ymm8, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0x55, %ymm8, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vperm2f128		$0x11, %ymm8, %ymm8, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vpermilpd		$0x3, %xmm9, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				11f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-12:
-	vmovsd			%xmm13, 40(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$7, %r11d
-	jl				0f // ret
-	vperm2f128		$0x11, %ymm9, %ymm9, %ymm12
-	vpermilpd		$0x0, %ymm12, %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vpermilpd		$0xf, %ymm12, %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vextractf128	$0x1, %ymm10, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				13f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-14:
-	vmovsd			%xmm13, 48(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$8, %r11d
-	jl				0f // ret
-//	vperm2f128		$0x11, %ymm10, %ymm10, %ymm12
-//	vpermilpd		$0xf, %ymm12, %ymm13
-	vpermpd			$0xff, %ymm10, %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-//	vextractf128	$0x1, %ymm11, %xmm13
-//	vpermilpd		$0x3, %xmm13, %xmm13
-	vpermpd			$0xff, %ymm11, %ymm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				15f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-16:
-	vmovsd			%xmm13, 56(%r10)
-//	vmovddup		%xmm13, %xmm13
-//	vperm2f128		$0x00, %ymm13, %ymm13, %ymm13
-	vpermpd			$0x00, %ymm13, %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-
-
-	jmp				0f
-
-1:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				8b
-
-9:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				10b
-
-11:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				12b
-
-13:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				14b
-
-15:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp				16b
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_8x8_vs_lib4, .-inner_edge_dpotrf_8x8_vs_lib4
-#endif
-#endif
-
-
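The preceding routine is, in effect, an unblocked right-looking Cholesky on its register block: for each column it tests the pivot, stores 1/sqrt(pivot) into inv_diag_E (or 0.0 when the pivot is not positive), scales the column, and subtracts the rank-1 update from the columns that remain, with the vs variant cutting the loop short at kn. A plain column-major C sketch of that recurrence, added editorially, with illustrative names and no attempt to mirror the register layout:

#include <math.h>

/* Unblocked lower Cholesky of a column-major n x n block, mirroring the
 * pivot test / 1/sqrt / column scale / rank-1 update sequence above. */
static void ref_dpotrf_l(int n, double *A, double *inv_diag)
{
	for (int j = 0; j < n; j++) {
		double d = A[j + j * n];
		inv_diag[j] = d > 0.0 ? 1.0 / sqrt(d) : 0.0;  /* 0.0 on a non-positive pivot, as the asm does */
		for (int i = j; i < n; i++)
			A[i + j * n] *= inv_diag[j];              /* column j now holds L(:, j) */
		for (int k = j + 1; k < n; k++)
			for (int i = k; i < n; i++)
				A[i + k * n] -= A[i + j * n] * A[k + j * n];
	}
}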
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for Cholesky factorization
-//
-// input arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- 4*sde*sizeof(double)
-// r12  <- inv_diag_E
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X8L_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x8l_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x8l_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x8l_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x8l_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x8l_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm11
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x8l_lib4, .-inner_edge_dtrsm_rlt_inv_8x8l_lib4
-#endif
-#endif
-
-
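As a plain-C model of what the triangular-substitution edge above computes (a sketch with illustrative names, on column-major storage instead of the panel layout): each column of D is scaled by the precomputed reciprocal of the corresponding diagonal entry of E, then folded out of the later columns, which is exactly the broadcast / vfnmadd231pd pattern in the code.

// Solve X * E^T = D in place for X (overwriting D), with E lower triangular
// and inv_diag_E[j] = 1.0 / E[j][j] supplied by the caller.
static void dtrsm_rlt_inv_ref(int m, int n, double *D, int ldd,
                              const double *E, int lde,
                              const double *inv_diag_E)
{
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < m; i++)
			D[i + j * ldd] *= inv_diag_E[j];             // scale column j
		for (int k = j + 1; k < n; k++) {
			double e_kj = E[k + j * lde];                // broadcast E[k][j]
			for (int i = 0; i < m; i++)
				D[i + k * ldd] -= D[i + j * ldd] * e_kj; // fused negative multiply-add
		}
	}
}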
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13  <- D
-// r14  <- sdd
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13  <- D
-// r14  <- sdd
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X8U_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x8u_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x8u_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x8u_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x8u_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x8u_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-
-	vmovapd			0(%r13, %r14, 1), %ymm12
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-
-	vmovapd			32(%r13, %r14, 1), %ymm12
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-	vmovapd			64(%r13, %r14, 1), %ymm12
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-	vmovapd			96(%r13, %r14, 1), %ymm12
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x8u_lib4, .-inner_edge_dtrsm_rlt_inv_8x8u_lib4
-#endif
-#endif
-
-
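The same broadcast-and-accumulate primitive recurs throughout these edge routines, including the 8x8u variant above, which additionally folds the rows it loads from D (through r13/r14) into the second set of accumulators. For reference, an AVX2-intrinsics rendering of that primitive; the helper name is mine, not BLASFEO's:

#include <immintrin.h>

// acc -= col * e for four doubles at a time: vbroadcastsd + vfnmadd231pd.
static inline __m256d fnmadd_scalar(__m256d acc, __m256d col, double e)
{
	return _mm256_fnmadd_pd(col, _mm256_set1_pd(e), acc);
}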
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X8L_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm11
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm11
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm11
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm7, %ymm13, %ymm11
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$6, %r13d
-	jl				0f // ret
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$7, %r13d
-	jl				0f // ret
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$8, %r13d
-	jl				0f // ret
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4, .-inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4
-#endif
-#endif
-
-
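The _vs_ (variable-size) edge above is the same substitution with an extra guard: kn is the number of columns that actually exist in this block, so work on columns past kn-1 is skipped via the cmpl/jl pairs. A scalar sketch of that guard, with illustrative names and column-major storage:

// As dtrsm_rlt_inv_ref(), but only the first kn columns are produced.
static void dtrsm_rlt_inv_vs_ref(int m, int n, int kn, double *D, int ldd,
                                 const double *E, int lde,
                                 const double *inv_diag_E)
{
	int nn = kn < n ? kn : n;                        // columns beyond kn do not exist
	for (int j = 0; j < nn; j++) {
		for (int i = 0; i < m; i++)
			D[i + j * ldd] *= inv_diag_E[j];
		for (int k = j + 1; k < nn; k++)
			for (int i = 0; i < m; i++)
				D[i + k * ldd] -= D[i + j * ldd] * E[k + j * lde];
	}
}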
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13  <- D
-// r14  <- sdd
-// r15d <- kn
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- sde
-// r12  <- inv_diag_E
-// r13  <- D
-// r14  <- sdd
-// r15d <- kn
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm4 <- [d40 d51 d62 d73]
-// ymm5 <- [d41 d50 d63 d72]
-// ymm6 <- [d43 d52 d61 d70]
-// ymm7 <- [d42 d53 d60 d71]
-// ymm12 <- dirty
-// ymm13 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_8X8U_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4:
-#endif
-#endif
-	
-	vbroadcastsd	0(%r12), %ymm13
-	vmulpd			%ymm0, %ymm13, %ymm0
-	vbroadcastsd	8(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm1
-	vbroadcastsd	16(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm2
-	vbroadcastsd	24(%r10), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm3
-
-	vmovapd			0(%r13, %r14, 1), %ymm12
-	vbroadcastsd	0(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm0, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	8(%r12), %ymm13
-	vmulpd			%ymm1, %ymm13, %ymm1
-	vbroadcastsd	48(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm2
-	vbroadcastsd	56(%r10), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm3
-
-	vmovapd			32(%r13, %r14, 1), %ymm12
-	vbroadcastsd	32(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	40(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm1, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	16(%r12), %ymm13
-	vmulpd			%ymm2, %ymm13, %ymm2
-	vbroadcastsd	88(%r10), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm3
-
-	vmovapd			64(%r13, %r14, 1), %ymm12
-	vbroadcastsd	64(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	72(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	80(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm2, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-
-	vbroadcastsd	24(%r12), %ymm13
-	vmulpd			%ymm3, %ymm13, %ymm3
-
-	vmovapd			96(%r13, %r14, 1), %ymm12
-	vbroadcastsd	96(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm4
-	vfnmadd231pd	%ymm12, %ymm13, %ymm8
-	vbroadcastsd	104(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm5
-	vfnmadd231pd	%ymm12, %ymm13, %ymm9
-	vbroadcastsd	112(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm6
-	vfnmadd231pd	%ymm12, %ymm13, %ymm10
-	vbroadcastsd	120(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm3, %ymm13, %ymm7
-	vfnmadd231pd	%ymm12, %ymm13, %ymm11
-
-	addq	$128, %r10
-
-	vbroadcastsd	32(%r12), %ymm13
-	vmulpd			%ymm4, %ymm13, %ymm4
-	vmulpd			%ymm8, %ymm13, %ymm8
-	cmpl			$6, %r15d
-	jl				0f // ret
-	vbroadcastsd	8(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm5
-	vfnmadd231pd	%ymm8, %ymm13, %ymm9
-	vbroadcastsd	16(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm6
-	vfnmadd231pd	%ymm8, %ymm13, %ymm10
-	vbroadcastsd	24(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm4, %ymm13, %ymm7
-	vfnmadd231pd	%ymm8, %ymm13, %ymm11
-
-	vbroadcastsd	40(%r12), %ymm13
-	vmulpd			%ymm5, %ymm13, %ymm5
-	vmulpd			%ymm9, %ymm13, %ymm9
-	cmpl			$7, %r15d
-	jl				0f // ret
-	vbroadcastsd	48(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm6
-	vfnmadd231pd	%ymm9, %ymm13, %ymm10
-	vbroadcastsd	56(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm5, %ymm13, %ymm7
-	vfnmadd231pd	%ymm9, %ymm13, %ymm11
-
-	vbroadcastsd	48(%r12), %ymm13
-	vmulpd			%ymm6, %ymm13, %ymm6
-	vmulpd			%ymm10, %ymm13, %ymm10
-	cmpl			$8, %r15d
-	jl				0f // ret
-	vbroadcastsd	88(%r10, %r11, 1), %ymm13
-	vfnmadd231pd	%ymm6, %ymm13, %ymm7
-	vfnmadd231pd	%ymm10, %ymm13, %ymm11
-
-	vbroadcastsd	56(%r12), %ymm13
-	vmulpd			%ymm7, %ymm13, %ymm7
-	vmulpd			%ymm11, %ymm13, %ymm11
-
-
-
-//	subq	$128, %r10
-//	vmovapd	0(%r10, %r11, 1), %ymm4
-//	vmovapd	32(%r10, %r11, 1), %ymm5
-//	vmovapd	64(%r10, %r11, 1), %ymm6
-//	vmovapd	96(%r10, %r11, 1), %ymm7
-
-
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4, .-inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8L_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8l_lib4, @function
-inner_store_8x8l_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x8l_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8l_lib4; .scl 2; .type 32; .endef
-inner_store_8x8l_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmovapd %ymm4,  0(%r10, %r11, 1)
-	vmovapd %ymm5, 32(%r10, %r11, 1)
-	vmovapd %ymm6, 64(%r10, %r11, 1)
-	vmovapd %ymm7, 96(%r10, %r11, 1)
-
-	vmovapd %ymm8,  128(%r10, %r11, 1)
-	vmovapd %ymm9,  160(%r10, %r11, 1)
-	vmovapd %ymm10, 192(%r10, %r11, 1)
-	vmovapd %ymm11, 224(%r10, %r11, 1)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8l_lib4, .-inner_store_8x8l_lib4
-#endif
-#endif
-
-
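The addressing used by this store routine (and those that follow) comes from the panel-major "lib4" layout: the matrix is cut into 4-row panels, each panel is column-major with a 4-double column stride, and consecutive panels sit 4*sdd doubles apart, which is why rows 4..7 are written at (%r10, %r11, 1) with r11 = 4*sdd*sizeof(double). A small helper expressing that index as I read the layout (a sketch, not a BLASFEO function):

#include <stddef.h>

// Index of element (i, j) in a panel-major ("lib4") matrix with panel stride sd.
static inline size_t lib4_index(int i, int j, int sd)
{
	return (size_t)(i / 4) * 4 * (size_t)sd  // 4-row panel base
	     + (size_t)j * 4                     // column inside the panel
	     + (size_t)(i % 4);                  // row inside the panel
}

With that, the 8x8l store above writes columns 0..3 for rows 0..7 and columns 4..7 for rows 4..7 only, i.e. the block-lower part of the 8x8 result; the 8x8u store below keeps the top 4x8 block plus the lower-right 4x4 instead.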
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8U_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8u_lib4, @function
-inner_store_8x8u_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x8u_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8u_lib4; .scl 2; .type 32; .endef
-inner_store_8x8u_lib4:
-#endif
-#endif
-	
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmovapd %ymm4, 128(%r10)
-	vmovapd %ymm5, 160(%r10)
-	vmovapd %ymm6, 192(%r10)
-	vmovapd %ymm7, 224(%r10)
-
-	vmovapd %ymm8,  128(%r10, %r11, 1)
-	vmovapd %ymm9,  160(%r10, %r11, 1)
-	vmovapd %ymm10, 192(%r10, %r11, 1)
-	vmovapd %ymm11, 224(%r10, %r11, 1)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8u_lib4, .-inner_store_8x8u_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8L_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8l_vs_lib4, @function
-inner_store_8x8l_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x8l_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8l_vs_lib4; .scl 2; .type 32; .endef
-inner_store_8x8l_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-	vmaskmovpd	%ymm4, %ymm15,  0(%r10, %r11, 1)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r10, %r11, 1)
-
-	vmaskmovpd	%ymm8, %ymm15, 128(%r10, %r11, 1)
-	cmpl		$6, %r13d
-	jl			0f // end
-	vmaskmovpd	%ymm9, %ymm15, 160(%r10, %r11, 1)
-	cmpl		$7, %r13d
-	jl			0f // end
-	vmaskmovpd	%ymm10, %ymm15, 192(%r10, %r11, 1)
-	je			0f // end
-	vmaskmovpd	%ymm11, %ymm15, 224(%r10, %r11, 1)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8l_vs_lib4, .-inner_store_8x8l_vs_lib4
-#endif
-#endif
-
-
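The _vs_ store above derives its vmaskmovpd row mask from km arithmetically: km is converted to double, broadcast, and subtracted from a vector of per-lane thresholds so that the sign bit ends up set exactly for the rows that must be written. The constant .LC03 is defined outside this hunk; {4.5, 5.5, 6.5, 7.5} is my assumption for the second 4-row panel. An intrinsics sketch of that trick:

#include <immintrin.h>

// Sign-bit mask for rows 4..7: lane r-4 is negative (store enabled) iff row r < km.
// The threshold values are an assumption about .LC03, which is not shown here.
static inline __m256i row_mask_rows4to7(int km)
{
	const __m256d thresh = _mm256_set_pd(7.5, 6.5, 5.5, 4.5);
	__m256d diff = _mm256_sub_pd(thresh, _mm256_set1_pd((double)km));
	return _mm256_castpd_si256(diff);        // vmaskmovpd keys on the sign bit
}

// usage: _mm256_maskstore_pd(dst, row_mask_rows4to7(km), vals);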
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8U_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8u_vs_lib4, @function
-inner_store_8x8u_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x8u_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8u_vs_lib4; .scl 2; .type 32; .endef
-inner_store_8x8u_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmovapd %ymm0,  0(%r10)
-	vmovapd %ymm1, 32(%r10)
-	vmovapd %ymm2, 64(%r10)
-	vmovapd %ymm3, 96(%r10)
-
-
-	vmovapd		%ymm4, 128(%r10)
-	vmaskmovpd	%ymm8, %ymm15, 128(%r10, %r11, 1)
-	cmpl		$6, %r13d
-	jl			0f // end
-	vmovapd		%ymm5, 160(%r10)
-	vmaskmovpd	%ymm9, %ymm15, 160(%r10, %r11, 1)
-	cmpl		$7, %r13d
-	jl			0f // end
-	vmovapd		%ymm6, 192(%r10)
-	vmaskmovpd	%ymm10, %ymm15, 192(%r10, %r11, 1)
-	je			0f // end
-	vmovapd		%ymm7, 224(%r10)
-	vmaskmovpd	%ymm11, %ymm15, 224(%r10, %r11, 1)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8u_vs_lib4, .-inner_store_8x8u_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_lib4, @function
-inner_store_l_8x8_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x8_lib4:
-#endif
-#endif
-	
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-	vmovapd		%ymm4, 0(%r10, %r11, 1)
-	vmovapd		%ymm5, 32(%r10, %r11, 1)
-	vmovapd		%ymm6, 64(%r10, %r11, 1)
-	vmovapd		%ymm7, 96(%r10, %r11, 1)
-
-	vmovapd		%ymm8, 128(%r10, %r11, 1)
-	vmovapd		160(%r10, %r11, 1), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm9, %ymm9
-	vmovapd		%ymm9, 160(%r10, %r11, 1)
-	vmovapd		192(%r10, %r11, 1), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm10, %ymm10
-	vmovapd		%ymm10, 192(%r10, %r11, 1)
-	vmovapd		224(%r10, %r11, 1), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm11, %ymm11
-	vmovapd		%ymm11, 224(%r10, %r11, 1)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_lib4, .-inner_store_l_8x8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11   <- 4*sdd*sizeof(double)
-// r12d  <- km
-// r13d  <- kn
-// r14   <- dirty
-// r15   <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_vs_lib4, @function
-inner_store_l_8x8_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_8x8_vs_lib4:
-#endif
-#endif
-	
-	vcvtsi2sd	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovupd		LC03(%rip), %ymm14
-#endif
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm15, %ymm14, %ymm15
-
-	vmovapd		%ymm0, 0(%r10)
-	vmovapd		32(%r10), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm1, %ymm1	
-	vmovapd		%ymm1, 32(%r10)
-	vmovapd		64(%r10), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm2, %ymm2	
-	vmovapd		%ymm2, 64(%r10)
-	vmovapd		96(%r10), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm3, %ymm3	
-	vmovapd		%ymm3, 96(%r10)
-
-	vmaskmovpd	%ymm4, %ymm15,  0(%r10, %r11, 1)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r10, %r11, 1)
-
-	vmaskmovpd	%ymm8, %ymm15, 128(%r10, %r11, 1)
-	cmpl		$6, %r13d
-	jl			0f // end
-	vmovapd		160(%r10, %r11, 1), %ymm14
-	vblendpd	$0x1, %ymm14, %ymm9, %ymm9
-	vmaskmovpd	%ymm9, %ymm15, 160(%r10, %r11, 1)
-	cmpl		$7, %r13d
-	jl			0f // end
-	vmovapd		192(%r10, %r11, 1), %ymm14
-	vblendpd	$0x3, %ymm14, %ymm10, %ymm10
-	vmaskmovpd	%ymm10, %ymm15, 192(%r10, %r11, 1)
-	je			0f // end
-	vmovapd		224(%r10, %r11, 1), %ymm14
-	vblendpd	$0x7, %ymm14, %ymm11, %ymm11
-	vmaskmovpd	%ymm11, %ymm15, 224(%r10, %r11, 1)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_vs_lib4, .-inner_store_l_8x8_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// rbp  <- dirty
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm4  <- [d40 d50 d60 d70]
-// ymm5  <- [d41 d51 d61 d71]
-// ymm6  <- [d42 d52 d62 d72]
-// ymm7  <- [d43 d53 d63 d73]
-// ymm8  <- [d80 d90 da0 db0]
-// ymm9  <- [d81 d91 da1 db1]
-// ymm10 <- [d82 d92 da2 db2]
-// ymm11 <- [d83 d93 da3 db3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_gen_lib4, @function
-inner_store_8x8_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_8x8_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_gen_lib4; .scl 2; .type 32; .endef
-inner_store_8x8_gen_lib4:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2sd	%r13d, %xmm14, %xmm14
-	vcvtsi2sd	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm12
-	vmovupd		.LC03(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm12
-	vmovupd		LC03(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vmovddup	%xmm15, %xmm15
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$1, %xmm15, %ymm15, %ymm15
-	vsubpd		%ymm12, %ymm14, %ymm14
-	vsubpd		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm3, %ymm2
-	vmovapd		%ymm7, %ymm6
-	vmovapd		%ymm8, %ymm7
-	vmovapd		%ymm9, %ymm8
-	vmovapd		%ymm10, %ymm9
-	vmovapd		%ymm11, %ymm10
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm2, %ymm1
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm7, %ymm6
-	vmovapd		%ymm8, %ymm7
-	vmovapd		%ymm9, %ymm8
-	vmovapd		%ymm10, %ymm9
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%ymm1, %ymm0
-	vmovapd		%ymm5, %ymm4
-	vmovapd		%ymm6, %ymm5
-	vmovapd		%ymm7, %ymm6
-	vmovapd		%ymm8, %ymm7
-	vmovapd		%ymm9, %ymm8
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmaskmovpd	%ymm0, %ymm14,  0(%r11)
-	vmaskmovpd	%ymm1, %ymm14, 32(%r11)
-	vmaskmovpd	%ymm2, %ymm14, 64(%r11)
-	vmaskmovpd	%ymm3, %ymm14, 96(%r11)
-
-	vmaskmovpd	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	vmaskmovpd	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm7, %ymm15, 96(%r11, %r12, 1)
-
-	vmaskmovpd	%ymm8, %ymm15, 128(%r11, %r12, 1)
-	cmpl		$6, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm9, %ymm15, 160(%r11, %r12, 1)
-	cmpl		$7, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm10, %ymm15, 192(%r11, %r12, 1)
-	je			4f // end
-	vmaskmovpd	%ymm11, %ymm15, 224(%r11, %r12, 1)
-
-	jmp		4f
-
-0:
-	
-	cmpl	$1, %r10d
-	jg		1f
-
-	// offset==1
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm12
-	vshufpd		$0x5, %ymm0, %ymm12, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm4, %ymm12, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm12
-	vshufpd		$0x5, %ymm1, %ymm12, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm5, %ymm12, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm12
-	vshufpd		$0x5, %ymm2, %ymm12, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm6, %ymm12, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm12
-	vshufpd		$0x5, %ymm3, %ymm12, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm7, %ymm12, %ymm7
-
-	vperm2f128	$0x01, %ymm8, %ymm8, %ymm12
-	vshufpd		$0x5, %ymm8, %ymm12, %ymm8
-
-	vperm2f128	$0x01, %ymm9, %ymm9, %ymm12
-	vshufpd		$0x5, %ymm9, %ymm12, %ymm9
-
-	vperm2f128	$0x01, %ymm10, %ymm10, %ymm12
-	vshufpd		$0x5, %ymm10, %ymm12, %ymm10
-
-	vperm2f128	$0x01, %ymm11, %ymm11, %ymm12
-	vshufpd		$0x5, %ymm11, %ymm12, %ymm11
-
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm15, %ymm12, %ymm15
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm14, %ymm12, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC08(%rip), %ymm14, %ymm12
-	vandpd		.LC05(%rip), %ymm15, %ymm13
-#elif defined(OS_MAC)
-	vandpd		LC08(%rip), %ymm14, %ymm12
-	vandpd		LC05(%rip), %ymm15, %ymm13
-#endif
-
-	vblendpd	$0x1, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC08(%rip), %ymm15, %ymm15
-#elif defined(OS_MAC)
-	vandpd		LC08(%rip), %ymm15, %ymm15
-#endif
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	// offset==2
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x03, %ymm4, %ymm0, %ymm0
-	vperm2f128	$0x03, %ymm13, %ymm4, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x03, %ymm5, %ymm1, %ymm1
-	vperm2f128	$0x03, %ymm13, %ymm5, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x03, %ymm6, %ymm2, %ymm2
-	vperm2f128	$0x03, %ymm13, %ymm6, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x03, %ymm7, %ymm3, %ymm3
-	vperm2f128	$0x03, %ymm13, %ymm7, %ymm7
-
-	vperm2f128	$0x01, %ymm8, %ymm8, %ymm8
-
-	vperm2f128	$0x01, %ymm9, %ymm9, %ymm9
-
-	vperm2f128	$0x01, %ymm10, %ymm10, %ymm10
-
-	vperm2f128	$0x01, %ymm11, %ymm11, %ymm11
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC09(%rip), %ymm14, %ymm12
-	vandpd		.LC06(%rip), %ymm15, %ymm13
-#elif defined(OS_MAC)
-	vandpd		LC09(%rip), %ymm14, %ymm12
-	vandpd		LC06(%rip), %ymm15, %ymm13
-#endif
-
-	vblendpd	$0x3, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC09(%rip), %ymm15, %ymm15
-#elif defined(OS_MAC)
-	vandpd		LC09(%rip), %ymm15, %ymm15
-#endif
-
-	jmp		3f
-
-2:
-
-	// offset==3
-
-	vmovapd		%ymm0, %ymm13
-	vperm2f128	$0x21, %ymm0, %ymm4, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm4, %ymm0
-	vperm2f128	$0x21, %ymm4, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm4
-
-	vmovapd		%ymm1, %ymm13
-	vperm2f128	$0x21, %ymm1, %ymm5, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm5, %ymm1
-	vperm2f128	$0x21, %ymm5, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm5
-
-	vmovapd		%ymm2, %ymm13
-	vperm2f128	$0x21, %ymm2, %ymm6, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm6, %ymm2
-	vperm2f128	$0x21, %ymm6, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm6
-
-	vmovapd		%ymm3, %ymm13
-	vperm2f128	$0x21, %ymm3, %ymm7, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm7, %ymm3
-	vperm2f128	$0x21, %ymm7, %ymm13, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm13, %ymm7
-
-	vperm2f128	$0x01, %ymm8, %ymm8, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm8, %ymm8
-
-	vperm2f128	$0x01, %ymm9, %ymm9, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm9, %ymm9
-
-	vperm2f128	$0x01, %ymm10, %ymm10, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm10, %ymm10
-
-	vperm2f128	$0x01, %ymm11, %ymm11, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm11, %ymm11
-
-	vperm2f128	$0x01, %ymm14, %ymm14, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm14, %ymm14
-	vperm2f128	$0x01, %ymm15, %ymm15, %ymm12
-	vshufpd		$0x5, %ymm12, %ymm15, %ymm15
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC10(%rip), %ymm14, %ymm12
-	vandpd		.LC07(%rip), %ymm15, %ymm13
-#elif defined(OS_MAC)
-	vandpd		LC10(%rip), %ymm14, %ymm12
-	vandpd		LC07(%rip), %ymm15, %ymm13
-#endif
-
-	vblendpd	$0x7, %ymm14, %ymm15, %ymm14
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vandpd		.LC10(%rip), %ymm15, %ymm15
-#elif defined(OS_MAC)
-	vandpd		LC10(%rip), %ymm15, %ymm15
-#endif
-
-3:
-
-	vmaskmovpd	%ymm0, %ymm12, 0(%r11)
-	vmaskmovpd	%ymm4, %ymm14, 0(%r11, %r12, 1)
-	vmaskmovpd	%ymm0, %ymm13, 0(%r11, %r12, 2)
-	vmaskmovpd	%ymm1, %ymm12, 32(%r11)
-	vmaskmovpd	%ymm5, %ymm14, 32(%r11, %r12, 1)
-	vmaskmovpd	%ymm1, %ymm13, 32(%r11, %r12, 2)
-	vmaskmovpd	%ymm2, %ymm12, 64(%r11)
-	vmaskmovpd	%ymm6, %ymm14, 64(%r11, %r12, 1)
-	vmaskmovpd	%ymm2, %ymm13, 64(%r11, %r12, 2)
-	vmaskmovpd	%ymm3, %ymm12, 96(%r11)
-	vmaskmovpd	%ymm7, %ymm14, 96(%r11, %r12, 1)
-	vmaskmovpd	%ymm3, %ymm13, 96(%r11, %r12, 2)
-
-	vmaskmovpd	%ymm8, %ymm15, 128(%r11, %r12, 1)
-	vmaskmovpd	%ymm8, %ymm13, 128(%r11, %r12, 2)
-	cmpl		$6, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm9, %ymm15, 160(%r11, %r12, 1)
-	vmaskmovpd	%ymm9, %ymm13, 160(%r11, %r12, 2)
-	cmpl		$7, %r15d
-	jl			4f // end
-	vmaskmovpd	%ymm10, %ymm15, 192(%r11, %r12, 1)
-	vmaskmovpd	%ymm10, %ymm13, 192(%r11, %r12, 2)
-	je			4f // end
-	vmaskmovpd	%ymm11, %ymm15, 224(%r11, %r12, 1)
-	vmaskmovpd	%ymm11, %ymm13, 224(%r11, %r12, 2)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_gen_lib4, .-inner_store_8x8_gen_lib4
-#endif
-#endif
-
-
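Semantically, the generalized store above writes an arbitrary row/column window of the computed 8x8 block: the n0 leading columns are shifted away, n1 is clamped to 8, and two sign-bit masks derived from m0 and m1 select which rows of each panel are touched, while the offset argument handles a block that is not aligned to a 4-row panel of D (the offset==1/2/3 permute paths). A scalar model of the windowing only, ignoring the panel offset, with illustrative names:

// Write acc[i][j] for m0 <= i < m1 and n0 <= j < min(n1, 8) into a plain
// column-major D; the real routine does the same on the lib4 layout with
// vmaskmovpd masks and register rotations.
static void store_8x8_gen_model(const double acc[8][8], double *D, int ldd,
                                int m0, int m1, int n0, int n1)
{
	if (n1 > 8) n1 = 8;
	if (m1 > 8) m1 = 8;
	for (int j = n0; j < n1; j++)
		for (int i = m0; i < m1; i++)
			D[i + j * ldd] = acc[i][j];
}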
-
-
-
-//                               1      2              3          4        5          6        7             8          9        10         11
-// void kernel_dgemm_nt_8x8l_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x8l_lib4
-	.type kernel_dgemm_nt_8x8l_lib4, @function
-kernel_dgemm_nt_8x8l_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x8l_lib4
-_kernel_dgemm_nt_8x8l_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x8l_lib4
-	.def kernel_dgemm_nt_8x8l_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x8l_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8L_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8l_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8l_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x8l_lib4, .-kernel_dgemm_nt_8x8l_lib4
-#endif
-
-
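Putting the pieces together, the kernel above computes an 8x8 block of D = beta*C + alpha*A*B^T with all operands in the panel-major layout, and stores only the block-lower part (columns 0..3 in full, columns 4..7 for rows 4..7) per inner_store_8x8l_lib4. A scalar reference of that semantics as I read it; the real kernel passes alpha and beta by pointer, and this sketch is for understanding rather than use:

// Element access for a panel-major ("lib4") matrix with panel stride sd.
static double lib4_get(const double *X, int i, int j, int sd)
{
	return X[(i / 4) * 4 * sd + j * 4 + i % 4];
}

static void dgemm_nt_8x8l_ref(int k, double alpha, const double *A, int sda,
                              const double *B, int sdb, double beta,
                              const double *C, int sdc, double *D, int sdd)
{
	for (int j = 0; j < 8; j++)
		for (int i = (j < 4 ? 0 : 4); i < 8; i++) {   // block-lower part only
			double acc = 0.0;
			for (int l = 0; l < k; l++)               // A * B^T
				acc += lib4_get(A, i, l, sda) * lib4_get(B, j, l, sdb);
			D[(i / 4) * 4 * sdd + j * 4 + i % 4] =
				beta * lib4_get(C, i, j, sdc) + alpha * acc;
		}
}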
-
-
-
-//                               1      2              3          4        5          6        7             8          9        10         11
-// void kernel_dgemm_nt_8x8u_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x8u_lib4
-	.type kernel_dgemm_nt_8x8u_lib4, @function
-kernel_dgemm_nt_8x8u_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x8u_lib4
-_kernel_dgemm_nt_8x8u_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x8u_lib4
-	.def kernel_dgemm_nt_8x8u_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x8u_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG5, %r11 // B
-	movq	ARG6, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-	movq	ARG4, %r14 // sda
-	sall	$5, %r14d // 4*sda*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8U_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8u_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8u_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x8u_lib4, .-kernel_dgemm_nt_8x8u_lib4
-#endif
-
-
-
-
-
-//                                   1      2              3          4        5          6        7             8          9        10         11       12      13
-// void kernel_dgemm_nt_8x8l_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x8l_vs_lib4
-	.type kernel_dgemm_nt_8x8l_vs_lib4, @function
-kernel_dgemm_nt_8x8l_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x8l_vs_lib4
-_kernel_dgemm_nt_8x8l_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x8l_vs_lib4
-	.def kernel_dgemm_nt_8x8l_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x8l_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG12, %r12 // km 
-	movq	ARG13, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8L_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8l_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8l_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x8l_vs_lib4, .-kernel_dgemm_nt_8x8l_vs_lib4
-#endif
-
-
-
-
-
-//                                   1      2              3          4        5          6        7             8          9        10         11       12      13
-// void kernel_dgemm_nt_8x8u_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x8u_vs_lib4
-	.type kernel_dgemm_nt_8x8u_vs_lib4, @function
-kernel_dgemm_nt_8x8u_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x8u_vs_lib4
-_kernel_dgemm_nt_8x8u_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x8u_vs_lib4
-	.def kernel_dgemm_nt_8x8u_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x8u_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG5, %r11 // B
-	movq	ARG6, %r12 // sdb
-	sall	$5, %r12d // 4*sdb*sizeof(double)
-	movq	ARG3, %r13 // A
-	movq	ARG4, %r14 // sda
-	sall	$5, %r14d // 4*sda*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG12, %r12 // km 
-	movq	ARG13, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8U_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8u_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8u_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x8u_vs_lib4, .-kernel_dgemm_nt_8x8u_vs_lib4
-#endif
-
-
-
-
-
-#if 0
-//                                   1      2              3          4        5          6        7             8         9          10       11        12         13       14      15      16      17
-// void kernel_dgemm_nt_8x8_gen_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, int offC, double *C, int sdc, int offD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_8x8_gen_lib4
-	.type kernel_dgemm_nt_8x8_gen_lib4, @function
-kernel_dgemm_nt_8x8_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_8x8_gen_lib4
-_kernel_dgemm_nt_8x8_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_8x8_gen_lib4
-	.def kernel_dgemm_nt_8x8_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_8x8_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_8x8_gen_lib4, .-kernel_dgemm_nt_8x8_gen_lib4
-#endif
-#endif
-
-
-
-
-
-//                               1      2              3          4        5          6        7             8          9        10         11
-// void kernel_dsyrk_nt_l_8x8_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x8_lib4
-	.type kernel_dsyrk_nt_l_8x8_lib4, @function
-kernel_dsyrk_nt_l_8x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x8_lib4
-_kernel_dsyrk_nt_l_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x8_lib4
-	.def kernel_dsyrk_nt_l_8x8_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x8_lib4, .-kernel_dsyrk_nt_l_8x8_lib4
-#endif
-
-
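For the dsyrk kernel above the product is the same, but inner_store_l_8x8_lib4 blends the diagonal 4x4 blocks with the existing contents of D, so only the lower triangle (j <= i) of the 8x8 block is written and the strictly upper elements are left untouched. A scalar reference using the lib4_get() helper from the dgemm sketch earlier (illustrative only, alpha/beta by value):

static void dsyrk_nt_l_8x8_ref(int k, double alpha, const double *A, int sda,
                               const double *B, int sdb, double beta,
                               const double *C, int sdc, double *D, int sdd)
{
	for (int j = 0; j < 8; j++)
		for (int i = j; i < 8; i++) {                 // lower triangle only
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += lib4_get(A, i, l, sda) * lib4_get(B, j, l, sdb);
			D[(i / 4) * 4 * sdd + j * 4 + i % 4] =
				beta * lib4_get(C, i, j, sdc) + alpha * acc;
		}
}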
-
-
-
-
-//                                  1      2              3          4        5          6        7             8          9        10         11       12      13
-// void kernel_dsyrk_nt_l_8x8_vs_lib4(int k, double *alpha, double *A, int sda, double *B, int sdb, double *beta, double *C, int sdc, double *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_8x8_vs_lib4
-	.type kernel_dsyrk_nt_l_8x8_vs_lib4, @function
-kernel_dsyrk_nt_l_8x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_8x8_vs_lib4
-_kernel_dsyrk_nt_l_8x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_8x8_vs_lib4
-	.def kernel_dsyrk_nt_l_8x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_8x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG5, %r13 // B
-	movq	ARG6, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // C
-	movq	ARG9, %r13 // sdc
-	sall	$5, %r13d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG10, %r10 // D
-	movq	ARG11, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG12, %r12 // km
-	movq	ARG13, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_8x8_vs_lib4, .-kernel_dsyrk_nt_l_8x8_vs_lib4
-#endif
-
-
-
-
-
-
-//                                  1      2          3        4          5        6          7        8          9        10
-// void kernel_dpotrf_nt_l_8x8_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x8_lib4
-	.type kernel_dpotrf_nt_l_8x8_lib4, @function
-kernel_dpotrf_nt_l_8x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x8_lib4
-_kernel_dpotrf_nt_l_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x8_lib4
-	.def kernel_dpotrf_nt_l_8x8_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-	movl	$8, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x8_lib4, .-kernel_dpotrf_nt_l_8x8_lib4
-#endif
-
-
-
-
-
-//                                     1      2          3        4          5        6          7        8          9        10                  11      12
-// void kernel_dpotrf_nt_l_8x8_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_8x8_vs_lib4
-	.type kernel_dpotrf_nt_l_8x8_vs_lib4, @function
-kernel_dpotrf_nt_l_8x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_8x8_vs_lib4
-_kernel_dpotrf_nt_l_8x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_8x8_vs_lib4
-	.def kernel_dpotrf_nt_l_8x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_8x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13 // B
-	movq	ARG5, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG10, %r10  // inv_diag_D 
-	movq	ARG12, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG11, %r12 // km 
-	movq	ARG12, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_8x8_vs_lib4, .-kernel_dpotrf_nt_l_8x8_vs_lib4
-#endif
-
-
-
-
-
-//                                        1       2           3         4           5         6       7           8         9           10        11         12       13         14       15
-// void kernel_dsyrk_dpotrf_nt_l_8x8_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x8_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x8_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x8_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x8_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x8_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11 // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 4*sdap*sizeof(double)
-	movq	ARG4, %r13 // Bp
-	movq	ARG5, %r14 // sdbp
-	sall	$5, %r14d // 4*sdbp*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	movq	ARG6, %r10 // km
-	movq	ARG7, %r11 // Am
-	movq	ARG8, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG9, %r13 // Bm
-	movq	ARG10, %r14 // sdbm
-	sall	$5, %r14d // 4*sdbm*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG11, %r10 // C
-	movq	ARG12, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG15, %r10  // inv_diag_D 
-	movl	$8, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG13, %r10 // store address D
-	movq	ARG14, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x8_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x8_lib4
-#endif
-
-
-
-
-
-//                                           1       2           3         4           5         6       7           8         9           10        11         12       13         14       15                  16      17
-// void kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11 // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 4*sdap*sizeof(double)
-	movq	ARG4, %r13 // Bp
-	movq	ARG5, %r14 // sdbp
-	sall	$5, %r14d // 4*sdbp*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG6, %r10 // km
-	movq	ARG7, %r11 // Am
-	movq	ARG8, %r12 // sdam
-	sall	$5, %r12d // 4*sdam*sizeof(double)
-	movq	ARG9, %r13 // Bm
-	movq	ARG10, %r14 // sdbm
-	sall	$5, %r14d // 4*sdbm*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG11, %r10 // C
-	movq	ARG12, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG15, %r10  // inv_diag_D 
-	movq	ARG17, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_8x8_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG13, %r10 // store address D
-	movq	ARG14, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG16, %r12 // km 
-	movq	ARG17, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_8x8_vs_lib4
-#endif
-
-
-
-
-
-//                                       1      2          3        4          5        6          7        8          9        10         11       12
-// void kernel_dtrsm_nt_rl_inv_8x8l_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x8l_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x8l_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x8l_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x8l_lib4
-_kernel_dtrsm_nt_rl_inv_8x8l_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x8l_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x8l_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x8l_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-	movq	ARG5, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8L_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8l_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8l_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8L_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8l_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8l_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x8l_lib4, .-kernel_dtrsm_nt_rl_inv_8x8l_lib4
-#endif
-
-
-
-
-
-//                                       1      2          3        4          5        6          7        8          9        10         11       12
-// void kernel_dtrsm_nt_rl_inv_8x8u_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x8u_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x8u_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x8u_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x8u_lib4
-_kernel_dtrsm_nt_rl_inv_8x8u_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x8u_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x8u_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x8u_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG4, %r11
-	movq	ARG5, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG2, %r13
-	movq	ARG3, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG8, %r13 // D
-	movq	ARG9, %r14 // sdd
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8U_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8u_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8u_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8U_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8u_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8u_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x8u_lib4, .-kernel_dtrsm_nt_rl_inv_8x8u_lib4
-#endif
-
-
-
-
-
-//                                          1      2          3        4          5        6          7        8          9        10         11       12                  13      14
-// void kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4
-_kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-	movq	ARG5, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG14, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8L_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG13, %r12 // km 
-	movq	ARG14, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8L_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8l_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8l_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4, .-kernel_dtrsm_nt_rl_inv_8x8l_vs_lib4
-#endif
-
-
-
-
-
-//                                          1      2          3        4          5        6          7        8          9        10         11       12                  13      14
-// void kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4(int k, double *A, int sda, double *B, int sdb, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4
-_kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG4, %r11
-	movq	ARG5, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG2, %r13
-	movq	ARG3, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG6, %r10 // C
-	movq	ARG7, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG10, %r10  // E 
-	movq	ARG11, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG12, %r12  // inv_diag_E 
-	movq	ARG8, %r13 // D
-	movq	ARG9, %r14 // sdd
-	sall	$5, %r14d // 4*sdd*sizeof(double)
-	movq	ARG14, %r15 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8U_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // store address D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG13, %r12 // km 
-	movq	ARG14, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8U_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8u_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8u_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4, .-kernel_dtrsm_nt_rl_inv_8x8u_vs_lib4
-#endif
-
-
-
-
-
-//                                                1       2           3         4           5         6       7           8          9          10        11         12       13         14       15         16       17                  18      19
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-	movq	ARG5, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	movq	ARG6, %r10
-	movq	ARG7, %r11
-	movq	ARG8, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG9, %r13
-	movq	ARG10, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG11, %r10
-	movq	ARG12, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG15, %r10  // E 
-	movq	ARG16, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG17, %r12  // inv_diag_E 
-	movq	ARG19, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8L_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8l_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG13, %r10 // store address D
-	movq	ARG14, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG18, %r12 // km 
-	movq	ARG19, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8L_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8l_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8l_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x8l_vs_lib4
-#endif
-
-
-
-
-
-//                                                1       2           3         4           5         6       7           8          9          10        11         12       13         14       15         16       17                  18      19
-// void kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4(int kp, double *Ap, int sdap, double *Bp, int sdbp, int km, double *Am, int sdam, double *Bm, int sdbm, double *C, int sdc, double *D, int sdd, double *E, int sde, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovapd	%ymm0, %ymm8
-	vmovapd	%ymm0, %ymm9
-	vmovapd	%ymm0, %ymm10
-	vmovapd	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG4, %r11
-	movq	ARG5, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG2, %r13
-	movq	ARG3, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_8x8_lib4
-#endif
-#endif
-
-
-	movq	ARG6, %r10
-	movq	ARG9, %r11
-	movq	ARG10, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG7, %r13
-	movq	ARG8, %r14
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_8x8_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG11, %r10 // C
-	movq	ARG12, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_8X8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_8x8_lib4
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_8x8_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG15, %r10  // E 
-	movq	ARG16, %r11  // sde 
-	sall	$5, %r11d // 4*sde*sizeof(double)
-	movq	ARG17, %r12  // inv_diag_E 
-	movq	ARG13, %r13 // D
-	movq	ARG14, %r14 // sdd
-	sall	$5, %r14d // 4*sdd*sizeof(double)
-	movq	ARG19, %r15 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_8X8U_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_8x8u_vs_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG13, %r10 // store address D
-	movq	ARG14, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG18, %r12 // km 
-	movq	ARG19, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8U_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8u_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8x8u_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_8x8u_vs_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC05: // { 1.0 1.0 1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC05: // { 1.0 1.0 1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC06: // { 1.0 1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC06: // { 1.0 1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC07: // { 1.0 -1.0 -1.0 -1.0 }
-#endif
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	1072693248
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC08: // { -1.0 -1.0 -1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC08: // { -1.0 -1.0 -1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-	.long	0
-	.long	-1074790400
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC10: // { -1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC10: // { -1.0 1.0 1.0 1.0 }
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	-1074790400
-
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgemv_8_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dgemv_8_lib4.S
deleted file mode 100644
index 1c9185a..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgemv_8_lib4.S
+++ /dev/null
@@ -1,1543 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm4  <- [z0 z1 z2 z3]_c
-// ymm5  <- [z4 z5 z6 z7]_c
-// ymm6  <- [z0 z1 z2 z3]_d
-// ymm7  <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x+k*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm4  <- [z0 z1 z2 z3]_c
-// ymm5  <- [z4 z5 z6 z7]_c
-// ymm6  <- [z0 z1 z2 z3]_d
-// ymm7  <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_n_8_lib4, @function
-inner_kernel_dgemv_add_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_n_8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_n_8_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	
-	subl	$4, %r10d
-
-	vbroadcastsd	8(%r13), %ymm12
-	vmovapd			32(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vmovapd			32(%r15), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	
-	vbroadcastsd	16(%r13), %ymm12
-	vmovapd			64(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm4
-	vmovapd			64(%r15), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm5
-
-	vbroadcastsd	24(%r13), %ymm12
-	addq			$32, %r13
-	vmovapd			96(%r11), %ymm8
-	addq			$128, %r11
-	vfmadd231pd		%ymm8, %ymm12, %ymm6
-	vmovapd			96(%r15), %ymm8
-	addq			$128, %r15
-	vfmadd231pd		%ymm8, %ymm12, %ymm7
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd			0(%r11), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm0, %ymm15, %ymm0
-	vmovapd			0(%r15), %ymm8
-	vmulpd			%ymm8, %ymm12, %ymm15
-	vaddpd			%ymm1, %ymm15, %ymm1
-	
-	addq	$32, %r11
-	addq	$32, %r15
-	addq	$8, %r13
-	
-	subl	$1, %r10d
-	cmpl	$0, %r10d
-
-	jg		0b // clean
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_n_8_lib4, .-inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x+k*sizeof(double)
-// ymm0  <- [z0a z0b z0c z0d]
-// ymm1  <- [z1a z1b z1c z1d]
-// ymm2  <- [z2a z2b z2c z2d]
-// ymm3  <- [z3a z3b z3c z3d]
-// ymm4  <- [z4a z4b z4c z4d]
-// ymm5  <- [z5a z5b z5c z5d]
-// ymm6  <- [z6a z6b z6c z6d]
-// ymm7  <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_t_8_lib4, @function
-inner_kernel_dgemv_add_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_t_8_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_t_8_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-
-	vmovapd	128(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm4
-	
-	vmovapd	160(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm5
-	
-	vmovapd	192(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm6
-
-	vmovapd	224(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm7
-	
-	addq	%r12, %r11
-	addq	$32, %r13
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm14
-
-	vmaskmovpd	0(%r13), %ymm14, %ymm12
-
-	vmovapd	0(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm0
-	
-	vmovapd	32(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm1
-	
-	vmovapd	64(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm2
-
-	vmovapd	96(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm3
-		
-	vmovapd	128(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm4
-	
-	vmovapd	160(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm5
-	
-	vmovapd	192(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm6
-
-	vmovapd	224(%r11), %ymm8
-	vfmadd231pd	%ymm8, %ymm12, %ymm7
-
-	sall	$3, %r10d
-//	movslq	%r10d, %r10
-	addq	%r10, %r11
-	addq	%r10, %r13
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_t_8_lib4, .-inner_kernel_dgemv_add_t_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm4  <- [z0 z1 z2 z3]_c
-// ymm5  <- [z4 z5 z6 z7]_c
-// ymm6  <- [z0 z1 z2 z3]_d
-// ymm7  <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- k-8
-// r11   <- A+4*8*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- x+8*sizeof(double)
-// r15   <- dirty
-// ymm0  <- [z0 z1 z2 z3]_a
-// ymm1  <- [z4 z5 z6 z7]_a
-// ymm2  <- [z0 z1 z2 z3]_b
-// ymm3  <- [z4 z5 z6 z7]_b
-// ymm4  <- [z0 z1 z2 z3]_c
-// ymm5  <- [z4 z5 z6 z7]_c
-// ymm6  <- [z0 z1 z2 z3]_d
-// ymm7  <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMV_UN_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmv_un_8_lib4, @function
-inner_edge_dtrmv_un_8_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmv_un_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmv_un_8_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmv_un_8_lib4:
-#endif
-#endif
-	
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(double)
-
-	vxorpd			%ymm14, %ymm14, %ymm14
-
-	// first 4 columns
-	vmovapd			0(%r11), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	0(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	
-	subl			$4, %r10d
-
-	vmovapd			32(%r11), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	8(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	
-	vmovapd			64(%r11), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vbroadcastsd	16(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm4
-
-	vmovapd			96(%r11), %ymm8
-	vbroadcastsd	24(%r13), %ymm12
-	vfmadd231pd		%ymm8, %ymm12, %ymm6
-	
-	addq			$128, %r11
-	addq			$128, %r15
-	addq			$32, %r13
-
-
-
-	// last 4 columns
-	vbroadcastsd	0(%r13), %ymm12
-	vmovapd			0(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm0
-	vmovapd			0(%r15), %ymm8
-	vblendpd		$0x1, %ymm8, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm1
-	
-	subl			$4, %r10d
-
-	vbroadcastsd	8(%r13), %ymm12
-	vmovapd			32(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm2
-	vmovapd			32(%r15), %ymm8
-	vblendpd		$0x3, %ymm8, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm3
-	
-	vbroadcastsd	16(%r13), %ymm12
-	vmovapd			64(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm4
-	vmovapd			64(%r15), %ymm8
-	vblendpd		$0x7, %ymm8, %ymm14, %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm5
-
-	vbroadcastsd	24(%r13), %ymm12
-	vmovapd			96(%r11), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm6
-	vmovapd			96(%r15), %ymm8
-	vfmadd231pd		%ymm8, %ymm12, %ymm7
-	
-	addq			$128, %r11
-	addq			$128, %r15
-	addq			$32, %r13
-	
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmv_un_8_lib4, .-inner_edge_dtrmv_un_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n
-//
-// input arguments:
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_8_lib4, @function
-inner_blend_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_8_lib4; .scl 2; .type 32; .endef
-inner_blend_n_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_8_lib4, .-inner_blend_n_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t
-//
-// input arguments:
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_8_lib4, @function
-inner_blend_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_8_lib4; .scl 2; .type 32; .endef
-inner_blend_t_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_8_lib4, .-inner_blend_t_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==n, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm4 <- [z0 z1 z2 z3]_c
-// ymm5 <- [z4 z5 z6 z7]_c
-// ymm6 <- [z0 z1 z2 z3]_d
-// ymm7 <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_N_SCALE_AB_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_n_scale_ab_8_lib4, @function
-inner_blend_n_scale_ab_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_n_scale_ab_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_n_scale_ab_8_lib4; .scl 2; .type 32; .endef
-inner_blend_n_scale_ab_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-	vaddpd	%ymm4, %ymm6, %ymm4
-	vaddpd	%ymm5, %ymm7, %ymm5
-	vaddpd	%ymm0, %ymm4, %ymm0
-	vaddpd	%ymm1, %ymm5, %ymm1
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vfmadd231pd	%ymm15, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vfmadd231pd	%ymm15, %ymm14, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_n_scale_ab_8_lib4, .-inner_blend_n_scale_ab_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_8_lib4, @function
-inner_blend_t_scale_ab_8_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_8_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-	vmulpd	%ymm1, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd		0(%r12), %ymm14
-	vfmadd231pd	%ymm15, %ymm14, %ymm0
-	vmovupd		32(%r12), %ymm14
-	vfmadd231pd	%ymm15, %ymm14, %ymm1
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_8_lib4, .-inner_blend_t_scale_ab_8_lib4
-#endif
-#endif
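
The transposed variant first has to reduce across lanes: each of ymm0..ymm3 (and ymm4..ymm7) holds the four partial sums of one output element, and the vhaddpd / vperm2f128 / vaddpd sequence collapses them into a single vector of dot products. A hedged intrinsics sketch of that 4-to-1 reduction, written as a standalone helper for illustration:

```c
#include <immintrin.h>

// Collapse four accumulators (one per output element, four partial sums each)
// into one vector [sum(a0), sum(a1), sum(a2), sum(a3)], mirroring the
// vhaddpd / vperm2f128 / vaddpd sequence in the routine above.
static __m256d reduce4(__m256d a0, __m256d a1, __m256d a2, __m256d a3)
	{
	__m256d r01 = _mm256_hadd_pd(a0, a1); // [a0_0+a0_1, a1_0+a1_1, a0_2+a0_3, a1_2+a1_3]
	__m256d r23 = _mm256_hadd_pd(a2, a3);
	__m256d lo  = _mm256_permute2f128_pd(r23, r01, 0x02); // pairs 0..1 of each accumulator
	__m256d hi  = _mm256_permute2f128_pd(r23, r01, 0x13); // pairs 2..3 of each accumulator
	return _mm256_add_pd(lo, hi);
	}
```
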
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==n
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]_a
-// ymm1 <- [z4 z5 z6 z7]_a
-// ymm2 <- [z0 z1 z2 z3]_b
-// ymm3 <- [z4 z5 z6 z7]_b
-// ymm4 <- [z0 z1 z2 z3]_c
-// ymm5 <- [z4 z5 z6 z7]_c
-// ymm6 <- [z0 z1 z2 z3]_d
-// ymm7 <- [z4 z5 z6 z7]_d
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_N_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_n_8_lib4, @function
-inner_blender_n_8_lib4:
-#elif defined(OS_MAC)
-_inner_blender_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_n_8_lib4; .scl 2; .type 32; .endef
-inner_blender_n_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vaddpd	%ymm0, %ymm2, %ymm0
-	vaddpd	%ymm1, %ymm3, %ymm1
-	vaddpd	%ymm4, %ymm6, %ymm4
-	vaddpd	%ymm5, %ymm7, %ymm5
-	vaddpd	%ymm0, %ymm4, %ymm0
-	vaddpd	%ymm1, %ymm5, %ymm1
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_n_8_lib4, .-inner_blender_n_8_lib4
-#endif
-#endif
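
The alg-driven blender above only decides whether the reduced sum is returned as-is, added to y, or subtracted from y. Roughly, in scalar C (a sketch, not library code):

```c
// alg ==  0 : z = sum
// alg ==  1 : z = y + sum
// alg == -1 : z = y - sum
void blender_n_8_ref(int alg, const double *y, const double sum[8], double z[8])
	{
	for(int i=0; i<8; i++)
		{
		if(alg==0)
			z[i] = sum[i];
		else if(alg==1)
			z[i] = y[i] + sum[i];
		else
			z[i] = y[i] - sum[i];
		}
	}
```
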
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for ta==t
-//
-// input arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm6 <- [z6a z6b z6c z6d]
-// ymm7 <- [z7a z7b z7c z7d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10d <- alg
-// r11   <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLENDER_T_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blender_t_8_lib4, @function
-inner_blender_t_8_lib4:
-#elif defined(OS_MAC)
-_inner_blender_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blender_t_8_lib4; .scl 2; .type 32; .endef
-inner_blender_t_8_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm5, %ymm4, %ymm4
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vhaddpd	%ymm7, %ymm6, %ymm6
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm3
-	vperm2f128	$0x2, %ymm4, %ymm6, %ymm5
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vperm2f128	$0x13, %ymm4, %ymm6, %ymm4
-	vaddpd	%ymm0, %ymm3, %ymm0
-	vaddpd	%ymm4, %ymm5, %ymm1
-
-	cmpl	$0, %r10d // alg
-	je		0f // return
-
-	cmpl	$1, %r10d // alg
-	jne		1f // alg==-1
-
-	// alg==1
-	vmovupd		0(%r11), %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-
-	jmp		0f // return
-
-1:
-
-	// alg==-1
-	vmovupd		0(%r11), %ymm15
-	vsubpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r11), %ymm15
-	vsubpd		%ymm1, %ymm15, %ymm1
-
-0: // return
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blender_t_8_lib4, .-inner_blender_t_8_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 z6 z7]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8_lib4, @function
-inner_store_8_lib4:
-#elif defined(OS_MAC)
-_inner_store_8_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8_lib4; .scl 2; .type 32; .endef
-inner_store_8_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0, 0(%r10)
-	vmovupd %ymm1, 32(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8_lib4, .-inner_store_8_lib4
-#endif
-#endif
-
-
-
-
-
-//                            rdi    rsi            rdx        rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_n_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_n_8_lib4
-	.type kernel_dgemv_n_8_lib4, @function
-kernel_dgemv_n_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_n_8_lib4
-_kernel_dgemv_n_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_n_8_lib4
-	.def kernel_dgemv_n_8_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_n_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_N_SCALE_AB_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_n_scale_ab_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_n_scale_ab_8_lib4
-#endif
-#endif
-
-
-
-	// store
-
-	movq	ARG8, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_n_8_lib4, .-kernel_dgemv_n_8_lib4
-#endif
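
As a cross-check for the full kernel, here is a hedged scalar reference of what kernel_dgemv_n_8_lib4 computes, assuming the usual panel-major ("lib4") layout in which element (i, j) of the 8-row block sits at pA[(i/4)*4*sda + i%4 + 4*j]:

```c
// z[0:8] = beta*y[0:8] + alpha * A[0:8, 0:k] * x[0:k]  (panel-major A, sketch only)
void kernel_dgemv_n_8_lib4_ref(int k, const double *alpha, const double *A, int sda,
                               const double *x, const double *beta, const double *y, double *z)
	{
	const int bs = 4;
	for(int i=0; i<8; i++)
		{
		double acc = 0.0;
		for(int j=0; j<k; j++)
			acc += A[i/bs*bs*sda + i%bs + bs*j] * x[j];
		z[i] = beta[0]*y[i] + alpha[0]*acc;
		}
	}
```
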
-
-
-
-
-
-//                            rdi    rsi           rdx         rcx      r8         r9            rsp+8      rsp+16
-// void kernel_dgemv_t_8_lib4(int k, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_t_8_lib4
-	.type kernel_dgemv_t_8_lib4, @function
-kernel_dgemv_t_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_t_8_lib4
-_kernel_dgemv_t_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_t_8_lib4
-	.def kernel_dgemv_t_8_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_t_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemv kernel t
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG5, %r13  // x
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_T_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_t_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_t_8_lib4
-#endif
-#endif
-
-
-	// call inner blend t scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11   // beta
-	movq	ARG7, %r12 // y 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_8_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_t_8_lib4, .-kernel_dgemv_t_8_lib4
-#endif
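
The transposed kernel differs only in which index runs over k; a matching scalar sketch under the same layout assumption:

```c
// z[0:8] = beta*y[0:8] + alpha * A[0:k, 0:8]^T * x[0:k]  (panel-major A, sketch only)
void kernel_dgemv_t_8_lib4_ref(int k, const double *alpha, const double *A, int sda,
                               const double *x, const double *beta, const double *y, double *z)
	{
	const int bs = 4;
	for(int j=0; j<8; j++)
		{
		double acc = 0.0;
		for(int i=0; i<k; i++)
			acc += A[i/bs*bs*sda + i%bs + bs*j] * x[i];
		z[j] = beta[0]*y[j] + alpha[0]*acc;
		}
	}
```
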
-
-
-
-
-
-//                             rdi    rsi        rdx      rcx        r8
-// void kernel_dtrmv_un_8_lib4(int k, double *A, int sda, double *x, double *z);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmv_un_8_lib4
-	.type kernel_dtrmv_un_8_lib4, @function
-kernel_dtrmv_un_8_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmv_un_8_lib4
-_kernel_dtrmv_un_8_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmv_un_8_lib4
-	.def kernel_dtrmv_un_8_lib4; .scl 2; .type 32; .endef
-kernel_dtrmv_un_8_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dtrmv edge & dgemv kernel n
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11  // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG4, %r13  // x
-
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMV_UN_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmv_un_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmv_un_8_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_n_8_lib4
-#endif
-#endif
-
-
-	// call inner blender n
-
-#if MACRO_LEVEL>=1
-	INNER_BLENDER_N_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blender_n_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_blender_n_8_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // z
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_8_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmv_un_8_lib4, .-kernel_dtrmv_un_8_lib4
-#endif
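
For the triangular kernel, a rough scalar reference under the same layout assumption; it also assumes (as the "un" name suggests) that only upper-triangular entries of the leading block contribute, i.e. column j multiplies row i only when j >= i:

```c
// z[0:8] = U[0:8, 0:k] * x[0:k], leading block treated as upper triangular (sketch only)
void kernel_dtrmv_un_8_lib4_ref(int k, const double *A, int sda, const double *x, double *z)
	{
	const int bs = 4;
	for(int i=0; i<8; i++)
		{
		double acc = 0.0;
		for(int j=i; j<k; j++) // skip the strictly-lower part of the triangle
			acc += A[i/bs*bs*sda + i%bs + bs*j] * x[j];
		z[i] = acc;
		}
	}
```
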
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
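
The .LC02/.LC03/.LC04 tables above store doubles as pairs of 32-bit words, low word first (for example 0.5 is 0x3FE0000000000000, i.e. .long 0 then .long 1071644672). A small helper sketch, not part of the original file, to regenerate or check such entries:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

// Print a double as the two little-endian 32-bit words used in the
// read-only data tables above (low word first, then sign/exponent word).
static void print_quadword(double d)
	{
	uint64_t u;
	memcpy(&u, &d, sizeof(u));
	printf("\t.long\t%u\n\t.long\t%u\n", (unsigned)(u & 0xffffffffu), (unsigned)(u >> 32));
	}

int main(void)
	{
	print_quadword(0.5);  // .long 0 / .long 1071644672, first entry of .LC02
	print_quadword(1.0);  // .long 0 / .long 1072693248, entries of .LC04
	return 0;
	}
```
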
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dgetrf_pivot_4_lib4.c b/third_party/blasfeo/kernel/avx2/kernel_dgetrf_pivot_4_lib4.c
deleted file mode 100644
index b1329fe..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dgetrf_pivot_4_lib4.c
+++ /dev/null
@@ -1,1435 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <mmintrin.h>
-#include <xmmintrin.h>  // SSE
-#include <emmintrin.h>  // SSE2
-#include <pmmintrin.h>  // SSE3
-#include <smmintrin.h>  // SSE4
-#include <immintrin.h>  // AVX
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-
-
-
-// C numbering (starting from zero) in the ipiv
-void kernel_dgetrf_pivot_4_lib4(int m, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	__m128d
-		max0, max1, msk0, imx0, imx1,
-		inv;
-	
-		
-	__m256d
-		lft, msk,
-		sgn, vna, max, imx, idx,
-		ones,
-		tmp,
-		a_0,
-		b_0, b_1, b_2,
-		scl,
-		c_0,
-		d_0;
-	
-	double
-		dlft;
-
-	sgn = _mm256_set_pd( -0.0, -0.0, -0.0, -0.0 );
-	vna = _mm256_set_pd( 4.0, 4.0, 4.0, 4.0 );
-	lft  = _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-
-	double
-		tmp0;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	int B_pref = bs*sda;
-	
-
-	// first column
-
-	// find pivot
-	pB = &pA[0+bs*0];
-	idx = lft; // _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	k = 0;
-	for( ; k<m-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for( ; k<m-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<m)
-		{
-		dlft = m-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		a_0 = _mm256_load_pd( &pB[0] );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_blendv_pd( a_0, sgn, msk );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		inv = _mm_loaddup_pd( &pA[0+bs*0] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[0], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[0] = 0.0;
-		}
-
-
-	// second column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 2.2, 1.2, 0.2, -0.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	c_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x1 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	d_0 = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( d_0, c_0, 0x1 );
-	_mm256_store_pd( &pA[0+bs*0], a_0 );
-	_mm256_store_pd( &pA[0+bs*1], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x1 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[1] = idamax+1;
-	if(tmp0!=0)
-		{
-		if(ipiv[1]!=1)
-			drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		inv = _mm_loaddup_pd( &pA[1+bs*1] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[1], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[1] = 0.0;
-		}
-
-
-	// third column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 1.2, 0.2, -0.8, -1.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*2] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x1 );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x3 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x3 );
-	_mm256_store_pd( &pA[0+bs*1], a_0 );
-	_mm256_store_pd( &pA[0+bs*2], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x3 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[2] = idamax+2;
-	if(tmp0!=0)
-		{
-		if(ipiv[2]!=2)
-			drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-		inv = _mm_loaddup_pd( &pA[2+bs*2] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[2], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[2] = 0.0;
-		}
-
-
-	// fourth column
-
-	// scale & correct & find pivot
-	idx = _mm256_set_pd( 0.2, -0.8, -1.8, -2.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*3] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x1 );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x3 );
-	a_0 = _mm256_load_pd( &pA[0+bs*2] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_2 = _mm256_permute2f128_pd( c_0, c_0, 0x11 );
-	a_0 = _mm256_blend_pd( tmp, a_0, 0x7 );
-	b_2 = _mm256_permute_pd( b_2, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_2 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	c_0 = _mm256_blend_pd( tmp, c_0, 0x7 );
-	_mm256_store_pd( &pA[0+bs*2], a_0 );
-	_mm256_store_pd( &pA[0+bs*3], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x7 );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[3] = idamax+3;
-	if(tmp0!=0)
-		{
-		if(ipiv[3]!=3)
-			drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-		inv = _mm_loaddup_pd( &pA[3+bs*3] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[3], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[3] = 0.0;
-		}
-
-	// scale
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		tmp = _mm256_mul_pd( c_0, scl );
-		c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-//		pB += B_pref;
-		}
-
-	return;
-
-	}
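
Stripped of the intrinsics, the routine above is a right-looking, partially pivoted factorization of a 4-column panel. A hedged scalar reference (helper names are made up; it assumes the same panel-major addressing pA[(i/4)*4*sda + i%4 + 4*j] and C-style row indices in ipiv):

```c
#include <math.h>

static double *elem(double *pA, int sda, int i, int j)
	{
	return &pA[i/4*4*sda + i%4 + 4*j];
	}

void dgetrf_pivot_4_ref(int m, double *pA, int sda, double *inv_diag_A, int *ipiv)
	{
	for(int j=0; j<4; j++)
		{
		// pivot search: row of largest |A(i,j)| for i = j..m-1
		int ip = j;
		for(int i=j+1; i<m; i++)
			if(fabs(*elem(pA, sda, i, j)) > fabs(*elem(pA, sda, ip, j)))
				ip = i;
		ipiv[j] = ip;
		double piv = *elem(pA, sda, ip, j);
		if(piv!=0.0)
			{
			if(ip!=j) // swap the full rows of the 4-column panel
				for(int jj=0; jj<4; jj++)
					{
					double t = *elem(pA, sda, j, jj);
					*elem(pA, sda, j, jj) = *elem(pA, sda, ip, jj);
					*elem(pA, sda, ip, jj) = t;
					}
			inv_diag_A[j] = 1.0 / *elem(pA, sda, j, j);
			for(int i=j+1; i<m; i++)
				*elem(pA, sda, i, j) *= inv_diag_A[j]; // scale the subdiagonal
			}
		else
			{
			inv_diag_A[j] = 0.0; // singular column: no swap, no scaling
			}
		// rank-1 update of the trailing columns of the panel
		for(int jj=j+1; jj<4; jj++)
			for(int i=j+1; i<m; i++)
				*elem(pA, sda, i, jj) -= *elem(pA, sda, i, j) * *elem(pA, sda, j, jj);
		}
	}
```
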
-
-	
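
The extractf128 / blendv / permute block that follows each pivot search above is a horizontal arg-max over four lanes, carried out on the value vector and the index vector in lockstep. Pulled out as a standalone helper for illustration (the strict greater-than keeps the lowest index on ties, matching the comment in the code):

```c
#include <immintrin.h>

// Reduce a vector of four candidate maxima plus their candidate indices
// (stored as n + 0.2) to a scalar max and its integer index.
static int hargmax4(__m256d max, __m256d imx, double *max_out)
	{
	__m128d max0 = _mm256_extractf128_pd(max, 0x0);
	__m128d max1 = _mm256_extractf128_pd(max, 0x1);
	__m128d imx0 = _mm256_extractf128_pd(imx, 0x0); // lower indexes win ties
	__m128d imx1 = _mm256_extractf128_pd(imx, 0x1);
	__m128d msk0 = _mm_cmp_pd(max1, max0, _CMP_GT_OS);
	max0 = _mm_blendv_pd(max0, max1, msk0);
	imx0 = _mm_blendv_pd(imx0, imx1, msk0);
	__m128d maxh = _mm_permute_pd(max0, 0x1);
	__m128d imxh = _mm_permute_pd(imx0, 0x1);
	msk0 = _mm_cmp_pd(maxh, max0, _CMP_GT_OS);
	max0 = _mm_blendv_pd(max0, maxh, msk0);
	imx0 = _mm_blendv_pd(imx0, imxh, msk0);
	_mm_store_sd(max_out, max0);
	return _mm_cvtsd_si32(imx0); // n + 0.2 rounds to n under the default rounding mode
	}
```
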
-
-void kernel_dgetrf_pivot_4_vs_lib4(int m, int n, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	__m128d
-		max0, max1, msk0, imx0, imx1,
-		inv;
-	
-		
-	__m256d
-		lft, msk,
-		sgn, vna, max, imx, idx,
-		ones,
-		tmp,
-		a_0,
-		b_0, b_1, b_2,
-		scl,
-		c_0,
-		d_0;
-	
-	double
-		dlft;
-
-	sgn = _mm256_set_pd( -0.0, -0.0, -0.0, -0.0 );
-	vna = _mm256_set_pd( 4.0, 4.0, 4.0, 4.0 );
-	lft  = _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	ones = _mm256_set_pd( 1.0, 1.0, 1.0, 1.0 );
-
-	double
-		tmp0;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	int B_pref = bs*sda;
-	
-
-	// first column
-
-	// find pivot
-	pB = &pA[0+bs*0];
-	idx = lft; // _mm256_set_pd( 3.2, 2.2, 1.2, 0.2 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	k = 0;
-	for( ; k<m-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0] );
-//		__builtin_prefetch( pB+2*B_pref );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for( ; k<m-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0] );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<m)
-		{
-		dlft = m-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		a_0 = _mm256_load_pd( &pB[0] );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_blendv_pd( a_0, sgn, msk );
-		a_0 = _mm256_andnot_pd( sgn, a_0 ); // abs
-		msk = _mm256_cmp_pd( a_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, a_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		inv = _mm_loaddup_pd( &pA[0+bs*0] );
-		inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-		scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-		_mm_store_sd( &inv_diag_A[0], inv );
-		}
-	else
-		{
-		scl = ones;
-		inv_diag_A[0] = 0.0;
-		}
-	
-	if(n==1)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x1 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*0], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*0] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*0], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-
-	// second column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 2.2, 1.2, 0.2, -0.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	c_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x1 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	d_0 = _mm256_sub_pd( c_0, tmp );
-	d_0 = _mm256_blend_pd( d_0, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*0], a_0 );
-	_mm256_store_pd( &pA[0+bs*1], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x1 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*0], a_0 );
-		_mm256_store_pd( &pB[0+bs*1], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>1)
-		{
-		ipiv[1] = idamax+1;
-		if(tmp0!=0)
-			{
-			if(ipiv[1]!=1)
-				drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-			inv = _mm_loaddup_pd( &pA[1+bs*1] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[1], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[1] = 0.0;
-			}
-		}
-
-	if(n==2)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x3 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*1], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*1] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*1], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-	// third column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 1.2, 0.2, -0.8, -1.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*2] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x3 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x3 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*1], a_0 );
-	_mm256_store_pd( &pA[0+bs*2], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x3 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*1], a_0 );
-		_mm256_store_pd( &pB[0+bs*2], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>2)
-		{
-		ipiv[2] = idamax+2;
-		if(tmp0!=0)
-			{
-			if(ipiv[2]!=2)
-				drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-			inv = _mm_loaddup_pd( &pA[2+bs*2] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[2], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[2] = 0.0;
-			}
-		}
-
-	if(n==3)
-		{
-		// scale & return
-		dlft = m;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		a_0 = _mm256_load_pd( &pA[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_blend_pd( tmp, a_0, 0x7 );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		_mm256_store_pd( &pA[0+bs*2], a_0 );
-		pB = pA + B_pref;
-		k = 0;
-		for(; k<ma-7; k+=8)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			}
-		for(; k<ma-3; k+=4)
-			{
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			a_0 = _mm256_mul_pd( a_0, scl );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-			pB += B_pref;
-			}
-		if(k<ma)
-			{
-			dlft = ma-k;
-			msk = _mm256_broadcast_sd( &dlft );
-			msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-			a_0 = _mm256_load_pd( &pB[0+bs*2] );
-			tmp = _mm256_mul_pd( a_0, scl );
-			a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-			_mm256_store_pd( &pB[0+bs*2], a_0 );
-	//		pB += B_pref;
-			}
-
-		return;
-		}
-
-	// fourth column
-
-	// scale & correct & find pivot
-	dlft = m;
-	msk = _mm256_broadcast_sd( &dlft );
-	msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-	idx = _mm256_set_pd( 0.2, -0.8, -1.8, -2.8 );
-	max = _mm256_setzero_pd();
-	imx = _mm256_setzero_pd();
-	c_0 = _mm256_load_pd( &pA[0+bs*3] );
-	b_0 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_0 = _mm256_permute_pd( b_0, 0x0 );
-	a_0 = _mm256_load_pd( &pA[0+bs*0] );
-	tmp = _mm256_mul_pd( a_0, b_0 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x1 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	b_1 = _mm256_permute2f128_pd( c_0, c_0, 0x00 );
-	b_1 = _mm256_permute_pd( b_1, 0xf );
-	a_0 = _mm256_load_pd( &pA[0+bs*1] );
-	tmp = _mm256_mul_pd( a_0, b_1 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x3 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	a_0 = _mm256_load_pd( &pA[0+bs*2] );
-	tmp = _mm256_mul_pd( a_0, scl );
-	b_2 = _mm256_permute2f128_pd( c_0, c_0, 0x11 );
-	tmp = _mm256_blend_pd( tmp, a_0, 0x7 );
-	a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-	b_2 = _mm256_permute_pd( b_2, 0x0 );
-	tmp = _mm256_mul_pd( a_0, b_2 );
-	tmp = _mm256_sub_pd( c_0, tmp );
-	tmp = _mm256_blend_pd( tmp, c_0, 0x7 );
-	c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-	_mm256_store_pd( &pA[0+bs*2], a_0 );
-	_mm256_store_pd( &pA[0+bs*3], c_0 );
-	c_0 = _mm256_blend_pd( c_0, sgn, 0x7 );
-	c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-	c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-	msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-	max = _mm256_blendv_pd( max, c_0, msk );
-	imx = _mm256_blendv_pd( imx, idx, msk );
-	idx = _mm256_add_pd( idx, vna );
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-//		__builtin_prefetch( pB+2*B_pref );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		a_0 = _mm256_mul_pd( a_0, scl );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		c_0 = _mm256_sub_pd( c_0, tmp );
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-		idx = _mm256_add_pd( idx, vna );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		a_0 = _mm256_load_pd( &pB[0+bs*0] );
-		tmp = _mm256_mul_pd( a_0, b_0 );
-		d_0 = _mm256_sub_pd( c_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*1] );
-		tmp = _mm256_mul_pd( a_0, b_1 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		a_0 = _mm256_load_pd( &pB[0+bs*2] );
-		tmp = _mm256_mul_pd( a_0, scl );
-		a_0 = _mm256_blendv_pd( tmp, a_0, msk );
-		tmp = _mm256_mul_pd( a_0, b_2 );
-		d_0 = _mm256_sub_pd( d_0, tmp );
-		c_0 = _mm256_blendv_pd( d_0, c_0, msk);
-		_mm256_store_pd( &pB[0+bs*2], a_0 );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		c_0 = _mm256_blendv_pd( c_0, sgn, msk );
-		c_0 = _mm256_andnot_pd( sgn, c_0 ); // abs
-		msk = _mm256_cmp_pd( c_0, max, 14 ); // >
-		max = _mm256_blendv_pd( max, c_0, msk );
-		imx = _mm256_blendv_pd( imx, idx, msk );
-//		idx = _mm256_add_pd( idx, vna );
-//		pB += B_pref;
-		}
-	max0 = _mm256_extractf128_pd( max, 0x0 );
-	max1 = _mm256_extractf128_pd( max, 0x1 );
-	imx0 = _mm256_extractf128_pd( imx, 0x0 ); // lower indexes in case of identical max value
-	imx1 = _mm256_extractf128_pd( imx, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	max1 = _mm_permute_pd( max0, 0x1 );
-	imx1 = _mm_permute_pd( imx0, 0x1 );
-	msk0 = _mm_cmp_pd( max1, max0, 14 );
-	max0 = _mm_blendv_pd( max0, max1, msk0 );
-	imx0 = _mm_blendv_pd( imx0, imx1, msk0 );
-	_mm_store_sd( &tmp0, max0 );
-	idamax = _mm_cvtsd_si32( imx0 );
-
-	// compute scaling
-	if(m>3)
-		{
-		ipiv[3] = idamax+3;
-		if(tmp0!=0)
-			{
-			if(ipiv[3]!=3)
-				drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-			inv = _mm_loaddup_pd( &pA[3+bs*3] );
-			inv = _mm_div_pd( _mm256_castpd256_pd128( ones ), inv );
-			scl = _mm256_permute2f128_pd( _mm256_castpd128_pd256( inv ), _mm256_castpd128_pd256( inv ), 0x00 );
-			_mm_store_sd( &inv_diag_A[3], inv );
-			}
-		else
-			{
-			scl = ones;
-			inv_diag_A[3] = 0.0;
-			}
-		}
-
-	// scale
-	pB = pA + B_pref;
-	k = 0;
-	for(; k<ma-7; k+=8)
-		{
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-//		__builtin_prefetch( pB+2*B_pref+8 );
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	for(; k<ma-3; k+=4)
-		{
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		c_0 = _mm256_mul_pd( c_0, scl );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-		pB += B_pref;
-		}
-	if(k<ma)
-		{
-		dlft = ma-k;
-		msk = _mm256_broadcast_sd( &dlft );
-		msk = _mm256_cmp_pd( lft, msk, 14 ); // >
-		c_0 = _mm256_load_pd( &pB[0+bs*3] );
-		tmp = _mm256_mul_pd( c_0, scl );
-		c_0 = _mm256_blendv_pd( tmp, c_0, msk );
-		_mm256_store_pd( &pB[0+bs*3], c_0 );
-//		pB += B_pref;
-		}
-
-	return;
-
-	}
-
-
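[Editor's note] The intrinsics removed above implement the final pivot step (column 3) of a blocked LU factorization with partial pivoting: they finish updating the column, search it for the entry of largest absolute value (predicate 14 in _mm256_cmp_pd is _CMP_GT_OS, and the andnot against the sign mask is a vector fabs), swap the pivot row into place with drowsw_lib, store the reciprocal of the pivot in inv_diag_A[3], and scale the entries below the pivot. A minimal scalar sketch of that per-column step, with illustrative names and not part of the patch, follows.

#include <math.h>

// Scalar sketch of the per-column pivot step the AVX code above vectorizes.
// `col` points at the diagonal entry of the current column, `m` entries
// remain; the real kernel additionally swaps the full rows (drowsw_lib) and
// keeps the running maximum and its index in ymm registers.
static void pivot_and_scale_ref(int m, double *col, int *ipiv, double *inv_diag)
	{
	int ii, imax = 0;
	double amax = fabs(col[0]);
	for(ii=1; ii<m; ii++)
		{
		if(fabs(col[ii])>amax)   // strict '>' keeps the lower index on ties
			{
			amax = fabs(col[ii]);
			imax = ii;
			}
		}
	*ipiv = imax;                // pivot row, relative to the diagonal
	if(col[imax]!=0.0)
		{
		double tmp = col[0];     // swap only the column entries here
		col[0] = col[imax];
		col[imax] = tmp;
		*inv_diag = 1.0/col[0];  // store reciprocal of the pivot
		for(ii=1; ii<m; ii++)
			col[ii] *= *inv_diag; // scale the sub-diagonal entries
		}
	else
		{
		*inv_diag = 0.0;         // zero pivot: leave the column unscaled
		}
	}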
diff --git a/third_party/blasfeo/kernel/avx2/kernel_dsymv_6_lib4.S b/third_party/blasfeo/kernel/avx2/kernel_dsymv_6_lib4.S
deleted file mode 100644
index 7a4411c..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_dsymv_6_lib4.S
+++ /dev/null
@@ -1,996 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm4  <- [z_t_4a z_t_4b z_t_4c z_t_4d]
-// ymm5  <- [z_t_5a z_t_5b z_t_5c z_t_5d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- x_n_4
-// ymm11 <- x_n_5
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm4  <- [z_t_4a z_t_4b z_t_4c z_t_4d]
-// ymm5  <- [z_t_5a z_t_5b z_t_5c z_t_5d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- x_n_4
-// ymm11 <- x_n_5
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMV_ADD_NT_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemv_add_nt_6_lib4, @function
-inner_kernel_dgemv_add_nt_6_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemv_add_nt_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemv_add_nt_6_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemv_add_nt_6_lib4:
-#endif
-#endif
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$4, %r10d
-	jl		0f // clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	vmovupd	0(%r13), %ymm12
-	vmovupd	0(%r14), %ymm13
-
-	vmovapd	0(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm0
-	vfmadd231pd	%ymm14, %ymm6, %ymm13
-	
-	subl	$4, %r10d
-
-	vmovapd	32(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm1
-	vfmadd231pd	%ymm14, %ymm7, %ymm13
-	
-	vmovapd	64(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm2
-	vfmadd231pd	%ymm14, %ymm8, %ymm13
-
-	vmovapd	96(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm3
-	vfmadd231pd	%ymm14, %ymm9, %ymm13
-	
-	vmovapd	128(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm4
-	vfmadd231pd	%ymm14, %ymm10, %ymm13
-	
-	vmovapd	160(%r11), %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm5
-	vfmadd231pd	%ymm14, %ymm11, %ymm13
-	
-	vmovupd	%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	cmpl	$3, %r10d
-
-	jg		1b // main loop 
-
-
-	// consider clean-up
-	cmpl	$0, %r10d
-	jle		2f // return
-
-0: // clean-up
-	
-	vcvtsi2sd	%r10d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovupd		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovupd		LC02(%rip), %ymm13
-#endif
-	vmovddup	%xmm14, %xmm14
-	vinsertf128	$1, %xmm14, %ymm14, %ymm14
-	vsubpd		%ymm14, %ymm13, %ymm15
-
-	vmaskmovpd	0(%r13), %ymm15, %ymm12
-	vmaskmovpd	0(%r14), %ymm15, %ymm13
-
-	vmaskmovpd	0(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm0
-	vfmadd231pd	%ymm14, %ymm6, %ymm13
-	
-	vmaskmovpd	32(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm1
-	vfmadd231pd	%ymm14, %ymm7, %ymm13
-	
-	vmaskmovpd	64(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm2
-	vfmadd231pd	%ymm14, %ymm8, %ymm13
-
-	vmaskmovpd	96(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm3
-	vfmadd231pd	%ymm14, %ymm9, %ymm13
-	
-	vmaskmovpd	128(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm4
-	vfmadd231pd	%ymm14, %ymm10, %ymm13
-	
-	vmaskmovpd	160(%r11), %ymm15, %ymm14
-	vfmadd231pd	%ymm14, %ymm12, %ymm5
-	vfmadd231pd	%ymm14, %ymm11, %ymm13
-	
-	vmaskmovpd	%ymm13, %ymm15, 0(%r14)
-
-	sall	$3, %r10d
-	addq	%r10, %r11
-	addq	%r10, %r13
-	addq	%r10, %r14
-	xorl	%r10d, %r10d
-	
-	
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemv_add_nt_6_lib4, .-inner_kernel_dgemv_add_nt_6_lib4
-#endif
-#endif
-
-
-
-
-
-
-#if 0
-
-// TODO
-// common inner routine with file scope
-//
-// input arguments:
-// r10   <- kmax
-// r11   <- A
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t
-// r14   <- z_n
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- kmax-4
-// r11   <- A+4*k*sizeof(double)
-// r12   <- bs*sda*sizeof(double) = 32*sda
-// r13   <- x_t+k*sizeof(double)
-// r14   <- z_n+k*sizeof(double)
-// ymm0  <- [z_t_0a z_t_0b z_t_0c z_t_0d]
-// ymm1  <- [z_t_1a z_t_1b z_t_1c z_t_1d]
-// ymm2  <- [z_t_2a z_t_2b z_t_2c z_t_2d]
-// ymm3  <- [z_t_3a z_t_3b z_t_3c z_t_3d]
-// ymm6  <- x_n_0
-// ymm7  <- x_n_1
-// ymm8  <- x_n_2
-// ymm9  <- x_n_3
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dsymv_add_nt_4_lib4, @function
-inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dsymv_add_nt_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dsymv_add_nt_4_lib4; .scl 2; .type 32; .endef
-inner_edge_dsymv_add_nt_4_lib4:
-#endif
-#endif
-
-	vmovupd		0(%r13), %ymm12
-	vmovupd		0(%r14), %ymm13
-
-	vmovapd		0(%r11), %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm6, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovapd		32(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x1, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm7, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovapd		64(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x3, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm8, %ymm15
-	vaddpd		%ymm13, %ymm15, %ymm13
-
-	vmovapd		96(%r11), %ymm14
-	vxorpd		%ymm15, %ymm15, %ymm15
-	vblendpd	$0x7, %ymm15, %ymm14, %ymm14
-	vmulpd		%ymm14, %ymm12, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-//	vxorpd		%ymm15, %ymm15, %ymm15
-//	vblendpd	$0x0, %ymm14, %ymm15, %ymm14
-//	vmulpd		%ymm14, %ymm9, %ymm15
-//	vaddpd		%ymm13, %ymm15, %ymm13
-	
-	vmovupd		%ymm13, 0(%r14) 
-
-	addq	%r12, %r11
-	addq	$32, %r13
-	addq	$32, %r14
-	
-	subq	$4, %r10
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dsymv_add_nt_4_lib4, .-inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm4 <- [z4a z4b z4c z4d]
-// ymm5 <- [z5a z5b z5c z5d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- beta
-// r12  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- [z4 z5 xx xx]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_AB_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_ab_6_lib4, @function
-inner_blend_t_scale_ab_6_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_ab_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_ab_6_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_ab_6_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd			%ymm1, %ymm0, %ymm0
-	vhaddpd			%ymm3, %ymm2, %ymm2
-	vhaddpd			%ymm5, %ymm4, %ymm4
-//	vhaddpd			%ymm3, %ymm2, %ymm2
-	vperm2f128		$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128		$0x13, %ymm0, %ymm2, %ymm0
-	vextractf128	$0x1, %ymm4, %xmm5
-	vaddpd			%ymm0, %ymm1, %ymm0
-	vaddpd			%ymm4, %ymm5, %ymm4
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd			%ymm0, %ymm15, %ymm0
-	vmulpd			%ymm4, %ymm15, %ymm1
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm15
-	vmovupd			0(%r12), %ymm14
-	vmovupd			32(%r12), %ymm13
-	vfmadd231pd		%ymm15, %ymm14, %ymm0
-	vfmadd231pd		%ymm15, %ymm13, %ymm1
-	
-	vxorpd			%ymm15, %ymm15, %ymm15
-	vblendpd		$0x3, %ymm1, %ymm15, %ymm1
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_ab_6_lib4, .-inner_blend_t_scale_ab_6_lib4
-#endif
-#endif
-
-
-
-
-
-#if 0
-
-//TODO
-// common inner routine with file scope
-//
-// blend for ta==t, scale for generic alpha and beta=1.0
-//
-// input arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0a z0b z0c z0d]
-// ymm1 <- [z1a z1b z1c z1d]
-// ymm2 <- [z2a z2b z2c z2d]
-// ymm3 <- [z3a z3b z3c z3d]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- alpha
-// r11  <- y
-// ymm0 <- [z0 z1 z2 z3]
-// ymm1 <- dirty
-// ymm2 <- dirty
-// ymm3 <- dirty
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_t_scale_a1_4_lib4, @function
-inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_t_scale_a1_4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_t_scale_a1_4_lib4; .scl 2; .type 32; .endef
-inner_blend_t_scale_a1_4_lib4:
-#endif
-#endif
-
-	// reduction
-	vhaddpd	%ymm1, %ymm0, %ymm0
-	vhaddpd	%ymm3, %ymm2, %ymm2
-	vperm2f128	$0x2, %ymm0, %ymm2, %ymm1
-	vperm2f128	$0x13, %ymm0, %ymm2, %ymm0
-	vaddpd	%ymm0, %ymm1, %ymm0
-
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-	vmulpd	%ymm0, %ymm15, %ymm0
-
-	// beta
-	vmovupd		0(%r11), %ymm14
-	vaddpd		%ymm0, %ymm14, %ymm0
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-	
-#if defined(OS_LINUX)
-	.size	inner_blend_t_scale_a1_4_lib4, .-inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-#endif
-
-
-
-
-// common inner routine with file scope
-//
-// store 
-//
-// input arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-//
-// output arguments:
-// r10  <- z
-// ymm0 <- [z0 z1 z2 z3]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_6_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_6_lib4, @function
-inner_store_6_lib4:
-#elif defined(OS_MAC)
-_inner_store_6_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_6_lib4; .scl 2; .type 32; .endef
-inner_store_6_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0, 0(%r10)
-	vmovupd %xmm1, 32(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_6_lib4, .-inner_store_6_lib4
-#endif
-#endif
-
-
-
-
-
-//                             rdi    rsi              rdx              rcx        r8       r9           rsp+8        rsp+16          rsp+24       rsp+32       rsp+40
-// void kernel_dgemv_nt_6_lib4(int k, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemv_nt_6_lib4
-	.type kernel_dgemv_nt_6_lib4, @function
-kernel_dgemv_nt_6_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemv_nt_6_lib4
-_kernel_dgemv_nt_6_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemv_nt_6_lib4
-	.def kernel_dgemv_nt_6_lib4; .scl 2; .type 32; .endef
-kernel_dgemv_nt_6_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha_n
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG6, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-	vbroadcastsd 32(%r10), %ymm10
-	vmulpd		%ymm15, %ymm10, %ymm10
-	vbroadcastsd 40(%r10), %ymm11
-	vmulpd		%ymm15, %ymm11, %ymm11
-
-
-	// inner kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // A
-	movq	ARG5, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG7, %r13  // x_t
-	movq	ARG10, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_6_lib4
-#endif
-#endif
-
-
-	// inner blend n scale ab
-
-	movq	ARG3, %r10 // alpha_t
-	movq	ARG8, %r11   // beta_t
-	movq	ARG9, %r12   // y_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_AB_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_ab_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_ab_6_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG11, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_6_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_6_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_6_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemv_nt_6_lib4, .-kernel_dgemv_nt_6_lib4
-#endif
-
-
-
-
-
-#if 0
-// TODO
-//                            rdi    rsi            rdx        rcx      r8           r9           rsp+8        rsp+16 
-// void kernel_dsymv_l_4_lib4(int k, double *alpha, double *A, int sda, double *x_n, double *x_t, double *z_n, double *z_t);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsymv_l_4_lib4
-	.type kernel_dsymv_l_4_lib4, @function
-kernel_dsymv_l_4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsymv_l_4_lib4
-_kernel_dsymv_l_4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsymv_l_4_lib4
-	.def kernel_dsymv_l_4_lib4; .scl 2; .type 32; .endef
-kernel_dsymv_l_4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers y_t
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-	// initialize x_n
-	movq	ARG2, %r10 // alpha
-	vbroadcastsd 0(%r10), %ymm15
-
-	movq	ARG5, %r10 // x_n
-
-	vbroadcastsd 0(%r10), %ymm6
-	vmulpd		%ymm15, %ymm6, %ymm6
-	vbroadcastsd 8(%r10), %ymm7
-	vmulpd		%ymm15, %ymm7, %ymm7
-	vbroadcastsd 16(%r10), %ymm8
-	vmulpd		%ymm15, %ymm8, %ymm8
-	vbroadcastsd 24(%r10), %ymm9
-	vmulpd		%ymm15, %ymm9, %ymm9
-
-
-	// inner edge dsyrk & kernel dgemv nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-//	movslq	%r12d, %r12
-	movq	ARG6, %r13  // x_t
-	movq	ARG7, %r14  // z_n
-
-#if MACRO_LEVEL>=2
-	INNER_EDGE_DSYMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dsymv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dsymv_add_nt_4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMV_ADD_NT_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemv_add_nt_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemv_add_nt_4_lib4
-#endif
-#endif
-
-
-	// call inner blend n scale ab
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11   // z_t
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_T_SCALE_A1_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_t_scale_a1_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_t_scale_a1_4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // z_t 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsymv_l_4_lib4, .-kernel_dsymv_l_4_lib4
-#endif
-
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
-
-
-
-
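[Editor's note] The assembly removed above provides kernel_dgemv_nt_6_lib4, which streams a k-by-6 panel of A once and produces two matrix-vector products in that single pass: it accumulates alpha_n*A*x_n into z_n and computes z_t = beta_t*y_t + alpha_t*A^T*x_t for the six columns. A plain column-major scalar reference of that operation (illustrative names; the real kernel walks BLASFEO's panel-major lib4 layout selected by sda) is:

// Scalar reference for the two fused products computed by the deleted
// kernel_dgemv_nt_6_lib4; A is taken as column-major with leading dimension
// lda here instead of the panel-major storage used by the kernel.
void dgemv_nt_6_ref(int k, double alpha_n, double alpha_t,
		const double *A, int lda,
		const double *x_n, const double *x_t,
		double beta_t, const double *y_t,
		double *z_n, double *z_t)
	{
	int ii, jj;
	for(jj=0; jj<6; jj++)
		{
		double acc_t = 0.0;
		for(ii=0; ii<k; ii++)
			{
			z_n[ii] += alpha_n * A[ii+lda*jj] * x_n[jj]; // "n" product: z_n += alpha_n*A*x_n
			acc_t   += A[ii+lda*jj] * x_t[ii];           // "t" product: column j of A^T*x_t
			}
		z_t[jj] = beta_t*y_t[jj] + alpha_t*acc_t;
		}
	}

Fusing the two traversals is the point of such an "nt" kernel: the panel of A is read from memory only once, so both products come at roughly the cost of one.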
diff --git a/third_party/blasfeo/kernel/avx2/kernel_sgemm_16x4_lib8.S b/third_party/blasfeo/kernel/avx2/kernel_sgemm_16x4_lib8.S
deleted file mode 100644
index 857fb11..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_sgemm_16x4_lib8.S
+++ /dev/null
@@ -1,6811 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_16x4_lib8, @function
-inner_kernel_gemm_add_nt_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_16x4_lib8:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			-32(%r11), %ymm10 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			-32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$128, %r13
-
-	// unroll 0
-	vbroadcastss	-32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			-32(%r11), %ymm10 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			-32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$128, %r13
-
-	// unroll 0
-	vbroadcastss	-32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // a
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	0(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vbroadcastss	4(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	subl	$1, %r10d
-	vbroadcastss	8(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	addq	$32, %r11
-	vbroadcastss	12(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(float)
-
-	// preload
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vfmadd231ps		%ymm8, %ymm14, %ymm0
-	vfmadd231ps		%ymm9, %ymm14, %ymm4
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmovaps			32(%r11), %ymm10 // A0
-	vfmadd231ps		%ymm8, %ymm14, %ymm1
-	vfmadd231ps		%ymm9, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-
-	vmovaps			32(%r15), %ymm11 // A1
-	vfmadd231ps		%ymm8, %ymm14, %ymm2
-	vfmadd231ps		%ymm9, %ymm14, %ymm6
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-
-	vfmadd231ps		%ymm8, %ymm14, %ymm3
-	vfmadd231ps		%ymm9, %ymm14, %ymm7
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-
-
-	// unroll 1
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm14, %ymm0
-	vfmadd231ps		%ymm11, %ymm14, %ymm4
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-
-	vmovaps			64(%r11), %ymm8 // A0
-	vfmadd231ps		%ymm10, %ymm14, %ymm1
-	vfmadd231ps		%ymm11, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-
-	vmovaps			64(%r15), %ymm9 // A1
-	vfmadd231ps		%ymm10, %ymm14, %ymm2
-	vfmadd231ps		%ymm11, %ymm14, %ymm6
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-
-	vfmadd231ps		%ymm10, %ymm14, %ymm3
-	vfmadd231ps		%ymm11, %ymm14, %ymm7
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-
-	// unroll 2
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vfmadd231ps		%ymm8, %ymm14, %ymm0
-	vfmadd231ps		%ymm9, %ymm14, %ymm4
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r13
-	vmovaps			96(%r11), %ymm10 // A0
-	vfmadd231ps		%ymm8, %ymm14, %ymm1
-	vfmadd231ps		%ymm9, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r11
-	vmovaps			96(%r15), %ymm11 // A1
-	vfmadd231ps		%ymm8, %ymm14, %ymm2
-	vfmadd231ps		%ymm9, %ymm14, %ymm6
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r15
-	vfmadd231ps		%ymm8, %ymm14, %ymm3
-	vfmadd231ps		%ymm9, %ymm14, %ymm7
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-
-
-	// unroll 3
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm14, %ymm0
-	vfmadd231ps		%ymm11, %ymm14, %ymm4
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-
-	vmovaps			0(%r11), %ymm8 // A0
-	vfmadd231ps		%ymm10, %ymm14, %ymm1
-	vfmadd231ps		%ymm11, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-
-	vmovaps			0(%r15), %ymm9 // A1
-	vfmadd231ps		%ymm10, %ymm14, %ymm2
-	vfmadd231ps		%ymm11, %ymm14, %ymm6
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-
-	vfmadd231ps		%ymm10, %ymm14, %ymm3
-	vfmadd231ps		%ymm11, %ymm14, %ymm7
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastf128	32(%r13), %ymm13 // B
-	vfmadd231ps		%ymm8, %ymm14, %ymm0
-	vfmadd231ps		%ymm9, %ymm14, %ymm4
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmovaps			32(%r11), %ymm10 // A0
-	vfmadd231ps		%ymm8, %ymm14, %ymm1
-	vfmadd231ps		%ymm9, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-
-	vmovaps			32(%r15), %ymm11 // A1
-	vfmadd231ps		%ymm8, %ymm14, %ymm2
-	vfmadd231ps		%ymm9, %ymm14, %ymm6
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-
-	vfmadd231ps		%ymm8, %ymm14, %ymm3
-	vfmadd231ps		%ymm9, %ymm14, %ymm7
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-
-
-	// unroll 1
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm14, %ymm0
-	vfmadd231ps		%ymm11, %ymm14, %ymm4
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-
-	vmovaps			64(%r11), %ymm8 // A0
-	vfmadd231ps		%ymm10, %ymm14, %ymm1
-	vfmadd231ps		%ymm11, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-
-	vmovaps			64(%r15), %ymm9 // A1
-	vfmadd231ps		%ymm10, %ymm14, %ymm2
-	vfmadd231ps		%ymm11, %ymm14, %ymm6
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-
-	vfmadd231ps		%ymm10, %ymm14, %ymm3
-	vfmadd231ps		%ymm11, %ymm14, %ymm7
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-
-	// unroll 2
-	vbroadcastf128	96(%r13), %ymm13 // B
-	vfmadd231ps		%ymm8, %ymm14, %ymm0
-	vfmadd231ps		%ymm9, %ymm14, %ymm4
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r13
-	vmovaps			96(%r11), %ymm10 // A0
-	vfmadd231ps		%ymm8, %ymm14, %ymm1
-	vfmadd231ps		%ymm9, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r11
-	vmovaps			96(%r15), %ymm11 // A1
-	vfmadd231ps		%ymm8, %ymm14, %ymm2
-	vfmadd231ps		%ymm9, %ymm14, %ymm6
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-
-	addq	$128, %r15
-	vfmadd231ps		%ymm8, %ymm14, %ymm3
-	vfmadd231ps		%ymm9, %ymm14, %ymm7
-	vshufps			$0x00, %ymm13, %ymm13, %ymm14
-
-
-	// unroll 3
-//	vbroadcastf128	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm14, %ymm0
-	vfmadd231ps		%ymm11, %ymm14, %ymm4
-	vshufps			$0x55, %ymm13, %ymm13, %ymm14
-
-//	vmovaps			0(%r11), %ymm8 // A0
-	vfmadd231ps		%ymm10, %ymm14, %ymm1
-	vfmadd231ps		%ymm11, %ymm14, %ymm5
-	vshufps			$0xaa, %ymm13, %ymm13, %ymm14
-
-//	vmovaps			0(%r15), %ymm9 // A1
-	vfmadd231ps		%ymm10, %ymm14, %ymm2
-	vfmadd231ps		%ymm11, %ymm14, %ymm6
-	vshufps			$0xff, %ymm13, %ymm13, %ymm14
-
-	vfmadd231ps		%ymm10, %ymm14, %ymm3
-	vfmadd231ps		%ymm11, %ymm14, %ymm7
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm8 // A0
-	vmovaps			0(%r15), %ymm9 // A1
-	vshufps			$0x00, %ymm12, %ymm12, %ymm14
-	vfmadd231ps		%ymm8, %ymm14, %ymm0
-	vfmadd231ps		%ymm9, %ymm14, %ymm4
-
-	vshufps			$0x55, %ymm12, %ymm12, %ymm14
-	vfmadd231ps		%ymm8, %ymm14, %ymm1
-	vfmadd231ps		%ymm9, %ymm14, %ymm5
-
-	vshufps			$0xaa, %ymm12, %ymm12, %ymm14
-	vfmadd231ps		%ymm8, %ymm14, %ymm2
-	vfmadd231ps		%ymm9, %ymm14, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r13
-	addq	$32, %r15
-
-	vshufps			$0xff, %ymm12, %ymm12, %ymm14
-	vfmadd231ps		%ymm8, %ymm14, %ymm3
-	vfmadd231ps		%ymm9, %ymm14, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_16x4_lib8, .-inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_16x4_lib8, @function
-inner_kernel_gemm_sub_nt_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_16x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm0
-	vfnmadd231ps	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm1
-	vfnmadd231ps	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm2
-	vfnmadd231ps	%ymm11, %ymm12, %ymm6
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm3
-	vfnmadd231ps	%ymm11, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovapd			-32(%r11), %ymm10 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vmovapd			-32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	addq	$128, %r13
-
-	// unroll 0
-	vbroadcastss	-32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm0
-	vfnmadd231ps	%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm1
-	vfnmadd231ps	%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm2
-	vfnmadd231ps	%ymm11, %ymm12, %ymm6
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm3
-	vfnmadd231ps	%ymm11, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm0
-	vfnmadd231ps	%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm1
-	vfnmadd231ps	%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm2
-	vfnmadd231ps	%ymm11, %ymm12, %ymm6
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm3
-	vfnmadd231ps	%ymm11, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovapd			-32(%r11), %ymm10 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vmovapd			-32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	addq	$128, %r13
-
-	// unroll 0
-	vbroadcastss	-32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm0
-	vfnmadd231ps	%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm1
-	vfnmadd231ps	%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm2
-	vfnmadd231ps	%ymm11, %ymm12, %ymm6
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm10, %ymm12, %ymm3
-	vfnmadd231ps	%ymm11, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // a
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	0(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vbroadcastss	4(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	subl	$1, %r10d
-	vbroadcastss	8(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	addq	$32, %r11
-	vbroadcastss	12(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_16x4_lib8, .-inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- 4*sda*sizeof(double)
-// r13   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r14   <- 4*sdb*sizeof(double)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_16x4_lib8, @function
-inner_kernel_gemm_add_nn_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_16x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_16x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-
-	cmpl	$8, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r13, %r14, 1) // software prefetch
-	prefetcht0	64(%r13, %r14, 1) // software prefetch
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	subl	$8, %r10d
-
-	// unroll 1
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	100(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	104(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-
-	// unroll 3
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			128(%r11), %ymm13 // A
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			128(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	108(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 4
-	vbroadcastss	16(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			160(%r11), %ymm13 // A
-	vbroadcastss	48(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			160(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	80(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	112(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 5
-	vbroadcastss	20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			192(%r11), %ymm13 // A
-	vbroadcastss	52(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			192(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	84(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	116(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 6
-	vbroadcastss	24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			224(%r11), %ymm10 // A
-	vbroadcastss	56(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			224(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	88(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	120(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$256, %r11
-
-	// unroll 7
-	vbroadcastss	28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	60(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	92(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	124(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-	addq	%r14, %r13
-
-	cmpl	$8, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean8-up
-	
-	cmpl	$7, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			32(%r11), %ymm10 // A
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			32(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	subl	$8, %r10d
-
-	// unroll 1
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	100(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 2
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			96(%r11), %ymm10 // A
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			96(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	104(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-
-	// unroll 3
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			128(%r11), %ymm13 // A
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			128(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	108(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 4
-	vbroadcastss	16(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			160(%r11), %ymm10 // A
-	vbroadcastss	48(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			160(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	80(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	112(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-
-	// unroll 5
-	vbroadcastss	20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-	vmovapd			192(%r11), %ymm13 // A
-	vbroadcastss	52(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-	vmovapd			192(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	84(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	116(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-
-	// unroll 6
-	vbroadcastss	24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovapd			224(%r11), %ymm10 // A
-	vbroadcastss	56(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vmovapd			224(%r11, %r12, 1), %ymm11 // A
-	vbroadcastss	88(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	120(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$256, %r11
-
-	// unroll 7
-	vbroadcastss	28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm0
-	vfmadd231ps		%ymm11, %ymm12, %ymm4
-//	vmovapd			0(%r11), %ymm13 // A
-	vbroadcastss	60(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm1
-	vfmadd231ps		%ymm11, %ymm12, %ymm5
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vbroadcastss	92(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm2
-	vfmadd231ps		%ymm11, %ymm12, %ymm6
-	vbroadcastss	124(%r13), %ymm12 // B
-	vfmadd231ps		%ymm10, %ymm12, %ymm3
-	vfmadd231ps		%ymm11, %ymm12, %ymm7
-	addq	%r14, %r13
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_16x4_lib8, .-inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-
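-// For reference, a plain C sketch (illustration only, not part of BLASFEO's
-// API) of the accumulation the NN routine above performs, assuming the lib8
-// panel-major layout: 8-row panels, element (i,j) of a panel at float offset
-// i + 8*j, consecutive row panels 8*sda (resp. 8*sdb) floats apart.
-//
-//   static void ref_gemm_add_nn_16x4(int k, const float *A, int sda,
-//                                    const float *B, int sdb, float D[16][4])
-//   {
-//       for (int kk = 0; kk < k; kk++)
-//           for (int j = 0; j < 4; j++)
-//               for (int i = 0; i < 16; i++)
-//                   D[i][j] += A[(i % 8) + 8 * kk + (i / 8) * 8 * sda]
-//                            * B[(kk % 8) + 8 * j + (kk / 8) * 8 * sdb];
-//   }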
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_16x4_lib8, @function
-inner_edge_gemm_add_nn_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_16x4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_16x4_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %ebx
-	subl			%r15d, %ebx // 8-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,8-offsetB)
-
-	movl			%r15d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r13 // B+offsetB*sizeof(float)
-
-1:
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r13 // B+1*sizeof(float)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_16x4_lib8, .-inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-
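-// For reference, the alignment step above in C (illustration only; the panel
-// layout is as in the sketch after the main NN routine, offB is the column
-// offset into B's current 8-row panel):
-//
-//   int kend = 8 - offB < k ? 8 - offB : k;  // B columns left in this panel
-//   for (int kk = 0; kk < kend; kk++)
-//       for (int j = 0; j < 4; j++)
-//           for (int i = 0; i < 16; i++)
-//               D[i][j] += A[(i % 8) + 8 * kk + (i / 8) * 8 * sda]
-//                        * B[offB + kk + 8 * j];
-//   // if k is not exhausted, B then advances to the start of its next panel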
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- bs*sda*sizeof(double)
-// r13   <- B-offB+bs*sdb*sizeof(double)
-// r14   <- bs*sdb*sizeof(double)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trmm_nn_rl_16x4_lib8, @function
-inner_edge_trmm_nn_rl_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trmm_nn_rl_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trmm_nn_rl_16x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trmm_nn_rl_16x4_lib8:
-#endif
-#endif
-	
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	movl		%r15d, %eax
-	sall		$2, %eax // offsetB*sizeof(float)
-	movq		%r13, %rbx // B
-	addq		%rax, %rbx // B+offsetB*sizeof(float)
-
-
-	cmpl	$4, %r15d
-	jg		1f
-
-	// offB==0, 1, 2, 3, 4
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	8(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	40(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-	vbroadcastss	72(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm2
-	vfmadd231ps		%ymm9, %ymm12, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$5, %r15d
-	jg		1f
-
-	// offB==5
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	8(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	40(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-	vbroadcastss	72(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm2
-	vfmadd231ps		%ymm9, %ymm12, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movl		$0, %r15d // offsetB=0
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$6, %r15d
-	jg		1f
-
-	// offB==6
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	32(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-	vbroadcastss	64(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm2
-	vfmadd231ps		%ymm9, %ymm12, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-//	cmpl	$7, %r15d
-//	jg		0f
-
-	// offB==7
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	32(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vmovaps			0(%r11, %r12, 1), %ymm9
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm0
-	vfmadd231ps		%ymm9, %ymm12, %ymm4
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm1
-	vfmadd231ps		%ymm9, %ymm12, %ymm5
-	vbroadcastss	68(%rbx), %ymm12
-	vfmadd231ps		%ymm8, %ymm12, %ymm2
-	vfmadd231ps		%ymm9, %ymm12, %ymm6
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-//	jmp			0f // end
-
-
-	// end
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trmm_nn_rl_16x4_lib8, .-inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-
-
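-// For reference, a rough C sketch of the lower-triangular edge above
-// (illustration only; the panel-wrap bookkeeping for offB = 5..7 is omitted):
-// step kk of the triangular top of B contributes to columns 0..kk only.
-//
-//   for (int kk = 0; kk < 3 && kk < k; kk++)
-//       for (int j = 0; j <= kk; j++)
-//           for (int i = 0; i < 16; i++)
-//               D[i][j] += A[(i % 8) + 8 * kk + (i / 8) * 8 * sda]
-//                        * B[offB + kk + 8 * j];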
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_16x4_vs_lib8, @function
-inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_16x4_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastss	4(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vbroadcastss	8(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vbroadcastss	12(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastss	40(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vbroadcastss	44(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastss	76(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_16x4_vs_lib8, .-inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-
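-// For reference, the right-lower-transposed solve above in C (illustration
-// only; X is the 16x4 tile held in ymm0..ymm7, D is the 4x4 lower-triangular
-// factor addressed through r10 in panel-major form, inv_diag_D its
-// reciprocal diagonal):
-//
-//   for (int j = 0; j < 4; j++) {
-//       for (int i = 0; i < 16; i++)
-//           X[i][j] *= inv_diag_D[j];
-//       if (j + 1 >= kn)                      // vs variant: stop early
-//           break;
-//       for (int jj = j + 1; jj < 4; jj++)
-//           for (int i = 0; i < 16; i++)
-//               X[i][jj] -= X[i][j] * D[jj + 8 * j];
-//   }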
-
-
-// common inner routine with file scope
-//
-// cholesky factorization gen
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_16x4_vs_lib8, @function
-inner_edge_potrf_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_16x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss			%xmm0, %xmm0, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-
-
-	vpermilps		$0x55, %xmm1, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-
-
-	vpermilps		$0xaa, %xmm2, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-
-
-	vpermilps		$0xff, %xmm3, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-
-	jmp		0f
-
-
-1:
-	vxorps			%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_16x4_vs_lib8, .-inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-
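-// For reference, the 4x4 Cholesky step above in C (illustration only; D is
-// the 16x4 tile in ymm0..ymm7 with its diagonal block in rows 0..3, and the
-// reciprocals of the pivots go to inv_diag_E, i.e. r10):
-//
-//   for (int j = 0; j < 4; j++) {
-//       float d = D[j][j];
-//       float inv = d > 0.0f ? 1.0f / sqrtf(d) : 0.0f;  // branches 1/3/5/7
-//       inv_diag_E[j] = inv;
-//       for (int i = 0; i < 16; i++)
-//           D[i][j] *= inv;
-//       if (j + 1 >= kn)
-//           break;
-//       for (int jj = j + 1; jj < 4; jj++)
-//           for (int i = 0; i < 16; i++)
-//               D[i][jj] -= D[i][j] * D[jj][j];
-//   }
-//
-// The 12x4 variant below is the same except that its diagonal block sits in
-// rows 4..7 of the first panel (hence the vextractf128 $0x1 selects).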
-
-
-// common inner routine with file scope
-//
-// cholesky factorization gen
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_12x4_vs_lib8, @function
-inner_edge_potrf_12x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_12x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_12x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vextractf128	$0x1, %ymm0, %xmm13
-//	vpermilps		$0x00, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm11
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm1, %xmm13
-	vpermilps		$0x55, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm11
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vpermilps		$0xaa, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm11
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilps		$0xff, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-
-	jmp		0f
-
-
-1:
-	vxorps			%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_12x4_vs_lib8, .-inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_16x4_lib8, @function
-inner_scale_ab_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_16x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_16x4_lib8, .-inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-
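-// For reference, the scaling above in C (illustration only; acc is the 16x4
-// tile in ymm0..ymm7 and C is stored in 8-row panels 8*sdc floats apart):
-//
-//   for (int j = 0; j < 4; j++)
-//       for (int i = 0; i < 16; i++) {
-//           acc[i][j] *= alpha;
-//           if (beta != 0.0f)  // beta == 0: C is never read, as above
-//               acc[i][j] += beta * C[(i % 8) + 8 * j + (i / 8) * 8 * sdc];
-//       }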
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_16x4_gen_lib8, @function
-inner_scale_ab_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_16x4_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	movq	%r13, %rax // C1 <- C0
-	addq	%r14, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm3
-
-	vmovaps		0(%rax), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm4
-	vmovaps		32(%rax), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm5
-	vmovaps		64(%rax), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm6
-	vmovaps		96(%rax), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_16x4_gen_lib8, .-inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_16x4_lib8, @function
-inner_scale_a0_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_a0_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_a0_16x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_16x4_lib8, .-inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_16x4_lib8, @function
-inner_scale_11_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_16x4_lib8; .scl 2; .type 32; .endef
-inner_scale_11_16x4_lib8:
-#endif
-#endif
-	
-	movq	%r10, %r15 // C1 <- C0
-	addq	%r11, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_16x4_lib8, .-inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_16x4_gen_lib8, @function
-inner_scale_11_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_11_16x4_gen_lib8:
-#endif
-#endif
-	
-	movq	%r11, %rax // C1 <- C0
-	addq	%r12, %rax // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	vmovaps		0(%rax), %ymm14
-	vaddps		%ymm4, %ymm14, %ymm4
-	vmovaps		32(%rax), %ymm14
-	vaddps		%ymm5, %ymm14, %ymm5
-	vmovaps		64(%rax), %ymm14
-	vaddps		%ymm6, %ymm14, %ymm6
-	vmovaps		96(%rax), %ymm14
-	vaddps		%ymm7, %ymm14, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r12, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_16x4_gen_lib8, .-inner_scale_11_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_lib8, @function
-inner_store_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_lib8:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r15)
-	vmovaps 	%ymm5, 32(%r15)
-	vmovaps 	%ymm6, 64(%r15)
-	vmovaps 	%ymm7, 96(%r15)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_lib8, .-inner_store_16x4_lib8
-#endif
-#endif
-
-
-
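-// For reference, the store above in C (illustration only; same panel layout,
-// with D's two row panels 8*sdd floats apart):
-//
-//   for (int j = 0; j < 4; j++)
-//       for (int i = 0; i < 16; i++)
-//           D[(i % 8) + 8 * j + (i / 8) * 8 * sdd] = acc[i][j];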
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_vs_lib8, @function
-inner_store_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			7f // end
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			7f // end
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			7f // end
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-	jmp		0f
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_vs_lib8, .-inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
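-// For reference, the masked store above in C (illustration only, assuming the
-// .LC01 constant encodes rows 8..15 so that lane i of the second panel is
-// written exactly when global row 8+i is below km; kn >= 1 is assumed):
-//
-//   for (int j = 0; j < 4 && j < kn; j++) {
-//       for (int i = 0; i < 8; i++)             // rows 0..7: full store
-//           D[i + 8 * j] = acc[i][j];
-//       for (int i = 8; i < 16 && i < km; i++)  // rows 8..15: masked by km
-//           D[(i - 8) + 8 * j + 8 * sdd] = acc[i][j];
-//   }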
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_16x4_gen_lib8, @function
-inner_store_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_16x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute D1
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(float)
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	cmpl		$2, %r15d
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%rbx)
-	jl			7f // end
-	cmpl		$3, %r15d
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%rbx)
-	jl			7f // end
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%rbx)
-	je			7f // end
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%rbx)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbp // D1
-	addq	%r12, %rbp // D2 <- D1 + 4*sdd*sizeof(float)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_16x4_gen_lib8, .-inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_lib8, @function
-inner_store_l_16x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_lib8:
-#endif
-#endif
-	
-	vmovaps		32(%r10), %ymm12
-	vmovaps		64(%r10), %ymm13
-	vmovaps		96(%r10), %ymm14
-
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vblendps	$0x03, %ymm13, %ymm2, %ymm2
-	vblendps	$0x07, %ymm14, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_lib8, .-inner_store_l_16x4_lib8
-#endif
-#endif
-
-
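-// For reference, the lower-triangular store above in C (illustration only):
-// the blends keep the existing strictly-upper entries of the top 4x4 block,
-// everything else is written in full.
-//
-//   for (int j = 0; j < 4; j++)
-//       for (int i = 0; i < 16; i++)
-//           if (i >= 8 || i >= j)  // first panel: only rows i >= j
-//               D[(i % 8) + 8 * j + (i / 8) * 8 * sdd] = acc[i][j];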
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_vs_lib8, @function
-inner_store_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_vs_lib8, .-inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_16X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_16x4_gen_lib8, @function
-inner_store_l_16x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_16x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_16x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%r11, %r12, 1)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_16x4_gen_lib8, .-inner_store_l_16x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_lib8, @function
-inner_store_l_12x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_lib8:
-#endif
-#endif
-	
-	vmovaps		0(%r10), %ymm12
-	vmovaps		32(%r10), %ymm13
-	vmovaps		64(%r10), %ymm14
-	vmovaps		96(%r10), %ymm15
-
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vblendps	$0x1f, %ymm13, %ymm1, %ymm1
-	vblendps	$0x3f, %ymm14, %ymm2, %ymm2
-	vblendps	$0x7f, %ymm15, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_lib8, .-inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-
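-// For reference, as in the 16x4 case but for the 12x4 tile (illustration
-// only): the 4x4 diagonal block sits in rows 4..7 of the first panel, so row
-// i of column j is written only when i >= 4 + j; the second panel is written
-// in full.
-//
-//   for (int j = 0; j < 4; j++)
-//       for (int i = 0; i < 16; i++)
-//           if (i >= 8 || i >= 4 + j)
-//               D[(i % 8) + 8 * j + (i / 8) * 8 * sdd] = acc[i][j];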
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_vs_lib8, @function
-inner_store_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		0(%r10), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r10)
-	vmaskmovps	%ymm4, %ymm15, 0(%r10, %r11, 1)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmaskmovps	%ymm5, %ymm15, 32(%r10, %r11, 1)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmaskmovps	%ymm6, %ymm15, 64(%r10, %r11, 1)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmaskmovps	%ymm7, %ymm15, 96(%r10, %r11, 1)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_vs_lib8, .-inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_12X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_12x4_gen_lib8, @function
-inner_store_l_12x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_12x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_12x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_12x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC01(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC01(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmovaps		0(%r11), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmaskmovps	%ymm4, %ymm15,  0(%r11, %r12, 1)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmaskmovps	%ymm5, %ymm15, 32(%r11, %r12, 1)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmaskmovps	%ymm6, %ymm15, 64(%r11, %r12, 1)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmaskmovps	%ymm7, %ymm15, 96(%r11, %r12, 1)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_12x4_gen_lib8, .-inner_store_l_12x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                                1      2             3         4        5         6            7         8        9         10
-// void kernel_sgemm_nt_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_lib8
-	.type kernel_sgemm_nt_16x4_lib8, @function
-kernel_sgemm_nt_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_lib8
-_kernel_sgemm_nt_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_16x4_lib8
-	.def kernel_sgemm_nt_16x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_lib8, .-kernel_sgemm_nt_16x4_lib8
-#endif
-
-
-
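// A plain-C reference model of what kernel_sgemm_nt_16x4_lib8 computes
// (illustrative only, not the optimized kernel), under the usual lib8 assumption
// that operands are stored in 8-row panels, with element (i, j) of a block with
// panel stride sdx at X[(i/8)*8*sdx + j*8 + i%8]. It forms the 16x4 tile
// D = alpha * A * B^T + beta * C, with A 16 x k and B 4 x k (B fits in one
// panel, hence no sdb argument in the signature above); alpha and beta are
// taken by value here for brevity, while the kernel receives pointers.

static void ref_sgemm_nt_16x4_lib8(int k, float alpha, const float *A, int sda,
                                   const float *B, float beta, const float *C,
                                   int sdc, float *D, int sdd)
	{
	for (int j = 0; j < 4; j++)
		{
		for (int i = 0; i < 16; i++)
			{
			float acc = 0.0f;
			for (int l = 0; l < k; l++)
				acc += A[(i / 8) * 8 * sda + l * 8 + i % 8] * B[l * 8 + j];
			float c = C[(i / 8) * 8 * sdc + j * 8 + i % 8];
			D[(i / 8) * 8 * sdd + j * 8 + i % 8] = alpha * acc + beta * c;
			}
		}
	}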
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_sgemm_nt_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_vs_lib8
-	.type kernel_sgemm_nt_16x4_vs_lib8, @function
-kernel_sgemm_nt_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_vs_lib8
-_kernel_sgemm_nt_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_16x4_vs_lib8
-	.def kernel_sgemm_nt_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_vs_lib8, .-kernel_sgemm_nt_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9           rsp+8        rsp+16    rsp+24   rsp+32       rsp+40    rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_sgemm_nt_16x4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_16x4_gen_lib8
-	.type kernel_sgemm_nt_16x4_gen_lib8, @function
-kernel_sgemm_nt_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_16x4_gen_lib8
-_kernel_sgemm_nt_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_16x4_gen_lib8
-	.def kernel_sgemm_nt_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_16x4_gen_lib8, .-kernel_sgemm_nt_16x4_gen_lib8
-#endif
-
-
-
-
-
-//                                1      2             3         4        5            6         7        8            9         10       11        12
-// void kernel_sgemm_nn_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_lib8
-	.type kernel_sgemm_nn_16x4_lib8, @function
-kernel_sgemm_nn_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_lib8
-_kernel_sgemm_nn_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_lib8
-	.def kernel_sgemm_nn_16x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_lib8, .-kernel_sgemm_nn_16x4_lib8
-#endif
-
-
-
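// In the nn kernels B is also panel-major and may start mid-panel: offsetB
// appears to be the row inside B's first 8-row panel at which element (0, j)
// sits, and inner_edge_gemm_add_nn_* consumes that partial panel before the
// unrolled loop takes over. Illustrative indexing only, under the same lib8
// layout assumption as the sketch above:

/* Element (l, j) of the k x 4 operand B, whose row 0 is stored at row offsetB
 * of B's first panel; sdb is B's panel stride. */
static inline float b_nn_element(const float *B, int sdb, int offsetB, int l, int j)
	{
	int r = l + offsetB;
	return B[(r / 8) * 8 * sdb + j * 8 + r % 8];
	}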
-
-
-//                                   1      2             3         4        5            6         7        8            9         10       11        12       13      14
-// void kernel_sgemm_nn_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_vs_lib8
-	.type kernel_sgemm_nn_16x4_vs_lib8, @function
-kernel_sgemm_nn_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_vs_lib8
-_kernel_sgemm_nn_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_vs_lib8
-	.def kernel_sgemm_nn_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG13, %r12 // km
-	movq	ARG14, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_vs_lib8, .-kernel_sgemm_nn_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9        rsp+8    rsp+16       rsp+24    rsp+32    rsp+40   rsp+48    rsp+56    rsp+64   rsp+72  rsp+80  rsp+88  rsp+96
-// void kernel_sgemm_nn_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_16x4_gen_lib8
-	.type kernel_sgemm_nn_16x4_gen_lib8, @function
-kernel_sgemm_nn_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_16x4_gen_lib8
-_kernel_sgemm_nn_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_16x4_gen_lib8
-	.def kernel_sgemm_nn_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // offsetC
-	movq	ARG10, %r13 // C
-	movq	ARG11, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG12, %r10 // offsetD
-	movq	ARG13, %r11 // D
-	movq	ARG14, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG15, %r13 // m0
-	movq	ARG16, %r14 // m1
-	movq	ARG17, %r15 // n0
-	movq	ARG18, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_16x4_gen_lib8, .-kernel_sgemm_nn_16x4_gen_lib8
-#endif
-
-
-
-
-
-//                                       rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32 
-// void kernel_strsm_nt_rl_inv_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_16x4_lib8
-	.type kernel_strsm_nt_rl_inv_16x4_lib8, @function
-kernel_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_16x4_lib8
-_kernel_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_16x4_lib8
-	.def kernel_strsm_nt_rl_inv_16x4_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_16x4_lib8, .-kernel_strsm_nt_rl_inv_16x4_lib8
-#endif
-
-
-
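// The solve step above (inner_edge_trsm_rlt_inv_16x4_vs_lib8) is a right-side
// solve against the transpose of the 4x4 lower-triangular block E, using the
// precomputed reciprocals in inv_diag_E instead of divisions. A scalar sketch
// (illustrative only) of what each of the 16 accumulator rows goes through,
// assuming E is panel-major so that E(j, l) sits at E[l*8 + j] for j < 8:

/* Solve x * E^T = c for one length-4 row vector x, with E 4x4 lower triangular
 * and inv_diag_E[j] = 1 / E(j, j). Forward substitution over the columns. */
static void trsm_rlt_inv_row(float x[4], const float c[4],
                             const float *E, const float *inv_diag_E)
	{
	for (int j = 0; j < 4; j++)
		{
		float t = c[j];
		for (int l = 0; l < j; l++)
			t -= x[l] * E[l * 8 + j];   /* E(j, l) */
		x[j] = t * inv_diag_E[j];
		}
	}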
-
-
-//                                          rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32             rsp+40  rsp+48
-// void kernel_strsm_nt_rl_inv_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_16x4_vs_lib8
-	.type kernel_strsm_nt_rl_inv_16x4_vs_lib8, @function
-kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_16x4_vs_lib8
-_kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_16x4_vs_lib8
-	.def kernel_strsm_nt_rl_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // m1 
-	movq	ARG12, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_16x4_vs_lib8, .-kernel_strsm_nt_rl_inv_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                             1       2          3         4          5       6          7         8          9         10       11        12       13        14
-// void kernel_sgemm_strsm_nt_rl_inv_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_16x4_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-_kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_16x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_16x4_lib8, .-kernel_sgemm_strsm_nt_rl_inv_16x4_lib8
-#endif
-
-
-
-
-
-//                                                1       2          3         4          5       6          7         8          9         10       11        12       13        14                 15      16
-// void kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_12x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_12x4_lib8
-	.type kernel_spotrf_nt_l_12x4_lib8, @function
-kernel_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_12x4_lib8
-_kernel_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_12x4_lib8
-	.def kernel_spotrf_nt_l_12x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_12x4_lib8, .-kernel_spotrf_nt_l_12x4_lib8
-#endif
-
-
-
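// After the subtractive gemm and the C load, inner_edge_potrf_*x4_vs_lib8
// factors the 4x4 block of the tile that lies on the global diagonal (an
// unblocked Cholesky over the four accumulator columns), scales the rows below
// it, and records the reciprocal diagonal in inv_diag_D for the trsm kernels
// that follow. A scalar model (illustrative only, plain 2D indexing rather than
// the panel-major layout): d0 marks the row where the diagonal block starts,
// and non-positive pivots are mapped to a zero reciprocal here.

#include <math.h>

static void potrf_edge_mx4(int m, int d0, float T[][4], float *inv_diag_D)
	{
	for (int j = 0; j < 4; j++)
		{
		float piv = T[d0 + j][j];
		float inv = piv > 0.0f ? 1.0f / sqrtf(piv) : 0.0f;
		inv_diag_D[j] = inv;
		for (int i = d0 + j; i < m; i++)          /* scale column j on/below the diagonal */
			T[i][j] *= inv;
		for (int l = j + 1; l < 4; l++)           /* trailing update of the remaining columns */
			for (int i = d0 + l; i < m; i++)
				T[i][l] -= T[i][j] * T[d0 + l][j];
		}
	}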
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_12x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_12x4_vs_lib8
-	.type kernel_spotrf_nt_l_12x4_vs_lib8, @function
-kernel_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_12x4_vs_lib8
-_kernel_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_12x4_vs_lib8
-	.def kernel_spotrf_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_12x4_vs_lib8, .-kernel_spotrf_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_16x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_16x4_lib8
-	.type kernel_spotrf_nt_l_16x4_lib8, @function
-kernel_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_16x4_lib8
-_kernel_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_16x4_lib8
-	.def kernel_spotrf_nt_l_16x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_16x4_lib8, .-kernel_spotrf_nt_l_16x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_16x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_16x4_vs_lib8
-	.type kernel_spotrf_nt_l_16x4_vs_lib8, @function
-kernel_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_16x4_vs_lib8
-_kernel_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_16x4_vs_lib8
-	.def kernel_spotrf_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_16x4_vs_lib8, .-kernel_spotrf_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_12x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_12x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_12x4_lib8
-_kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_12x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_12x4_lib8, .-kernel_ssyrk_spotrf_nt_l_12x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_12x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_16x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_16x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_16x4_lib8
-_kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_16x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_16x4_lib8, .-kernel_ssyrk_spotrf_nt_l_16x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d // 8*sdap*sizeof(float)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d // 8*sdam*sizeof(float)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_16x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_16x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_16x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_16x4_lib8
-	.type kernel_ssyrk_nt_l_16x4_lib8, @function
-kernel_ssyrk_nt_l_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_16x4_lib8
-_kernel_ssyrk_nt_l_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_16x4_lib8
-	.def kernel_ssyrk_nt_l_16x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_16x4_lib8, .-kernel_ssyrk_nt_l_16x4_lib8
-#endif
-
-
-
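// The ssyrk_nt_l kernels reuse the sgemm_nt accumulation and scaling unchanged;
// only the final store differs. inner_store_l_* writes just the diagonal-and-
// below part of the four columns that cross the diagonal, leaving D's strictly
// upper entries untouched (the blend masks in the 12x4 gen store shown further
// up suggest the diagonal block sits at row 0 for the 16x4 kernels and at row 4
// for the 12x4 ones). Rough scalar picture, plain row-major indexing:

static void store_l_mx4(int m, int d0, const float *tile, float *D)
	{
	for (int j = 0; j < 4; j++)
		for (int i = d0 + j; i < m; i++)   /* skip entries above the diagonal in column j */
			D[i * 4 + j] = tile[i * 4 + j];
	}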
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_16x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_16x4_vs_lib8
-	.type kernel_ssyrk_nt_l_16x4_vs_lib8, @function
-kernel_ssyrk_nt_l_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_16x4_vs_lib8
-_kernel_ssyrk_nt_l_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_16x4_vs_lib8
-	.def kernel_ssyrk_nt_l_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_16x4_vs_lib8, .-kernel_ssyrk_nt_l_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_12x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_12x4_lib8
-	.type kernel_ssyrk_nt_l_12x4_lib8, @function
-kernel_ssyrk_nt_l_12x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_12x4_lib8
-_kernel_ssyrk_nt_l_12x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_12x4_lib8
-	.def kernel_ssyrk_nt_l_12x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_12x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_12x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_12x4_lib8, .-kernel_ssyrk_nt_l_12x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_12x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_12x4_vs_lib8
-	.type kernel_ssyrk_nt_l_12x4_vs_lib8, @function
-kernel_ssyrk_nt_l_12x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_12x4_vs_lib8
-_kernel_ssyrk_nt_l_12x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_12x4_vs_lib8
-	.def kernel_ssyrk_nt_l_12x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_12x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps		%ymm0, %ymm0, %ymm0
-	vmovaps		%ymm0, %ymm1
-	vmovaps		%ymm0, %ymm2
-	vmovaps		%ymm0, %ymm3
-	vmovaps		%ymm0, %ymm4
-	vmovaps		%ymm0, %ymm5
-	vmovaps		%ymm0, %ymm6
-	vmovaps		%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_16X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_12X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_12x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_12x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_12x4_vs_lib8, .-kernel_ssyrk_nt_l_12x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8         9
-// void kernel_strmm_nn_rl_16x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_lib8
-	.type kernel_strmm_nn_rl_16x4_lib8, @function
-kernel_strmm_nn_rl_16x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_lib8
-_kernel_strmm_nn_rl_16x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_lib8
-	.def kernel_strmm_nn_rl_16x4_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_lib8, .-kernel_strmm_nn_rl_16x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2             3         4        5            6         7        8         9        10      11
-// void kernel_strmm_nn_rl_16x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_vs_lib8
-	.type kernel_strmm_nn_rl_16x4_vs_lib8, @function
-kernel_strmm_nn_rl_16x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_vs_lib8
-_kernel_strmm_nn_rl_16x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_vs_lib8
-	.def kernel_strmm_nn_rl_16x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG10, %r12 // km
-	movq	ARG11, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_vs_lib8, .-kernel_strmm_nn_rl_16x4_vs_lib8
-#endif
-
-
-
-
-
-//                                       1      2             3         4        5            6         7        8            9         10       11      12      13      14
-// void kernel_strmm_nn_rl_16x4_gen_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_16x4_gen_lib8
-	.type kernel_strmm_nn_rl_16x4_gen_lib8, @function
-kernel_strmm_nn_rl_16x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_16x4_gen_lib8
-_kernel_strmm_nn_rl_16x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_16x4_gen_lib8
-	.def kernel_strmm_nn_rl_16x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_16x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_16x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_16x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_16X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_16x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_16x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // offsetD
-	movq	ARG9, %r11 // D
-	movq	ARG10, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG11, %r13 // m0
-	movq	ARG12, %r14 // m1
-	movq	ARG13, %r15 // n0
-	movq	ARG14, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_16X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_16x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_16x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_16x4_gen_lib8, .-kernel_strmm_nn_rl_16x4_gen_lib8
-#endif
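
The _gen variant above additionally carries offsetD and a window m0/m1/n0/n1, which the store helper it calls (inner_store_16x4_gen_lib8) presumably uses to write back only a sub-block of the 16x4 result at an unaligned row offset. A scalar predicate for whether element (i, j) is written, purely illustrative and not taken from the library:

/* Hypothetical predicate mirroring the m0/m1/n0/n1 parameters in the
 * signature comment above. */
static int gen_store_keeps(int i, int j, int m0, int m1, int n0, int n1) {
    return i >= m0 && i < m1 && j >= n0 && j < n1;
}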
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
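
The four .rodata tables above hold lane indices offset by one half (0.5 through 23.5) plus a sign-flip pattern; tables of this shape are typically compared against a broadcast row or column count to build AVX masks for partial stores, though that use is only implied here. The bit patterns can be checked against the comments with a small C program:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Reinterpret the .long values of .LC00 as IEEE-754 floats; prints
 * 0.5 1.5 2.5 3.5 4.5 5.5 6.5 7.5, matching the comment above.  .LC01,
 * .LC02 and .LC03 decode the same way. */
int main(void) {
    const uint32_t lc00[8] = {1056964608u, 1069547520u, 1075838976u,
                              1080033280u, 1083179008u, 1085276160u,
                              1087373312u, 1089470464u};
    for (int i = 0; i < 8; i++) {
        float f;
        memcpy(&f, &lc00[i], sizeof f);  /* bit-cast without aliasing UB */
        printf("%g ", f);
    }
    putchar('\n');
    return 0;
}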
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_sgemm_24x4_lib8.S b/third_party/blasfeo/kernel/avx2/kernel_sgemm_24x4_lib8.S
deleted file mode 100644
index b3a027f..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_sgemm_24x4_lib8.S
+++ /dev/null
@@ -1,7734 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
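A worked check on the argument macros above (the arithmetic is not in the source comments): under the System V ABI the first six integer arguments arrive in rdi, rsi, rdx, rcx, r8, r9, and the caller's call instruction leaves the return address on top of the stack. After "subq $STACKSIZE, %rsp" with STACKSIZE = 64, that return address sits at 64(%rsp), so the seventh argument lives at 64 + 8 = 72(%rsp), i.e. STACKSIZE + 8(%rsp), exactly as ARG7 is defined. On Windows x64 only four register arguments exist and the caller reserves a 32-byte shadow area, so the fifth argument lands at STACKSIZE + 8 + 32 = STACKSIZE + 40(%rsp), matching ARG5; the prologue there also saves rdi, rsi and xmm6-xmm15 because they are callee-saved in that ABI.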
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+8*k*sizeof(float)
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B+8*k*sizeof(float)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
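Before the assembly, a scalar picture of what this inner routine accumulates, reconstructed from the vmovaps/vbroadcastss addressing below rather than taken from the library: A is held in three 8-row panels with a panel stride of 8*sda floats, B in one 8-row panel of which the first four entries per k-step are used, and the 24x4 partial sums live in ymm0-ymm11.

/* Scalar reference for the 24x4 NT accumulation (no alpha/beta scaling). */
static void ref_gemm_add_nt_24x4_lib8(int k, const float *A, int sda,
                                      const float *B, float acc[24][4]) {
    for (int l = 0; l < k; l++)              /* k-loop */
        for (int j = 0; j < 4; j++)          /* columns of the block */
            for (int i = 0; i < 24; i++)     /* rows, three 8-row panels */
                acc[i][j] += A[(i / 8) * 8 * sda + 8 * l + (i % 8)]
                           * B[8 * l + j];
}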
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_24x4_lib8, @function
-inner_kernel_gemm_add_nt_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_24x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_24x4_lib8:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-	vmovaps 		0(%r11, %r12, 2), %ymm15 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	subl	$4, %r10d
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 1
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 2
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			-32(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			-32(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			-32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 3
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	addq	$128, %r13
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	subl	$4, %r10d
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 1
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 2
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovapd			-32(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovapd			-32(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vmovapd			-32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 3
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	addq	$128, %r13
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // a
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-	vbroadcastss	0(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	4(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	subl	$1, %r10d
-	vbroadcastss	8(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	addq	$32, %r11
-	vbroadcastss	12(%r13), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	movq	%r11, %r15 // A1 <- A0
-	addq	%r12, %r15 // A1 <- A0 + 4*sda*sizeof(float)
-	movq	%r15, %rax // A2 <- A1
-	addq	%r12, %rax // A2 <- A1 + 4*sda*sizeof(float)
-
-	// preload
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%r11), %ymm13 // A0
-	vmovaps			0(%r15), %ymm14 // A1
-	vmovaps			0(%rax), %ymm15 // A2
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			32(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			32(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	32(%r13), %ymm12 // B
-	vmovaps			32(%rax), %ymm15 // A2
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			64(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			64(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vmovaps			64(%rax), %ymm15 // A2
-
-
-	// unroll 2
-	subl	$4, %r10d
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			96(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			96(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	96(%r13), %ymm12 // B
-	vmovaps			96(%rax), %ymm15 // A2
-
-
-	// unroll 3
-	addq	$128, %r13
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	addq	$128, %r11
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	addq	$128, %r15
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	addq	$128, %rax
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			0(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			0(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	0(%r13), %ymm12 // B
-	vmovaps			0(%rax), %ymm15 // A2
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			32(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			32(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	32(%r13), %ymm12 // B
-	vmovaps			32(%rax), %ymm15 // A2
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			64(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			64(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	64(%r13), %ymm12 // B
-	vmovaps			64(%rax), %ymm15 // A2
-
-
-	// unroll 2
-	subl	$4, %r10d
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vmovaps			96(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vmovaps			96(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	vbroadcastf128	96(%r13), %ymm12 // B
-	vmovaps			96(%rax), %ymm15 // A2
-
-
-	// unroll 3
-	addq	$128, %r13
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	addq	$128, %r11
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vpermilps		$0x4e, %ymm12, %ymm12
-
-	addq	$128, %r15
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	vpermilps		$0xb1, %ymm12, %ymm12
-
-	addq	$128, %rax
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-//	vmovaps			0(%r11), %ymm13 // A0
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-//	vmovaps			0(%r15), %ymm14 // A1
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-//	vbroadcastf128	0(%r13), %ymm12 // B
-//	vmovaps			0(%rax), %ymm15 // A2
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	32(%r13), %ymm12 // B
-	vmovaps			32(%r11), %ymm13 // A0
-	vmovaps			32(%r15), %ymm14 // A1
-	vmovaps			32(%rax), %ymm15 // A2
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	subl	$1, %r10d
-
-	vpermilps		$0xb1, %ymm12, %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	addq	$32, %r11
-
-	vpermilps		$0x4e, %ymm12, %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-	addq	$32, %r13
-
-	vpermilps		$0xb1, %ymm12, %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	vfmadd231ps		%ymm15, %ymm12, %ymm11
-	addq	$32, %r15
-
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_24x4_lib8, .-inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+8*k*sizeof(float)
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B+8*k*sizeof(float)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
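The SUB routine below is structurally identical to the ADD kernel above; every vfmadd231ps simply becomes vfnmadd231ps (dst = dst - src1*src2), so in terms of the scalar reference earlier each update reads acc[i][j] -= A[...] * B[...] instead of +=.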
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_24x4_lib8, @function
-inner_kernel_gemm_sub_nt_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_24x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_24x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-	vmovaps 		0(%r11, %r12, 1), %ymm14 // A
-	vmovaps 		0(%r11, %r12, 2), %ymm15 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	subl	$4, %r10d
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 1
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 2
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			-32(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			-32(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			-32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 3
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	addq	$128, %r13
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			0(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	subl	$4, %r10d
-	vbroadcastss	4(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	8(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	12(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			32(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			32(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 1
-	vbroadcastss	32(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	40(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	44(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			64(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			64(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			64(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 2
-	vbroadcastss	64(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	addq	$128, %r11
-	vbroadcastss	68(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	76(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vmovapd			-32(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vmovapd			-32(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	vmovapd			-32(%r11, %r12, 2), %ymm15 // A
-
-	// unroll 3
-	vbroadcastss	96(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	addq	$128, %r13
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	vbroadcastss	-28(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	vbroadcastss	-24(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	vbroadcastss	-20(%r13), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-//	vmovapd			0(%r11), %ymm13 // A
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-//	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-//	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd			0(%r11), %ymm13 // a
-	vmovapd			0(%r11, %r12, 1), %ymm14 // A
-	vmovapd			0(%r11, %r12, 2), %ymm15 // A
-	vbroadcastss	0(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vfnmadd231ps	%ymm15, %ymm12, %ymm8
-	vbroadcastss	4(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vfnmadd231ps	%ymm15, %ymm12, %ymm9
-	subl	$1, %r10d
-	vbroadcastss	8(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vfnmadd231ps	%ymm15, %ymm12, %ymm10
-	addq	$32, %r11
-	vbroadcastss	12(%r13), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	vfnmadd231ps	%ymm15, %ymm12, %ymm11
-	addq	$32, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_24x4_lib8, .-inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B
-// r14   <- 8*sdb*sizeof(float)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+8*k*sizeof(float)
-// r12   <- 8*sda*sizeof(float)
-// r13   <- B+(k/8)*8*sdb*sizeof(float)+(k%8)*sizeof(float)
-// r14   <- 8*sdb*sizeof(float)
-// r15   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
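For the NN kernel, B is read in place from its panel-major storage instead of being transposed: the vbroadcastss offsets below (0, 32, 64, 96 bytes for the four columns, +4 bytes per k-step, +8*sdb*sizeof(float) per 8 k-steps) correspond to the index function sketched here, a hypothetical helper written only to document the layout.

/* Index of B(l, j) (k-index l, column j) in the 8-row panel-major layout
 * implied by the addressing in the loop below. */
static inline int b_index_nn_lib8(int l, int j, int sdb) {
    return (l / 8) * 8 * sdb + 8 * j + (l % 8);
}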
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_24x4_lib8, @function
-inner_kernel_gemm_add_nn_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_24x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_24x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r13, %r14, 1) // software prefetch
-	prefetcht0	64(%r13, %r14, 1) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			0(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A0
-	vmovaps			32(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			32(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	4(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	36(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	68(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	100(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A0
-	vmovaps			64(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			64(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	8(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	40(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	72(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	104(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A0
-	vmovaps			96(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			96(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	12(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	44(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	76(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	108(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A0
-	vmovaps			128(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			128(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	16(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	48(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	80(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	112(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A0
-	vmovaps			160(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			160(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	20(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	52(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	84(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	116(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A0
-	vmovaps			192(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			192(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	24(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	56(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	subl	$8, %r10d
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	88(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vbroadcastss	120(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-
-	// unroll 7
-	vmovaps			224(%r11), %ymm12 // A0
-	vmovaps			224(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			224(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	28(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	60(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	addq	$256, %r11
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	92(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	124(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-	addq	%r14, %r13
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			0(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r13
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_24x4_lib8, .-inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(float)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(float)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(8-offB)
-// r11   <- A+(8-offB)*bs*sizeof(float)
-// r12   <- bs*sda*sizeof(float)
-// r13   <- B-offB+bs*sdb*sizeof(float)
-// r14   <- bs*sdb*sizeof(float)
-// r15   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
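This edge routine consumes at most the 8 - offB entries left in the current, partially skipped B panel, clamped to k; the cmovgl a few lines below implements exactly that min. A scalar equivalent, for reference only:

/* kend = min(k, 8 - offB), as computed with cmovgl in the routine below. */
static inline int kend_lib8(int k, int offB) {
    int kend = 8 - offB;           /* entries left in the current B panel */
    return kend > k ? k : kend;    /* never more than the remaining k */
}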
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_24x4_lib8, @function
-inner_edge_gemm_add_nn_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_24x4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_24x4_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r15d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %ebx
-	subl			%r15d, %ebx // 8-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,8-offsetB)
-
-	movl			%r15d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r13 // B+offsetB*sizeof(float)
-
-1:
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A0
-	vmovaps			0(%r11, %r12, 1), %ymm13 // A1
-	vmovaps			0(%r11, %r12, 2), %ymm14 // A2
-	vbroadcastss	0(%r13), %ymm15 // B[0]
-	vfmadd231ps		%ymm12, %ymm15, %ymm0
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vfmadd231ps		%ymm14, %ymm15, %ymm8
-	vbroadcastss	32(%r13), %ymm15 // B[1]
-	vfmadd231ps		%ymm12, %ymm15, %ymm1
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vfmadd231ps		%ymm14, %ymm15, %ymm9
-	vbroadcastss	64(%r13), %ymm15 // B[2]
-	vfmadd231ps		%ymm12, %ymm15, %ymm2
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vfmadd231ps		%ymm14, %ymm15, %ymm10
-	vbroadcastss	96(%r13), %ymm15 // B[3]
-	vfmadd231ps		%ymm12, %ymm15, %ymm3
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vfmadd231ps		%ymm14, %ymm15, %ymm11
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r13 // B+1*sizeof(float)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r14, %r13
-	subq			$32, %r13 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_24x4_lib8, .-inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- bs*sda*sizeof(float)
-// r13   <- B
-// r14   <- bs*sdb*sizeof(float)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(8-offB)
-// r11   <- A+(8-offB)*bs*sizeof(float)
-// r12   <- bs*sda*sizeof(float)
-// r13   <- B-offB+bs*sdb*sizeof(float)
-// r14   <- bs*sdb*sizeof(float)
-// r15   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRMM_NN_RL_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trmm_nn_rl_24x4_lib8, @function
-inner_edge_trmm_nn_rl_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trmm_nn_rl_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trmm_nn_rl_24x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trmm_nn_rl_24x4_lib8:
-#endif
-#endif
-	
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	movl		%r15d, %eax
-	sall		$2, %eax // offsetB*sizeof(float)
-	movq		%r13, %rbx // B
-	addq		%rax, %rbx // B+offsetB*sizeof(float)
-
-
-	cmpl	$4, %r15d
-	jg		1f
-
-	// offB==0, 1, 2, 3, 4
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	8(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	40(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$5, %r15d
-	jg		1f
-
-	// offB==5
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	8(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	40(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	72(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movl		$0, %r15d // offsetB=0
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$6, %r15d
-	jg		1f
-
-	// offB==6
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	32(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	64(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-//	cmpl	$7, %r15d
-//	jg		0f
-
-	// offB==7
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r14, %r13 // B+8*sdb*sizeof(float)
-	movq		%r13, %rbx // B
-	movl		$0, %r15d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	0(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	32(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm13
-	vmovaps			0(%r11, %r12, 1), %ymm14
-	vmovaps			0(%r11, %r12, 2), %ymm15
-	vbroadcastss	4(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vfmadd231ps		%ymm15, %ymm12, %ymm8
-	vbroadcastss	36(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vfmadd231ps		%ymm15, %ymm12, %ymm9
-	vbroadcastss	68(%rbx), %ymm12
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vfmadd231ps		%ymm15, %ymm12, %ymm10
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r15d // offsetB+1
-
-//	jmp			0f // end
-
-
-	// end
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trmm_nn_rl_24x4_lib8, .-inner_edge_trmm_nn_rl_24x4_lib8
-#endif
-#endif
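
For orientation, the offB cases above implement the triangular "edge" of D += A*B with B lower triangular and stored in 8-row panels: relative row l of the triangle only contributes to columns 0..l, and the B pointer wraps to the next panel once the running offset reaches 8. A rough scalar sketch of those edge iterations is given below; all names are illustrative (the 24x4 accumulator and the A panel are shown as plain column-major arrays, and the panel wrap-around of B is omitted for clarity), so this is a reading aid, not the kernel's actual interface.

    // Scalar sketch of the trmm_nn_rl 24x4 edge iterations (illustrative only).
    // acc: 24x4 accumulator, column-major; a: 24 x k slice of A, column-major;
    // b: one panel of B with an 8-float column stride; off: starting row offset in b.
    static void trmm_nn_rl_edge_24x4_sketch(int k, const float *a, const float *b,
                                            int off, float *acc)
    {
        // at most 3 edge iterations: row l of the triangle touches columns 0..l
        for (int l = 0; l < 3 && l < k; l++)
            for (int j = 0; j <= l; j++)
                for (int i = 0; i < 24; i++)
                    acc[i + 24*j] += a[i + 24*l] * b[(off + l) + 8*j];
    }
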
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
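
As a reference for the triangular-solve edge that follows, here is a scalar sketch of what it computes. Names are illustrative: x stands for the 24x4 block held in ymm0-ymm11 (three 8-row panels per column, shown as a plain column-major array), e for the 4x4 lower-triangular factor read from r10 with an 8-float column stride, inv_diag for the pre-inverted diagonal in r11, and kn for the column count that gates the early exits; columns beyond kn are never stored, so the exact amount of redundant work differs slightly from the assembly.

    // Scalar sketch of the right/lower/transposed/not-unit trsm edge:
    // solve X * L^T = C in place, diagonal already inverted.
    static void trsm_rlt_inv_24x4_vs_sketch(float *x, const float *e,
                                            const float *inv_diag, int kn)
    {
        for (int j = 0; j < kn && j < 4; j++) {
            for (int i = 0; i < 24; i++)
                x[i + 24*j] *= inv_diag[j];              // scale column j
            for (int jj = j + 1; jj < 4; jj++)           // eliminate it from the
                for (int i = 0; i < 24; i++)             // remaining columns
                    x[i + 24*jj] -= x[i + 24*j] * e[jj + 8*j];
        }
    }
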
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_24X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_24x4_vs_lib8, @function
-inner_edge_trsm_rlt_inv_24x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_24x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_24x4_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	vmulps			%ymm8, %ymm13, %ymm8
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastss	4(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vfnmadd231ps	%ymm8, %ymm13, %ymm9
-	vbroadcastss	8(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vfnmadd231ps	%ymm8, %ymm13, %ymm10
-	vbroadcastss	12(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-	vfnmadd231ps	%ymm8, %ymm13, %ymm11
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	vmulps			%ymm9, %ymm13, %ymm9
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastss	40(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vfnmadd231ps	%ymm9, %ymm13, %ymm10
-	vbroadcastss	44(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-	vfnmadd231ps	%ymm9, %ymm13, %ymm11
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	vmulps			%ymm10, %ymm13, %ymm10
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastss	76(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-	vfnmadd231ps	%ymm10, %ymm13, %ymm11
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-	vmulps			%ymm11, %ymm13, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_24x4_vs_lib8, .-inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization vs
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
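
The Cholesky edge below factorizes the 4x4 diagonal block sitting in the first panel of the 24x4 accumulator and applies the result to the rows underneath, storing the inverted diagonal and mapping a non-positive pivot to 0.0 exactly as the jbe branches do. A scalar sketch with illustrative names (acc as a plain 24x4 column-major array, inv_diag_e receiving the inverted diagonal, kn limiting the columns processed):

    #include <math.h>

    // Scalar sketch of the 24x4 potrf edge: right-looking Cholesky on the
    // top 4x4 block, non-positive pivots mapped to an inverse of 0.0.
    static void potrf_24x4_vs_sketch(float *acc, float *inv_diag_e, int kn)
    {
        for (int j = 0; j < kn && j < 4; j++) {
            float d = acc[j + 24*j];
            float inv = (d > 0.0f) ? 1.0f / sqrtf(d) : 0.0f;
            inv_diag_e[j] = inv;
            for (int i = 0; i < 24; i++)
                acc[i + 24*j] *= inv;                    // scale column j
            for (int jj = j + 1; jj < 4; jj++)           // trailing update
                for (int i = 0; i < 24; i++)
                    acc[i + 24*jj] -= acc[i + 24*j] * acc[jj + 24*j];
        }
    }
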
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_24X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_24x4_vs_lib8, @function
-inner_edge_potrf_24x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_24x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_24x4_vs_lib8:
-#endif
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss			%xmm0, %xmm0, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	vmulps			%ymm8, %ymm13, %ymm8
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm0, %ymm0, %ymm15
-	vpermilps		$0x55, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vfnmadd231ps	%ymm8, %ymm13, %ymm9
-	vpermilps		$0xaa, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vfnmadd231ps	%ymm8, %ymm13, %ymm10
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-	vfnmadd231ps	%ymm8, %ymm13, %ymm11
-
-
-	vpermilps		$0x55, %xmm1, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	vmulps			%ymm9, %ymm13, %ymm9
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm1, %ymm1, %ymm15
-	vpermilps		$0xaa, %ymm15, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vfnmadd231ps	%ymm9, %ymm13, %ymm10
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-	vfnmadd231ps	%ymm9, %ymm13, %ymm11
-
-
-	vpermilps		$0xaa, %xmm2, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	vmulps			%ymm10, %ymm13, %ymm10
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128		$0x00, %ymm2, %ymm2, %ymm15
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-	vfnmadd231ps	%ymm10, %ymm13, %ymm11
-
-
-	vpermilps		$0xff, %xmm3, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-	vmulps			%ymm11, %ymm13, %ymm11
-
-	jmp		0f
-
-
-1:
-	vxorps			%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_24x4_vs_lib8, .-inner_edge_potrf_24x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization vs
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_20X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_20x4_vs_lib8, @function
-inner_edge_potrf_20x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_20x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_20x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_20x4_vs_lib8:
-#endif
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vextractf128	$0x1, %ymm0, %xmm13
-//	vpermilps		$0x00, %xmm13, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vmulps			%ymm4, %ymm13, %ymm4
-	vmulps			%ymm8, %ymm13, %ymm8
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm15
-	vpermilps		$0x55, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vfnmadd231ps	%ymm8, %ymm13, %ymm9
-	vpermilps		$0xaa, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vfnmadd231ps	%ymm8, %ymm13, %ymm10
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-	vfnmadd231ps	%ymm8, %ymm13, %ymm11
-
-
-	vextractf128	$0x1, %ymm1, %xmm13
-	vpermilps		$0x55, %xmm13, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vmulps			%ymm5, %ymm13, %ymm5
-	vmulps			%ymm9, %ymm13, %ymm9
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm15
-	vpermilps		$0xaa, %ymm15, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vfnmadd231ps	%ymm9, %ymm13, %ymm10
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-	vfnmadd231ps	%ymm9, %ymm13, %ymm11
-
-
-	vextractf128	$0x1, %ymm2, %xmm13
-	vpermilps		$0xaa, %xmm13, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vmulps			%ymm6, %ymm13, %ymm6
-	vmulps			%ymm10, %ymm13, %ymm10
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm15
-	vpermilps		$0xff, %ymm15, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-	vfnmadd231ps	%ymm10, %ymm13, %ymm11
-
-
-	vextractf128	$0x1, %ymm3, %xmm13
-	vpermilps		$0xff, %xmm13, %xmm13
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vpermilps		$0x00, %xmm13, %xmm13
-	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vmulps			%ymm7, %ymm13, %ymm7
-	vmulps			%ymm11, %ymm13, %ymm11
-
-	jmp		0f
-
-
-1:
-	vxorps			%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd			%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_20x4_vs_lib8, .-inner_edge_potrf_20x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
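
The scale routine that follows applies D = alpha*acc + beta*C to the 24x4 block and skips the loads of C entirely when beta == 0, so C is never touched in that case. A scalar sketch with illustrative names, treating acc and C as plain 24x4 column-major arrays rather than three 8-row panels:

    // Scalar sketch of inner_scale_ab_24x4: acc <- alpha*acc + beta*C,
    // with C not read at all when beta is exactly zero.
    static void scale_ab_24x4_sketch(float alpha, float beta,
                                     float *acc, const float *c)
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 24; i++) {
                acc[i + 24*j] *= alpha;
                if (beta != 0.0f)
                    acc[i + 24*j] += beta * c[i + 24*j];
            }
    }
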
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_24x4_lib8, @function
-inner_scale_ab_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_24x4_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_24x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	vmulps		%ymm4, %ymm15, %ymm4
-	vmulps		%ymm5, %ymm15, %ymm5
-	vmulps		%ymm6, %ymm15, %ymm6
-	vmulps		%ymm7, %ymm15, %ymm7
-
-	vmulps		%ymm8, %ymm15, %ymm8
-	vmulps		%ymm9, %ymm15, %ymm9
-	vmulps		%ymm10, %ymm15, %ymm10
-	vmulps		%ymm11, %ymm15, %ymm11
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r12, %r13, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r12, %r13, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r12, %r13, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r12, %r13, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%r12, %r13, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%r12, %r13, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%r12, %r13, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%r12, %r13, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_24x4_lib8, .-inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_24x4_gen_lib8, @function
-inner_scale_ab_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_24x4_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	vmulps		%ymm4, %ymm15, %ymm4
-	vmulps		%ymm5, %ymm15, %ymm5
-	vmulps		%ymm6, %ymm15, %ymm6
-	vmulps		%ymm7, %ymm15, %ymm7
-
-	vmulps		%ymm8, %ymm15, %ymm8
-	vmulps		%ymm9, %ymm15, %ymm9
-	vmulps		%ymm10, %ymm15, %ymm10
-	vmulps		%ymm11, %ymm15, %ymm11
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			7f // end
-
-	movq	%r13, %r15 // C1 <- C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-	movq	%r15, %rax // C2 <- C1
-	addq	%r14, %rax // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_24x4_gen_lib8, .-inner_scale_ab_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_24x4_lib8, @function
-inner_scale_a0_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_a0_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_24x4_lib8; .scl 2; .type 32; .endef
-inner_scale_a0_24x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm12
-
-	vmulps		%ymm0, %ymm12, %ymm0
-	vmulps		%ymm1, %ymm12, %ymm1
-	vmulps		%ymm2, %ymm12, %ymm2
-	vmulps		%ymm3, %ymm12, %ymm3
-
-	vmulps		%ymm4, %ymm12, %ymm4
-	vmulps		%ymm5, %ymm12, %ymm5
-	vmulps		%ymm6, %ymm12, %ymm6
-	vmulps		%ymm7, %ymm12, %ymm7
-
-	vmulps		%ymm8, %ymm12, %ymm8
-	vmulps		%ymm9, %ymm12, %ymm9
-	vmulps		%ymm10, %ymm12, %ymm10
-	vmulps		%ymm11, %ymm12, %ymm11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_24x4_lib8, .-inner_scale_a0_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_24x4_lib8, @function
-inner_scale_11_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_24x4_lib8; .scl 2; .type 32; .endef
-inner_scale_11_24x4_lib8:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovaps		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovaps		LC03(%rip), %ymm14
-#endif
-
-	vmovaps		0(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r10, %r11, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r10, %r11, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r10, %r11, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r10, %r11, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%r10, %r11, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%r10, %r11, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%r10, %r11, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%r10, %r11, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_24x4_lib8, .-inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_24x4_gen_lib8, @function
-inner_scale_11_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_11_24x4_gen_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovaps		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovaps		LC03(%rip), %ymm14
-#endif
-
-	vmovaps		0(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r11, %r12, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r11, %r12, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r11, %r12, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r11, %r12, 1), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%r11, %r12, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%r11, %r12, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%r11, %r12, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%r11, %r12, 2), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_24x4_gen_lib8, .-inner_scale_11_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// r13   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_24x4_lib8, @function
-inner_blend_scale_ab_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_24x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_24x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm4, %ymm15, %ymm4
-	vmulps		%ymm5, %ymm15, %ymm5
-	vmulps		%ymm6, %ymm15, %ymm6
-	vmulps		%ymm7, %ymm15, %ymm7
-
-	vblendps	$0xaa, %ymm9, %ymm8, %ymm12
-	vblendps	$0x55, %ymm9, %ymm8, %ymm13
-	vblendps	$0xaa, %ymm11, %ymm10, %ymm14
-	vblendps	$0x55, %ymm11, %ymm10, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm8
-	vblendps	$0x33, %ymm15, %ymm12, %ymm10
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm9
-	vblendps	$0x33, %ymm14, %ymm13, %ymm11
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm8, %ymm15, %ymm8
-	vmulps		%ymm9, %ymm15, %ymm9
-	vmulps		%ymm10, %ymm15, %ymm10
-	vmulps		%ymm11, %ymm15, %ymm11
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	movq	%r12, %r15 // C1 <- C0
-	addq	%r13, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-	movq	%r15, %rax // C2 <- C1
-	addq	%r13, %rax // C2 <- C1 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_24x4_lib8, .-inner_blend_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_24x4_gen_lib8, @function
-inner_blend_scale_ab_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_24x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm4, %ymm15, %ymm4
-	vmulps		%ymm5, %ymm15, %ymm5
-	vmulps		%ymm6, %ymm15, %ymm6
-	vmulps		%ymm7, %ymm15, %ymm7
-
-	vblendps	$0xaa, %ymm9, %ymm8, %ymm12
-	vblendps	$0x55, %ymm9, %ymm8, %ymm13
-	vblendps	$0xaa, %ymm11, %ymm10, %ymm14
-	vblendps	$0x55, %ymm11, %ymm10, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm8
-	vblendps	$0x33, %ymm15, %ymm12, %ymm10
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm9
-	vblendps	$0x33, %ymm14, %ymm13, %ymm11
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm8, %ymm15, %ymm8
-	vmulps		%ymm9, %ymm15, %ymm9
-	vmulps		%ymm10, %ymm15, %ymm10
-	vmulps		%ymm11, %ymm15, %ymm11
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			7f // end
-
-	movq	%r13, %r15 // C1 <- C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-	movq	%r15, %rax // C2 <- C1
-	addq	%r14, %rax // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-	vmovaps		0(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm8
-	vmovaps		32(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm9
-	vmovaps		64(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm10
-	vmovaps		96(%rax), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm11
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r14, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_24x4_gen_lib8, .-inner_blend_scale_ab_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// r11   <- 4*sdc*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_24x4_lib8, @function
-inner_blend_scale_11_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_24x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_24x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vblendps	$0xaa, %ymm9, %ymm8, %ymm12
-	vblendps	$0x55, %ymm9, %ymm8, %ymm13
-	vblendps	$0xaa, %ymm11, %ymm10, %ymm14
-	vblendps	$0x55, %ymm11, %ymm10, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm8
-	vblendps	$0x33, %ymm15, %ymm12, %ymm10
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm9
-	vblendps	$0x33, %ymm14, %ymm13, %ymm11
-
-	movq	%r10, %r15 // C1 <- C0
-	addq	%r11, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-	movq	%r15, %rax // C2 <- C1
-	addq	%r11, %rax // C2 <- C1 + 4*sdc*sizeof(double)
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm15, %ymm0, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm15, %ymm1, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm15, %ymm2, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm15, %ymm3, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vaddps		%ymm15, %ymm4, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vaddps		%ymm15, %ymm5, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vaddps		%ymm15, %ymm6, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vaddps		%ymm15, %ymm7, %ymm7
-
-	vmovaps		0(%rax), %ymm15
-	vaddps		%ymm15, %ymm8, %ymm8
-	vmovaps		32(%rax), %ymm15
-	vaddps		%ymm15, %ymm9, %ymm9
-	vmovaps		64(%rax), %ymm15
-	vaddps		%ymm15, %ymm10, %ymm10
-	vmovaps		96(%rax), %ymm15
-	vaddps		%ymm15, %ymm11, %ymm11
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_24x4_lib8, .-inner_blend_scale_11_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_24x4_gen_lib8, @function
-inner_blend_scale_11_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_24x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vblendps	$0xaa, %ymm9, %ymm8, %ymm12
-	vblendps	$0x55, %ymm9, %ymm8, %ymm13
-	vblendps	$0xaa, %ymm11, %ymm10, %ymm14
-	vblendps	$0x55, %ymm11, %ymm10, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm8
-	vblendps	$0x33, %ymm15, %ymm12, %ymm10
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm9
-	vblendps	$0x33, %ymm14, %ymm13, %ymm11
-
-	movq	%r11, %r15 // C1 <- C0
-	addq	%r12, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-	movq	%r15, %rax // C2 <- C1
-	addq	%r12, %rax // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm15
-	vaddps		%ymm15, %ymm0, %ymm0
-	vmovaps		32(%r11), %ymm15
-	vaddps		%ymm15, %ymm1, %ymm1
-	vmovaps		64(%r11), %ymm15
-	vaddps		%ymm15, %ymm2, %ymm2
-	vmovaps		96(%r11), %ymm15
-	vaddps		%ymm15, %ymm3, %ymm3
-
-	vmovaps		0(%r15), %ymm15
-	vaddps		%ymm15, %ymm4, %ymm4
-	vmovaps		32(%r15), %ymm15
-	vaddps		%ymm15, %ymm5, %ymm5
-	vmovaps		64(%r15), %ymm15
-	vaddps		%ymm15, %ymm6, %ymm6
-	vmovaps		96(%r15), %ymm15
-	vaddps		%ymm15, %ymm7, %ymm7
-
-	vmovaps		0(%rax), %ymm15
-	vaddps		%ymm15, %ymm8, %ymm8
-	vmovaps		32(%rax), %ymm15
-	vaddps		%ymm15, %ymm9, %ymm9
-	vmovaps		64(%rax), %ymm15
-	vaddps		%ymm15, %ymm10, %ymm10
-	vmovaps		96(%rax), %ymm15
-	vaddps		%ymm15, %ymm11, %ymm11
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%rax, %rbx // C1
-	addq	%r12, %rbx // C2 <- C1 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_24x4_gen_lib8, .-inner_blend_scale_11_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
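
The plain store below writes the 24x4 result as three consecutive 8-row panels of D, with the stride register giving the byte distance between panels. A scalar sketch of the addressing, with illustrative names (acc shown as a plain 24x4 column-major array, panel_stride_fl the panel-to-panel distance in floats, i.e. the byte stride passed in r11 divided by 4):

    // Scalar sketch of inner_store_24x4: element (i, j) of the block lands in
    // panel i/8 of D, at row i%8 of column j within that panel.
    static void store_24x4_sketch(const float *acc, float *d, int panel_stride_fl)
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 24; i++)
                d[(i / 8) * panel_stride_fl + 8 * j + (i % 8)] = acc[i + 24 * j];
    }
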
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_24x4_lib8, @function
-inner_store_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_24x4_lib8; .scl 2; .type 32; .endef
-inner_store_24x4_lib8:
-#endif
-#endif
-	
-	movq	%r10, %r15 // D1 <- D0
-	addq	%r11, %r15 // D1 <- D0 + 4*sdd*sizeof(double)
-	movq	%r15, %rax // D2 <- D1
-	addq	%r11, %rax // D2 <- D1 + 4*sdd*sizeof(double)
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r15)
-	vmovaps 	%ymm5, 32(%r15)
-	vmovaps 	%ymm6, 64(%r15)
-	vmovaps 	%ymm7, 96(%r15)
-
-	vmovaps 	%ymm8,  0(%rax)
-	vmovaps 	%ymm9, 32(%rax)
-	vmovaps 	%ymm10, 64(%rax)
-	vmovaps 	%ymm11, 96(%rax)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_24x4_lib8, .-inner_store_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
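
The vs store masks off the tail rows of the third panel (mask built from km and the LC02 constant, applied with vmaskmovps) and cuts the column loop short at kn; the first two panels are written unconditionally, so the variant is presumably only used when km > 16. In scalar terms, with illustrative names and the same panel addressing as the sketch above:

    // Scalar sketch of inner_store_24x4_vs: full first two panels,
    // row-masked third panel, kn columns.
    static void store_24x4_vs_sketch(const float *acc, float *d,
                                     int panel_stride_fl, int km, int kn)
    {
        for (int j = 0; j < kn && j < 4; j++)
            for (int i = 0; i < 24; i++)
                if (i < 16 || i < km)   // rows >= km in the last panel are skipped
                    d[(i / 8) * panel_stride_fl + 8 * j + (i % 8)] = acc[i + 24 * j];
    }
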
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_24X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_24x4_vs_lib8, @function
-inner_store_24x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_24x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_24x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0,  0(%r10)
-	vmovaps		%ymm4, 0(%r10, %r11, 1)
-	vmaskmovps	%ymm8, %ymm15,  0(%r10, %r11, 2)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		%ymm1, 32(%r10)
-	vmovaps		%ymm5, 32(%r10, %r11, 1)
-	vmaskmovps	%ymm9, %ymm15, 32(%r10, %r11, 2)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		%ymm2, 64(%r10)
-	vmovaps		%ymm6, 64(%r10, %r11, 1)
-	vmaskmovps	%ymm10, %ymm15, 64(%r10, %r11, 2)
-	je			0f // end
-	vmovaps		%ymm3, 96(%r10)
-	vmovaps		%ymm7, 96(%r10, %r11, 1)
-	vmaskmovps	%ymm11, %ymm15, 96(%r10, %r11, 2)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_24x4_vs_lib8, .-inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
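
The generalized store below handles a sub-block: the first n0 columns are dropped by shifting the accumulator registers and advancing D, rows below m0 are masked off in the first panel and rows at or above m1 in the last one (assuming the LC00/LC02 constants hold the lane indices of those panels, as their use with vmaskmovps suggests), and only the offset==0 path is implemented; the offset>0 branches are TODO stubs. A scalar sketch of the offset==0 behaviour, with illustrative names:

    // Scalar sketch of the offset==0 path of inner_store_24x4_gen.
    static void store_24x4_gen_sketch(const float *acc, float *d,
                                      int panel_stride_fl,
                                      int m0, int m1, int n0, int n1)
    {
        if (n1 > 4) n1 = 4;
        for (int j = n0; j < n1; j++)
            for (int i = 0; i < 24; i++) {
                int keep = (i < 8)  ? (i >= m0)   // first panel: mask leading rows
                         : (i < 16) ? 1           // middle panel: stored in full
                                    : (i < m1);   // last panel: mask trailing rows
                if (keep)
                    d[(i / 8) * panel_stride_fl + 8 * j + (i % 8)] = acc[i + 24 * j];
            }
    }
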
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_24x4_gen_lib8, @function
-inner_store_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_24x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and the accumulator registers past the skipped leading columns
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	vmovaps		%ymm11, %ymm10
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	addq		$32, %r11
-
-0:
-
-	// compute D1
-	movq	%r11, %rbx // D1
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(float)
-	movq	%rbx, %rbp // D2
-	addq	%r12, %rbp // D2 <- D1 + 4*sdd*sizeof(float)
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	cmpl		$2, %r15d
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmovaps		%ymm4, 0(%rbx)
-	vmaskmovps	%ymm8, %ymm15,  0(%rbp)
-	jl			7f // end
-	cmpl		$3, %r15d
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmovaps		%ymm5, 32(%rbx)
-	vmaskmovps	%ymm9, %ymm15, 32(%rbp)
-	jl			7f // end
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmovaps		%ymm6, 64(%rbx)
-	vmaskmovps	%ymm10, %ymm15, 64(%rbp)
-	je			7f // end
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmovaps		%ymm7, 96(%rbx)
-	vmaskmovps	%ymm11, %ymm15, 96(%rbp)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-//	movq	%r11, %rbp // D1
-//	addq	%r12, %rbp // D2 <- D1 + 4*sdd*sizeof(float)
-	addq	%rbp, %r12 // D3 <- D2 + 4*sdd*sizeof(float)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_24x4_gen_lib8, .-inner_store_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_20X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_20x4_lib8, @function
-inner_store_l_20x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_20x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_20x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_20x4_lib8:
-#endif
-#endif
-	
-	vmovaps		0(%r10), %ymm12
-	vmovaps		32(%r10), %ymm13
-	vmovaps		64(%r10), %ymm14
-	vmovaps		96(%r10), %ymm15
-
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vblendps	$0x1f, %ymm13, %ymm1, %ymm1
-	vblendps	$0x3f, %ymm14, %ymm2, %ymm2
-	vblendps	$0x7f, %ymm15, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-	vmovaps 	%ymm8,  0(%r10, %r11, 2)
-	vmovaps 	%ymm9, 32(%r10, %r11, 2)
-	vmovaps 	%ymm10, 64(%r10, %r11, 2)
-	vmovaps 	%ymm11, 96(%r10, %r11, 2)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_20x4_lib8, .-inner_store_l_20x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(float)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
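
The lower store below differs from the plain 24x4 store only in the first panel: the strictly upper part of the leading 4x4 block keeps whatever is already in D (the vblendps with masks 0x01/0x03/0x07), so only the lower triangle of that block is overwritten. A scalar sketch with illustrative names and the same panel addressing as the earlier store sketches:

    // Scalar sketch of inner_store_l_24x4: like the plain store, but entries
    // above the diagonal of the leading 4x4 block are left untouched in D.
    static void store_l_24x4_sketch(const float *acc, float *d, int panel_stride_fl)
    {
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 24; i++)
                if (i >= j)   // only relevant for the first 4 rows
                    d[(i / 8) * panel_stride_fl + 8 * j + (i % 8)] = acc[i + 24 * j];
    }
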
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_24X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_24x4_lib8, @function
-inner_store_l_24x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_24x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_24x4_lib8:
-#endif
-#endif
-	
-	vmovaps		32(%r10), %ymm12
-	vmovaps		64(%r10), %ymm13
-	vmovaps		96(%r10), %ymm14
-
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vblendps	$0x03, %ymm13, %ymm2, %ymm2
-	vblendps	$0x07, %ymm14, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		%ymm3, 96(%r10)
-
-	vmovaps 	%ymm4,  0(%r10, %r11, 1)
-	vmovaps 	%ymm5, 32(%r10, %r11, 1)
-	vmovaps 	%ymm6, 64(%r10, %r11, 1)
-	vmovaps 	%ymm7, 96(%r10, %r11, 1)
-
-	vmovaps 	%ymm8,  0(%r10, %r11, 2)
-	vmovaps 	%ymm9, 32(%r10, %r11, 2)
-	vmovaps 	%ymm10, 64(%r10, %r11, 2)
-	vmovaps 	%ymm11, 96(%r10, %r11, 2)
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_24x4_lib8, .-inner_store_l_24x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_20X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_20x4_vs_lib8, @function
-inner_store_l_20x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_20x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_20x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_20x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		0(%r10), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmovaps		%ymm0, 0(%r10)
-	vmovaps		%ymm4, 0(%r10, %r11, 1)
-	vmaskmovps	%ymm8, %ymm15, 0(%r10, %r11, 2)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmovaps		%ymm5, 32(%r10, %r11, 1)
-	vmaskmovps	%ymm9, %ymm15, 32(%r10, %r11, 2)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmovaps		%ymm6, 64(%r10, %r11, 1)
-	vmaskmovps	%ymm10, %ymm15, 64(%r10, %r11, 2)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmovaps		%ymm7, 96(%r10, %r11, 1)
-	vmaskmovps	%ymm11, %ymm15, 96(%r10, %r11, 2)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_20x4_vs_lib8, .-inner_store_l_20x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- 4*sdd*sizeof(double)
-// r12  <- km
-// r13  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_24X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_24x4_vs_lib8, @function
-inner_store_l_24x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_24x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_24x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r12d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	vmovaps		%ymm0, 0(%r10)
-	vmovaps		%ymm4, 0(%r10, %r11, 1)
-	vmaskmovps	%ymm8, %ymm15, 0(%r10, %r11, 2)
-	cmpl		$2, %r13d
-	jl			0f // end
-	vmovaps		32(%r10), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmovaps		%ymm1, 32(%r10)
-	vmovaps		%ymm5, 32(%r10, %r11, 1)
-	vmaskmovps	%ymm9, %ymm15, 32(%r10, %r11, 2)
-	cmpl		$3, %r13d
-	jl			0f // end
-	vmovaps		64(%r10), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmovaps		%ymm2, 64(%r10)
-	vmovaps		%ymm6, 64(%r10, %r11, 1)
-	vmaskmovps	%ymm10, %ymm15, 64(%r10, %r11, 2)
-	je			0f // end
-	vmovaps		96(%r10), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmovaps		%ymm3, 96(%r10)
-	vmovaps		%ymm7, 96(%r10, %r11, 1)
-	vmaskmovps	%ymm11, %ymm15, 96(%r10, %r11, 2)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_24x4_vs_lib8, .-inner_store_l_24x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_20X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_20x4_gen_lib8, @function
-inner_store_l_20x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_20x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_20x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_20x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	vmovaps		%ymm11, %ymm10
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmovaps		0(%r11), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm0, %ymm0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmovaps		%ymm4, 0(%r11, %r12, 1)
-	vmaskmovps	%ymm8, %ymm15,  0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmovaps		%ymm5, 32(%r11, %r12, 1)
-	vmaskmovps	%ymm9, %ymm15, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmovaps		%ymm6, 64(%r11, %r12, 1)
-	vmaskmovps	%ymm10, %ymm15, 64(%r11, %r12, 2)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmovaps		%ymm7, 96(%r11, %r12, 1)
-	vmaskmovps	%ymm11, %ymm15, 96(%r11, %r12, 2)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_20x4_gen_lib8, .-inner_store_l_20x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_24X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_24x4_gen_lib8, @function
-inner_store_l_24x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_24x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_24x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-	vmovups		.LC02(%rip), %ymm13
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-	vmovups		LC02(%rip), %ymm13
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm13, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm7, %ymm6
-	vmovaps		%ymm11, %ymm10
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm10, %ymm9
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm9, %ymm8
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm14,  0(%r11)
-	vmovaps		%ymm4, 0(%r11, %r12, 1)
-	vmaskmovps	%ymm8, %ymm15,  0(%r11, %r12, 2)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps		32(%r11), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm14, 32(%r11)
-	vmovaps		%ymm5, 32(%r11, %r12, 1)
-	vmaskmovps	%ymm9, %ymm15, 32(%r11, %r12, 2)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps		64(%r11), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm14, 64(%r11)
-	vmovaps		%ymm6, 64(%r11, %r12, 1)
-	vmaskmovps	%ymm10, %ymm15, 64(%r11, %r12, 2)
-	je			7f // end
-	vmovaps		96(%r11), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm14, 96(%r11)
-	vmovaps		%ymm7, 96(%r11, %r12, 1)
-	vmaskmovps	%ymm11, %ymm15, 96(%r11, %r12, 2)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_24x4_gen_lib8, .-inner_store_l_24x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                                1      2             3         4        5         6            7         8        9         10
-// void kernel_sgemm_nt_24x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_24x4_lib8
-	.type kernel_sgemm_nt_24x4_lib8, @function
-kernel_sgemm_nt_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_24x4_lib8
-_kernel_sgemm_nt_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_24x4_lib8
-	.def kernel_sgemm_nt_24x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_24x4_lib8, .-kernel_sgemm_nt_24x4_lib8
-#endif
-
-
-
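For reference, a plain-C sketch of the operation kernel_sgemm_nt_24x4_lib8 implements: D[24x4] = alpha*A*B^T + beta*C, with A of size 24xk and B of size 4xk. It is deliberately simplified: ordinary row-major arrays with explicit leading dimensions, whereas the kernel reads its operands in blasfeo's 8-row panel-major layout, whose panel byte stride is the 8*sd*sizeof(float) = 32*sd computed by the "sall $5" instructions above. The helper name and layout are illustrative assumptions, not blasfeo code:

    /* D = alpha * A * B^T + beta * C for one 24x4 block ("nt": B transposed). */
    static void ref_sgemm_nt_24x4(int k, float alpha,
                                  const float *A, int lda,
                                  const float *B, int ldb,
                                  float beta,
                                  const float *C, int ldc,
                                  float *D, int ldd)
    {
        for (int i = 0; i < 24; i++) {
            for (int j = 0; j < 4; j++) {
                float t = 0.0f;
                for (int l = 0; l < k; l++)
                    t += A[i * lda + l] * B[j * ldb + l];
                D[i * ldd + j] = alpha * t + beta * C[i * ldc + j];
            }
        }
    }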
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_sgemm_nt_24x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_24x4_vs_lib8
-	.type kernel_sgemm_nt_24x4_vs_lib8, @function
-kernel_sgemm_nt_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_24x4_vs_lib8
-_kernel_sgemm_nt_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_24x4_vs_lib8
-	.def kernel_sgemm_nt_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_24x4_vs_lib8, .-kernel_sgemm_nt_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9           rsp+8        rsp+16    rsp+24   rsp+32       rsp+40    rsp+48   rsp+56  rsp+64  rsp+72  rsp+80
-// void kernel_sgemm_nt_24x4_gen_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_24x4_gen_lib8
-	.type kernel_sgemm_nt_24x4_gen_lib8, @function
-kernel_sgemm_nt_24x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_24x4_gen_lib8
-_kernel_sgemm_nt_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_24x4_gen_lib8
-	.def kernel_sgemm_nt_24x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_24x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12 // offsetC
-	movq	ARG8, %r13 // C
-	movq	ARG9, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_24x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG10, %r10 // offsetD
-	movq	ARG11, %r11 // D
-	movq	ARG12, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG13, %r13 // m0
-	movq	ARG14, %r14 // m1
-	movq	ARG15, %r15 // n0
-	movq	ARG16, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_24x4_gen_lib8, .-kernel_sgemm_nt_24x4_gen_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8            9         10       11        12
-// void kernel_sgemm_nn_24x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_24x4_lib8
-	.type kernel_sgemm_nn_24x4_lib8, @function
-kernel_sgemm_nn_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_24x4_lib8
-_kernel_sgemm_nn_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_24x4_lib8
-	.def kernel_sgemm_nn_24x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_24x4_lib8, .-kernel_sgemm_nn_24x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8            9         10       11        12       13      14
-// void kernel_sgemm_nn_24x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_24x4_vs_lib8
-	.type kernel_sgemm_nn_24x4_vs_lib8, @function
-kernel_sgemm_nn_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_24x4_vs_lib8
-_kernel_sgemm_nn_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_24x4_vs_lib8
-	.def kernel_sgemm_nn_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12   // C
-	movq	ARG10, %r13   // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG13, %r12 // km
-	movq	ARG14, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_24x4_vs_lib8, .-kernel_sgemm_nn_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                    rdi    rsi           rdx       rcx      r8        r9        rsp+8    rsp+16       rsp+24    rsp+32    rsp+40   rsp+48    rsp+56    rsp+64   rsp+72  rsp+80  rsp+88  rsp+96
-// void kernel_sgemm_nn_24x4_gen_lib8(int k, float *alpha, float *A, int sda, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_24x4_gen_lib8
-	.type kernel_sgemm_nn_24x4_gen_lib8, @function
-kernel_sgemm_nn_24x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_24x4_gen_lib8
-_kernel_sgemm_nn_24x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_24x4_gen_lib8
-	.def kernel_sgemm_nn_24x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_24x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG6, %r13  // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 4*sdb*sizeof(double)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG8, %r11 // beta
-	movq	ARG9, %r12 // offsetC
-	movq	ARG10, %r13 // C
-	movq	ARG11, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_24x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG12, %r10 // offsetD
-	movq	ARG13, %r11 // D
-	movq	ARG14, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG15, %r13 // m0
-	movq	ARG16, %r14 // m1
-	movq	ARG17, %r15 // n0
-	movq	ARG18, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_24x4_gen_lib8, .-kernel_sgemm_nn_24x4_gen_lib8
-#endif
-
-
-
-
-
-//                                       rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32 
-// void kernel_strsm_nt_rl_inv_24x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_24x4_lib8
-	.type kernel_strsm_nt_rl_inv_24x4_lib8, @function
-kernel_strsm_nt_rl_inv_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_24x4_lib8
-_kernel_strsm_nt_rl_inv_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_24x4_lib8
-	.def kernel_strsm_nt_rl_inv_24x4_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_24x4_lib8, .-kernel_strsm_nt_rl_inv_24x4_lib8
-#endif
-
-
-
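The call sequence in kernel_strsm_nt_rl_inv_24x4_lib8 above (gemm_sub, then scale_11, then trsm_rlt_inv, then store) reads as D = (C - A*B^T) * E^-T, i.e. a right triangular solve against the transpose of a 4x4 lower-triangular E, with inv_diag_E holding the reciprocals of E's diagonal. A scalar reference sketch of that interpretation (row-major arrays instead of the panel-major layout; the helper name is hypothetical, not blasfeo code):

    /* Solve X * E^T = C - A*B^T column by column for one 24x4 block, using the
     * precomputed reciprocals of E's diagonal instead of divisions. */
    static void ref_strsm_nt_rl_inv_24x4(int k,
                                         const float *A, int lda,  /* 24 x k */
                                         const float *B, int ldb,  /*  4 x k */
                                         const float *C, int ldc,  /* 24 x 4 */
                                         float *D, int ldd,        /* 24 x 4 */
                                         const float *E, int lde,  /*  4 x 4, lower triangular */
                                         const float *inv_diag_E)  /* 1.0f / E[j*lde + j] */
    {
        for (int j = 0; j < 4; j++) {
            for (int i = 0; i < 24; i++) {
                float t = C[i * ldc + j];
                for (int l = 0; l < k; l++)
                    t -= A[i * lda + l] * B[j * ldb + l];  /* C - A*B^T */
                for (int l = 0; l < j; l++)
                    t -= D[i * ldd + l] * E[j * lde + l];  /* previously solved columns */
                D[i * ldd + j] = t * inv_diag_E[j];
            }
        }
    }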
-
-
-//                                          rdi    rsi       rdx      rcx       r8        r9       rsp+8     rsp+16   rsp+24    rsp+32             rsp+40  rsp+48
-// void kernel_strsm_nt_rl_inv_24x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_24x4_vs_lib8
-	.type kernel_strsm_nt_rl_inv_24x4_vs_lib8, @function
-kernel_strsm_nt_rl_inv_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_24x4_vs_lib8
-_kernel_strsm_nt_rl_inv_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_24x4_vs_lib8
-	.def kernel_strsm_nt_rl_inv_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-	sall	$5, %r12d // 4*sda*sizeof(double)
-	movq	ARG4, %r13
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG11, %r12 // m1 
-	movq	ARG12, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_24x4_vs_lib8, .-kernel_strsm_nt_rl_inv_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                             1       2          3         4          5       6          7         8          9         10       11        12       13        14
-// void kernel_sgemm_strsm_nt_rl_inv_24x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_24x4_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_24x4_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_24x4_lib8
-_kernel_sgemm_strsm_nt_rl_inv_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_24x4_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_24x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sda*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movl	$4, %r12d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_24x4_lib8, .-kernel_sgemm_strsm_nt_rl_inv_24x4_lib8
-#endif
-
-
-
-
-
-//                                                1       2          3         4          5       6          7         8          9         10       11        12       13        14                 15      16
-// void kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sda*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10  // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG13, %r10  // E 
-	movq	ARG14, %r11  // inv_diag_E 
-	movq	ARG16, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG15, %r12 // km 
-	movq	ARG16, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_20x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_20x4_lib8
-	.type kernel_spotrf_nt_l_20x4_lib8, @function
-kernel_spotrf_nt_l_20x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_20x4_lib8
-_kernel_spotrf_nt_l_20x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_20x4_lib8
-	.def kernel_spotrf_nt_l_20x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_20x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_20x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_20x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_20x4_lib8, .-kernel_spotrf_nt_l_20x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_20x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_20x4_vs_lib8
-	.type kernel_spotrf_nt_l_20x4_vs_lib8, @function
-kernel_spotrf_nt_l_20x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_20x4_vs_lib8
-_kernel_spotrf_nt_l_20x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_20x4_vs_lib8
-	.def kernel_spotrf_nt_l_20x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_20x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_20x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_20x4_vs_lib8, .-kernel_spotrf_nt_l_20x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2         3        4         5         6        7         8        9
-// void kernel_spotrf_nt_l_24x4_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_24x4_lib8
-	.type kernel_spotrf_nt_l_24x4_lib8, @function
-kernel_spotrf_nt_l_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_24x4_lib8
-_kernel_spotrf_nt_l_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_24x4_lib8
-	.def kernel_spotrf_nt_l_24x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_24x4_lib8, .-kernel_spotrf_nt_l_24x4_lib8
-#endif
-
-
-
-
-
-//                                      1      2         3        4         5         6        7         8        9                  10      11
-// void kernel_spotrf_nt_l_24x4_vs_lib8(int k, float *A, int sda, float *B, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_24x4_vs_lib8
-	.type kernel_spotrf_nt_l_24x4_vs_lib8, @function
-kernel_spotrf_nt_l_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_24x4_vs_lib8
-_kernel_spotrf_nt_l_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_24x4_vs_lib8
-	.def kernel_spotrf_nt_l_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG2, %r11 // A
-	movq	ARG3, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG4, %r13 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG5, %r10 // C
-	movq	ARG6, %r11 // sdc
-	sall	$5, %r11d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-	movq	ARG10, %r12 // m1 
-	movq	ARG11, %r13 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_24x4_vs_lib8, .-kernel_spotrf_nt_l_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_20x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_20x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_20x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_20x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_20x4_lib8
-_kernel_ssyrk_spotrf_nt_l_20x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_20x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_20x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_20x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_20x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_20x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_20x4_lib8, .-kernel_ssyrk_spotrf_nt_l_20x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_20x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_20x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1        2          3         4          5       6          7         8          9         10       11        12       13
-// void kernel_ssyrk_spotrf_nt_l_24x4_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_24x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_24x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_24x4_lib8
-_kernel_ssyrk_spotrf_nt_l_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_24x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_24x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movl	$4, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 4*sdd*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_24x4_lib8, .-kernel_ssyrk_spotrf_nt_l_24x4_lib8
-#endif
-
-
-
-
-
-//                                            1        2          3         4          5       6          7         8          9         10       11        12       13                14      15
-// void kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8(int kp, float *Ap, int sdap, float *Bp, int km, float *Am, int sdam, float *Bm, float *C, int sdc, float *D, int sdd, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-	vmovapd	%ymm0, %ymm4
-	vmovapd	%ymm0, %ymm5
-	vmovapd	%ymm0, %ymm6
-	vmovapd	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12 // sdap
-	sall	$5, %r12d   // 4*sdap*sizeof(double)
-	movq	ARG4, %r13  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG5, %r10                 // km
-	movq	ARG6, %r11                   // Am
-	movq	ARG7, %r12 // sdam
-	sall	$5, %r12d                   // 4*sdam*sizeof(double)
-	movq	ARG8, %r13  // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner blender nn
-
-	movq	ARG9, %r10 // C
-	movq	ARG10, %r11 // sdc
-	sall	$5, %r11d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_24x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG13, %r10  // inv_diag_D 
-	movq	ARG15, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_24x4_vs_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG11, %r10 // store address D
-	movq	ARG12, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-	movq	ARG14, %r12 // km 
-	movq	ARG15, %r13 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-	
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_24x4_vs_lib8
-#endif
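
For orientation, here is a rough C sketch of what the fused kernel_ssyrk_spotrf_nt_l_24x4_lib8 / _vs pair above computes: accumulate Ap*Bp^T, subtract Am*Bm^T, add C, then Cholesky-factorize the four columns of the lower triangle and record the reciprocal pivots in inv_diag_D. The sketch is mine (function and index names are illustrative, not BLASFEO API); it uses plain column-major storage instead of the lib8 panel packing and leaves out the km/kn masking of the _vs variant as well as the zero-pivot handling of the real edge routine.

#include <math.h>

/* Reference sketch only: m x n block (m = 24, n = 4 here), column-major. */
static void syrk_potrf_nt_l_ref(int kp, const float *Ap, const float *Bp,
                                int km, const float *Am, const float *Bm,
                                const float *C, float *D, float *inv_diag_D,
                                int m, int n)
{
    for (int j = 0; j < n; j++)
        for (int i = 0; i < m; i++) {
            float acc = C[i + j * m];
            for (int l = 0; l < kp; l++) acc += Ap[i + l * m] * Bp[j + l * n];
            for (int l = 0; l < km; l++) acc -= Am[i + l * m] * Bm[j + l * n];
            D[i + j * m] = acc;
        }
    for (int j = 0; j < n; j++) {                 /* lower Cholesky on n columns */
        for (int l = 0; l < j; l++)
            for (int i = j; i < m; i++)
                D[i + j * m] -= D[i + l * m] * D[j + l * m];
        inv_diag_D[j] = 1.0f / sqrtf(D[j + j * m]);   /* no zero-pivot guard     */
        for (int i = j; i < m; i++)
            D[i + j * m] *= inv_diag_D[j];
    }
}
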
-
-
-
-
-
-//                                1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_24x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_24x4_lib8
-	.type kernel_ssyrk_nt_l_24x4_lib8, @function
-kernel_ssyrk_nt_l_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_24x4_lib8
-_kernel_ssyrk_nt_l_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_24x4_lib8
-	.def kernel_ssyrk_nt_l_24x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_24x4_lib8, .-kernel_ssyrk_nt_l_24x4_lib8
-#endif
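
kernel_ssyrk_nt_l_24x4_lib8 above (and the 24x4_vs / 20x4 variants that follow) implement the usual BLAS update D = beta*C + alpha*A*B^T restricted to the lower triangle of the block. A small C sketch of that update, assuming the lib8 panel-major packing I infer from the 8*sd*sizeof(float) stride handling (element (i, l) at base[(i/8)*8*sd + 8*l + i%8]); the IDX8 helper and all names are illustrative, and the _vs variants additionally clip the stored rows and columns to km/kn.

/* Reference sketch (illustrative, not the BLASFEO API).  Operands are packed
   in 8-row panels: element (i, l) of A is A[IDX8(i, l, sda)], B is a single
   8-row panel so B[8*l + j], and C/D use sdc/sdd the same way as A.          */
#define IDX8(i, l, sd) (((i) / 8) * 8 * (sd) + 8 * (l) + (i) % 8)

static void ssyrk_nt_l_24x4_ref(int k, float alpha, const float *A, int sda,
                                const float *B, float beta, const float *C,
                                int sdc, float *D, int sdd)
{
    for (int j = 0; j < 4; j++)
        for (int i = j; i < 24; i++) {            /* lower triangle only */
            float acc = 0.0f;
            for (int l = 0; l < k; l++)
                acc += A[IDX8(i, l, sda)] * B[8 * l + j];
            D[IDX8(i, j, sdd)] = beta * C[IDX8(i, j, sdc)] + alpha * acc;
        }
}
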
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_24x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_24x4_vs_lib8
-	.type kernel_ssyrk_nt_l_24x4_vs_lib8, @function
-kernel_ssyrk_nt_l_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_24x4_vs_lib8
-_kernel_ssyrk_nt_l_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_24x4_vs_lib8
-	.def kernel_ssyrk_nt_l_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_24x4_vs_lib8, .-kernel_ssyrk_nt_l_24x4_vs_lib8
-#endif
-
-
-
-
-
-//                                1      2             3         4        5         6            7         8        9         10
-// void kernel_ssyrk_nt_l_20x4_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_20x4_lib8
-	.type kernel_ssyrk_nt_l_20x4_lib8, @function
-kernel_ssyrk_nt_l_20x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_20x4_lib8
-_kernel_ssyrk_nt_l_20x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_20x4_lib8
-	.def kernel_ssyrk_nt_l_20x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_20x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_20x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_20x4_lib8, .-kernel_ssyrk_nt_l_20x4_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5         6            7         8        9         10       11      12
-// void kernel_ssyrk_nt_l_20x4_vs_lib8(int k, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_20x4_vs_lib8
-	.type kernel_ssyrk_nt_l_20x4_vs_lib8, @function
-kernel_ssyrk_nt_l_20x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_20x4_vs_lib8
-_kernel_ssyrk_nt_l_20x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_20x4_vs_lib8
-	.def kernel_ssyrk_nt_l_20x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_20x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// call inner sgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG5, %r13  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_kernel_gemm_add_nt_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	%rsi, %r10 // alpha
-	movq	ARG6, %r11 // beta
-	movq	ARG7, %r12   // C
-	movl	ARG8, %r13d // sdc
-	sall	$5, %r13d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_24X4_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_scale_ab_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movl	ARG10, %r11d // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG11, %r12 // km
-	movq	ARG12, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_20X4_VS_LIB8
-#else
-#if defined(OS_LINUX)
-	call inner_store_l_20x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_20x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_20x4_vs_lib8, .-kernel_ssyrk_nt_l_20x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   1      2             3         4        5            6         7        8         9
-// void kernel_strmm_nn_rl_24x4_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_24x4_lib8
-	.type kernel_strmm_nn_rl_24x4_lib8, @function
-kernel_strmm_nn_rl_24x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_24x4_lib8
-_kernel_strmm_nn_rl_24x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_24x4_lib8
-	.def kernel_strmm_nn_rl_24x4_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_24x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_24x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-	// call inner sgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_24x4_lib8, .-kernel_strmm_nn_rl_24x4_lib8
-#endif
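
kernel_strmm_nn_rl_24x4_lib8 above (and its _vs variant below) apply a lower-triangular B from the right: the trmm edge handles the first, partly masked panel of B (controlled by offsetB) and the nn kernel finishes the rest. Ignoring the panel packing and the offsetB alignment, and with illustrative names and flat column-major indexing, the computed block is:

/* Reference sketch only: B lower triangular, so column j of the 4-column
   block receives contributions from rows l >= j of B.                       */
static void strmm_nn_rl_24x4_ref(int k, float alpha,
                                 const float *A,  /* 24 x k, A[i + 24*l]     */
                                 const float *B,  /* k x 4,  B[l + k*j]      */
                                 float *D)        /* 24 x 4, D[i + 24*j]     */
{
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 24; i++) {
            float acc = 0.0f;
            for (int l = j; l < k; l++)           /* B[l][j] == 0 for l < j  */
                acc += A[i + 24 * l] * B[l + k * j];
            D[i + 24 * j] = alpha * acc;
        }
}
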
-
-
-
-
-
-//                                      1      2             3         4        5            6         7        8         9        10      11
-// void kernel_strmm_nn_rl_24x4_vs_lib8(int k, float *alpha, float *A, int sda, int offsetB, float *B, int sdb, float *D, int sdd, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_24x4_vs_lib8
-	.type kernel_strmm_nn_rl_24x4_vs_lib8, @function
-kernel_strmm_nn_rl_24x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_24x4_vs_lib8
-_kernel_strmm_nn_rl_24x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_24x4_vs_lib8
-	.def kernel_strmm_nn_rl_24x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_24x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-	vmovaps	%ymm0, %ymm8
-	vmovaps	%ymm0, %ymm9
-	vmovaps	%ymm0, %ymm10
-	vmovaps	%ymm0, %ymm11
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // sda
-	sall	$5, %r12d // 8*sda*sizeof(float)
-	movq	ARG6, %r13 // B
-	movq	ARG7, %r14 // sdb
-	sall	$5, %r14d // 8*sdb*sizeof(float)
-	movq	ARG5, %r15 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_24x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-	// call inner sgemm kernel nn after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_24x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_24X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_24x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_24x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG8, %r10 // D
-	movq	ARG9, %r11 // sdd
-	sall	$5, %r11d // 8*sdd*sizeof(float)
-	movq	ARG10, %r12 // km
-	movq	ARG11, %r13 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_24X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_24x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_24x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_24x4_vs_lib8, .-kernel_strmm_nn_rl_24x4_vs_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#endif
-
-#if defined(OS_LINUX)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC04: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
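
The .long tables above are IEEE-754 single-precision bit patterns written out as integers: .LC00-.LC02 hold the ascending half-integers 0.5 through 23.5 noted in the comments (apparently compared against row counts to build lane masks in the masked-store paths, which are not part of this hunk), .LC03 is eight 1.0f, and .LC04 is six 1.0f followed by two -1.0f. A standalone check of a few of the values (not project code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    const uint32_t bits[] = { 1056964608u, 1069547520u, 1065353216u, 3212836864u };
    for (size_t i = 0; i < sizeof bits / sizeof bits[0]; i++) {
        float f;
        memcpy(&f, &bits[i], sizeof f);              /* reinterpret the bits */
        printf("%u -> %g\n", (unsigned)bits[i], f);  /* 0.5, 1.5, 1, -1      */
    }
    return 0;
}
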
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x4_lib8.S b/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x4_lib8.S
deleted file mode 100644
index 44946f1..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x4_lib8.S
+++ /dev/null
@@ -1,7342 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
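
The STACKSIZE arithmetic above is worth spelling out once. On the SysV (Linux/Mac) path the prologue executes subq $64, %rsp, so the 7th integer argument, which the caller left at 8(%rsp) just above the return address, is afterwards found at 64 + 8 = STACKSIZE + 8(%rsp); that is exactly the ARG7 definition, and each further stack argument adds another 8 bytes. On the Windows path the first stack argument is the 5th one and sits above the 32-byte shadow space, i.e. at 8 + 32 = 40(%rsp) on entry and at STACKSIZE + 40(%rsp) after the larger 256-byte prologue, matching ARG5 there; the extra space is what lets the prologue spill rdi, rsi and xmm6-xmm15, which are callee-saved only in the Windows ABI.
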
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_8x4_lib8, @function
-inner_kernel_gemm_add_nt_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_8x4_lib8:
-#endif
-#endif
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-
-	vxorps			%ymm4, %ymm4, %ymm4
-	vmovaps			%ymm4, %ymm5
-	vmovaps			%ymm4, %ymm6
-	vmovaps			%ymm4, %ymm7
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vmovaps			32(%r11), %ymm14 // A
-	vbroadcastss	4(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovaps			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vmovaps			-32(%r11), %ymm14 // A
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vmovaps			32(%r11), %ymm14 // a
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-	vmovaps			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vmovaps			-32(%r11), %ymm14 // A
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm4
-//	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm5
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm6
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfmadd231ps		%ymm14, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // a
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	subl	$1, %r10d
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	addq	$32, %r11
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	addq	$32, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddps			%ymm4, %ymm0, %ymm0
-	vaddps			%ymm5, %ymm1, %ymm1
-	vaddps			%ymm6, %ymm2, %ymm2
-	vaddps			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	// preload
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	vxorps			%ymm4, %ymm4, %ymm4
-	vmovaps			%ymm4, %ymm5
-	vmovaps			%ymm4, %ymm6
-	vmovaps			%ymm4, %ymm7
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	128(%r12), %ymm14 // B
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	32(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	64(%r12), %ymm14 // B
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	96(%r12), %ymm15 // B
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-//	vbroadcastf128	128(%r12), %ymm14 // B
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-//	vbroadcastf128	32(%r12), %ymm15 // B
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm0, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm1, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm2, %ymm2
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vmulps			%ymm12, %ymm14, %ymm11
-	vaddps			%ymm11, %ymm3, %ymm3
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddps			%ymm4, %ymm0, %ymm0
-	vaddps			%ymm5, %ymm1, %ymm1
-	vaddps			%ymm6, %ymm2, %ymm2
-	vaddps			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_8x4_lib8, .-inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
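
Both the broadcast scheme and the shuffle scheme above accumulate the same 8x4 product; only the way B is fed to the FMAs differs. Stripped of the 4-way unrolling and the split accumulators, the arithmetic is the triple loop below, a sketch under my reading of the register comments (d00..d73 are the 8x4 block, one column per ymm register) with illustrative names; the inner_kernel_gemm_sub_nt_8x4_lib8 routine that follows is the same loop with a subtraction.

/* Reference sketch of the 8x4 NT inner kernel (illustrative).  A and B are
   packed in 8-row panels, so column l of A is A[8*l .. 8*l+7] and step l of
   the transposed B block supplies B[8*l + j] for j = 0..3.                  */
static void gemm_add_nt_8x4_ref(int k, const float *A, const float *B,
                                float acc[8][4])  /* acc[.][j] ~ ymm j       */
{
    for (int l = 0; l < k; l++)
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 8; i++)
                acc[i][j] += A[8 * l + i] * B[8 * l + j];
}
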
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d10 d20 d30 d40 d50 d60 d70]
-// ymm1  <- [d01 d11 d21 d31 d41 d51 d61 d71]
-// ymm2  <- [d02 d12 d22 d32 d42 d52 d62 d72]
-// ymm3  <- [d03 d13 d23 d33 d43 d53 d63 d73]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_8x4_lib8, @function
-inner_kernel_gemm_sub_nt_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		5f // return
-
-	// preload
-	vmovaps 		0(%r11), %ymm13 // A
-
-	vxorps			%ymm4, %ymm4, %ymm4
-	vmovaps			%ymm4, %ymm5
-	vmovaps			%ymm4, %ymm6
-	vmovaps			%ymm4, %ymm7
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vbroadcastss	0(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vmovaps			32(%r11), %ymm14 // A
-	vbroadcastss	4(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovaps			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vmovaps			-32(%r11), %ymm14 // A
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vmovaps			32(%r11), %ymm14 // a
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	subl	$4, %r10d
-
-	// unroll 0
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-	vmovaps			64(%r11), %ymm13 // A
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 0
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vmovaps			-32(%r11), %ymm14 // A
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	addq	$128, %r12
-
-	// unroll 0
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm4
-//	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm5
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm6
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm14, %ymm12, %ymm7
-
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // a
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	subl	$1, %r10d
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	addq	$32, %r11
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	addq	$32, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // reduce
-
-	vaddps			%ymm4, %ymm0, %ymm0
-	vaddps			%ymm5, %ymm1, %ymm1
-	vaddps			%ymm6, %ymm2, %ymm2
-	vaddps			%ymm7, %ymm3, %ymm3
-
-5: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_8x4_lib8, .-inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_8x4_lib8, @function
-inner_kernel_gemm_add_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	vxorps			%ymm4, %ymm4, %ymm4
-	vmovaps			%ymm4, %ymm5
-	vmovaps			%ymm4, %ymm6
-	vmovaps			%ymm4, %ymm7
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-//	prefetcht0	0(%r12, %r13, 1) // software prefetch
-//	prefetcht0	64(%r12, %r13, 1) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-	subl	$8, %r10d
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	addq	$256, %r11
-
-	// unroll 7
-	vmovaps			-32(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-	addq	%r13, %r12 // B += 8*sdb*sizeof(float), next panel
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-	vaddps			%ymm4, %ymm0, %ymm0
-	vaddps			%ymm5, %ymm1, %ymm1
-	vaddps			%ymm6, %ymm2, %ymm2
-	vaddps			%ymm7, %ymm3, %ymm3
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_8x4_lib8, .-inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
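
For the NN case B is walked along its own 8-row panels rather than transposed, which is why this routine also needs the panel stride that the callers pass in r13. A C sketch of the accumulation under that reading (names and indexing are mine; the clean-up loop and the software prefetches are omitted):

/* Reference sketch of the 8x4 NN inner kernel (illustrative).  Element (l, j)
   of B sits at B[(l/8)*8*sdb + 8*j + l%8]: the loop walks l%8 inside one
   panel and hops by 8*sdb floats to the next panel every 8 steps.           */
static void gemm_add_nn_8x4_ref(int k, const float *A, const float *B, int sdb,
                                float acc[8][4])
{
    for (int l = 0; l < k; l++)
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 8; i++)
                acc[i][j] += A[8 * l + i] * B[(l / 8) * 8 * sdb + 8 * j + l % 8];
}
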
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nn_8x4_lib8, @function
-inner_kernel_gemm_sub_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	vxorps			%ymm4, %ymm4, %ymm4
-	vmovaps			%ymm4, %ymm5
-	vmovaps			%ymm4, %ymm6
-	vmovaps			%ymm4, %ymm7
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	prefetcht0	0(%r12, %r13, 1) // software prefetch
-	prefetcht0	64(%r12, %r13, 1) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm3
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm4
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm5
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm6
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm7
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm3
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm4
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm5
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm6
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm7
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm3
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm4
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm5
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm6
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm7
-	subl	$8, %r10d
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm3
-	addq	$256, %r11
-
-	// unroll 7
-	vmovaps			-32(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm4
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm5
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm6
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm7
-	addq	%r13, %r12 // B += 8*sdb*sizeof(float), next panel
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-	vaddps			%ymm4, %ymm0, %ymm0
-	vaddps			%ymm5, %ymm1, %ymm1
-	vaddps			%ymm6, %ymm2, %ymm2
-	vaddps			%ymm7, %ymm3, %ymm3
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfnmadd231ps	%ymm12, %ymm13, %ymm3
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nn_8x4_lib8, .-inner_kernel_gemm_sub_nn_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_8x4_lib8, @function
-inner_edge_gemm_add_nn_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_8x4_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %r15d
-	subl			%r14d, %r15d // 8-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,8-offsetB)
-
-	movl			%r14d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r12 // B+offsetB*sizeof(float)
-
-1:
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-	vbroadcastss	96(%r12), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm3, %ymm3
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r12 // B+1*sizeof(float)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_8x4_lib8, .-inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
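
The edge above only exists to get B back onto a panel boundary when offsetB is non-zero: it peels min(k, 8 - offsetB) steps one column of A at a time, then, if anything is left, moves B to the start of the next panel so the main nn kernel can take over. A sketch of that bookkeeping with illustrative names (the real routine also advances A and the remaining count in registers rather than returning them):

/* Illustrative sketch of the offsetB edge, not the BLASFEO implementation.  */
static int gemm_add_nn_8x4_edge_ref(int k, int offB, const float *A,
                                    const float **Bp, int sdb, float acc[8][4])
{
    if (offB == 0 || k <= 0)
        return k;                            /* nothing to peel               */
    int kend = 8 - offB;
    if (kend > k) kend = k;
    const float *B = *Bp + offB;             /* B + offsetB*sizeof(float)     */
    for (int l = 0; l < kend; l++)
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 8; i++)
                acc[i][j] += A[8 * l + i] * B[8 * j + l];
    if (k > kend)
        *Bp += 8 * sdb;                      /* jump to the next B panel      */
    return k - kend;                         /* steps left for the main loop  */
}
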
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- [d00 d10 d20 d30]
-// ymm1  <- [d01 d11 d21 d31]
-// ymm2  <- [d02 d12 d22 d32]
-// ymm3  <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trmm_nn_rl_8x4_lib8, @function
-inner_edge_trmm_nn_rl_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trmm_nn_rl_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trmm_nn_rl_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trmm_nn_rl_8x4_lib8:
-#endif
-#endif
-	
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	movl		%r14d, %eax
-	sall		$2, %eax // offsetB*sizeof(float)
-	movq		%r12, %rbx // B
-	addq		%rax, %rbx // B+offsetB*sizeof(float)
-
-
-	cmpl	$4, %r14d
-	jg		1f
-
-	// offB==0, 1, 2, 3, 4
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$5, %r14d
-	jg		1f
-
-	// offB==5
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	8(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	40(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	72(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movl		$0, %r14d // offsetB=0
-
-	jmp			0f // end
-
-
-1:
-	cmpl	$6, %r14d
-	jg		1f
-
-	// offB==6
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movq		%r12, %rbx // B
-	movl		$0, %r14d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	64(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	jmp			0f // end
-
-
-1:
-//	cmpl	$7, %r14d
-//	jg		0f
-
-	// offB==7
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addq		%r13, %r12 // B+8*sdb*sizeof(float)
-	movq		%r12, %rbx // B
-	movl		$0, %r14d // offsetB=0
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	0(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	32(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-	cmpl		$0, %r10d
-	jle			0f // end
-
-	vmovaps			0(%r11), %ymm8
-	vbroadcastss	4(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm0, %ymm0
-	vbroadcastss	36(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm1, %ymm1
-	vbroadcastss	68(%rbx), %ymm12
-	vmulps			%ymm8, %ymm12, %ymm15
-	vaddps			%ymm15, %ymm2, %ymm2
-
-	subl		$1, %r10d // k-1
-	addq		$32, %r11 // A+1*bs*sizeof(float)
-	addl		$1, %r14d // offsetB+1
-
-//	jmp			0f // end
-
-
-	// end
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trmm_nn_rl_8x4_lib8, .-inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x4_lib8, @function
-inner_edge_trsm_rlt_inv_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x4_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x4_lib8, .-inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
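The routine above applies a right, lower, transposed, not-unit strsm edge to an 8x4 block kept one column per ymm register. As a reference point, here is a minimal scalar C sketch of the same update, assuming the block and the 4x4 factor E are stored column-major with leading dimension 8 (the lib8 panel layout) and that inv_diag_E holds precomputed reciprocals of E's diagonal; the names are illustrative, not part of the BLASFEO API:

// Scalar sketch only: d is the 8x4 block, e the 4x4 lower factor (lda 8),
// inv_diag_e[j] = 1.0f / e[j + j*8].
static void trsm_rlt_inv_8x4_ref(float *d, const float *e, const float *inv_diag_e)
{
	for (int j = 0; j < 4; j++) {
		for (int i = 0; i < 8; i++)
			d[i + j*8] *= inv_diag_e[j];                 // column j *= 1/e_jj
		for (int jj = j + 1; jj < 4; jj++)               // eliminate e_{jj,j} from later columns
			for (int i = 0; i < 8; i++)
				d[i + jj*8] -= d[i + j*8] * e[jj + j*8];
	}
}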
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x4_vs_lib8, @function
-inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x4_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	cmpl			$2, %r12d
-	jl				0f // ret
-	vbroadcastss	4(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm1, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	cmpl			$3, %r12d
-	jl				0f // ret
-	vbroadcastss	40(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm2, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	cmpl			$4, %r12d
-	jl				0f // ret
-	vbroadcastss	76(%r10), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm12
-	vsubps			%ymm12, %ymm3, %ymm3
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x4_vs_lib8, .-inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_4X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_4x8_vs_lib8, @function
-inner_edge_trsm_rlt_inv_4x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_4x8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_4x8_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %xmm13
-	vmulps			%xmm0, %xmm13, %xmm0
-	vbroadcastss	4(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm1
-	vbroadcastss	8(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm2
-	vbroadcastss	12(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm3
-	vbroadcastss	16(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm4
-	vbroadcastss	20(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm5
-	vbroadcastss	24(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm6
-	vbroadcastss	28(%r10), %xmm13
-	vfnmadd231ps	%xmm0, %xmm13, %xmm7
-
-	vbroadcastss	4(%r11), %xmm13
-	vmulps			%xmm1, %xmm13, %xmm1
-	vbroadcastss	40(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm2
-	vbroadcastss	44(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm3
-	vbroadcastss	48(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm4
-	vbroadcastss	52(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm5
-	vbroadcastss	56(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm6
-	vbroadcastss	60(%r10), %xmm13
-	vfnmadd231ps	%xmm1, %xmm13, %xmm7
-
-	vbroadcastss	8(%r11), %xmm13
-	vmulps			%xmm2, %xmm13, %xmm2
-	vbroadcastss	76(%r10), %xmm13
-	vfnmadd231ps	%xmm2, %xmm13, %xmm3
-	vbroadcastss	80(%r10), %xmm13
-	vfnmadd231ps	%xmm2, %xmm13, %xmm4
-	vbroadcastss	84(%r10), %xmm13
-	vfnmadd231ps	%xmm2, %xmm13, %xmm5
-	vbroadcastss	88(%r10), %xmm13
-	vfnmadd231ps	%xmm2, %xmm13, %xmm6
-	vbroadcastss	92(%r10), %xmm13
-	vfnmadd231ps	%xmm2, %xmm13, %xmm7
-
-	vbroadcastss	12(%r11), %xmm13
-	vmulps			%xmm3, %xmm13, %xmm3
-	vbroadcastss	112(%r10), %xmm13
-	vfnmadd231ps	%xmm3, %xmm13, %xmm4
-	vbroadcastss	116(%r10), %xmm13
-	vfnmadd231ps	%xmm3, %xmm13, %xmm5
-	vbroadcastss	120(%r10), %xmm13
-	vfnmadd231ps	%xmm3, %xmm13, %xmm6
-	vbroadcastss	124(%r10), %xmm13
-	vfnmadd231ps	%xmm3, %xmm13, %xmm7
-
-	vbroadcastss	16(%r11), %xmm13
-	vmulps			%xmm4, %xmm13, %xmm4
-	cmpl			$6, %r12d
-	jl				0f // ret
-	vbroadcastss	148(%r10), %xmm13
-	vfnmadd231ps	%xmm4, %xmm13, %xmm5
-	vbroadcastss	152(%r10), %xmm13
-	vfnmadd231ps	%xmm4, %xmm13, %xmm6
-	vbroadcastss	156(%r10), %xmm13
-	vfnmadd231ps	%xmm4, %xmm13, %xmm7
-
-	vbroadcastss	20(%r11), %xmm13
-	vmulps			%xmm5, %xmm13, %xmm5
-	cmpl			$7, %r12d
-	jl				0f // ret
-	vbroadcastss	184(%r10), %xmm13
-	vfnmadd231ps	%xmm5, %xmm13, %xmm6
-	vbroadcastss	188(%r10), %xmm13
-	vfnmadd231ps	%xmm5, %xmm13, %xmm7
-
-	vbroadcastss	24(%r11), %xmm13
-	vmulps			%xmm6, %xmm13, %xmm6
-	cmpl			$8, %r12d
-	jl				0f // ret
-	vbroadcastss	220(%r10), %xmm13
-	vfnmadd231ps	%xmm6, %xmm13, %xmm7
-
-	vbroadcastss	28(%r11), %xmm13
-	vmulps			%xmm7, %xmm13, %xmm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_4x8_vs_lib8, .-inner_edge_trsm_rlt_inv_4x8_vs_lib8
-#endif
-#endif
-
-
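Unlike the 8x4 routine above, which spells the update out as vmulps followed by vsubps, this 4x8 variant uses vfnmadd231ps, a fused negated multiply-add into the destination. A minimal intrinsics illustration of that one step (assumes a compiler with FMA3 enabled, e.g. -mfma; the helper name is made up):

#include <immintrin.h>

// One vfnmadd231ps: dst = dst - a*b, computed fused.
static inline __m128 fnmadd_step(__m128 dst, __m128 a, __m128 b)
{
	return _mm_fnmadd_ps(a, b, dst);
}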
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x4_lib8, @function
-inner_edge_potrf_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x4_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x4_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovss		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x4_lib8, .-inner_edge_potrf_8x4_lib8
-#endif
-#endif
-
-
-
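A minimal scalar C sketch of what this Cholesky edge does on the 8x4 panel (one column per ymm register in the kernel): for each of the four leading columns it takes 1/sqrt of the diagonal entry, falls back to 0.0 when the entry is not positive exactly as the jbe branches do, stores that reciprocal into inv_diag, scales the column and updates the trailing columns. Column-major storage with leading dimension 8 is assumed and the names are illustrative:

#include <math.h>

static void potrf_8x4_ref(float *d, float *inv_diag_d)
{
	for (int j = 0; j < 4; j++) {
		float djj = d[j + j*8];
		float inv = (djj > 0.0f) ? 1.0f / sqrtf(djj) : 0.0f;  // 0.0 fallback
		inv_diag_d[j] = inv;
		for (int i = 0; i < 8; i++)
			d[i + j*8] *= inv;                                 // scale column j
		for (int jj = j + 1; jj < 4; jj++)
			for (int i = 0; i < 8; i++)
				d[i + jj*8] -= d[i + j*8] * d[jj + j*8];       // trailing update
	}
}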
-
-
-// common inner routine with file scope
-//
-// cholesky factorization gen
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x4_vs_lib8, @function
-inner_edge_potrf_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x4_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss		%xmm0, %xmm0, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-2:
-	vmovss		%xmm13, 0(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm0
-	cmpl		$2, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps	$0x55, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm1, %ymm1
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm0, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0x55, %xmm1, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-4:
-	vmovss		%xmm13, 4(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm1
-	cmpl		$3, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps	$0xaa, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm2, %ymm2
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm1, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xaa, %xmm2, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-6:
-	vmovss		%xmm13, 8(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm2
-	cmpl		$4, %r11d
-	jl			0f // ret
-	vperm2f128	$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps	$0xff, %ymm11, %ymm13
-	vmulps		%ymm2, %ymm13, %ymm12
-	vsubps		%ymm12, %ymm3, %ymm3
-
-
-	vpermilps	$0xff, %xmm3, %xmm13
-	vucomiss	%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss		%xmm13, %xmm13, %xmm13
-	vdivss		%xmm13, %xmm14, %xmm13
-8:
-	vmovss		%xmm13, 12(%r10)
-	vpermilps	$0x00, %xmm13, %xmm13
-	vinsertf128	$0x1, %xmm13, %ymm13, %ymm13
-	vmulps		%ymm3, %ymm13, %ymm3
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x4_vs_lib8, .-inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_lib8, @function
-inner_scale_ab_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_lib8, .-inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
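This is the usual GEMM epilogue acc = alpha*acc + beta*C; the vucomiss against 0.0 skips the loads from C entirely when beta is zero, so C may be left untouched in that case. A scalar sketch under the same assumptions (8x4 column-major block, leading dimension 8; names illustrative):

static void scale_ab_8x4_ref(const float *alpha, const float *beta,
                             const float *C, float acc[8*4])
{
	for (int i = 0; i < 8*4; i++)
		acc[i] *= *alpha;
	if (*beta != 0.0f)                       // beta == 0: C is never read
		for (int j = 0; j < 4; j++)
			for (int i = 0; i < 8; i++)
				acc[i + j*8] += *beta * C[i + j*8];
}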
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_lib8, @function
-inner_tran_scale_ab_4x8_lib8:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_lib8; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// transpose
-	vunpcklps	%ymm1, %ymm0, %ymm5
-	vunpckhps	%ymm1, %ymm0, %ymm4
-	vunpcklps	%ymm3, %ymm2, %ymm7
-	vunpckhps	%ymm3, %ymm2, %ymm6
-
-	vunpcklpd	%ymm7, %ymm5, %ymm0
-	vunpckhpd	%ymm7, %ymm5, %ymm1
-	vunpcklpd	%ymm6, %ymm4, %ymm2
-	vunpckhpd	%ymm6, %ymm4, %ymm3
-
-	vextractf128 $0x1, %ymm0, %xmm4
-	vextractf128 $0x1, %ymm1, %xmm5
-	vextractf128 $0x1, %ymm2, %xmm6
-	vextractf128 $0x1, %ymm3, %xmm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm0
-	vmovaps		32(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm1
-	vmovaps		64(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm2
-	vmovaps		96(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm3
-	vmovaps		128(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm4
-	vmovaps		160(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm5
-	vmovaps		192(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm6
-	vmovaps		224(%r12), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_lib8, .-inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
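The vunpcklps/vunpckhps, vunpcklpd/vunpckhpd and vextractf128 sequence above is an 8x4-to-4x8 transpose: four 8-float columns become eight 4-float rows. A minimal AVX intrinsics sketch of just that shuffle network (illustrative; the pd unpacks act on the same float data reinterpreted as 64-bit lanes, hence the casts):

#include <immintrin.h>

// a..d: four 8-float columns; r[0..7]: the eight 4-float rows of the transpose.
static void tran_8x4(__m256 a, __m256 b, __m256 c, __m256 d, __m128 r[8])
{
	__m256 lo_ab = _mm256_unpacklo_ps(a, b);   // a0 b0 a1 b1 | a4 b4 a5 b5
	__m256 hi_ab = _mm256_unpackhi_ps(a, b);   // a2 b2 a3 b3 | a6 b6 a7 b7
	__m256 lo_cd = _mm256_unpacklo_ps(c, d);
	__m256 hi_cd = _mm256_unpackhi_ps(c, d);
	__m256 r04 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(lo_ab), _mm256_castps_pd(lo_cd)));
	__m256 r15 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(lo_ab), _mm256_castps_pd(lo_cd)));
	__m256 r26 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(hi_ab), _mm256_castps_pd(hi_cd)));
	__m256 r37 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(hi_ab), _mm256_castps_pd(hi_cd)));
	r[0] = _mm256_castps256_ps128(r04);  r[4] = _mm256_extractf128_ps(r04, 1);
	r[1] = _mm256_castps256_ps128(r15);  r[5] = _mm256_extractf128_ps(r15, 1);
	r[2] = _mm256_castps256_ps128(r26);  r[6] = _mm256_extractf128_ps(r26, 1);
	r[3] = _mm256_castps256_ps128(r37);  r[7] = _mm256_extractf128_ps(r37, 1);
}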
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta (generalized)
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x4_gen_lib8, @function
-inner_scale_ab_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x4_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r13), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x4_gen_lib8, .-inner_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for generic alpha and beta (generalized)
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_AB_4X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_ab_4x8_gen_lib8, @function
-inner_tran_scale_ab_4x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_tran_scale_ab_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_ab_4x8_gen_lib8; .scl 2; .type 32; .endef
-inner_tran_scale_ab_4x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// transpose
-	vunpcklps	%ymm1, %ymm0, %ymm5
-	vunpckhps	%ymm1, %ymm0, %ymm4
-	vunpcklps	%ymm3, %ymm2, %ymm7
-	vunpckhps	%ymm3, %ymm2, %ymm6
-
-	vunpcklpd	%ymm7, %ymm5, %ymm0
-	vunpckhpd	%ymm7, %ymm5, %ymm1
-	vunpcklpd	%ymm6, %ymm4, %ymm2
-	vunpckhpd	%ymm6, %ymm4, %ymm3
-
-	vextractf128 $0x1, %ymm0, %xmm4
-	vextractf128 $0x1, %ymm1, %xmm5
-	vextractf128 $0x1, %ymm2, %xmm6
-	vextractf128 $0x1, %ymm3, %xmm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm0
-	vmovaps		32(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm1
-	vmovaps		64(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm2
-	vmovaps		96(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm3
-	vmovaps		128(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm4
-	vmovaps		160(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm5
-	vmovaps		192(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm6
-	vmovaps		224(%r13), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_ab_4x8_gen_lib8, .-inner_tran_scale_ab_4x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0
-//
-// input arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_8x4_lib8, @function
-inner_scale_a0_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_a0_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_8x4_lib8; .scl 2; .type 32; .endef
-inner_scale_a0_8x4_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_8x4_lib8, .-inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x4_lib8, @function
-inner_scale_11_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x4_lib8; .scl 2; .type 32; .endef
-inner_scale_11_8x4_lib8:
-#endif
-#endif
-	
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovaps		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovaps		LC03(%rip), %ymm14
-#endif
-
-	vmovaps		0(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x4_lib8, .-inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// transpose and scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_TRAN_SCALE_11_4X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_tran_scale_11_4x8_lib8, @function
-inner_tran_scale_11_4x8_lib8:
-#elif defined(OS_MAC)
-_inner_tran_scale_11_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_tran_scale_11_4x8_lib8; .scl 2; .type 32; .endef
-inner_tran_scale_11_4x8_lib8:
-#endif
-#endif
-	
-	// transpose
-	vunpcklps	%ymm1, %ymm0, %ymm5
-	vunpckhps	%ymm1, %ymm0, %ymm4
-	vunpcklps	%ymm3, %ymm2, %ymm7
-	vunpckhps	%ymm3, %ymm2, %ymm6
-
-	vunpcklpd	%ymm7, %ymm5, %ymm0
-	vunpckhpd	%ymm7, %ymm5, %ymm1
-	vunpcklpd	%ymm6, %ymm4, %ymm2
-	vunpckhpd	%ymm6, %ymm4, %ymm3
-
-	vextractf128 $0x1, %ymm0, %xmm4
-	vextractf128 $0x1, %ymm1, %xmm5
-	vextractf128 $0x1, %ymm2, %xmm6
-	vextractf128 $0x1, %ymm3, %xmm7
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovaps		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovaps		LC03(%rip), %ymm14
-#endif
-
-	vmovaps		0(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm0
-	vmovaps		32(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm1
-	vmovaps		64(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm2
-	vmovaps		96(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm3
-	vmovaps		128(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm4
-	vmovaps		160(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm5
-	vmovaps		192(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm6
-	vmovaps		224(%r10), %xmm15
-	vfmadd231ps	%xmm15, %xmm14, %xmm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_tran_scale_11_4x8_lib8, .-inner_tran_scale_11_4x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for alpha=1.0 and beta=1.0 (generalized)
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x4_gen_lib8, @function
-inner_scale_11_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_11_8x4_gen_lib8:
-#endif
-#endif
-	
-
-	// offset==0
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovaps		.LC03(%rip), %ymm14
-#elif defined(OS_MAC)
-	vmovaps		LC03(%rip), %ymm14
-#endif
-
-	vmovaps		0(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r11), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x4_gen_lib8, .-inner_scale_11_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_lib8, @function
-inner_blend_scale_ab_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vmulps		%ymm15, %ymm14, %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_lib8, .-inner_blend_scale_ab_8x4_lib8
-#endif
-#endif
-
-
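Per the register layout documented in the header above, the nt micro-kernel leaves each 4x4 tile "rotated" across the accumulators: in lane i, accumulator r holds d[i][i ^ x_r] with (x_0, x_1, x_2, x_3) = (0, 1, 3, 2), repeated for rows 4..7 in the upper lane. The two rounds of vblendps put each column back into its own register before the alpha/beta scaling. A tiny scalar sketch of that un-blend, derived from the documented layout (names illustrative):

// acc[r][i] holds d[i][i ^ xor_of_reg[r]]; recover the plain 4x4 tile.
static void unblend_4x4(const float acc[4][4], float d[4][4])
{
	static const int xor_of_reg[4] = {0, 1, 3, 2};
	for (int r = 0; r < 4; r++)
		for (int i = 0; i < 4; i++)
			d[i][i ^ xor_of_reg[r]] = acc[r][i];
}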
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x4_gen_lib8, @function
-inner_blend_scale_ab_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	// alpha
-	vbroadcastss	0(%r10), %ymm15
-
-	vmulps		%ymm0, %ymm15, %ymm0
-	vmulps		%ymm1, %ymm15, %ymm1
-	vmulps		%ymm2, %ymm15, %ymm2
-	vmulps		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vmulps		%ymm12, %ymm15, %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x4_gen_lib8, .-inner_blend_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_lib8, @function
-inner_blend_scale_11_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_lib8, .-inner_blend_scale_11_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for alpha=1.0 and beta=1.0 (generalized)
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x4_gen_lib8, @function
-inner_blend_scale_11_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x4_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm8 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm9 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm10
-	vblendps	$0x55, %ymm3, %ymm2, %ymm11
-
-	vblendps	$0xcc, %ymm11, %ymm8, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm11, %ymm8, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm10, %ymm9, %ymm1
-	vblendps	$0x33, %ymm10, %ymm9, %ymm3
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %r15 // C0
-	addq	%r12, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x4_gen_lib8, .-inner_blend_scale_11_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_lib8, @function
-inner_store_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_lib8, .-inner_store_8x4_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_lib8, @function
-inner_store_4x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%xmm0,  0(%r10)
-	vmovaps 	%xmm1, 32(%r10)
-	vmovaps 	%xmm2, 64(%r10)
-	vmovaps 	%xmm3, 96(%r10)
-	vmovaps 	%xmm4, 128(%r10)
-	vmovaps 	%xmm5, 160(%r10)
-	vmovaps 	%xmm6, 192(%r10)
-	vmovaps 	%xmm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_lib8, .-inner_store_4x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_vs_lib8, @function
-inner_store_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%ymm14, %ymm12, %ymm14
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm14,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm1, %ymm14, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm2, %ymm14, 64(%r10)
-	je			0f // end
-	vmaskmovps	%ymm3, %ymm14, 96(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_vs_lib8, .-inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
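The row mask above is built arithmetically: km is converted to float, broadcast, and subtracted from an ascending constant (.LC00), so the sign bit of lane i ends up set exactly when i < km, and vmaskmovps then writes only those rows. An intrinsics sketch of one masked column store, assuming .LC00 holds {0.5, 1.5, ..., 7.5} (half offsets keep the comparison strict for integer km; the helper name is made up):

#include <immintrin.h>

// Store only the first km lanes of col to dst (0 <= km <= 8).
static void store_col_vs(float *dst, __m256 col, int km)
{
	const __m256 idx = _mm256_setr_ps(0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f);
	__m256 kmv  = _mm256_set1_ps((float)km);
	__m256 mask = _mm256_sub_ps(idx, kmv);                 // sign bit set where i < km
	_mm256_maskstore_ps(dst, _mm256_castps_si256(mask), col);
}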
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_vs_lib8, @function
-inner_store_4x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm14, %xmm14
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vsubps		%xmm14, %xmm12, %xmm14
-
-	// offset==0
-	vmaskmovps	%xmm0, %xmm14,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm1, %xmm14, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm2, %xmm14, 64(%r10)
-	cmpl		$4, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm3, %xmm14, 96(%r10)
-	cmpl		$5, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm4, %xmm14, 128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm5, %xmm14, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovps	%xmm6, %xmm14, 192(%r10)
-	je			0f // end
-	vmaskmovps	%xmm7, %xmm14, 224(%r10)
-	//
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_vs_lib8, .-inner_store_4x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x4_gen_lib8, @function
-inner_store_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_8x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm2, %ymm15, 64(%r11)
-	je			7f // end
-	vmaskmovps	%ymm3, %ymm15, 96(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x4_gen_lib8, .-inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
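In this generalized store the row window [m0, m1) comes from two index-versus-bound subtractions whose sign bits are ANDed into one mask, while the column window [n0, n1) is realized by rotating the registers and advancing the D pointer before the masked stores; the offset>0 paths are still TODO stubs. A scalar sketch of the offset==0 behaviour (column-major block, leading dimension 8; names illustrative):

static void store_8x4_gen_ref(float *D, const float src[8*4],
                              int m0, int m1, int n0, int n1)
{
	if (n1 > 4) n1 = 4;                      // at most 4 columns in this kernel
	if (m1 > 8) m1 = 8;
	for (int j = n0; j < n1; j++)
		for (int i = m0; i < m1; i++)
			D[i + j*8] = src[i + j*8];
}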
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x8_gen_lib8, @function
-inner_store_4x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_4x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %xmm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %xmm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%xmm12, %xmm14, %xmm14
-	vsubps		%xmm15, %xmm12, %xmm15
-	vandps		%xmm14, %xmm15, %xmm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	vmovaps		%xmm6, %xmm5
-	vmovaps		%xmm7, %xmm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	vmovaps		%xmm6, %xmm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	vmovaps		%xmm5, %xmm4
-	addq		$32, %r11
-
-	cmpl	$3, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	vmovaps		%xmm4, %xmm3
-	addq		$32, %r11
-
-	cmpl	$4, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	vmovaps		%xmm3, %xmm2
-	addq		$32, %r11
-
-	cmpl	$5, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	vmovaps		%xmm2, %xmm1
-	addq		$32, %r11
-
-	cmpl	$6, %r15d
-	jle		0f
-
-	vmovaps		%xmm1, %xmm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%xmm0, %xmm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm1, %xmm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm2, %xmm15, 64(%r11)
-	cmpl		$4, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm3, %xmm15, 96(%r11)
-	cmpl		$5, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm4, %xmm15, 128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm5, %xmm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmaskmovps	%xmm6, %xmm15, 192(%r11)
-	je			7f // end
-	vmaskmovps	%xmm7, %xmm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x8_gen_lib8, .-inner_store_4x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_lib8, @function
-inner_store_l_8x4_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_lib8:
-#endif
-#endif
-	
-	vmovaps 	32(%r10), %ymm12
-	vmovaps 	64(%r10), %ymm13
-	vmovaps 	96(%r10), %ymm14
-
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vblendps	$0x3, %ymm13, %ymm2, %ymm2
-	vblendps	$0x7, %ymm14, %ymm3, %ymm3
-
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_lib8, .-inner_store_l_8x4_lib8
-#endif
-#endif
-
-
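The lower store reloads columns 1..3 of D and vblendps-es their above-diagonal entries back into the new data, so only the lower triangle of the leading 4x4 (and everything below it in the 8-row panel) is overwritten. A scalar sketch (column-major, leading dimension 8; names illustrative):

static void store_l_8x4_ref(float *D, const float src[8*4])
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 8; i++)
			if (i >= j)                      // keep D's entries above the diagonal
				D[i + j*8] = src[i + j*8];
}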
-
-
-
-// common inner routine with file scope
-//
-// store lower vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_vs_lib8, @function
-inner_store_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	cmpl		$2, %r12d
-	jl			0f // end
-	vmovaps 	32(%r10), %ymm12
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15, 32(%r10)
-	cmpl		$3, %r12d
-	jl			0f // end
-	vmovaps 	64(%r10), %ymm12
-	vblendps	$0x3, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15, 64(%r10)
-	je			0f // end
-	vmovaps 	96(%r10), %ymm12
-	vblendps	$0x7, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15, 96(%r10)
-	//
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_vs_lib8, .-inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X4_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x4_gen_lib8, @function
-inner_store_l_8x4_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x4_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x4_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	cmpl		$2, %r15d
-	jl			7f // end
-	vmovaps 	32(%r11), %ymm12
-	vblendps	$0x1, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15, 32(%r11)
-	cmpl		$3, %r15d
-	jl			7f // end
-	vmovaps 	64(%r11), %ymm12
-	vblendps	$0x3, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15, 64(%r11)
-	je			7f // end
-	vmovaps 	96(%r11), %ymm12
-	vblendps	$0x7, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15, 96(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x4_gen_lib8, .-inner_store_l_8x4_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_sgemm_nt_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_lib8
-	.type kernel_sgemm_nt_8x4_lib8, @function
-kernel_sgemm_nt_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_lib8
-_kernel_sgemm_nt_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_lib8
-	.def kernel_sgemm_nt_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_lib8, .-kernel_sgemm_nt_8x4_lib8
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_sgemm_nt_4x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_lib8
-	.type kernel_sgemm_nt_4x8_lib8, @function
-kernel_sgemm_nt_4x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_lib8
-_kernel_sgemm_nt_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_lib8
-	.def kernel_sgemm_nt_4x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_lib8, .-kernel_sgemm_nt_4x8_lib8
-#endif
-
-
-
-
-
-//                               rdi    rsi           rdx       rcx       r8           r9        rsp+8     rsp+16  rsp+24
-// void kernel_sgemm_nt_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_vs_lib8
-	.type kernel_sgemm_nt_8x4_vs_lib8, @function
-kernel_sgemm_nt_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_vs_lib8
-_kernel_sgemm_nt_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_vs_lib8
-	.def kernel_sgemm_nt_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_vs_lib8, .-kernel_sgemm_nt_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  rdi    rsi           rdx       rcx       r8           r9        rsp+8     rsp+16  rsp+24
-// void kernel_sgemm_nt_4x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_vs_lib8
-	.type kernel_sgemm_nt_4x8_vs_lib8, @function
-kernel_sgemm_nt_4x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_vs_lib8
-_kernel_sgemm_nt_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_vs_lib8
-	.def kernel_sgemm_nt_4x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_vs_lib8, .-kernel_sgemm_nt_4x8_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_8x4_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x4_gen_lib8
-	.type kernel_sgemm_nt_8x4_gen_lib8, @function
-kernel_sgemm_nt_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x4_gen_lib8
-_kernel_sgemm_nt_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x4_gen_lib8
-	.def kernel_sgemm_nt_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x4_gen_lib8, .-kernel_sgemm_nt_8x4_gen_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_4x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_4x8_gen_lib8
-	.type kernel_sgemm_nt_4x8_gen_lib8, @function
-kernel_sgemm_nt_4x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_4x8_gen_lib8
-_kernel_sgemm_nt_4x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_4x8_gen_lib8
-	.def kernel_sgemm_nt_4x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_4x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG4, %r11  // B
-	movq	ARG3, %r12  // A
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_AB_4X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_ab_4x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_ab_4x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_4x8_gen_lib8, .-kernel_sgemm_nt_4x8_gen_lib8
-#endif
-
-
-
-
-
-//                               0      1             2         3            4         5        6            7         8
-// void kernel_sgemm_nn_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_lib8
-	.type kernel_sgemm_nn_8x4_lib8, @function
-kernel_sgemm_nn_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_lib8
-_kernel_sgemm_nn_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_lib8
-	.def kernel_sgemm_nn_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_lib8, .-kernel_sgemm_nn_8x4_lib8
-#endif
-
-
-
-
-
-//                               1      2             3         4            5         6        7            8         9         10      11
-// void kernel_sgemm_nn_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_vs_lib8
-	.type kernel_sgemm_nn_8x4_vs_lib8, @function
-kernel_sgemm_nn_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_vs_lib8
-_kernel_sgemm_nn_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_vs_lib8
-	.def kernel_sgemm_nn_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // km
-	movq	ARG11, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_vs_lib8, .-kernel_sgemm_nn_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8        r9       rsp+8        rsp+16    rsp+24    rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_sgemm_nn_8x4_gen_lib8(int k, float *alpha, float *A, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x4_gen_lib8
-	.type kernel_sgemm_nn_8x4_gen_lib8, @function
-kernel_sgemm_nn_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x4_gen_lib8
-_kernel_sgemm_nn_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x4_gen_lib8
-	.def kernel_sgemm_nn_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x4_gen_lib8, .-kernel_sgemm_nn_8x4_gen_lib8
-#endif
-
-
-
-
-
-//                                 1      2             3         4         5            6         7
-// void kernel_ssyrk_nt_l_8x4_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x4_lib8
-	.type kernel_ssyrk_nt_l_8x4_lib8, @function
-kernel_ssyrk_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x4_lib8
-_kernel_ssyrk_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x4_lib8
-	.def kernel_ssyrk_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x4_lib8, .-kernel_ssyrk_nt_l_8x4_lib8
-#endif
-
-
-
-
-
-//                                    1      2             3         4         5            6         7         8       9
-// void kernel_ssyrk_nt_l_8x4_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x4_vs_lib8
-	.type kernel_ssyrk_nt_l_8x4_vs_lib8, @function
-kernel_ssyrk_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x4_vs_lib8
-_kernel_ssyrk_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x4_vs_lib8
-	.def kernel_ssyrk_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x4_vs_lib8, .-kernel_ssyrk_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                      edi    rsi       rdx       ecx       r8        r9        rsp+8     
-// void kernel_strsm_nt_rl_inv_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x4_lib8
-	.type kernel_strsm_nt_rl_inv_8x4_lib8, @function
-kernel_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x4_lib8
-_kernel_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x4_lib8
-	.def kernel_strsm_nt_rl_inv_8x4_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x4_lib8, .-kernel_strsm_nt_rl_inv_8x4_lib8
-#endif
-
-
-
-
-
-//                                      edi    rsi       rdx       ecx       r8        r9        rsp+8     
-// void kernel_strsm_nt_rl_inv_4x8_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_4x8_lib8
-	.type kernel_strsm_nt_rl_inv_4x8_lib8, @function
-kernel_strsm_nt_rl_inv_4x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_4x8_lib8
-_kernel_strsm_nt_rl_inv_4x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_4x8_lib8
-	.def kernel_strsm_nt_rl_inv_4x8_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_4x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG3, %r11
-	movq	ARG2, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	$8, %r12 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_4X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_4x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_4x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_4x8_lib8, .-kernel_strsm_nt_rl_inv_4x8_lib8
-#endif
-
-
-
-
-
-//                                         edi    rsi       rdx       ecx       r8        r9        rsp+8               rsp+16  rsp+24  
-// void kernel_strsm_nt_rl_inv_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x4_vs_lib8
-	.type kernel_strsm_nt_rl_inv_8x4_vs_lib8, @function
-kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x4_vs_lib8
-_kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x4_vs_lib8
-	.def kernel_strsm_nt_rl_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x4_vs_lib8, .-kernel_strsm_nt_rl_inv_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                         edi    rsi       rdx       ecx       r8        r9        rsp+8               rsp+16  rsp+24  
-// void kernel_strsm_nt_rl_inv_4x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_4x8_vs_lib8
-	.type kernel_strsm_nt_rl_inv_4x8_vs_lib8, @function
-kernel_strsm_nt_rl_inv_4x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_4x8_vs_lib8
-_kernel_strsm_nt_rl_inv_4x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_4x8_vs_lib8
-	.def kernel_strsm_nt_rl_inv_4x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_4x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG3, %r11
-	movq	ARG2, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_TRAN_SCALE_11_4X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_tran_scale_11_4x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_tran_scale_11_4x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_4X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_4x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_4x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_4x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_4x8_vs_lib8, .-kernel_strsm_nt_rl_inv_4x8_vs_lib8
-#endif
-
-
-
-
-
-//                                            1       2          3          4       5          6          7         8         9         10
-// void kernel_sgemm_strsm_nt_rl_inv_8x4_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x4_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x4_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x4_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x4_lib8
-#endif
-
-
-
-
-
-//                                               1       2          3          4       5          6          7         8         9         10                 11      12
-// void kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2         3         4         5         6
-// void kernel_spotrf_nt_l_8x4_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x4_lib8
-	.type kernel_spotrf_nt_l_8x4_lib8, @function
-kernel_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x4_lib8
-_kernel_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x4_lib8
-	.def kernel_spotrf_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x4_lib8, .-kernel_spotrf_nt_l_8x4_lib8
-#endif
-
-
-
-
-
-//                                     edi    rsi       rdx       rcx       r8        r9                  rsp+8   rsp+16
-// void kernel_spotrf_nt_l_8x4_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x4_vs_lib8
-	.type kernel_spotrf_nt_l_8x4_vs_lib8, @function
-kernel_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x4_vs_lib8
-_kernel_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x4_vs_lib8
-	.def kernel_spotrf_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // m1 
-	movq	ARG8, %r12 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x4_vs_lib8, .-kernel_spotrf_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                        1       2          3          4       5          6          7         8         9
-// void kernel_ssyrk_spotrf_nt_l_8x4_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x4_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x4_lib8
-_kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x4_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x4_lib8, .-kernel_ssyrk_spotrf_nt_l_8x4_lib8
-#endif
-
-
-
-
-
-//                                           1       2          3          4       5          6          7         8         9                  10      11
-// void kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x4_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x4_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x4_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                  1      2             3         4            5         6        7
-// void kernel_strmm_nn_rl_8x4_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_lib8
-	.type kernel_strmm_nn_rl_8x4_lib8, @function
-kernel_strmm_nn_rl_8x4_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_lib8
-_kernel_strmm_nn_rl_8x4_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_lib8
-	.def kernel_strmm_nn_rl_8x4_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_lib8, .-kernel_strmm_nn_rl_8x4_lib8
-#endif
-
-
-
-
-
-//                                     1      2             3         4            5         6        7         8       9
-// void kernel_strmm_nn_rl_8x4_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_vs_lib8
-	.type kernel_strmm_nn_rl_8x4_vs_lib8, @function
-kernel_strmm_nn_rl_8x4_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_vs_lib8
-_kernel_strmm_nn_rl_8x4_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_vs_lib8
-	.def kernel_strmm_nn_rl_8x4_vs_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_vs_lib8, .-kernel_strmm_nn_rl_8x4_vs_lib8
-#endif
-
-
-
-
-
-//                                      1      2             3         4            5         6        7            8         9        10      11      12      13
-// void kernel_strmm_nn_rl_8x4_gen_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strmm_nn_rl_8x4_gen_lib8
-	.type kernel_strmm_nn_rl_8x4_gen_lib8, @function
-kernel_strmm_nn_rl_8x4_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strmm_nn_rl_8x4_gen_lib8
-_kernel_strmm_nn_rl_8x4_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strmm_nn_rl_8x4_gen_lib8
-	.def kernel_strmm_nn_rl_8x4_gen_lib8; .scl 2; .type 32; .endef
-kernel_strmm_nn_rl_8x4_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRMM_NN_RL_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trmm_nn_rl_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trmm_nn_rl_8x4_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x4_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_8X4_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_8x4_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_8x4_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // offsetD
-	movq	ARG8, %r11 // D
-	movq	ARG9, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG10, %r13 // m0
-	movq	ARG11, %r14 // m1
-	movq	ARG12, %r15 // n0
-	movq	ARG13, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X4_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x4_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x4_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strmm_nn_rl_8x4_gen_lib8, .-kernel_strmm_nn_rl_8x4_gen_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC04: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
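The .long tables above are IEEE-754 single-precision bit patterns written as decimal integers: .LC00 through .LC02 hold the lane-index constants 0.5 ... 23.5 (used when building the row masks for the _vs/_gen stores), .LC03 is 1.0 in every lane, and .LC04 mixes 1.0 and -1.0. A minimal C sketch for decoding such a pattern, purely illustrative and not part of the library:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret a 32-bit integer as the float sharing its bit pattern. */
static float bits_to_float(uint32_t bits)
{
	float f;
	memcpy(&f, &bits, sizeof f); /* well-defined type pun */
	return f;
}

int main(void)
{
	printf("%f\n", bits_to_float(1056964608u)); /* first entry of .LC00: 0.5 */
	printf("%f\n", bits_to_float(1065353216u)); /* .LC03 entries: 1.0 */
	printf("%f\n", bits_to_float(3212836864u)); /* .LC04 sign-flipped entries: -1.0 */
	return 0;
}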
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x8_lib8.S b/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x8_lib8.S
deleted file mode 100644
index 094acda..0000000
--- a/third_party/blasfeo/kernel/avx2/kernel_sgemm_8x8_lib8.S
+++ /dev/null
@@ -1,5395 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp); \
-	vzeroupper;
-#define EPILOGUE \
-	vzeroupper; \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nt_8x8_lib8, @function
-inner_kernel_gemm_add_nt_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nt_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nt_8x8_lib8:
-#endif
-#endif
-	
-	
-// broadcast scheme
-#if 1
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-
-	cmpl	$3, %r10d
-	jle		4f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	0(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vbroadcastss	16(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm4
-	vbroadcastss	20(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm5
-	vbroadcastss	24(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm6
-	vbroadcastss	28(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm13 // A
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vbroadcastss	48(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm4
-	vbroadcastss	52(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm5
-	vbroadcastss	56(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm6
-	vbroadcastss	60(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vmovaps			-64(%r11), %ymm13 // A
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vbroadcastss	80(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm4
-	vbroadcastss	84(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm5
-	vbroadcastss	88(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm6
-	vbroadcastss	92(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm7
-	addq	$128, %r12
-
-	// unroll 3
-	vmovaps			-32(%r11), %ymm13 // A
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vbroadcastss	-16(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm4
-	vbroadcastss	-12(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm5
-	vbroadcastss	-8(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm6
-	vbroadcastss	-4(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // a
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm1
-	subl	$1, %r10d
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm2
-	addq	$32, %r11
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfmadd231ps		%ymm13, %ymm12, %ymm3
-	vbroadcastss	16(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm4
-	vbroadcastss	20(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm5
-	vbroadcastss	24(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm6
-	vbroadcastss	28(%r12), %ymm12 // B
-	vfmadd231ps		%ymm13, %ymm12, %ymm7
-	addq	$32, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-// shuffle scheme
-#else
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	32(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm7
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm3
-	vbroadcastf128	64(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	96(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm7
-	vbroadcastf128	112(%r12), %ymm15 // B
-	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vfmadd231ps		%ymm13, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm3
-	vbroadcastf128	0(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	16(%r12), %ymm15 // B
-	vmovaps			32(%r11), %ymm13 // A
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14 // 01 00 11 10
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14 // 10 11 00 01
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	32(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm7
-	vbroadcastf128	48(%r12), %ymm15 // B
-	vmovaps			64(%r11), %ymm12 // A
-
-
-	// unroll 1
-	vfmadd231ps		%ymm13, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm3
-	vbroadcastf128	64(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-	vbroadcastf128	80(%r12), %ymm15 // B
-	vmovaps			96(%r11), %ymm13 // A
-
-
-	// unroll 2
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-	vbroadcastf128	96(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm12, %ymm15, %ymm7
-	vbroadcastf128	112(%r12), %ymm15 // B
-//	vmovaps			128(%r11), %ymm12 // A
-
-	subl	$4, %r10d
-	addq	$128, %r11
-	addq	$128, %r12
-
-	// unroll 3
-	vfmadd231ps		%ymm13, %ymm14, %ymm0
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm1
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm2
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-
-	vfmadd231ps		%ymm13, %ymm14, %ymm3
-//	vbroadcastf128	0(%r12), %ymm14 // B
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm4
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm5
-	vshufps			$0x4e, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm6
-	vshufps			$0xb1, %ymm15, %ymm15, %ymm15
-
-	vfmadd231ps		%ymm13, %ymm15, %ymm7
-//	vbroadcastf128	16(%r12), %ymm15 // B
-//	vmovaps			32(%r11), %ymm13 // A
-
-
-//	cmpl	$4, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vbroadcastf128	0(%r12), %ymm14 // B
-	vmovaps			0(%r11), %ymm12 // A
-	vfmadd231ps		%ymm12, %ymm14, %ymm0
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm1
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm2
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm3
-
-	vbroadcastf128	16(%r12), %ymm14 // B
-	vfmadd231ps		%ymm12, %ymm14, %ymm4
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm5
-
-	vshufps			$0x4e, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm6
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$32, %r12
-
-	vshufps			$0xb1, %ymm14, %ymm14, %ymm14
-	vfmadd231ps		%ymm12, %ymm14, %ymm7
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#endif
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nt_8x8_lib8, .-inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-
-
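For reference, the broadcast scheme above is a plain 8x8 NT micro-kernel: each step loads one 8-float column of A, broadcasts the eight entries of the matching column of B, and accumulates into eight ymm registers with FMA. A C sketch of the arithmetic it performs (panel-major operands; the function name is chosen here for illustration):

/* acc[i][j] += sum_l A[i + 8*l] * B[j + 8*l]
 * A and B are 8 x k blocks stored in 8-wide panels (lib8 layout). */
static void gemm_add_nt_8x8_ref(int k, const float *A, const float *B,
                                float acc[8][8])
{
	for (int l = 0; l < k; l++)          /* one step of the asm loop           */
		for (int j = 0; j < 8; j++)      /* vbroadcastss of B[j + 8*l]         */
			for (int i = 0; i < 8; i++)  /* one lane of the vmovaps'd A column */
				acc[i][j] += A[i + 8*l] * B[j + 8*l];
}

The _sub_ variant further down is identical except that it uses vfnmadd231ps, i.e. it subtracts A*B^T from the accumulators instead of adding it.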
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33 d40 d51 d62 d73]
-// ymm1  <- [d01 d10 d23 d32 d41 d50 d63 d72]
-// ymm2  <- [d03 d12 d21 d30 d43 d52 d61 d70]
-// ymm3  <- [d02 d13 d20 d31 d42 d53 d60 d71]
-// ymm4  <- []
-// ymm5  <- []
-// ymm6  <- []
-// ymm7  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_sub_nt_8x8_lib8, @function
-inner_kernel_gemm_sub_nt_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_sub_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_sub_nt_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_sub_nt_8x8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// preload
-
-	cmpl	$3, %r10d
-	jle		4f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // A
-	vbroadcastss	0(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	8(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	12(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vbroadcastss	16(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm4
-	vbroadcastss	20(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm5
-	vbroadcastss	24(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm6
-	vbroadcastss	28(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm7
-	subl	$4, %r10d
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm13 // A
-	vbroadcastss	32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	36(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	40(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	44(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vbroadcastss	48(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm4
-	vbroadcastss	52(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm5
-	vbroadcastss	56(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm6
-	vbroadcastss	60(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm7
-	addq	$128, %r11
-
-	// unroll 2
-	vmovaps			-64(%r11), %ymm13 // A
-	vbroadcastss	64(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	68(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	72(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	76(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vbroadcastss	80(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm4
-	vbroadcastss	84(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm5
-	vbroadcastss	88(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm6
-	vbroadcastss	92(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm7
-	addq	$128, %r12
-
-	// unroll 3
-	vmovaps			-32(%r11), %ymm13 // A
-	vbroadcastss	-32(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	-28(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	vbroadcastss	-24(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	vbroadcastss	-20(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vbroadcastss	-16(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm4
-	vbroadcastss	-12(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm5
-	vbroadcastss	-8(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm6
-	vbroadcastss	-4(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm7
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm13 // a
-	vbroadcastss	0(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm0
-	vbroadcastss	4(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm1
-	subl	$1, %r10d
-	vbroadcastss	8(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm2
-	addq	$32, %r11
-	vbroadcastss	12(%r12), %ymm12 // b
-	vfnmadd231ps	%ymm13, %ymm12, %ymm3
-	vbroadcastss	16(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm4
-	vbroadcastss	20(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm5
-	vbroadcastss	24(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm6
-	vbroadcastss	28(%r12), %ymm12 // B
-	vfnmadd231ps	%ymm13, %ymm12, %ymm7
-	addq	$32, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_sub_nt_8x8_lib8, .-inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// r14   <- dirty
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_gemm_add_nn_8x8_lib8, @function
-inner_kernel_gemm_add_nn_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_kernel_gemm_add_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_gemm_add_nn_8x8_lib8; .scl 2; .type 32; .endef
-inner_kernel_gemm_add_nn_8x8_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	cmpl	$8, %r10d
-	jl		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-//	prefetcht0	0(%r12, %r13, 1) // software prefetch
-//	prefetcht0	64(%r12, %r13, 1) // software prefetch
-//	prefetcht0	128(%r12, %r13, 1) // software prefetch
-//	prefetcht0	192(%r12, %r13, 1) // software prefetch
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 1
-	vmovaps			32(%r11), %ymm12 // A[0]
-	vbroadcastss	4(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	36(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	68(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	100(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	132(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	164(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	196(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	228(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 2
-	vmovaps			64(%r11), %ymm12 // A[0]
-	vbroadcastss	8(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	40(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	72(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	104(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	136(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	168(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	200(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	232(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 3
-	vmovaps			96(%r11), %ymm12 // A[0]
-	vbroadcastss	12(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	44(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	76(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	108(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	140(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	172(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	204(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	236(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 4
-	vmovaps			128(%r11), %ymm12 // A[0]
-	vbroadcastss	16(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	48(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	80(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	112(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	144(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	176(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	208(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	240(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	// unroll 5
-	vmovaps			160(%r11), %ymm12 // A[0]
-	vbroadcastss	20(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	52(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	84(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	116(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	148(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	180(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	212(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	244(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-	subl	$8, %r10d
-
-	// unroll 6
-	vmovaps			192(%r11), %ymm12 // A[0]
-	vbroadcastss	24(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	56(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	88(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	120(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	152(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	184(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	216(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	248(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-	addq	$256, %r11
-
-	// unroll 7
-	vmovaps			-32(%r11), %ymm12 // A[0]
-	vbroadcastss	28(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	60(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	92(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	124(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	156(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	188(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	220(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	252(%r12), %ymm13 // B[7]
-	addq	%r12, %r13
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	cmpl	$7, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean1-up loop
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-3: // clean1-up loop
-	
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	subl	$1, %r10d
-	addq	$32, %r11
-	addq	$4, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_gemm_add_nn_8x8_lib8, .-inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-
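The NN variant differs from the NT one only in how B is addressed: B is stored in 8-row panels with a panel stride of sdb, so column j sits 8*j floats from the panel base and row l sits l floats in. A hedged C sketch of the intended indexing (names are illustrative):

/* acc[i][j] += sum_l A[i + 8*l] * B[(l/8)*8*sdb + (l%8) + 8*j]
 * A is an 8 x k panel; B is k x 8, panel-major with panel stride sdb. */
static void gemm_add_nn_8x8_ref(int k, const float *A, const float *B, int sdb,
                                float acc[8][8])
{
	for (int l = 0; l < k; l++) {
		const float *b = B + (l / 8) * 8 * sdb + (l % 8);
		for (int j = 0; j < 8; j++)
			for (int i = 0; i < 8; i++)
				acc[i][j] += A[i + 8*l] * b[8*j];
	}
}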
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(8-offB)
-// r11   <- A+(8-offB)*bs*sizeof(float)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// ymm0  <- []
-// ymm1  <- []
-// ymm2  <- []
-// ymm3  <- []
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_gemm_add_nn_8x8_lib8, @function
-inner_edge_gemm_add_nn_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_edge_gemm_add_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_gemm_add_nn_8x8_lib8; .scl 2; .type 32; .endef
-inner_edge_gemm_add_nn_8x8_lib8:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$8, %ebx
-	subl			%r14d, %ebx // 8-offsetB
-	cmpl			%r10d, %ebx
-//	jle				0f
-//	movl			%r10d, %ebx // kend=min(k,8-offsetB)
-//0:
-	cmovgl			%r10d, %ebx // kend=min(k,8-offsetB)
-
-	movl			%r14d, %eax
-	sall			$2, %eax // offsetB*sizeof(float)
-	addq			%rax, %r12 // B+offsetB*sizeof(float)
-
-	// unroll 0
-	vmovaps			0(%r11), %ymm12 // A[0]
-	vbroadcastss	0(%r12), %ymm13 // B[0]
-	vfmadd231ps		%ymm12, %ymm13, %ymm0
-	vbroadcastss	32(%r12), %ymm13 // B[1]
-	vfmadd231ps		%ymm12, %ymm13, %ymm1
-	vbroadcastss	64(%r12), %ymm13 // B[2]
-	vfmadd231ps		%ymm12, %ymm13, %ymm2
-	vbroadcastss	96(%r12), %ymm13 // B[3]
-	vfmadd231ps		%ymm12, %ymm13, %ymm3
-	vbroadcastss	128(%r12), %ymm13 // B[4]
-	vfmadd231ps		%ymm12, %ymm13, %ymm4
-	vbroadcastss	160(%r12), %ymm13 // B[5]
-	vfmadd231ps		%ymm12, %ymm13, %ymm5
-	vbroadcastss	192(%r12), %ymm13 // B[6]
-	vfmadd231ps		%ymm12, %ymm13, %ymm6
-	vbroadcastss	224(%r12), %ymm13 // B[7]
-	vfmadd231ps		%ymm12, %ymm13, %ymm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %ebx // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(float)
-	addq			$4, %r12 // B+1*sizeof(float)
-
-	cmpl			$0, %ebx
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(float)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_gemm_add_nn_8x8_lib8, .-inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// strsm
-// right
-// lower
-// transposed
-// not-unit
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_trsm_rlt_inv_8x8_vs_lib8, @function
-inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_trsm_rlt_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_trsm_rlt_inv_8x8_vs_lib8:
-#endif
-#endif
-
-	vbroadcastss	0(%r11), %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vbroadcastss	4(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vbroadcastss	8(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vbroadcastss	12(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vbroadcastss	16(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm4
-	vbroadcastss	20(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm5
-	vbroadcastss	24(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm6
-	vbroadcastss	28(%r10), %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm7
-
-	vbroadcastss	4(%r11), %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vbroadcastss	40(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vbroadcastss	44(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vbroadcastss	48(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm4
-	vbroadcastss	52(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm5
-	vbroadcastss	56(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm6
-	vbroadcastss	60(%r10), %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm7
-
-	vbroadcastss	8(%r11), %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vbroadcastss	76(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vbroadcastss	80(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm4
-	vbroadcastss	84(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm5
-	vbroadcastss	88(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm6
-	vbroadcastss	92(%r10), %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm7
-
-	vbroadcastss	12(%r11), %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vbroadcastss	112(%r10), %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm4
-	vbroadcastss	116(%r10), %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm5
-	vbroadcastss	120(%r10), %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm6
-	vbroadcastss	124(%r10), %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm7
-
-	vbroadcastss	16(%r11), %ymm13
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl			$6, %r12d
-	jl				0f // ret
-	vbroadcastss	148(%r10), %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vbroadcastss	152(%r10), %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vbroadcastss	156(%r10), %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-
-	vbroadcastss	20(%r11), %ymm13
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl			$7, %r12d
-	jl				0f // ret
-	vbroadcastss	184(%r10), %ymm13
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vbroadcastss	188(%r10), %ymm13
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-
-	vbroadcastss	24(%r11), %ymm13
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl			$8, %r12d
-	jl				0f // ret
-	vbroadcastss	220(%r10), %ymm13
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-
-	vbroadcastss	28(%r11), %ymm13
-	vmulps			%ymm7, %ymm13, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_trsm_rlt_inv_8x8_vs_lib8, .-inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-
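This edge routine is the right/lower/transposed/not-unit triangular solve applied to the eight accumulator columns. It multiplies by a precomputed reciprocal diagonal (inv_diag_E) instead of dividing, and stops early when kn < 8 since the trailing columns are never stored. A scalar sketch of the same recurrence, illustrative only:

/* Solve X * E^T = X in place. E is lower triangular, stored as an 8x8 panel
 * with element (i,j) at E[i + 8*j]; inv_diag[j] holds 1.0f / E[j + 8*j].
 * x[j][r] is row r of column j of the 8x8 block being solved. */
static void trsm_rlt_inv_8x8_ref(float x[8][8], const float *E,
                                 const float *inv_diag, int kn)
{
	for (int j = 0; j < 8; j++) {
		for (int r = 0; r < 8; r++)
			x[j][r] *= inv_diag[j];          /* scale column j               */
		if (j + 1 >= kn)
			break;                           /* remaining columns not stored */
		for (int i = j + 1; i < 8; i++)      /* eliminate column j from the  */
			for (int r = 0; r < 8; r++)      /* columns to its right         */
				x[i][r] -= E[i + 8*j] * x[j][r];
	}
}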
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_potrf_8x8_vs_lib8, @function
-inner_edge_potrf_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_edge_potrf_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_potrf_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_edge_potrf_8x8_vs_lib8:
-#endif
-#endif
-
-	vxorps	%ymm15, %ymm15, %ymm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovss	.LC03(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovss	LC03(%rip), %xmm14 // 1.0
-#endif
-
-	vmovss			%xmm0, %xmm0, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe			1f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-2:
-	vmovss			%xmm13, 0(%r10)
-	vbroadcastss	%xmm13, %ymm13
-//	vpermilps		$0x00, %xmm13, %xmm13
-//	vinsertf128		$0x1, %xmm13, %ymm13, %ymm13
-	vmulps			%ymm0, %ymm13, %ymm0
-	vperm2f128		$0x00, %ymm0, %ymm0, %ymm11
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm1
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm2
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm3
-	vperm2f128		$0x11, %ymm0, %ymm0, %ymm11
-	vpermilps		$0x00, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm4
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm0, %ymm13, %ymm7
-
-
-	vpermilps		$0x55, %xmm1, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe			3f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-4:
-	vmovss			%xmm13, 4(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm1, %ymm13, %ymm1
-	vperm2f128		$0x00, %ymm1, %ymm1, %ymm11
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm2
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm3
-	vperm2f128		$0x11, %ymm1, %ymm1, %ymm11
-	vpermilps		$0x00, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm4
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm1, %ymm13, %ymm7
-
-
-	vpermilps		$0xaa, %xmm2, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe			5f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-6:
-	vmovss			%xmm13, 8(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm2, %ymm13, %ymm2
-	vperm2f128		$0x00, %ymm2, %ymm2, %ymm11
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm3
-	vperm2f128		$0x11, %ymm2, %ymm2, %ymm11
-	vpermilps		$0x00, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm4
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm2, %ymm13, %ymm7
-
-
-	vpermilps		$0xff, %xmm3, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe			7f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-8:
-	vmovss			%xmm13, 12(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm3, %ymm13, %ymm3
-	vperm2f128		$0x11, %ymm3, %ymm3, %ymm11
-	vpermilps		$0x00, %ymm11, %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm4
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm3, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm4, %xmm13
-//	vpermilps		$0x00, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_44 > 0.0 ?
-	jbe			9f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-10:
-	vmovss			%xmm13, 16(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm4, %ymm13, %ymm4
-	cmpl		$6, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm4, %ymm4, %ymm11
-	vpermilps		$0x55, %ymm11, %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm5
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm4, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm5, %xmm13
-	vpermilps		$0x55, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_55 > 0.0 ?
-	jbe			11f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-12:
-	vmovss			%xmm13, 20(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm5, %ymm13, %ymm5
-	cmpl		$7, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm5, %ymm5, %ymm11
-	vpermilps		$0xaa, %ymm11, %ymm13
-	vfnmadd231ps	%ymm5, %ymm13, %ymm6
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm5, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm6, %xmm13
-	vpermilps		$0xaa, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_66 > 0.0 ?
-	jbe			13f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-14:
-	vmovss			%xmm13, 24(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm6, %ymm13, %ymm6
-	cmpl		$8, %r11d
-	jl			0f // ret
-	vperm2f128		$0x11, %ymm6, %ymm6, %ymm11
-	vpermilps		$0xff, %ymm11, %ymm13
-	vfnmadd231ps	%ymm6, %ymm13, %ymm7
-
-
-	vextractf128	$0x1, %ymm7, %xmm13
-	vpermilps		$0xff, %xmm13, %xmm13
-	vucomiss		%xmm15, %xmm13 // d_77 > 0.0 ?
-	jbe			15f
-	vsqrtss			%xmm13, %xmm13, %xmm13
-	vdivss			%xmm13, %xmm14, %xmm13
-16:
-	vmovss			%xmm13, 28(%r10)
-	vbroadcastss	%xmm13, %ymm13
-	vmulps			%ymm7, %ymm13, %ymm7
-
-
-	jmp		0f
-
-
-1:
-	vxorps	%ymm13, %ymm13, %ymm13
-	jmp		2b
-
-3:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		4b
-
-5:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		6b
-
-7:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		8b
-
-9:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		10b
-
-11:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		12b
-
-13:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		14b
-
-15:
-	vxorpd	%ymm13, %ymm13, %ymm13
-	jmp		16b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_potrf_8x8_vs_lib8, .-inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-
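The potrf edge factorizes the 8x8 block in place: for each column it takes the reciprocal square root of the pivot (writing it to inv_diag_E, and substituting 0.0 when the pivot is not positive, which is what the jbe branches do), scales the column, and eliminates it from the columns to its right; kn again trims the trailing columns. A scalar sketch, illustrative only:

#include <math.h>

/* In-place lower Cholesky of an 8x8 block held column-wise (x[col][row]);
 * inv_diag[j] receives 1/sqrt(pivot), or 0.0f for a non-positive pivot. */
static void potrf_8x8_ref(float x[8][8], float inv_diag[8], int kn)
{
	for (int j = 0; j < 8; j++) {
		float d = x[j][j];
		d = (d > 0.0f) ? 1.0f / sqrtf(d) : 0.0f;
		inv_diag[j] = d;
		for (int r = j; r < 8; r++)
			x[j][r] *= d;                    /* scale column j           */
		if (j + 1 >= kn)
			break;                           /* trailing columns unused  */
		for (int i = j + 1; i < 8; i++)      /* right-looking update     */
			for (int r = i; r < 8; r++)
				x[i][r] -= x[j][i] * x[j][r];
	}
}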
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x8_lib8, @function
-inner_scale_ab_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x8_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-	vmovaps		128(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		160(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		192(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		224(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x8_lib8, .-inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-
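inner_scale_ab_8x8_lib8 applies the standard alpha/beta update to the accumulators before the store: everything is multiplied by alpha, and beta*C is added only when beta is nonzero (the vucomiss/je pair skips the eight loads of C when beta == 0). The equivalent scalar step, as a sketch:

/* acc[i][j] = alpha * acc[i][j] + beta * C[i + 8*j]
 * C is an 8x8 panel-major block; the beta == 0.0 path never reads C. */
static void scale_ab_8x8_ref(float alpha, float beta, const float *C,
                             float acc[8][8])
{
	for (int j = 0; j < 8; j++)
		for (int i = 0; i < 8; i++) {
			acc[i][j] *= alpha;
			if (beta != 0.0f)
				acc[i][j] += beta * C[i + 8*j];
		}
}

The _gen variants below do the same, except that C may start at a row offset inside its panel (the offset > 0 branches are left as TODO in this file).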
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// r15  <- n0 // col index: start from (inc)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_8x8_gen_lib8, @function
-inner_scale_ab_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_ab_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_ab_8x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm3
-	vmovaps		128(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm4
-	vmovaps		160(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm5
-	vmovaps		192(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm6
-	vmovaps		224(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_8x8_gen_lib8, .-inner_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x8_lib8, @function
-inner_scale_11_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x8_lib8; .scl 2; .type 32; .endef
-inner_scale_11_8x8_lib8:
-#endif
-#endif
-	
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vmovaps		128(%r10), %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		160(%r10), %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		192(%r10), %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		224(%r10), %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x8_lib8, .-inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_11_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_11_8x8_gen_lib8, @function
-inner_scale_11_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_scale_11_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_11_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_scale_11_8x8_gen_lib8:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-	vmovaps		128(%r11), %ymm12
-	vaddps		%ymm4, %ymm12, %ymm4
-	vmovaps		160(%r11), %ymm12
-	vaddps		%ymm5, %ymm12, %ymm5
-	vmovaps		192(%r11), %ymm12
-	vaddps		%ymm6, %ymm12, %ymm6
-	vmovaps		224(%r11), %ymm12
-	vaddps		%ymm7, %ymm12, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_11_8x8_gen_lib8, .-inner_scale_11_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x8_lib8, @function
-inner_blend_scale_ab_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x8_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x8_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm14
-
-	vxorps		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovaps		0(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm0
-	vmovaps		32(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm1
-	vmovaps		64(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm2
-	vmovaps		96(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm3
-	vmovaps		128(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm4
-	vmovaps		160(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm5
-	vmovaps		192(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm6
-	vmovaps		224(%r12), %ymm15
-	vfmadd231ps	%ymm15, %ymm14, %ymm7
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x8_lib8, .-inner_blend_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-
-
-
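For reference, a minimal scalar sketch of the scale step this routine performs once the accumulators have been un-shuffled (the blend/permutation detail is register-specific and omitted; the helper name and the acc array are illustrative, not BLASFEO API):

	// acc holds the un-shuffled 8x8 accumulator; C is the 8-row panel at 0(%r12).
	static void scale_ab_8x8_ref(const float *alpha, const float *beta,
	                             const float *C, float acc[8][8])
	{
		int i, j;
		for (j = 0; j < 8; j++)
			for (i = 0; i < 8; i++)
				acc[i][j] *= *alpha;
		if (*beta != 0.0f)                         // mirrors the vucomiss early exit
			for (j = 0; j < 8; j++)
				for (i = 0; i < 8; i++)
					acc[i][j] += *beta * C[j*8 + i];   // column j of the panel
	}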
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12  <- offset
-// r13   <- C
-// r14  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_8x8_gen_lib8, @function
-inner_blend_scale_ab_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_ab_8x8_gen_lib8:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastss	0(%r10), %ymm11
-
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vmulps		%ymm0, %ymm11, %ymm0
-	vmulps		%ymm1, %ymm11, %ymm1
-	vmulps		%ymm2, %ymm11, %ymm2
-	vmulps		%ymm3, %ymm11, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmulps		%ymm4, %ymm11, %ymm4
-	vmulps		%ymm5, %ymm11, %ymm5
-	vmulps		%ymm6, %ymm11, %ymm6
-	vmulps		%ymm7, %ymm11, %ymm7
-
-	// beta
-	vbroadcastss	0(%r11), %ymm15
-
-	vxorps		%ymm14, %ymm14, %ymm14 // 0.0
-
-	vucomiss	%xmm15, %xmm14 // beta==0.0 ?
-	je			3f // end
-
-	cmpl	$0, %r12d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm0
-	vmovaps		32(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm1
-	vmovaps		64(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm2
-	vmovaps		96(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm3
-	vmovaps		128(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm4
-	vmovaps		160(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm5
-	vmovaps		192(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm6
-	vmovaps		224(%r13), %ymm12
-	vfmadd231ps	%ymm12, %ymm15, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r12d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r12d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r12d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_8x8_gen_lib8, .-inner_blend_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- C
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x8_lib8, @function
-inner_blend_scale_11_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x8_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x8_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	vmovaps		0(%r10), %ymm15
-	vaddps		%ymm0, %ymm15, %ymm0
-	vmovaps		32(%r10), %ymm15
-	vaddps		%ymm1, %ymm15, %ymm1
-	vmovaps		64(%r10), %ymm15
-	vaddps		%ymm2, %ymm15, %ymm2
-	vmovaps		96(%r10), %ymm15
-	vaddps		%ymm3, %ymm15, %ymm3
-	vmovaps		128(%r10), %ymm15
-	vaddps		%ymm4, %ymm15, %ymm4
-	vmovaps		160(%r10), %ymm15
-	vaddps		%ymm5, %ymm15, %ymm5
-	vmovaps		192(%r10), %ymm15
-	vaddps		%ymm6, %ymm15, %ymm6
-	vmovaps		224(%r10), %ymm15
-	vaddps		%ymm7, %ymm15, %ymm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x8_lib8, .-inner_blend_scale_11_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for alpha=1.0 and beta=1.0
-//
-// input arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10  <- offset
-// r11   <- C
-// r12  <- 4*sdc*sizeof(double)
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_8x8_gen_lib8, @function
-inner_blend_scale_11_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_blend_scale_11_8x8_gen_lib8:
-#endif
-#endif
-	
-	vblendps	$0xaa, %ymm1, %ymm0, %ymm12 // 1010 1010
-	vblendps	$0x55, %ymm1, %ymm0, %ymm13 // 0101 0101
-	vblendps	$0xaa, %ymm3, %ymm2, %ymm14
-	vblendps	$0x55, %ymm3, %ymm2, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm0 // 1100 1100
-	vblendps	$0x33, %ymm15, %ymm12, %ymm2 // 0011 0011
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm1
-	vblendps	$0x33, %ymm14, %ymm13, %ymm3
-
-	vblendps	$0xaa, %ymm5, %ymm4, %ymm12
-	vblendps	$0x55, %ymm5, %ymm4, %ymm13
-	vblendps	$0xaa, %ymm7, %ymm6, %ymm14
-	vblendps	$0x55, %ymm7, %ymm6, %ymm15
-
-	vblendps	$0xcc, %ymm15, %ymm12, %ymm4
-	vblendps	$0x33, %ymm15, %ymm12, %ymm6
-	vblendps	$0xcc, %ymm14, %ymm13, %ymm5
-	vblendps	$0x33, %ymm14, %ymm13, %ymm7
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-
-	vmovaps		0(%r11), %ymm12
-	vaddps		%ymm0, %ymm12, %ymm0
-	vmovaps		32(%r11), %ymm12
-	vaddps		%ymm1, %ymm12, %ymm1
-	vmovaps		64(%r11), %ymm12
-	vaddps		%ymm2, %ymm12, %ymm2
-	vmovaps		96(%r11), %ymm12
-	vaddps		%ymm3, %ymm12, %ymm3
-	vmovaps		128(%r11), %ymm12
-	vaddps		%ymm4, %ymm12, %ymm4
-	vmovaps		160(%r11), %ymm12
-	vaddps		%ymm5, %ymm12, %ymm5
-	vmovaps		192(%r11), %ymm12
-	vaddps		%ymm6, %ymm12, %ymm6
-	vmovaps		224(%r11), %ymm12
-	vaddps		%ymm7, %ymm12, %ymm7
-
-	jmp		7f
-
-0:
-
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r13, %r15 // C0
-	addq	%r14, %r15 // C1 <- C0 + 4*sdc*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_8x8_gen_lib8, .-inner_blend_scale_11_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_lib8, @function
-inner_store_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps 	%ymm3, 96(%r10)
-	vmovaps 	%ymm4, 128(%r10)
-	vmovaps 	%ymm5, 160(%r10)
-	vmovaps 	%ymm6, 192(%r10)
-	vmovaps 	%ymm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_lib8, .-inner_store_8x8_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_vs_lib8, @function
-inner_store_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	vmaskmovps	%ymm1, %ymm15,  32(%r10)
-	vmaskmovps	%ymm2, %ymm15,  64(%r10)
-	vmaskmovps	%ymm3, %ymm15,  96(%r10)
-	vmaskmovps	%ymm4, %ymm15,  128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmaskmovps	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmaskmovps	%ymm7, %ymm15, 224(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_vs_lib8, .-inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
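A minimal scalar model of the masked "vs" store above, assuming the .LC00 constant holds ascending per-lane indices so the computed mask selects rows 0..km-1 (helper name illustrative): columns 0..4 are always written, columns 5..7 only while the column count kn allows it.

	static void store_8x8_vs_ref(float *D, const float acc[8][8], int km, int kn)
	{
		int i, j;
		for (j = 0; j < 8; j++) {
			if (j >= 5 && kn <= j)              // ymm5..ymm7 are skipped once j >= kn
				break;
			for (i = 0; i < km && i < 8; i++)   // row mask built from km (vmaskmovps)
				D[j*8 + i] = acc[i][j];
		}
	}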
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_8x8_gen_lib8, @function
-inner_store_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_8x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	vmaskmovps	%ymm1, %ymm15,  32(%r11)
-	vmaskmovps	%ymm2, %ymm15,  64(%r11)
-	vmaskmovps	%ymm3, %ymm15,  96(%r11)
-	vmaskmovps	%ymm4, %ymm15,  128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm5, %ymm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmaskmovps	%ymm6, %ymm15, 192(%r11)
-	je			7f // end
-	vmaskmovps	%ymm7, %ymm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_8x8_gen_lib8, .-inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_lib8, @function
-inner_store_l_8x8_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_lib8:
-#endif
-#endif
-	
-	vmovaps 	%ymm0,  0(%r10)
-	vmovaps		32(%r10), %ymm14
-	vblendps	$0x01, %ymm14, %ymm1, %ymm1
-	vmovaps 	%ymm1, 32(%r10)
-	vmovaps		64(%r10), %ymm14
-	vblendps	$0x03, %ymm14, %ymm2, %ymm2
-	vmovaps 	%ymm2, 64(%r10)
-	vmovaps		96(%r10), %ymm14
-	vblendps	$0x07, %ymm14, %ymm3, %ymm3
-	vmovaps 	%ymm3, 96(%r10)
-	vmovaps		128(%r10), %ymm14
-	vblendps	$0x0f, %ymm14, %ymm4, %ymm4
-	vmovaps 	%ymm4, 128(%r10)
-	vmovaps		160(%r10), %ymm14
-	vblendps	$0x1f, %ymm14, %ymm5, %ymm5
-	vmovaps 	%ymm5, 160(%r10)
-	vmovaps		192(%r10), %ymm14
-	vblendps	$0x3f, %ymm14, %ymm6, %ymm6
-	vmovaps 	%ymm6, 192(%r10)
-	vmovaps		224(%r10), %ymm14
-	vblendps	$0x7f, %ymm14, %ymm7, %ymm7
-	vmovaps 	%ymm7, 224(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_lib8, .-inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-
-
-
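A minimal scalar model of the lower-triangular store above (helper name illustrative): in column j the vblendps masks keep rows 0..j-1 of D untouched, so only the diagonal and below are overwritten.

	static void store_l_8x8_ref(float *D, const float acc[8][8])
	{
		int i, j;
		for (j = 0; j < 8; j++)
			for (i = j; i < 8; i++)     // rows on and below the diagonal only
				D[j*8 + i] = acc[i][j];
	}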
-// common inner routine with file scope
-//
-// store lower vs
-//
-// input arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- D
-// r11  <- km
-// r12  <- kn
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_VS_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_vs_lib8, @function
-inner_store_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_vs_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r11d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm15, %ymm12, %ymm15
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r10)
-	vmovaps 	32(%r10), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15,  32(%r10)
-	vmovaps 	64(%r10), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15,  64(%r10)
-	vmovaps 	96(%r10), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15,  96(%r10)
-	vmovaps 	128(%r10), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm4, %ymm4
-	vmaskmovps	%ymm4, %ymm15,  128(%r10)
-	cmpl		$6, %r12d
-	jl			0f // end
-	vmovaps 	160(%r10), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm5, %ymm5
-	vmaskmovps	%ymm5, %ymm15, 160(%r10)
-	cmpl		$7, %r12d
-	jl			0f // end
-	vmovaps 	192(%r10), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm6, %ymm6
-	vmaskmovps	%ymm6, %ymm15, 192(%r10)
-	je			0f // end
-	vmovaps 	224(%r10), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm7, %ymm7
-	vmaskmovps	%ymm7, %ymm15, 224(%r10)
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_vs_lib8, .-inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store lower generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// ymm0 <- []
-// ymm1 <- []
-// ymm2 <- []
-// ymm3 <- []
-// ymm4 <- []
-// ymm5 <- []
-// ymm6 <- []
-// ymm7 <- []
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_8X8_GEN_LIB8
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_8x8_gen_lib8, @function
-inner_store_l_8x8_gen_lib8:
-#elif defined(OS_MAC)
-_inner_store_l_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_8x8_gen_lib8; .scl 2; .type 32; .endef
-inner_store_l_8x8_gen_lib8:
-#endif
-#endif
-	
-	// compute mask for rows
-	vcvtsi2ss	%r13d, %xmm14, %xmm14
-	vcvtsi2ss	%r14d, %xmm15, %xmm15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovups		.LC00(%rip), %ymm12
-#elif defined(OS_MAC)
-	vmovups		LC00(%rip), %ymm12
-#endif
-	vshufps		$0x00, %xmm14, %xmm14, %xmm14
-	vshufps		$0x00, %xmm15, %xmm15, %xmm15
-	vinsertf128	$0x1, %xmm14, %ymm14, %ymm14
-	vinsertf128	$0x1, %xmm15, %ymm15, %ymm15
-	vsubps		%ymm12, %ymm14, %ymm14
-	vsubps		%ymm15, %ymm12, %ymm15
-	vandps		%ymm14, %ymm15, %ymm15
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	vmovaps		%ymm7, %ymm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm2, %ymm1
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	vmovaps		%ymm6, %ymm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovaps		%ymm1, %ymm0
-	vmovaps		%ymm3, %ymm2
-	vmovaps		%ymm4, %ymm3
-	vmovaps		%ymm5, %ymm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$8, %eax
-	jle		0f
-	movl	$8, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	// offset==0
-	vmaskmovps	%ymm0, %ymm15,  0(%r11)
-	vmovaps 	32(%r11), %ymm12
-	vblendps	$0x01, %ymm12, %ymm1, %ymm1
-	vmaskmovps	%ymm1, %ymm15,  32(%r11)
-	vmovaps 	64(%r11), %ymm12
-	vblendps	$0x03, %ymm12, %ymm2, %ymm2
-	vmaskmovps	%ymm2, %ymm15,  64(%r11)
-	vmovaps 	96(%r11), %ymm12
-	vblendps	$0x07, %ymm12, %ymm3, %ymm3
-	vmaskmovps	%ymm3, %ymm15,  96(%r11)
-	vmovaps 	128(%r11), %ymm12
-	vblendps	$0x0f, %ymm12, %ymm4, %ymm4
-	vmaskmovps	%ymm4, %ymm15,  128(%r11)
-	cmpl		$6, %r15d
-	jl			7f // end
-	vmovaps 	160(%r11), %ymm12
-	vblendps	$0x1f, %ymm12, %ymm5, %ymm5
-	vmaskmovps	%ymm5, %ymm15, 160(%r11)
-	cmpl		$7, %r15d
-	jl			7f // end
-	vmovaps 	192(%r11), %ymm12
-	vblendps	$0x3f, %ymm12, %ymm6, %ymm6
-	vmaskmovps	%ymm6, %ymm15, 192(%r11)
-	je			7f // end
-	vmovaps 	224(%r11), %ymm12
-	vblendps	$0x7f, %ymm12, %ymm7, %ymm7
-	vmaskmovps	%ymm7, %ymm15, 224(%r11)
-	//
-	jmp		7f
-
-0:
-	// offset > 0
-	// 1 2 3 4 5 6 7
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$4, %r10d
-	jl		1f
-	jg		2f
-
-	// offset==4
-	// TODO
-	jmp		7f
-
-1:
-	// 1 2 3
-
-	cmpl	$2, %r10d
-	jl		3f
-	jg		4f
-
-	// offset==2
-	// TODO
-	jmp		7f
-
-3:
-	// offset==1
-	// TODO
-	jmp		7f
-
-4:
-	// offset==3
-	// TODO
-	jmp		7f
-
-2:
-	// 5 6 7
-
-	cmpl	$6, %r10d
-	jl		5f
-	jg		6f
-
-	// offset==6
-	// TODO
-	jmp		7f
-
-5:
-	// offset==5
-	// TODO
-	jmp		7f
-
-6:
-	// offset==7
-	// TODO
-	jmp		7f
-
-	// end
-7:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_8x8_gen_lib8, .-inner_store_l_8x8_gen_lib8
-#endif
-#endif
-
-
-
-
-
-//                               1      2             3         4         5            6         7
-// void kernel_sgemm_nt_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_lib8
-	.type kernel_sgemm_nt_8x8_lib8, @function
-kernel_sgemm_nt_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_lib8
-_kernel_sgemm_nt_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_lib8
-	.def kernel_sgemm_nt_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_lib8, .-kernel_sgemm_nt_8x8_lib8
-#endif
-
-
-
-
-
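For orientation, a plain-C sketch of what kernel_sgemm_nt_8x8_lib8 computes, assuming the lib8 panel-major layout where element (i, l) of an 8-row panel lives at ptr[l*8 + i]; this models the semantics only, not the register blocking of the assembly (helper name illustrative):

	// D = alpha * A * B^T + beta * C, all operands stored as 8-row panels.
	static void sgemm_nt_8x8_ref(int k, float alpha, const float *A,
	                             const float *B, float beta,
	                             const float *C, float *D)
	{
		int i, j, l;
		for (j = 0; j < 8; j++)
			for (i = 0; i < 8; i++) {
				float acc = 0.0f;
				for (l = 0; l < k; l++)
					acc += A[l*8 + i] * B[l*8 + j];   // B is used transposed (nt)
				D[j*8 + i] = alpha*acc + beta*C[j*8 + i];
			}
	}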
-//                                  1      2             3         4         5            6         7         8       9
-// void kernel_sgemm_nt_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_vs_lib8
-	.type kernel_sgemm_nt_8x8_vs_lib8, @function
-kernel_sgemm_nt_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_vs_lib8
-_kernel_sgemm_nt_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_vs_lib8
-	.def kernel_sgemm_nt_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_vs_lib8, .-kernel_sgemm_nt_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8           r9           rsp+8     rsp+16   rsp+24       rsp+32    rsp+40   rsp+48  rsp+56  rsp+64  rsp+72
-// void kernel_sgemm_nt_8x8_gen_lib8(int k, float *alpha, float *A, float *B, float *beta, int offsetC, float *C, int sdc, int offsetD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nt_8x8_gen_lib8
-	.type kernel_sgemm_nt_8x8_gen_lib8, @function
-kernel_sgemm_nt_8x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nt_8x8_gen_lib8
-_kernel_sgemm_nt_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nt_8x8_gen_lib8
-	.def kernel_sgemm_nt_8x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nt_8x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nt_8x8_gen_lib8, .-kernel_sgemm_nt_8x8_gen_lib8
-#endif
-
-
-
-
-
-//                               1      2             3         4            5         6        7            8         9
-// void kernel_sgemm_nn_8x8_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_lib8
-	.type kernel_sgemm_nn_8x8_lib8, @function
-kernel_sgemm_nn_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_lib8
-_kernel_sgemm_nn_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_lib8
-	.def kernel_sgemm_nn_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_lib8, .-kernel_sgemm_nn_8x8_lib8
-#endif
-
-
-
-
-
-//                               1      2             3         4            5         6        7            8         9         10      11
-// void kernel_sgemm_nn_8x8_vs_lib8(int k, float *alpha, float *A, int offsetB, float *B, int sdb, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_vs_lib8
-	.type kernel_sgemm_nn_8x8_vs_lib8, @function
-kernel_sgemm_nn_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_vs_lib8
-_kernel_sgemm_nn_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_vs_lib8
-	.def kernel_sgemm_nn_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // km
-	movq	ARG11, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_vs_lib8, .-kernel_sgemm_nn_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                   rdi    rsi           rdx       rcx       r8        r9       rsp+8        rsp+16    rsp+24    rsp+32    rsp+40   rsp+48     rsp+56   rsp+64  rsp+72  rsp+80  rsp+88
-// void kernel_sgemm_nn_8x8_gen_lib8(int k, float *alpha, float *A, int offB, float *B, int sdb, float *beta, int offC, float *C, int sdc, int offD, float *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_nn_8x8_gen_lib8
-	.type kernel_sgemm_nn_8x8_gen_lib8, @function
-kernel_sgemm_nn_8x8_gen_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_nn_8x8_gen_lib8
-_kernel_sgemm_nn_8x8_gen_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_nn_8x8_gen_lib8
-	.def kernel_sgemm_nn_8x8_gen_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_nn_8x8_gen_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 8*sdb*sizeof(float)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NN_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nn_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nn_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12 // offsetC
-	movq	ARG9, %r13 // C
-	movq	ARG10, %r14 // sdc
-	sall	$5, %r14d // 8*sdc*sizeof(float)
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_gen_lib8
-#endif
-#endif
-
-
-	// store n gen
-
-	movq	ARG11, %r10 // offsetD
-	movq	ARG12, %r11 // D
-	movq	ARG13, %r12 // sdd
-	sall	$5, %r12d // 8*sdd*sizeof(float)
-	movq	ARG14, %r13 // m0
-	movq	ARG15, %r14 // m1
-	movq	ARG16, %r15 // n0
-	movq	ARG17, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_GEN_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_gen_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_gen_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_nn_8x8_gen_lib8, .-kernel_sgemm_nn_8x8_gen_lib8
-#endif
-
-
-
-
-
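The nn variants above differ only in how B is traversed; a hedged scalar sketch, assuming the usual panel-major indexing where row r, column j of B lives at B[(r/8)*8*sdb + j*8 + r%8] and offsetB is the starting row inside the first panel (handled by the inner_edge routine in the assembly; helper name illustrative):

	// D = alpha * A * B + beta * C, with B in panel-major storage.
	static void sgemm_nn_8x8_ref(int k, float alpha, const float *A,
	                             int offsetB, const float *B, int sdb,
	                             float beta, const float *C, float *D)
	{
		int i, j, l;
		for (j = 0; j < 8; j++)
			for (i = 0; i < 8; i++) {
				float acc = 0.0f;
				for (l = 0; l < k; l++) {
					int r = offsetB + l;                          // row index into B
					acc += A[l*8 + i] * B[(r/8)*8*sdb + j*8 + r%8];
				}
				D[j*8 + i] = alpha*acc + beta*C[j*8 + i];
			}
	}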
-//                                 rdi    rsi           rdx       rcx       r8           r9        rsp+8
-// void kernel_ssyrk_nt_l_8x8_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x8_lib8
-	.type kernel_ssyrk_nt_l_8x8_lib8, @function
-kernel_ssyrk_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x8_lib8
-_kernel_ssyrk_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x8_lib8
-	.def kernel_ssyrk_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x8_lib8, .-kernel_ssyrk_nt_l_8x8_lib8
-#endif
-
-
-
-
-
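The syrk kernels reuse the same nt product; only the store path changes, writing just the lower triangle. A scalar sketch (helper name illustrative):

	// Lower triangle of D = alpha * A * B^T + beta * C; the strictly upper
	// part of D is left untouched.
	static void ssyrk_nt_l_8x8_ref(int k, float alpha, const float *A,
	                               const float *B, float beta,
	                               const float *C, float *D)
	{
		int i, j, l;
		for (j = 0; j < 8; j++)
			for (i = j; i < 8; i++) {
				float acc = 0.0f;
				for (l = 0; l < k; l++)
					acc += A[l*8 + i] * B[l*8 + j];
				D[j*8 + i] = alpha*acc + beta*C[j*8 + i];
			}
	}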
-//                                    1      2             3         4         5            6         7         8       9
-// void kernel_ssyrk_nt_l_8x8_vs_lib8(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_nt_l_8x8_vs_lib8
-	.type kernel_ssyrk_nt_l_8x8_vs_lib8, @function
-kernel_ssyrk_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_nt_l_8x8_vs_lib8
-_kernel_ssyrk_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_nt_l_8x8_vs_lib8
-	.def kernel_ssyrk_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_8x8_lib8
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km
-	movq	ARG9, %r12 // kn
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_nt_l_8x8_vs_lib8, .-kernel_ssyrk_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                      edi    rsi       rdx       ecx       r8        r9        rsp+8     
-// void kernel_strsm_nt_rl_inv_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x8_lib8
-	.type kernel_strsm_nt_rl_inv_8x8_lib8, @function
-kernel_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x8_lib8
-_kernel_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x8_lib8
-	.def kernel_strsm_nt_rl_inv_8x8_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	$8, %r12 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x8_lib8, .-kernel_strsm_nt_rl_inv_8x8_lib8
-#endif
-
-
-
-
-
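A hedged scalar model of the solve performed above, assuming the kernel's semantics are D * E^T = C - A*B^T with E lower triangular (panel-major, element (i, j) at E[j*8 + i]) and inv_diag_E[j] == 1/E(j, j); W below stands for the already-computed C - A*B^T (names illustrative):

	static void trsm_rlt_inv_8x8_ref(const float W[8][8], const float *E,
	                                 const float *inv_diag_E, float D[8][8])
	{
		int i, j, l;
		for (j = 0; j < 8; j++)                  // forward-substitute column by column
			for (i = 0; i < 8; i++) {
				float t = W[i][j];
				for (l = 0; l < j; l++)
					t -= D[i][l] * E[l*8 + j];   // E(j, l), row j of E
				D[i][j] = t * inv_diag_E[j];     // multiply by the precomputed 1/E(j, j)
			}
	}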
-//                                         edi    rsi       rdx       ecx       r8        r9        rsp+8               rsp+16  rsp+24  
-// void kernel_strsm_nt_rl_inv_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_strsm_nt_rl_inv_8x8_vs_lib8
-	.type kernel_strsm_nt_rl_inv_8x8_vs_lib8, @function
-kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_strsm_nt_rl_inv_8x8_vs_lib8
-_kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_strsm_nt_rl_inv_8x8_vs_lib8
-	.def kernel_strsm_nt_rl_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_strsm_nt_rl_inv_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // m1 
-	movq	ARG9, %r12 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_strsm_nt_rl_inv_8x8_vs_lib8, .-kernel_strsm_nt_rl_inv_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                            1       2          3          4       5          6          7         8         9         10
-// void kernel_sgemm_strsm_nt_rl_inv_8x8_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x8_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x8_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	$8, %r12 // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x8_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x8_lib8
-#endif
-
-
-
-
-
-//                                               1       2          3          4       5          6          7         8         9         10                 11      12
-// void kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-	.type kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8, @function
-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-_kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-	.def kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_TRSM_RLT_INV_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_trsm_rlt_inv_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8, .-kernel_sgemm_strsm_nt_rl_inv_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                  edi    rsi       rdx       rcx       r8        r9
-// void kernel_spotrf_nt_l_8x8_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x8_lib8
-	.type kernel_spotrf_nt_l_8x8_lib8, @function
-kernel_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x8_lib8
-_kernel_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x8_lib8
-	.def kernel_spotrf_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movl	$8, %r11d // n1
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x8_lib8, .-kernel_spotrf_nt_l_8x8_lib8
-#endif
-
-
-
-
-
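A hedged scalar model of the factorization step, assuming W = C - A*B^T (produced by the gemm-sub call above) is symmetric positive definite: the kernel computes the lower-triangular D with D*D^T = W and stores the reciprocal pivots in inv_diag_D (the assembly additionally special-cases a non-positive pivot, omitted here; names illustrative):

	#include <math.h>

	static void potrf_l_8x8_ref(const float W[8][8], float D[8][8],
	                            float *inv_diag_D)
	{
		int i, j, l;
		for (j = 0; j < 8; j++) {
			float d = W[j][j];
			for (l = 0; l < j; l++)
				d -= D[j][l] * D[j][l];          // subtract the already-factored part
			D[j][j] = sqrtf(d);
			inv_diag_D[j] = 1.0f / D[j][j];
			for (i = j + 1; i < 8; i++) {        // column j below the diagonal
				float t = W[i][j];
				for (l = 0; l < j; l++)
					t -= D[i][l] * D[j][l];
				D[i][j] = t * inv_diag_D[j];
			}
		}
	}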
-//                                     edi    rsi       rdx       rcx       r8        r9                  rsp+8   rsp+16
-// void kernel_spotrf_nt_l_8x8_vs_lib8(int k, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_spotrf_nt_l_8x8_vs_lib8
-	.type kernel_spotrf_nt_l_8x8_vs_lib8, @function
-kernel_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_spotrf_nt_l_8x8_vs_lib8
-_kernel_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_spotrf_nt_l_8x8_vs_lib8
-	.def kernel_spotrf_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_spotrf_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // m1 
-	movq	ARG8, %r12 // n1 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_spotrf_nt_l_8x8_vs_lib8, .-kernel_spotrf_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-//                                        1       2          3          4       5          6          7         8         9
-// void kernel_ssyrk_spotrf_nt_l_8x8_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x8_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x8_lib8
-_kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x8_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x8_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorps	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner sgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$8, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x8_lib8, .-kernel_ssyrk_spotrf_nt_l_8x8_lib8
-#endif
-
-
-
-
-
-//                                           1       2          3          4       5          6          7         8         9                  10      11
-// void kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8(int kp, float *Ap, float *Bp, int km, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-	.type kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8, @function
-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_MAC)
-	.globl _kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-_kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#elif defined(OS_WINDOWS)
-	.globl kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-	.def kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8; .scl 2; .type 32; .endef
-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovaps	%ymm0, %ymm1
-	vmovaps	%ymm0, %ymm2
-	vmovaps	%ymm0, %ymm3
-	vmovaps	%ymm0, %ymm4
-	vmovaps	%ymm0, %ymm5
-	vmovaps	%ymm0, %ymm6
-	vmovaps	%ymm0, %ymm7
-
-
-	// call inner sgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_ADD_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_add_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_add_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner sgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_GEMM_SUB_NT_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_gemm_sub_nt_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_kernel_gemm_sub_nt_8x8_lib8
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_11_8X8_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_11_8x8_lib8
-#elif defined(OS_MAC)
-	callq _inner_scale_11_8x8_lib8
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_POTRF_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_potrf_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_edge_potrf_8x8_vs_lib8
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_8X8_VS_LIB8
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_8x8_vs_lib8
-#elif defined(OS_MAC)
-	callq _inner_store_l_8x8_vs_lib8
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8, .-kernel_ssyrk_spotrf_nt_l_8x8_vs_lib8
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC00: // { 7.5 6.5 5.5 4.5 3.5 2.5 1.5 0.5 }
-#endif
-	.long	1056964608
-	.long	1069547520
-	.long	1075838976
-	.long	1080033280
-	.long	1083179008
-	.long	1085276160
-	.long	1087373312
-	.long	1089470464
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC01: // { 15.5 14.5 13.5 12.5 11.5 10.5 9.5 8.5 }
-#endif
-	.long	1091043328
-	.long	1092091904
-	.long	1093140480
-	.long	1094189056
-	.long	1095237632
-	.long	1096286208
-	.long	1097334784
-	.long	1098383360
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#elif defined(OS_MAC)
-	.align 5
-LC02: // { 23.5 22.5 21.5 20.5 19.5 18.5 17.5 16.5 }
-#endif
-	.long	1099169792
-	.long	1099694080
-	.long	1100218368
-	.long	1100742656
-	.long	1101266944
-	.long	1101791232
-	.long	1102315520
-	.long	1102839808
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC03: // { 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC09: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-	.align 5
-LC09: // { -1.0 -1.0 1.0 1.0 1.0 1.0 1.0 1.0 }
-#endif
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	1065353216
-	.long	3212836864
-	.long	3212836864
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/c99/Makefile b/third_party/blasfeo/kernel/c99/Makefile
deleted file mode 100644
index 55d54ef..0000000
--- a/third_party/blasfeo/kernel/c99/Makefile
+++ /dev/null
@@ -1,80 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS =
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_HASWELL)
-OBJS += kernel_dgemv_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o
-endif
-
-ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
-OBJS += kernel_dgemv_4_lib4.o
-#OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o
-OBJS +=
-endif
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_4_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o kernel_sgecp_lib4.o
-endif
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_4_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o kernel_sgecp_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_4_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o kernel_sgecp_lib4.o
-endif
-
-ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_4_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o kernel_sgecp_lib4.o
-endif
-
-ifeq ($(TARGET), GENERIC)
-OBJS += kernel_dgemm_4x4_lib4.o kernel_dgemm_diag_lib4.o kernel_dgemv_4_lib4.o kernel_dsymv_4_lib4.o kernel_dgetrf_pivot_4_lib4.o kernel_dgeqrf_4_lib4.o
-OBJS += kernel_sgemm_4x4_lib4.o kernel_sgemm_diag_lib4.o kernel_sgemv_4_lib4.o kernel_ssymv_4_lib4.o kernel_sgetrf_pivot_4_lib4.o kernel_sgecp_lib4.o
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_dgemm_4x4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dgemm_4x4_lib4.c
deleted file mode 100644
index 167e356..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dgemm_4x4_lib4.c
+++ /dev/null
@@ -1,6825 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-//#if defined(TARGET_GENERIC) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_nt_4x4_gen_lib4(int kmax, double *alpha, double *A, double *B, double *beta, int offsetC, double *C0, int sdc, int offsetD, double *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	double
-		*C1, *D1;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(offsetC==0)
-		{
-		c_00 = beta[0]*C0[0+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[1+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[2+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C0[3+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[0+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[1+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[2+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C0[3+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[0+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[1+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[2+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C0[3+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[0+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[1+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[2+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C0[3+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==1)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[1+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[2+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[3+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[0+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[1+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[2+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[3+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[0+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[1+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[2+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[3+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[0+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[1+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[2+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[3+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[0+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==2)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[2+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[3+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[0+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[1+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[2+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[3+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[0+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[1+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[2+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[3+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[0+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[1+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[2+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[3+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[0+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[1+bs*3] + alpha[0]*c_33;
-		}
-	else //if(offsetC==3)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[3+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C1[0+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[1+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[2+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[3+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C1[0+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[1+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[2+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[3+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C1[0+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[1+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[2+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[3+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C1[0+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[1+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[2+bs*3] + alpha[0]*c_33;
-		}
-	
-	// shift sol for cols
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_00 = c_01;
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_01 = c_02;
-			c_11 = c_12;
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_02 = c_03;
-			c_12 = c_13;
-			c_22 = c_23;
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_00 = c_02;
-			c_10 = c_12;
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_01 = c_03;
-			c_11 = c_13;
-			c_21 = c_23;
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_00 = c_03;
-			c_10 = c_13;
-			c_20 = c_23;
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[1+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[1+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[2+bs*3] = c_23;
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[2+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[2+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[3+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[3+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[3+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[0+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*1] = c_01;
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*2] = c_02;
-		if(m0<=1 & m1>1) D1[0+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*3] = c_03;
-		if(m0<=1 & m1>1) D1[0+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[1+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_nt_4x4_vs_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC)
-void kernel_dgemm_nt_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	kernel_dgemm_nt_4x4_vs_lib4(kmax, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_nn_4x4_gen_lib4(int kmax, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, int offsetC, double *C0, int sdc, int offsetD, double *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	double
-		*C1, *D1;
-	
-	int k;
-
-	k = 0;
-	if(offsetB!=0)
-		{
-		if(offsetB==1)
-			{
-
-			B += 1;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			k += 1;
-
-			if(k>=kmax)
-				goto scale;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			k += 1;
-
-			if(k>=kmax)
-				goto scale;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			B += bs*(sdb-1);
-			k += 1;
-
-			}
-		else if(offsetB==2)
-			{
-
-			B += 2;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			k += 1;
-
-			if(k>=kmax)
-				goto scale;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			B += bs*(sdb-1);
-			k += 1;
-
-			}
-		else // if(offsetB==3)
-			{
-
-			B += 3;
-
-			a_0 = A[0];
-			a_1 = A[1];
-			a_2 = A[2];
-			a_3 = A[3];
-
-			b_0 = B[0];
-			b_1 = B[4];
-			b_2 = B[8];
-			b_3 = B[12];
-
-			c_00 += a_0 * b_0;
-			c_10 += a_1 * b_0;
-			c_20 += a_2 * b_0;
-			c_30 += a_3 * b_0;
-
-			c_01 += a_0 * b_1;
-			c_11 += a_1 * b_1;
-			c_21 += a_2 * b_1;
-			c_31 += a_3 * b_1;
-
-			c_02 += a_0 * b_2;
-			c_12 += a_1 * b_2;
-			c_22 += a_2 * b_2;
-			c_32 += a_3 * b_2;
-
-			c_03 += a_0 * b_3;
-			c_13 += a_1 * b_3;
-			c_23 += a_2 * b_3;
-			c_33 += a_3 * b_3;
-
-			A += 4;
-			B += 1;
-			B += bs*(sdb-1);
-			k += 1;
-
-			}
-		}
-	for(; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[1];
-		b_1 = B[5];
-		b_2 = B[9];
-		b_3 = B[13];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[2];
-		b_1 = B[6];
-		b_2 = B[10];
-		b_3 = B[14];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[3];
-		b_1 = B[7];
-		b_2 = B[11];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-
-		}	
-	
-	scale:
-
-	if(offsetC==0)
-		{
-		c_00 = beta[0]*C0[0+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[1+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[2+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C0[3+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[0+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[1+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[2+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C0[3+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[0+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[1+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[2+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C0[3+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[0+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[1+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[2+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C0[3+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==1)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[1+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[2+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[3+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[0+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[1+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[2+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[3+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[0+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[1+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[2+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[3+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[0+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[1+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[2+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[3+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[0+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==2)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[2+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[3+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[0+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[1+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[2+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[3+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[0+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[1+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[2+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[3+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[0+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[1+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[2+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[3+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[0+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[1+bs*3] + alpha[0]*c_33;
-		}
-	else //if(offsetC==3)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[3+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C1[0+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[1+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[2+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[3+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C1[0+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[1+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[2+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[3+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C1[0+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[1+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[2+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[3+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C1[0+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[1+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[2+bs*3] + alpha[0]*c_33;
-		}
-	
-	// shift sol for cols
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_00 = c_01;
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_01 = c_02;
-			c_11 = c_12;
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_02 = c_03;
-			c_12 = c_13;
-			c_22 = c_23;
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_00 = c_02;
-			c_10 = c_12;
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_01 = c_03;
-			c_11 = c_13;
-			c_21 = c_23;
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_00 = c_03;
-			c_10 = c_13;
-			c_20 = c_23;
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[1+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[1+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[2+bs*3] = c_23;
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[2+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[2+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[3+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[3+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[3+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[0+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*1] = c_01;
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*2] = c_02;
-		if(m0<=1 & m1>1) D1[0+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*3] = c_03;
-		if(m0<=1 & m1>1) D1[0+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[1+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_nn_4x4_lib4(int kmax, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D)
-	{
-	kernel_dgemm_nn_4x4_gen_lib4(kmax, alpha, A, offsetB, B, sdb, beta, 0, C, 0, 0, D, 0, 0, 4, 0, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dsyrk_nt_l_4x4_gen_lib4(int kmax, double *alpha, double *A, double *B, double *beta, int offsetC, double *C0, int sdc, int offsetD, double *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0,
-		c_10=0, c_11=0,
-		c_20=0, c_21=0, c_22=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	double
-		*C1, *D1;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(offsetC==0)
-		{
-		c_00 = beta[0]*C0[0+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[1+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[2+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C0[3+bs*0] + alpha[0]*c_30;
-
-		c_11 = beta[0]*C0[1+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[2+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C0[3+bs*1] + alpha[0]*c_31;
-
-		c_22 = beta[0]*C0[2+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C0[3+bs*2] + alpha[0]*c_32;
-
-		c_33 = beta[0]*C0[3+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==1)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[1+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[2+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[3+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[0+bs*0] + alpha[0]*c_30;
-
-		c_11 = beta[0]*C0[2+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[3+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[0+bs*1] + alpha[0]*c_31;
-
-		c_22 = beta[0]*C0[3+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[0+bs*2] + alpha[0]*c_32;
-
-		c_33 = beta[0]*C1[0+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==2)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[2+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[3+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[0+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[1+bs*0] + alpha[0]*c_30;
-
-		c_11 = beta[0]*C0[3+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[0+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[1+bs*1] + alpha[0]*c_31;
-
-		c_22 = beta[0]*C1[0+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[1+bs*2] + alpha[0]*c_32;
-
-		c_33 = beta[0]*C1[1+bs*3] + alpha[0]*c_33;
-		}
-	else //if(offsetC==3)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[3+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C1[0+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[1+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[2+bs*0] + alpha[0]*c_30;
-
-		c_11 = beta[0]*C1[0+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[1+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[2+bs*1] + alpha[0]*c_31;
-
-		c_22 = beta[0]*C1[1+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[2+bs*2] + alpha[0]*c_32;
-
-		c_33 = beta[0]*C1[2+bs*3] + alpha[0]*c_33;
-		}
-	
-	// shift sol for cols
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dsyrk_nt_l_4x4_vs_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0,
-		c_10=0, c_11=0,
-		c_20=0, c_21=0, c_22=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[2+bs*2] = c_22;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[1+bs*1] = c_11;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dsyrk_nt_l_4x4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	kernel_dsyrk_nt_l_4x4_vs_lib4(kmax, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmm_nt_ru_4x4_vs_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	k = 0;
-
-	// k = 0
-	if(kmax>0)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	// k = 1
-	if(kmax>0)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	// k = 2
-	if(kmax>0)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	for(; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	kernel_dtrmm_nt_ru_4x4_vs_lib4(k, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
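Likewise, an illustrative plain-loop sketch (helper name assumed) of the dtrmm_nt_ru kernels above: D = beta*C + alpha*A*B^T, where the stored 4 x kmax panel B is upper triangular in the sense that B(j,k) == 0 for k < j, so the inner sum for column j of D starts at k = j. Same layout as the sketch above.

void ref_dtrmm_nt_ru_4x4(int kmax, double alpha, double *A, double *B, double beta, double *C, double *D)
	{
	int i, j, k;
	double acc;
	for(j=0; j<4; j++)
		{
		for(i=0; i<4; i++)
			{
			acc = 0.0;
			for(k=j; k<kmax; k++) // B(j,k) == 0 for k < j
				acc += A[i+4*k] * B[j+4*k];
			D[i+4*j] = beta*C[i+4*j] + alpha*acc;
			}
		}
	}

This is why the kernel peels the first three k iterations by hand: they touch only one, two and three columns of the accumulator before the full rank-1 updates begin.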
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmm_nn_rl_4x4_gen_lib4(int kmax, double *alpha, double *A, int offsetB, double *B, int sdb, int offsetD, double *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	double *D1;
-	
-	int k;
-
-	B += offsetB;
-
-	k = 0;
-
-	if(offsetB==0)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else if(offsetB==1)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else if(offsetB==2)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 4
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 5
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else // if(offsetB==3)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 4
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-
-	for(; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[1];
-		b_1 = B[5];
-		b_2 = B[9];
-		b_3 = B[13];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[2];
-		b_1 = B[6];
-		b_2 = B[10];
-		b_3 = B[14];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[3];
-		b_1 = B[7];
-		b_2 = B[11];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 4*sdb;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-
-		}
-	
-	store:
-	
-	c_00 = alpha[0]*c_00;
-	c_10 = alpha[0]*c_10;
-	c_20 = alpha[0]*c_20;
-	c_30 = alpha[0]*c_30;
-
-	c_01 = alpha[0]*c_01;
-	c_11 = alpha[0]*c_11;
-	c_21 = alpha[0]*c_21;
-	c_31 = alpha[0]*c_31;
-
-	c_02 = alpha[0]*c_02;
-	c_12 = alpha[0]*c_12;
-	c_22 = alpha[0]*c_22;
-	c_32 = alpha[0]*c_32;
-
-	c_03 = alpha[0]*c_03;
-	c_13 = alpha[0]*c_13;
-	c_23 = alpha[0]*c_23;
-	c_33 = alpha[0]*c_33;
-
-	// shift the solution left to skip the first n0 columns
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_00 = c_01;
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_01 = c_02;
-			c_11 = c_12;
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_02 = c_03;
-			c_12 = c_13;
-			c_22 = c_23;
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_00 = c_02;
-			c_10 = c_12;
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_01 = c_03;
-			c_11 = c_13;
-			c_21 = c_23;
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_00 = c_03;
-			c_10 = c_13;
-			c_20 = c_23;
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[1+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[1+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[2+bs*3] = c_23;
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[2+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[2+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[3+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[3+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[3+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[0+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*1] = c_01;
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*2] = c_02;
-		if(m0<=1 & m1>1) D1[0+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*3] = c_03;
-		if(m0<=1 & m1>1) D1[0+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[1+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-	
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC)  || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmm_nn_rl_4x4_lib4(int kmax, double *alpha, double *A, int offsetB, double *B, int sdb, double *D)
-	{
-	kernel_dtrmm_nn_rl_4x4_gen_lib4(kmax, alpha, A, offsetB, B, sdb, 0, D, 0, 0, 4, 0, 4);
-	}
-#endif
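An illustrative sketch of the math behind the dtrmm_nn_rl kernels above; the helper name and the plain column-major treatment of B (leading dimension ldb) are simplifications, since the real kernel walks B in panel-major order and also handles offsetB, offsetD and the m0/m1/n0/n1 edge masking: D = alpha*A*B with the kmax x 4 matrix B lower triangular, i.e. B(k,j) == 0 for k < j.

void ref_dtrmm_nn_rl_4x4(int kmax, double alpha, double *A, double *B, int ldb, double *D)
	{
	int i, j, k;
	double acc;
	for(j=0; j<4; j++)
		{
		for(i=0; i<4; i++)
			{
			acc = 0.0;
			for(k=j; k<kmax; k++) // B(k,j) == 0 for k < j
				acc += A[i+4*k] * B[k+ldb*j];
			D[i+4*j] = alpha*acc;
			}
		}
	}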
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dpotrf_nt_l_4x4_vs_lib4(int kmax, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, //c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, //c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, //c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-//	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-//	c_02 = C[0+bs*2] + c_02;
-//	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-//	c_03 = C[0+bs*3] + c_03;
-//	c_13 = C[1+bs*3] + c_13;
-//	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	if(c_00>0)
-		{
-		c_00 = sqrt(c_00);
-		tmp = 1.0/c_00;
-		}
-	else
-		{
-		c_00 = 0.0;
-		tmp = 0.0;
-		}
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-	inv_diag_D[0] = tmp;
-
-	if(kn==1)
-		goto store;
-	
-	c_11 -= c_10 * c_10;
-	c_21 -= c_20 * c_10;
-	c_31 -= c_30 * c_10;
-	if(c_11>0)
-		{
-		c_11 = sqrt(c_11);
-		tmp = 1.0/c_11;
-		}
-	else
-		{
-		c_11 = 0.0;
-		tmp = 0.0;
-		}
-	c_21 *= tmp;
-	c_31 *= tmp;
-	inv_diag_D[1] = tmp;
-
-	if(kn==2)
-		goto store;
-	
-	c_22 -= c_20 * c_20;
-	c_32 -= c_30 * c_20;
-	c_22 -= c_21 * c_21;
-	c_32 -= c_31 * c_21;
-	if(c_22>0)
-		{
-		c_22 = sqrt(c_22);
-		tmp = 1.0/c_22;
-		}
-	else
-		{
-		c_22 = 0.0;
-		tmp = 0.0;
-		}
-	c_32 *= tmp;
-	inv_diag_D[2] = tmp;
-
-	if(kn==3)
-		goto store;
-	
-	c_33 -= c_30 * c_30;
-	c_33 -= c_31 * c_31;
-	c_33 -= c_32 * c_32;
-	if(c_33>0)
-		{
-		c_33 = sqrt(c_33);
-		tmp = 1.0/c_33;
-		}
-	else
-		{
-		c_33 = 0.0;
-		tmp = 0.0;
-		}
-	inv_diag_D[3] = tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-//		if(kn==1)
-//			return;
-
-//		D[0+bs*1] = c_01;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dpotrf_nt_l_4x4_lib4(int kmax, double *A, double *B, double *C, double *D, double *inv_diag_D)
-	{
-	kernel_dpotrf_nt_l_4x4_vs_lib4(kmax, A, B, C, D, inv_diag_D, 4, 4);
-	}
-#endif
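For reference, an illustrative standalone sketch (helper name assumed) of the Cholesky step the dpotrf_nt_l kernels above perform on one 4x4 block once the C - A*B^T update is in place: the lower triangle of M is overwritten with L such that M = L*L^T, and inv_diag[j] receives 1.0/L(j,j), or 0.0 when the pivot is not positive, mirroring the guard in the kernel.

#include <math.h>

// M is a 4x4 block, element (i,j) at M[i+4*j]; only the lower triangle is referenced.
void ref_potrf_l_4(double *M, double *inv_diag)
	{
	int i, j, k;
	double d, t, inv;
	for(j=0; j<4; j++)
		{
		d = M[j+4*j];
		for(k=0; k<j; k++)
			d -= M[j+4*k] * M[j+4*k]; // subtract the squares of L(j,0..j-1)
		if(d>0)
			{
			d = sqrt(d);
			inv = 1.0/d;
			}
		else
			{
			d = 0.0;
			inv = 0.0;
			}
		M[j+4*j] = d;
		inv_diag[j] = inv;
		for(i=j+1; i<4; i++)
			{
			t = M[i+4*j];
			for(k=0; k<j; k++)
				t -= M[i+4*k] * M[j+4*k]; // dot product of rows i and j of L
			M[i+4*j] = t*inv;
			}
		}
	}

The reciprocals are stored so that the companion dtrsm_*_inv kernels can multiply instead of divide.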
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn)
-	{
-	double alpha = 1.0;
-	double beta = 1.0;
-	kernel_dsyrk_nt_l_4x4_vs_lib4(kp, &alpha, Ap, Bp, &beta, C, D, km, kn);
-	kernel_dpotrf_nt_l_4x4_vs_lib4(km_, Am, Bm, D, D, inv_diag_D, km, kn);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *inv_diag_D)
-	{
-	double alpha = 1.0;
-	double beta = 1.0;
-	kernel_dsyrk_nt_l_4x4_lib4(kp, &alpha, Ap, Bp, &beta, C, D);
-	kernel_dpotrf_nt_l_4x4_lib4(km_, Am, Bm, D, D, inv_diag_D);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int kmax, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	tmp = inv_diag_E[0];
-	c_00 *= tmp;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-	if(kn==1)
-		goto store;
-	
-	tmp = E[1+bs*0];
-	c_01 -= c_00 * tmp;
-	c_11 -= c_10 * tmp;
-	c_21 -= c_20 * tmp;
-	c_31 -= c_30 * tmp;
-	tmp = inv_diag_E[1];
-	c_01 *= tmp;
-	c_11 *= tmp;
-	c_21 *= tmp;
-	c_31 *= tmp;
-
-	if(kn==2)
-		goto store;
-	
-	tmp = E[2+bs*0];
-	c_02 -= c_00 * tmp;
-	c_12 -= c_10 * tmp;
-	c_22 -= c_20 * tmp;
-	c_32 -= c_30 * tmp;
-	tmp = E[2+bs*1];
-	c_02 -= c_01 * tmp;
-	c_12 -= c_11 * tmp;
-	c_22 -= c_21 * tmp;
-	c_32 -= c_31 * tmp;
-	tmp = inv_diag_E[2];
-	c_02 *= tmp;
-	c_12 *= tmp;
-	c_22 *= tmp;
-	c_32 *= tmp;
-
-	if(kn==3)
-		goto store;
-	
-	tmp = E[3+bs*0];
-	c_03 -= c_00 * tmp;
-	c_13 -= c_10 * tmp;
-	c_23 -= c_20 * tmp;
-	c_33 -= c_30 * tmp;
-	tmp = E[3+bs*1];
-	c_03 -= c_01 * tmp;
-	c_13 -= c_11 * tmp;
-	c_23 -= c_21 * tmp;
-	c_33 -= c_31 * tmp;
-	tmp = E[3+bs*2];
-	c_03 -= c_02 * tmp;
-	c_13 -= c_12 * tmp;
-	c_23 -= c_22 * tmp;
-	c_33 -= c_32 * tmp;
-	tmp = inv_diag_E[3];
-	c_03 *= tmp;
-	c_13 *= tmp;
-	c_23 *= tmp;
-	c_33 *= tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E)
-	{
-	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(k, A, B, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
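An illustrative plain-loop sketch (helper name assumed) of the solve done by the dtrsm_nt_rl_inv kernels above: after the C - A*B^T update, the 4x4 block X satisfies X*E^T = C' with E lower triangular, and the precomputed reciprocals inv_diag_E of its diagonal replace the divisions.

// X holds C' on entry and the solution on return; layout is [i+4*j] as above.
void ref_trsm_rlt_inv_4x4(double *E, double *inv_diag_E, double *X)
	{
	int i, j, m;
	double e;
	for(j=0; j<4; j++)
		{
		for(m=0; m<j; m++)
			{
			e = E[j+4*m]; // E(j,m), m < j
			for(i=0; i<4; i++)
				X[i+4*j] -= X[i+4*m] * e;
			}
		for(i=0; i<4; i++)
			X[i+4*j] *= inv_diag_E[j]; // 1.0/E(j,j)
		}
	}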
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn)
-	{
-	double alpha = 1.0;
-	double beta  = 1.0;
-	kernel_dgemm_nt_4x4_vs_lib4(kp, &alpha, Ap, Bp, &beta, C, D, km, kn);
-	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(km_, Am, Bm, D, D, E, inv_diag_E, km, kn);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km_, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E)
-	{
-	double alpha = 1.0;
-	double beta  = 1.0;
-	kernel_dgemm_nt_4x4_lib4(kp, &alpha, Ap, Bp, &beta, C, D);
-	kernel_dtrsm_nt_rl_inv_4x4_lib4(km_, Am, Bm, D, D, E, inv_diag_E);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_rl_one_4x4_vs_lib4(int kmax, double *A, double *B, double *C, double *D, double *E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	if(kn==1)
-		goto store;
-	
-	tmp = E[1+bs*0];
-	c_01 -= c_00 * tmp;
-	c_11 -= c_10 * tmp;
-	c_21 -= c_20 * tmp;
-	c_31 -= c_30 * tmp;
-
-	if(kn==2)
-		goto store;
-	
-	tmp = E[2+bs*0];
-	c_02 -= c_00 * tmp;
-	c_12 -= c_10 * tmp;
-	c_22 -= c_20 * tmp;
-	c_32 -= c_30 * tmp;
-	tmp = E[2+bs*1];
-	c_02 -= c_01 * tmp;
-	c_12 -= c_11 * tmp;
-	c_22 -= c_21 * tmp;
-	c_32 -= c_31 * tmp;
-
-	if(kn==3)
-		goto store;
-	
-	tmp = E[3+bs*0];
-	c_03 -= c_00 * tmp;
-	c_13 -= c_10 * tmp;
-	c_23 -= c_20 * tmp;
-	c_33 -= c_30 * tmp;
-	tmp = E[3+bs*1];
-	c_03 -= c_01 * tmp;
-	c_13 -= c_11 * tmp;
-	c_23 -= c_21 * tmp;
-	c_33 -= c_31 * tmp;
-	tmp = E[3+bs*2];
-	c_03 -= c_02 * tmp;
-	c_13 -= c_12 * tmp;
-	c_23 -= c_22 * tmp;
-	c_33 -= c_32 * tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_rl_one_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E)
-	{
-	kernel_dtrsm_nt_rl_one_4x4_vs_lib4(k, A, B, C, D, E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(int kmax, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	double
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-
-	if(kn>3)
-		{
-		tmp = inv_diag_E[3];
-		c_03 *= tmp;
-		c_13 *= tmp;
-		c_23 *= tmp;
-		c_33 *= tmp;
-		tmp = E[2+bs*3];
-		c_02 -= c_03 * tmp;
-		c_12 -= c_13 * tmp;
-		c_22 -= c_23 * tmp;
-		c_32 -= c_33 * tmp;
-		tmp = E[1+bs*3];
-		c_01 -= c_03 * tmp;
-		c_11 -= c_13 * tmp;
-		c_21 -= c_23 * tmp;
-		c_31 -= c_33 * tmp;
-		tmp = E[0+bs*3];
-		c_00 -= c_03 * tmp;
-		c_10 -= c_13 * tmp;
-		c_20 -= c_23 * tmp;
-		c_30 -= c_33 * tmp;
-		}
-
-	if(kn>2)
-		{
-		tmp = inv_diag_E[2];
-		c_02 *= tmp;
-		c_12 *= tmp;
-		c_22 *= tmp;
-		c_32 *= tmp;
-		tmp = E[1+bs*2];
-		c_01 -= c_02 * tmp;
-		c_11 -= c_12 * tmp;
-		c_21 -= c_22 * tmp;
-		c_31 -= c_32 * tmp;
-		tmp = E[0+bs*2];
-		c_00 -= c_02 * tmp;
-		c_10 -= c_12 * tmp;
-		c_20 -= c_22 * tmp;
-		c_30 -= c_32 * tmp;
-		}
-
-	if(kn>1)
-		{
-		tmp = inv_diag_E[1];
-		c_01 *= tmp;
-		c_11 *= tmp;
-		c_21 *= tmp;
-		c_31 *= tmp;
-		tmp = E[0+bs*1];
-		c_00 -= c_01 * tmp;
-		c_10 -= c_11 * tmp;
-		c_20 -= c_21 * tmp;
-		c_30 -= c_31 * tmp;
-		}
-
-	tmp = inv_diag_E[0];
-	c_00 *= tmp;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nt_ru_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E)
-	{
-	kernel_dtrsm_nt_ru_inv_4x4_vs_lib4(k, A, B, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
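The dtrsm_nt_ru_inv kernels above solve the same kind of right-hand system, X*E^T = C', but with E upper triangular, so the columns of X are recovered in reverse order. An illustrative sketch (helper name assumed):

// X holds C' on entry and the solution on return; layout is [i+4*j] as above.
void ref_trsm_rut_inv_4x4(double *E, double *inv_diag_E, double *X)
	{
	int i, j, m;
	double e;
	for(j=3; j>=0; j--)
		{
		for(m=j+1; m<4; m++)
			{
			e = E[j+4*m]; // E(j,m), m > j
			for(i=0; i<4; i++)
				X[i+4*j] -= X[i+4*m] * e;
			}
		for(i=0; i<4; i++)
			X[i+4*j] *= inv_diag_E[j]; // 1.0/E(j,j)
		}
	}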
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgetrf_nn_4x4_vs_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-	// factorization
-
-	// first column
-	tmp = 1.0 / c_00;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-	inv_diag_D[0] = tmp;
-
-	if(kn==1)
-		goto store;
-
-	// second column
-	c_11 -= c_10 * c_01;
-	c_21 -= c_20 * c_01;
-	c_31 -= c_30 * c_01;
-
-	tmp = 1.0 / c_11;
-	c_21 *= tmp;
-	c_31 *= tmp;
-	
-	inv_diag_D[1] = tmp;
-
-	if(kn==2)
-		goto store;
-
-	// third column
-	c_12 -= c_10 * c_02;
-	c_22 -= c_20 * c_02;
-	c_32 -= c_30 * c_02;
-
-	c_22 -= c_21 * c_12;
-	c_32 -= c_31 * c_12;
-
-	tmp = 1.0 / c_22;
-	c_32 *= tmp;
-
-	inv_diag_D[2] = tmp;
-
-	if(kn==3)
-		goto store;
-
-	// fourth column
-	c_13 -= c_10 * c_03;
-	c_23 -= c_20 * c_03;
-	c_33 -= c_30 * c_03;
-
-	c_23 -= c_21 * c_13;
-	c_33 -= c_31 * c_13;
-
-	c_33 -= c_32 * c_23;
-
-	tmp = 1.0 / c_33;
-
-	inv_diag_D[3] = tmp;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgetrf_nn_4x4_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *inv_diag_D)
-	{
-	kernel_dgetrf_nn_4x4_vs_lib4(kmax, A, B, sdb, C, D, inv_diag_D, 4, 4);
-	}
-#endif
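An illustrative sketch (helper name assumed) of the factorization performed by the dgetrf_nn kernels above on one 4x4 block after the C - A*B update: LU without pivoting, leaving the unit-lower factor L strictly below the diagonal, U on and above it, and inv_diag[j] = 1.0/U(j,j).

// M is a 4x4 block, element (i,j) at M[i+4*j].
void ref_getrf_nopivot_4(double *M, double *inv_diag)
	{
	int i, j, k;
	double inv;
	for(j=0; j<4; j++)
		{
		// update column j with the columns already factorized
		for(k=0; k<j; k++)
			for(i=k+1; i<4; i++)
				M[i+4*j] -= M[i+4*k] * M[k+4*j];
		// scale the subdiagonal part of column j by the reciprocal of the pivot
		inv = 1.0 / M[j+4*j];
		inv_diag[j] = inv;
		for(i=j+1; i<4; i++)
			M[i+4*j] *= inv;
		}
	}

As in the kernel, a zero pivot is not guarded against; only the Cholesky kernel above checks for non-positive pivots.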
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_ll_one_4x4_vs_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_1, e_2, e_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-	// solution
-
-	if(km==1)
-		goto store;
-	
-	e_1 = E[1+bs*0];
-	e_2 = E[2+bs*0];
-	e_3 = E[3+bs*0];
-	c_10 -= e_1 * c_00;
-	c_20 -= e_2 * c_00;
-	c_30 -= e_3 * c_00;
-	c_11 -= e_1 * c_01;
-	c_21 -= e_2 * c_01;
-	c_31 -= e_3 * c_01;
-	c_12 -= e_1 * c_02;
-	c_22 -= e_2 * c_02;
-	c_32 -= e_3 * c_02;
-	c_13 -= e_1 * c_03;
-	c_23 -= e_2 * c_03;
-	c_33 -= e_3 * c_03;
-
-	if(km==2)
-		goto store;
-	
-	e_2 = E[2+bs*1];
-	e_3 = E[3+bs*1];
-	c_20 -= e_2 * c_10;
-	c_30 -= e_3 * c_10;
-	c_21 -= e_2 * c_11;
-	c_31 -= e_3 * c_11;
-	c_22 -= e_2 * c_12;
-	c_32 -= e_3 * c_12;
-	c_23 -= e_2 * c_13;
-	c_33 -= e_3 * c_13;
-
-	if(km==3)
-		goto store;
-	
-	e_3 = E[3+bs*2];
-	c_30 -= e_3 * c_20;
-	c_31 -= e_3 * c_21;
-	c_32 -= e_3 * c_22;
-	c_33 -= e_3 * c_23;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_ll_one_4x4_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E)
-	{
-	kernel_dtrsm_nn_ll_one_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_00, e_01, e_02, e_03,
-		      e_11, e_12, e_13,
-			        e_22, e_23,
-					      e_33,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-	
-	// solve
-
-	e_00 = inv_diag_E[0];
-	c_00 *= e_00;
-	c_10 *= e_00;
-	c_20 *= e_00;
-	c_30 *= e_00;
-
-	if(kn==1)
-		goto store;
-	
-	e_01 = E[0+bs*1];
-	e_11 = inv_diag_E[1];
-	c_01 -= c_00 * e_01;
-	c_11 -= c_10 * e_01;
-	c_21 -= c_20 * e_01;
-	c_31 -= c_30 * e_01;
-	c_01 *= e_11;
-	c_11 *= e_11;
-	c_21 *= e_11;
-	c_31 *= e_11;
-
-	if(kn==2)
-		goto store;
-	
-	e_02 = E[0+bs*2];
-	e_12 = E[1+bs*2];
-	e_22 = inv_diag_E[2];
-	c_02 -= c_00 * e_02;
-	c_12 -= c_10 * e_02;
-	c_22 -= c_20 * e_02;
-	c_32 -= c_30 * e_02;
-	c_02 -= c_01 * e_12;
-	c_12 -= c_11 * e_12;
-	c_22 -= c_21 * e_12;
-	c_32 -= c_31 * e_12;
-	c_02 *= e_22;
-	c_12 *= e_22;
-	c_22 *= e_22;
-	c_32 *= e_22;
-
-	if(kn==3)
-		goto store;
-	
-	e_03 = E[0+bs*3];
-	e_13 = E[1+bs*3];
-	e_23 = E[2+bs*3];
-	e_33 = inv_diag_E[3];
-	c_03 -= c_00 * e_03;
-	c_13 -= c_10 * e_03;
-	c_23 -= c_20 * e_03;
-	c_33 -= c_30 * e_03;
-	c_03 -= c_01 * e_13;
-	c_13 -= c_11 * e_13;
-	c_23 -= c_21 * e_13;
-	c_33 -= c_31 * e_13;
-	c_03 -= c_02 * e_23;
-	c_13 -= c_12 * e_23;
-	c_23 -= c_22 * e_23;
-	c_33 -= c_32 * e_23;
-	c_03 *= e_33;
-	c_13 *= e_33;
-	c_23 *= e_33;
-	c_33 *= e_33;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_ru_inv_4x4_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E)
-	{
-	kernel_dtrsm_nn_ru_inv_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_00, e_01, e_02, e_03,
-		      e_11, e_12, e_13,
-			        e_22, e_23,
-					      e_33,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-//	printf("\n%f %f %f %f\n", c_00, c_01, c_02, c_03);
-//	printf("\n%f %f %f %f\n", c_10, c_11, c_12, c_13);
-//	printf("\n%f %f %f %f\n", c_20, c_21, c_22, c_23);
-//	printf("\n%f %f %f %f\n", c_30, c_31, c_32, c_33);
-	
-	// solve
-
-	if(km>3)
-		{
-		e_03 = E[0+bs*3];
-		e_13 = E[1+bs*3];
-		e_23 = E[2+bs*3];
-		e_33 = inv_diag_E[3];
-		c_30 *= e_33;
-		c_31 *= e_33;
-		c_32 *= e_33;
-		c_33 *= e_33;
-		c_00 -= e_03 * c_30;
-		c_01 -= e_03 * c_31;
-		c_02 -= e_03 * c_32;
-		c_03 -= e_03 * c_33;
-		c_10 -= e_13 * c_30;
-		c_11 -= e_13 * c_31;
-		c_12 -= e_13 * c_32;
-		c_13 -= e_13 * c_33;
-		c_20 -= e_23 * c_30;
-		c_21 -= e_23 * c_31;
-		c_22 -= e_23 * c_32;
-		c_23 -= e_23 * c_33;
-		}
-	
-	if(km>2)
-		{
-		e_02 = E[0+bs*2];
-		e_12 = E[1+bs*2];
-		e_22 = inv_diag_E[2];
-		c_20 *= e_22;
-		c_21 *= e_22;
-		c_22 *= e_22;
-		c_23 *= e_22;
-		c_00 -= e_02 * c_20;
-		c_01 -= e_02 * c_21;
-		c_02 -= e_02 * c_22;
-		c_03 -= e_02 * c_23;
-		c_10 -= e_12 * c_20;
-		c_11 -= e_12 * c_21;
-		c_12 -= e_12 * c_22;
-		c_13 -= e_12 * c_23;
-		}
-	
-	if(km>1)
-		{
-		e_01 = E[0+bs*1];
-		e_11 = inv_diag_E[1];
-		c_10 *= e_11;
-		c_11 *= e_11;
-		c_12 *= e_11;
-		c_13 *= e_11;
-		c_00 -= e_01 * c_10;
-		c_01 -= e_01 * c_11;
-		c_02 -= e_01 * c_12;
-		c_03 -= e_01 * c_13;
-		}
-	
-	e_00 = inv_diag_E[0];
-	c_00 *= e_00;
-	c_01 *= e_00;
-	c_02 *= e_00;
-	c_03 *= e_00;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsm_nn_lu_inv_4x4_lib4(int kmax, double *A, double *B, int sdb, double *C, double *D, double *E, double *inv_diag_E)
-	{
-	kernel_dtrsm_nn_lu_inv_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_dgemm_diag_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dgemm_diag_lib4.c
deleted file mode 100644
index cad2b21..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dgemm_diag_lib4.c
+++ /dev/null
@@ -1,1111 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// B is the diagonal of a matrix, case beta=0.0
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_right_4_a0_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-	
-	alpha0 = alpha[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	b_3 = alpha0 * B[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_0;
-		c_2 = a_2 * b_0;
-		c_3 = a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = a_0 * b_1;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_1;
-		c_3 = a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = a_0 * b_2;
-		c_1 = a_1 * b_2;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		c_0 = a_0 * b_3;
-		c_1 = a_1 * b_3;
-		c_2 = a_2 * b_3;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		A += 4*sda;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		a_0 = A[0+bs*3];
-		
-		c_0 = a_0 * b_3;
-
-		D[0+bs*3] = c_0;
-
-
-		A += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_right_4_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-	
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	b_3 = alpha0 * B[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_2;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_3;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_3;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_3;
-		c_3 = beta0 * C[3+bs*3] + a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		a_0 = A[0+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_3;
-
-		D[0+bs*3] = c_0;
-
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_right_3_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_2;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_right_2_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_right_1_lib4(int kmax, double *alpha, double *A, int sda, double *B, double *beta, double *C, int sdc, double *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix, case beta=0.0
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_left_4_a0_lib4(int kmax, double *alpha, double *A, double *B, double *D, int alg)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-	a_3 = alpha0 * A[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		b_3 = B[3+bs*1];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		b_3 = B[3+bs*2];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		b_3 = B[3+bs*3];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		B += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-	
-		B += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_left_4_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D, int alg)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-	a_3 = alpha0 * A[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		b_3 = B[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_3;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		b_3 = B[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_3;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		b_3 = B[3+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*3] + a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_left_3_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1, a_2,
-		b_0, b_1, b_2,
-		c_0, c_1, c_2;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_2;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_2;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_left_2_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0, a_1,
-		b_0, b_1,
-		c_0, c_1;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemm_diag_left_1_lib4(int kmax, double *alpha, double *A, double *B, double *beta, double *C, double *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		alpha0, beta0,
-		a_0,
-		b_0,
-		c_0;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-		
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		b_0 = B[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-
-		D[0+bs*1] = c_0;
-		
-
-		b_0 = B[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-
-		D[0+bs*2] = c_0;
-		
-
-		b_0 = B[0+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-
-		D[0+bs*3] = c_0;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-		
-	}
-#endif
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_dgemv_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dgemv_4_lib4.c
deleted file mode 100644
index 9f11b5f..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dgemv_4_lib4.c
+++ /dev/null
@@ -1,1009 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_n_4_gen_lib4(int kmax, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k0, int k1)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		x_0,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		x_0 = x[1];
-
-		y_0 += A[0+bs*1] * x_0;
-		y_1 += A[1+bs*1] * x_0;
-		y_2 += A[2+bs*1] * x_0;
-		y_3 += A[3+bs*1] * x_0;
-		
-		x_0 = x[2];
-
-		y_0 += A[0+bs*2] * x_0;
-		y_1 += A[1+bs*2] * x_0;
-		y_2 += A[2+bs*2] * x_0;
-		y_3 += A[3+bs*2] * x_0;
-		
-		x_0 = x[3];
-
-		y_0 += A[0+bs*3] * x_0;
-		y_1 += A[1+bs*3] * x_0;
-		y_2 += A[2+bs*3] * x_0;
-		y_3 += A[3+bs*3] * x_0;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	for(; k<kmax; k++)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		A += 1*bs;
-		x += 1;
-
-		}
-
-	y_0 = alpha[0]*y_0 + beta[0]*y[0];
-	y_1 = alpha[0]*y_1 + beta[0]*y[1];
-	y_2 = alpha[0]*y_2 + beta[0]*y[2];
-	y_3 = alpha[0]*y_3 + beta[0]*y[3];
-
-	if(k0<=0 & k1>3)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		if(k0<=0 & k1>0) z[0] = y_0;
-		if(k0<=1 & k1>1) z[1] = y_1;
-		if(k0<=2 & k1>2) z[2] = y_2;
-		if(k0<=3 & k1>3) z[3] = y_3;
-		}
-
-	}
-#endif
-	
-	
-	
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_n_4_vs_lib4(int kmax, double *alpha, double *A, double *x, double *beta, double *y, double *z, int k1)
-	{
-
-	kernel_dgemv_n_4_gen_lib4(kmax, alpha, A, x, beta, y, z, 0, k1);
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_n_4_lib4(int kmax, double *alpha, double *A, double *x, double *beta, double *y, double *z)
-	{
-
-	kernel_dgemv_n_4_gen_lib4(kmax, alpha, A, x, beta, y, z, 0, 4);
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_t_4_gen_lib4(int kmax, double *alpha, int offA, double *A, int sda, double *x, double *beta, double *y, double *z, int km)
-	{
-
-	const int bs  = 4;
-	
-	int k, kend;
-	
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	if(offA!=0) // 1, 2, 3
-		{
-		kend = 4-offA<kmax ? 4-offA : kmax;
-		for(; k<kend; k++)
-			{
-			
-			x_0 = x[0];
-		
-			y_0 += A[0+bs*0] * x_0;
-			y_1 += A[0+bs*1] * x_0;
-			y_2 += A[0+bs*2] * x_0;
-			y_3 += A[0+bs*3] * x_0;
-		
-			A += 1;
-			x += 1;
-			
-			}
-		A += bs*(sda-1);
-		}
-	for(; k<kmax-bs+1; k+=bs)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-
-		y_0 += A[1+bs*0] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[1+bs*2] * x_1;
-		y_3 += A[1+bs*3] * x_1;
-		
-		y_0 += A[2+bs*0] * x_2;
-		y_1 += A[2+bs*1] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[2+bs*3] * x_2;
-
-		y_0 += A[3+bs*0] * x_3;
-		y_1 += A[3+bs*1] * x_3;
-		y_2 += A[3+bs*2] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-	
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-	
-		A += 1;
-		x += 1;
-		
-		}
-
-	y_0 = alpha[0]*y_0 + beta[0]*y[0];
-	y_1 = alpha[0]*y_1 + beta[0]*y[1];
-	y_2 = alpha[0]*y_2 + beta[0]*y[2];
-	y_3 = alpha[0]*y_3 + beta[0]*y[3];
-
-	if(km>=4)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		z[0] = y_0;
-		if(km>=2)
-			{
-			z[1] = y_1;
-			if(km>2)
-				{
-				z[2] = y_2;
-				}
-			}
-		}
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_t_4_lib4(int kmax, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z)
-	{
-
-	kernel_dgemv_t_4_gen_lib4(kmax, alpha, 0, A, sda, x, beta, y, z, 4);
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dgemv_t_4_vs_lib4(int kmax, double *alpha, double *A, int sda, double *x, double *beta, double *y, double *z, int k1)
-	{
-
-	kernel_dgemv_t_4_gen_lib4(kmax, alpha, 0, A, sda, x, beta, y, z, k1);
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_ln_inv_4_vs_lib4(int kmax, double *A, double *inv_diag_A, double *x, double *y, double *z, int km, int kn)
-	{
-
-	const int bs = 4;
-	
-	int k;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[1+bs*0] * x_0;
-		y_2 -= A[2+bs*0] * x_0;
-		y_3 -= A[3+bs*0] * x_0;
-
-		y_0 -= A[0+bs*1] * x_1;
-		y_1 -= A[1+bs*1] * x_1;
-		y_2 -= A[2+bs*1] * x_1;
-		y_3 -= A[3+bs*1] * x_1;
-
-		y_0 -= A[0+bs*2] * x_2;
-		y_1 -= A[1+bs*2] * x_2;
-		y_2 -= A[2+bs*2] * x_2;
-		y_3 -= A[3+bs*2] * x_2;
-
-		y_0 -= A[0+bs*3] * x_3;
-		y_1 -= A[1+bs*3] * x_3;
-		y_2 -= A[2+bs*3] * x_3;
-		y_3 -= A[3+bs*3] * x_3;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-	y_3 = y[3] + y_3;
-
-	double
-		a_00, a_10, a_20, a_30,
-		a_11, a_21, a_31;
-	
-	// a_00
-	a_00 = inv_diag_A[0];
-	a_10 = A[1+bs*0];
-	a_20 = A[2+bs*0];
-	a_30 = A[3+bs*0];
-	y_0 *= a_00;
-	z[0] = y_0;
-	y_1 -= a_10 * y_0;
-	y_2 -= a_20 * y_0;
-	y_3 -= a_30 * y_0;
-
-	if(kn==1)
-		{
-		if(km==1)
-			return;
-		y[1] = y_1;
-		if(km==2)
-			return;
-		y[2] = y_2;
-		if(km==3)
-			return;
-		y[3] = y_3;
-		return;
-		}
-
-	// a_11
-	a_11 = inv_diag_A[1];
-	a_21 = A[2+bs*1];
-	a_31 = A[3+bs*1];
-	y_1 *= a_11;	
-	z[1] = y_1;
-	y_2 -= a_21 * y_1;
-	y_3 -= a_31 * y_1;
-
-	if(kn==2)
-		{
-		if(km==2)
-			return;
-		y[2] = y_2;
-		if(km==3)
-			return;
-		y[3] = y_3;
-		return;
-		}
-
-	// a_22
-	a_00 = inv_diag_A[2];
-	a_10 = A[3+bs*2];
-	y_2 *= a_00;
-	z[2] = y_2;
-	y_3 -= a_10 * y_2;
-
-	if(kn==3)
-		{
-		if(km==3)
-			return;
-		y[3] = y_3;
-
-		return;
-		}
-
-	// a_33
-	a_11 = inv_diag_A[3];
-	y_3 *= a_11;	
-	z[3] = y_3;
-
-	}
-#endif
-	
-
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_ln_inv_4_lib4(int kmax, double *A, double *inv_diag_A, double *x, double *y, double *z)
-	{
-
-	kernel_dtrsv_ln_inv_4_vs_lib4(kmax, A, inv_diag_A, x, y, z, 4, 4);
-
-
-	}
-#endif
-	
-	
-		
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_lt_inv_4_lib4(int kmax, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	double *tA, *tx;
-	tA = A;
-	tx = x;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=4;
-	A += 4 + (sda-1)*bs;
-	x += 4;
-	for(; k<kmax-3; k+=4)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		y_3 -= A[0+bs*3] * x_0;
-
-		y_0 -= A[1+bs*0] * x_1;
-		y_1 -= A[1+bs*1] * x_1;
-		y_2 -= A[1+bs*2] * x_1;
-		y_3 -= A[1+bs*3] * x_1;
-		
-		y_0 -= A[2+bs*0] * x_2;
-		y_1 -= A[2+bs*1] * x_2;
-		y_2 -= A[2+bs*2] * x_2;
-		y_3 -= A[2+bs*3] * x_2;
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-		y_2 -= A[3+bs*2] * x_3;
-		y_3 -= A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		y_3 -= A[0+bs*3] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-	
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-	y_3 = y[3] + y_3;
-
-	A = tA;
-	x = tx;
-
-	// bottom triangle
-	y_3 *= inv_diag_A[3];
-	z[3] = y_3;
-
-	y_2 -= A[3+bs*2] * y_3;
-	y_2 *= inv_diag_A[2];
-	z[2] = y_2;
-
-	// square
-	y_0 -= A[2+bs*0]*y_2 + A[3+bs*0]*y_3;
-	y_1 -= A[2+bs*1]*y_2 + A[3+bs*1]*y_3;
-		
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_lt_inv_3_lib4(int kmax, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	double *tA, *tx;
-	tA = A;
-	tx = x;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0;
-	
-	k = 3;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_3 = x[3];
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-		y_2 -= A[3+bs*2] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_1 -= A[0+bs*1] * x_0;
-			y_2 -= A[0+bs*2] * x_0;
-
-			y_0 -= A[1+bs*0] * x_1;
-			y_1 -= A[1+bs*1] * x_1;
-			y_2 -= A[1+bs*2] * x_1;
-			
-			y_0 -= A[2+bs*0] * x_2;
-			y_1 -= A[2+bs*1] * x_2;
-			y_2 -= A[2+bs*2] * x_2;
-
-			y_0 -= A[3+bs*0] * x_3;
-			y_1 -= A[3+bs*1] * x_3;
-			y_2 -= A[3+bs*2] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 3;
-		x += 1;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-
-	A = tA;
-	x = tx;
-
-	// bottom triangle
-	y_2 *= inv_diag_A[2];
-	z[2] = y_2;
-
-	// square
-	y_0 -= A[2+bs*0]*y_2;
-	y_1 -= A[2+bs*1]*y_2;
-		
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_lt_inv_2_lib4(int kmax, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	double *tA, *tx;
-	tA = A;
-	tx = x;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0;
-	
-	k = 2;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[2+bs*0] * x_2;
-		y_1 -= A[2+bs*1] * x_2;
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_1 -= A[0+bs*1] * x_0;
-
-			y_0 -= A[1+bs*0] * x_1;
-			y_1 -= A[1+bs*1] * x_1;
-			
-			y_0 -= A[2+bs*0] * x_2;
-			y_1 -= A[2+bs*1] * x_2;
-
-			y_0 -= A[3+bs*0] * x_3;
-			y_1 -= A[3+bs*1] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 2;
-		x += 2;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-
-	A = tA;
-	x = tx;
-
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrsv_lt_inv_1_lib4(int kmax, double *A, int sda, double *inv_diag_A, double *x, double *y, double *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	double *tA, *tx;
-	tA = A;
-	tx = x;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0;
-	
-	k = 1;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[1+bs*0] * x_1;
-		y_0 -= A[2+bs*0] * x_2;
-		y_0 -= A[3+bs*0] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_0 -= A[1+bs*0] * x_1;
-			y_0 -= A[2+bs*0] * x_2;
-			y_0 -= A[3+bs*0] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 1;
-		x += 1;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-
-	A = tA;
-	x = tx;
-
-	// top triangle
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmv_un_4_lib4(int kmax, double *A, double *x, double *z)
-	{
-
-	const int bs = 4;
-	
-	int k;
-
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	x_0 = x[0];
-	x_1 = x[1];
-	x_2 = x[2];
-	x_3 = x[3];
-
-	y_0 += A[0+bs*0] * x_0;
-/*	y_1 += A[1+bs*0] * x_0;*/
-/*	y_2 += A[2+bs*0] * x_0;*/
-/*	y_3 += A[3+bs*0] * x_0;*/
-
-	y_0 += A[0+bs*1] * x_1;
-	y_1 += A[1+bs*1] * x_1;
-/*	y_2 += A[2+bs*1] * x_1;*/
-/*	y_3 += A[3+bs*1] * x_1;*/
-
-	y_0 += A[0+bs*2] * x_2;
-	y_1 += A[1+bs*2] * x_2;
-	y_2 += A[2+bs*2] * x_2;
-/*	y_3 += A[3+bs*2] * x_2;*/
-
-	y_0 += A[0+bs*3] * x_3;
-	y_1 += A[1+bs*3] * x_3;
-	y_2 += A[2+bs*3] * x_3;
-	y_3 += A[3+bs*3] * x_3;
-	
-	A += 4*bs;
-	x += 4;
-
-	k=4;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-
-		y_0 += A[0+bs*1] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[2+bs*1] * x_1;
-		y_3 += A[3+bs*1] * x_1;
-
-		y_0 += A[0+bs*2] * x_2;
-		y_1 += A[1+bs*2] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[3+bs*2] * x_2;
-
-		y_0 += A[0+bs*3] * x_3;
-		y_1 += A[1+bs*3] * x_3;
-		y_2 += A[2+bs*3] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	for(; k<kmax; k++)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		A += 1*bs;
-		x += 1;
-
-		}
-
-	z[0] = y_0;
-	z[1] = y_1;
-	z[2] = y_2;
-	z[3] = y_3;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmv_ut_4_vs_lib4(int kmax, double *A, int sda, double *x, double *z, int km)
-	{
-
-	const int bs  = 4;
-	
-	int
-		k;
-	
-	double
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-4; k+=4)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-
-		y_0 += A[1+bs*0] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[1+bs*2] * x_1;
-		y_3 += A[1+bs*3] * x_1;
-		
-		y_0 += A[2+bs*0] * x_2;
-		y_1 += A[2+bs*1] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[2+bs*3] * x_2;
-
-		y_0 += A[3+bs*0] * x_3;
-		y_1 += A[3+bs*1] * x_3;
-		y_2 += A[3+bs*2] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-
-	x_0 = x[0];
-	x_1 = x[1];
-	x_2 = x[2];
-	x_3 = x[3];
-	
-	y_0 += A[0+bs*0] * x_0;
-	y_1 += A[0+bs*1] * x_0;
-	y_2 += A[0+bs*2] * x_0;
-	y_3 += A[0+bs*3] * x_0;
-
-/*	y_0 += A[1+bs*0] * x_1;*/
-	y_1 += A[1+bs*1] * x_1;
-	y_2 += A[1+bs*2] * x_1;
-	y_3 += A[1+bs*3] * x_1;
-	
-/*	y_0 += A[2+bs*0] * x_2;*/
-/*	y_1 += A[2+bs*1] * x_2;*/
-	y_2 += A[2+bs*2] * x_2;
-	y_3 += A[2+bs*3] * x_2;
-
-/*	y_0 += A[3+bs*0] * x_3;*/
-/*	y_1 += A[3+bs*1] * x_3;*/
-/*	y_2 += A[3+bs*2] * x_3;*/
-	y_3 += A[3+bs*3] * x_3;
-	
-//	A += sda*bs;
-//	x += 4;
-
-	// store_vs
-	store:
-	if(km>=4)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		z[0] = y_0;
-		if(km>=2)
-			{
-			z[1] = y_1;
-			if(km>2)
-				{
-				z[2] = y_2;
-				}
-			}
-		}
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_dtrmv_ut_4_lib4(int kmax, double *A, int sda, double *x, double *z)
-	{
-	
-	kernel_dtrmv_ut_4_vs_lib4(kmax, A, sda, x, z, 4);
-
-	}
-#endif
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_dgeqrf_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dgeqrf_4_lib4.c
deleted file mode 100644
index 071ec86..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dgeqrf_4_lib4.c
+++ /dev/null
@@ -1,2620 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-#include <stdio.h>
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-
-
-
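-// unblocked Householder QR of an m x 4 block stored in panel-major format
-// (panel size ps=4, panel stride sdd): for each column j a reflector with
-// scalar factor dD[j] = (beta-alpha)/beta is generated, its vector (scaled by
-// 1/(alpha-beta)) overwrites the entries below the diagonal, beta overwrites
-// the diagonal, and the reflector is applied to the trailing columns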
-void kernel_dgeqrf_4_lib4(int m, double *pD, int sdd, double *dD)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w1, w2, w3;
-	const int ps = 4;
-	// first column
-	beta = 0.0;
-	ii = 1;
-	if(m>1)
-		{
-		tmp = pD[1+ps*0];
-		beta += tmp*tmp;
-		if(m>2)
-			{
-			tmp = pD[2+ps*0];
-			beta += tmp*tmp;
-			if(m>3)
-				{
-				tmp = pD[3+ps*0];
-				beta += tmp*tmp;
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*0];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[0] = 0.0;
-		}
-	else
-		{
-		alpha = pD[0+ps*0];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau0
-		dD[0] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v0
-		pD[0+ps*0] = beta;
-		ii = 1;
-		if(m>1)
-			{
-			pD[1+ps*0] *= tmp;
-			if(m>2)
-				{
-				pD[2+ps*0] *= tmp;
-				if(m>3)
-					{
-					pD[3+ps*0] *= tmp;
-					}
-				}
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*0] *= tmp;
-			pD[1+ii*sdd+ps*0] *= tmp;
-			pD[2+ii*sdd+ps*0] *= tmp;
-			pD[3+ii*sdd+ps*0] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*0] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w1 = pD[0+ps*1];
-	w2 = pD[0+ps*2];
-	w3 = pD[0+ps*3];
-	if(m>1)
-		{
-		w1 += pD[1+ps*1] * pD[1+ps*0];
-		w2 += pD[1+ps*2] * pD[1+ps*0];
-		w3 += pD[1+ps*3] * pD[1+ps*0];
-		if(m>2)
-			{
-			w1 += pD[2+ps*1] * pD[2+ps*0];
-			w2 += pD[2+ps*2] * pD[2+ps*0];
-			w3 += pD[2+ps*3] * pD[2+ps*0];
-			if(m>3)
-				{
-				w1 += pD[3+ps*1] * pD[3+ps*0];
-				w2 += pD[3+ps*2] * pD[3+ps*0];
-				w3 += pD[3+ps*3] * pD[3+ps*0];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w1 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		w2 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		w1 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		w2 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		w1 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		w2 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		w1 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		w2 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w1 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		w2 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		}
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	pD[0+ps*1] += w1;
-	pD[0+ps*2] += w2;
-	pD[0+ps*3] += w3;
-	if(m>1)
-		{
-		pD[1+ps*1] += w1 * pD[1+ps*0];
-		pD[1+ps*2] += w2 * pD[1+ps*0];
-		pD[1+ps*3] += w3 * pD[1+ps*0];
-		if(m>2)
-			{
-			pD[2+ps*1] += w1 * pD[2+ps*0];
-			pD[2+ps*2] += w2 * pD[2+ps*0];
-			pD[2+ps*3] += w3 * pD[2+ps*0];
-			if(m>3)
-				{
-				pD[3+ps*1] += w1 * pD[3+ps*0];
-				pD[3+ps*2] += w2 * pD[3+ps*0];
-				pD[3+ps*3] += w3 * pD[3+ps*0];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*1] += w1 * pD[0+ii*sdd+ps*0];
-		pD[0+ii*sdd+ps*2] += w2 * pD[0+ii*sdd+ps*0];
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*1] += w1 * pD[1+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*2] += w2 * pD[1+ii*sdd+ps*0];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*1] += w1 * pD[2+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*2] += w2 * pD[2+ii*sdd+ps*0];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*1] += w1 * pD[3+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*2] += w2 * pD[3+ii*sdd+ps*0];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*0];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*1] += w1 * pD[ll+ii*sdd+ps*0];
-		pD[ll+ii*sdd+ps*2] += w2 * pD[ll+ii*sdd+ps*0];
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*0];
-		}
-	if(m==1)
-		return;
-	// second column
-	beta = 0.0;
-	if(m>2)
-		{
-		tmp = pD[2+ps*1];
-		beta += tmp*tmp;
-		if(m>3)
-			{
-			tmp = pD[3+ps*1];
-			beta += tmp*tmp;
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*1];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[1] = 0.0;
-		}
-	else
-		{
-		alpha = pD[1+ps*1];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau1
-		dD[1] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v1
-		pD[1+ps*1] = beta;
-		if(m>2)
-			{
-			pD[2+ps*1] *= tmp;
-			if(m>3)
-				{
-				pD[3+ps*1] *= tmp;
-				}
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*1] *= tmp;
-			pD[1+ii*sdd+ps*1] *= tmp;
-			pD[2+ii*sdd+ps*1] *= tmp;
-			pD[3+ii*sdd+ps*1] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*1] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w2 = pD[1+ps*2];
-	w3 = pD[1+ps*3];
-	if(m>2)
-		{
-		w2 += pD[2+ps*2] * pD[2+ps*1];
-		w3 += pD[2+ps*3] * pD[2+ps*1];
-		if(m>3)
-			{
-			w2 += pD[3+ps*2] * pD[3+ps*1];
-			w3 += pD[3+ps*3] * pD[3+ps*1];
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w2 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		w2 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		w2 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		w2 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w2 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		}
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	pD[1+ps*2] += w2;
-	pD[1+ps*3] += w3;
-	if(m>2)
-		{
-		pD[2+ps*2] += w2 * pD[2+ps*1];
-		pD[2+ps*3] += w3 * pD[2+ps*1];
-		if(m>3)
-			{
-			pD[3+ps*2] += w2 * pD[3+ps*1];
-			pD[3+ps*3] += w3 * pD[3+ps*1];
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*2] += w2 * pD[0+ii*sdd+ps*1];
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*1];
-		pD[1+ii*sdd+ps*2] += w2 * pD[1+ii*sdd+ps*1];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*1];
-		pD[2+ii*sdd+ps*2] += w2 * pD[2+ii*sdd+ps*1];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*1];
-		pD[3+ii*sdd+ps*2] += w2 * pD[3+ii*sdd+ps*1];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*1];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*2] += w2 * pD[ll+ii*sdd+ps*1];
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*1];
-		}
-	if(m==2)
-		return;
-	// third column
-	beta = 0.0;
-	if(m>3)
-		{
-		tmp = pD[3+ps*2];
-		beta += tmp*tmp;
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*2];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[2] = 0.0;
-		}
-	else
-		{
-		alpha = pD[2+ps*2];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau2
-		dD[2] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v2
-		pD[2+ps*2] = beta;
-		if(m>3)
-			{
-			pD[3+ps*2] *= tmp;
-			}
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*2] *= tmp;
-			pD[1+ii*sdd+ps*2] *= tmp;
-			pD[2+ii*sdd+ps*2] *= tmp;
-			pD[3+ii*sdd+ps*2] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*2] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w3 = pD[2+ps*3];
-	if(m>3)
-		{
-		w3 += pD[3+ps*3] * pD[3+ps*2];
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		w3 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		w3 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		w3 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		w3 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		w3 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	w3 = - dD[2] * w3;
-	pD[2+ps*3] += w3;
-	if(m>3)
-		{
-		pD[3+ps*3] += w3 * pD[3+ps*2];
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		pD[0+ii*sdd+ps*3] += w3 * pD[0+ii*sdd+ps*2];
-		pD[1+ii*sdd+ps*3] += w3 * pD[1+ii*sdd+ps*2];
-		pD[2+ii*sdd+ps*3] += w3 * pD[2+ii*sdd+ps*2];
-		pD[3+ii*sdd+ps*3] += w3 * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		pD[ll+ii*sdd+ps*3] += w3 * pD[ll+ii*sdd+ps*2];
-		}
-	if(m==3)
-		return;
-	// fourth column
-	beta = 0.0;
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		tmp = pD[0+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[1+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[2+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		tmp = pD[3+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		tmp = pD[ll+ii*sdd+ps*3];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[3] = 0.0;
-		}
-	else
-		{
-		alpha = pD[3+ps*3];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau3
-		dD[3] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v3
-		pD[3+ps*3] = beta;
-		for(ii=4; ii<m-3; ii+=4)
-			{
-			pD[0+ii*sdd+ps*3] *= tmp;
-			pD[1+ii*sdd+ps*3] *= tmp;
-			pD[2+ii*sdd+ps*3] *= tmp;
-			pD[3+ii*sdd+ps*3] *= tmp;
-			}
-		for(ll=0; ll<m-ii; ll++)
-			{
-			pD[ll+ii*sdd+ps*3] *= tmp;
-			}
-		}
-	return;
-	}
-
-
-// unblocked algorithm
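-// variable-size variant: computes k reflectors of the m x n block that starts
-// at row offset offD inside its 4-wide panel (pD points to the first element,
-// sdd is the panel stride); the scalar factors are returned in dD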
-void kernel_dgeqrf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk, ll, imax, jmax, jmax0, kmax, kmax0;
-	const int ps = 4;
-	imax = k; //m<n ? m : n;
-	double alpha, beta, tmp, w0;
-	double *pC00, *pC10, *pC01, *pC11;
-	int offset;
-	double *pD0 = pD-offD;
-	for(ii=0; ii<imax; ii++)
-		{
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		pC10 = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		jmax = m-ii-1;
-		jmax0 = (ps-((ii+1+offD)&(ps-1)))&(ps-1);
-		jmax0 = jmax<jmax0 ? jmax : jmax0;
-		offset = 0;
-		jj = 0;
-		if(jmax0>0)
-			{
-			for( ; jj<jmax0; jj++)
-				{
-				tmp = pC10[0+offset];
-				beta += tmp*tmp;
-				offset += 1;
-				}
-			offset += -ps+ps*sdd;
-			}
-		for( ; jj<jmax-3; jj+=4)
-			{
-			tmp = pC10[0+offset];
-			beta += tmp*tmp;
-			tmp = pC10[1+offset];
-			beta += tmp*tmp;
-			tmp = pC10[2+offset];
-			beta += tmp*tmp;
-			tmp = pC10[3+offset];
-			beta += tmp*tmp;
-			offset += ps*sdd;
-			}
-		for(ll=0; ll<jmax-jj; ll++)
-			{
-			tmp = pC10[0+offset];
-			beta += tmp*tmp;
-			offset += 1;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			offset = 0;
-			jj = 0;
-			if(jmax0>0)
-				{
-				for( ; jj<jmax0; jj++)
-					{
-					pC10[0+offset] *= tmp;
-					offset += 1;
-					}
-				offset += -ps+ps*sdd;
-				}
-			for( ; jj<jmax-3; jj+=4)
-				{
-				pC10[0+offset] *= tmp;
-				pC10[1+offset] *= tmp;
-				pC10[2+offset] *= tmp;
-				pC10[3+offset] *= tmp;
-				offset += ps*sdd;
-				}
-			for(ll=0; ll<jmax-jj; ll++)
-				{
-				pC10[0+offset] *= tmp;
-				offset += 1;
-				}
-			pC00[0] = beta;
-			}
-		if(ii<n)
-			{
-			pC01 = pC00 + ps;
-			pC11 = pC10 + ps;
-			kmax = jmax;
-			kmax0 = jmax0;
-			jmax = n-ii-1;
-			jj = 0;
-			for( ; jj<jmax; jj++)
-				{
-				w0 = pC01[0+ps*jj] * 1.0;
-				offset = 0;
-				kk = 0;
-				if(kmax0>0)
-					{
-					for( ; kk<kmax0; kk++)
-						{
-						w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-						offset += 1;
-						}
-					offset += -ps+ps*sdd;
-					}
-				for( ; kk<kmax-3; kk+=4)
-					{
-					w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-					w0 += pC11[1+offset+ps*jj] * pC10[1+offset];
-					w0 += pC11[2+offset+ps*jj] * pC10[2+offset];
-					w0 += pC11[3+offset+ps*jj] * pC10[3+offset];
-					offset += ps*sdd;
-					}
-				for(ll=0; ll<kmax-kk; ll++)
-					{
-					w0 += pC11[0+offset+ps*jj] * pC10[0+offset];
-					offset += 1;
-					}
-				w0 = - dD[ii] * w0;
-				pC01[0+ps*jj] += w0;
-				offset = 0;
-				kk = 0;
-				if(kmax0>0)
-					{
-					for( ; kk<kmax0; kk++)
-						{
-						pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-						offset += 1;
-						}
-					offset = offset-ps+ps*sdd;
-					}
-				for( ; kk<kmax-3; kk+=4)
-					{
-					pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-					pC11[1+offset+ps*jj] += w0 * pC10[1+offset];
-					pC11[2+offset+ps*jj] += w0 * pC10[2+offset];
-					pC11[3+offset+ps*jj] += w0 * pC10[3+offset];
-					offset += ps*sdd;
-					}
-				for(ll=0; ll<kmax-kk; ll++)
-					{
-					pC11[0+offset+ps*jj] += w0 * pC10[0+offset];
-					offset += 1;
-					}
-				}
-			}
-		}
-	return;
-	}
-
-
-
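-// apply the 4 reflectors produced by kernel_dgeqrf_4_lib4 (vectors in the
-// strictly lower part of pD, scalar factors in dD) to the m x n matrix pC0
-// from the left, as the compact-WY update C -= V * T * V^T * C with the 4x4
-// triangular factor T assembled in pT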
-void kernel_dlarf_4_lib4(int m, int n, double *pD, int sdd, double *dD, double *pC0, int sdc)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, ll;
-	const int ps = 4;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	double tmp, d0, d1, d2, d3;
-	double *pC;
-	double pT[16];// = {};
-	int ldt = 4;
-	double pW[8];// = {};
-	int ldw = 2;
-	// dot product of v
-	v10 = 0.0;
-	v20 = 0.0;
-	v30 = 0.0;
-	v21 = 0.0;
-	v31 = 0.0;
-	v32 = 0.0;
-	if(m>1)
-		{
-		v10 = 1.0 * pD[1+ps*0];
-		if(m>2)
-			{
-			v10 += pD[2+ps*1] * pD[2+ps*0];
-			v20 = 1.0 * pD[2+ps*0];
-			v21 = 1.0 * pD[2+ps*1];
-			if(m>3)
-				{
-				v10 += pD[3+ps*1] * pD[3+ps*0];
-				v20 += pD[3+ps*2] * pD[3+ps*0];
-				v21 += pD[3+ps*2] * pD[3+ps*1];
-				v30 = 1.0 * pD[3+ps*0];
-				v31 = 1.0 * pD[3+ps*1];
-				v32 = 1.0 * pD[3+ps*2];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		v10 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		v20 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		v21 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		v30 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		v31 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		v32 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		v10 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		v20 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		v21 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		v30 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		v31 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		v32 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		v10 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		v20 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		v21 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		v30 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		v31 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		v32 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		v10 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		v20 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		v21 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		v30 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		v31 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		v32 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		v10 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		v20 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		v21 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		v30 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		v31 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		v32 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	// compute lower triangular T containing tau for matrix update
-	pT[0+ldt*0] = dD[0];
-	pT[1+ldt*1] = dD[1];
-	pT[2+ldt*2] = dD[2];
-	pT[3+ldt*3] = dD[3];
-	pT[1+ldt*0] = - dD[1] * (v10*pT[0+ldt*0]);
-	pT[2+ldt*1] = - dD[2] * (v21*pT[1+ldt*1]);
-	pT[3+ldt*2] = - dD[3] * (v32*pT[2+ldt*2]);
-	pT[2+ldt*0] = - dD[2] * (v20*pT[0+ldt*0] + v21*pT[1+ldt*0]);
-	pT[3+ldt*1] = - dD[3] * (v31*pT[1+ldt*1] + v32*pT[2+ldt*1]);
-	pT[3+ldt*0] = - dD[3] * (v30*pT[0+ldt*0] + v31*pT[1+ldt*0] + v32*pT[2+ldt*0]);
-	// downgrade matrix
-	pW[0] = 0.0;
-	pW[1] = 0.0;
-	pW[2] = 0.0;
-	pW[3] = 0.0;
-	pW[4] = 0.0;
-	pW[5] = 0.0;
-	pW[6] = 0.0;
-	pW[7] = 0.0;
-	ii = 0;
-	for( ; ii<n-1; ii+=2)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		tmp = pC[0+ps*1];
-		pW[1+ldw*0] = tmp;
-		if(m>1)
-			{
-			d0 = pD[1+ps*0];
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] = tmp;
-			tmp = pC[1+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] = tmp;
-			if(m>2)
-				{
-				d0 = pD[2+ps*0];
-				d1 = pD[2+ps*1];
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += tmp * d0;
-				pW[0+ldw*1] += tmp * d1;
-				pW[0+ldw*2] = tmp;
-				tmp = pC[2+ps*1];
-				pW[1+ldw*0] += tmp * d0;
-				pW[1+ldw*1] += tmp * d1;
-				pW[1+ldw*2] = tmp;
-				if(m>3)
-					{
-					d0 = pD[3+ps*0];
-					d1 = pD[3+ps*1];
-					d2 = pD[3+ps*2];
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += tmp * d0;
-					pW[0+ldw*1] += tmp * d1;
-					pW[0+ldw*2] += tmp * d2;
-					pW[0+ldw*3] = tmp;
-					tmp = pC[3+ps*1];
-					pW[1+ldw*0] += tmp * d0;
-					pW[1+ldw*1] += tmp * d1;
-					pW[1+ldw*2] += tmp * d2;
-					pW[1+ldw*3] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pD[0+jj*sdd+ps*0];
-			d1 = pD[0+jj*sdd+ps*1];
-			d2 = pD[0+jj*sdd+ps*2];
-			d3 = pD[0+jj*sdd+ps*3];
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[0+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[1+jj*sdd+ps*0];
-			d1 = pD[1+jj*sdd+ps*1];
-			d2 = pD[1+jj*sdd+ps*2];
-			d3 = pD[1+jj*sdd+ps*3];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[1+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[2+jj*sdd+ps*0];
-			d1 = pD[2+jj*sdd+ps*1];
-			d2 = pD[2+jj*sdd+ps*2];
-			d3 = pD[2+jj*sdd+ps*3];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[2+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			//
-			d0 = pD[3+jj*sdd+ps*0];
-			d1 = pD[3+jj*sdd+ps*1];
-			d2 = pD[3+jj*sdd+ps*2];
-			d3 = pD[3+jj*sdd+ps*3];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[3+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pD[ll+jj*sdd+ps*0];
-			d1 = pD[ll+jj*sdd+ps*1];
-			d2 = pD[ll+jj*sdd+ps*2];
-			d3 = pD[ll+jj*sdd+ps*3];
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * d0;
-			pW[0+ldw*1] += tmp * d1;
-			pW[0+ldw*2] += tmp * d2;
-			pW[0+ldw*3] += tmp * d3;
-			tmp = pC[ll+jj*sdc+ps*1];
-			pW[1+ldw*0] += tmp * d0;
-			pW[1+ldw*1] += tmp * d1;
-			pW[1+ldw*2] += tmp * d2;
-			pW[1+ldw*3] += tmp * d3;
-			}
-		// compute W^T *= T
-		pW[0+ldw*3] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[0+ldw*1] + pT[3+ldt*2]*pW[0+ldw*2] + pT[3+ldt*3]*pW[0+ldw*3];
-		pW[1+ldw*3] = pT[3+ldt*0]*pW[1+ldw*0] + pT[3+ldt*1]*pW[1+ldw*1] + pT[3+ldt*2]*pW[1+ldw*2] + pT[3+ldt*3]*pW[1+ldw*3];
-		pW[0+ldw*2] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[0+ldw*1] + pT[2+ldt*2]*pW[0+ldw*2];
-		pW[1+ldw*2] = pT[2+ldt*0]*pW[1+ldw*0] + pT[2+ldt*1]*pW[1+ldw*1] + pT[2+ldt*2]*pW[1+ldw*2];
-		pW[0+ldw*1] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[0+ldw*1];
-		pW[1+ldw*1] = pT[1+ldt*0]*pW[1+ldw*0] + pT[1+ldt*1]*pW[1+ldw*1];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		pW[1+ldw*0] = pT[0+ldt*0]*pW[1+ldw*0];
-		// compute C -= V * W^T
-		pC[0+ps*0] -= pW[0+ldw*0];
-		pC[0+ps*1] -= pW[1+ldw*0];
-		if(m>1)
-			{
-			pC[1+ps*0] -= pD[1+ps*0]*pW[0+ldw*0] + pW[0+ldw*1];
-			pC[1+ps*1] -= pD[1+ps*0]*pW[1+ldw*0] + pW[1+ldw*1];
-			if(m>2)
-				{
-				pC[2+ps*0] -= pD[2+ps*0]*pW[0+ldw*0] + pD[2+ps*1]*pW[0+ldw*1] + pW[0+ldw*2];
-				pC[2+ps*1] -= pD[2+ps*0]*pW[1+ldw*0] + pD[2+ps*1]*pW[1+ldw*1] + pW[1+ldw*2];
-				if(m>3)
-					{
-					pC[3+ps*0] -= pD[3+ps*0]*pW[0+ldw*0] + pD[3+ps*1]*pW[0+ldw*1] + pD[3+ps*2]*pW[0+ldw*2] + pW[0+ldw*3];
-					pC[3+ps*1] -= pD[3+ps*0]*pW[1+ldw*0] + pD[3+ps*1]*pW[1+ldw*1] + pD[3+ps*2]*pW[1+ldw*2] + pW[1+ldw*3];
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pD[0+jj*sdd+ps*0];
-			d1 = pD[0+jj*sdd+ps*1];
-			d2 = pD[0+jj*sdd+ps*2];
-			d3 = pD[0+jj*sdd+ps*3];
-			pC[0+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[0+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[1+jj*sdd+ps*0];
-			d1 = pD[1+jj*sdd+ps*1];
-			d2 = pD[1+jj*sdd+ps*2];
-			d3 = pD[1+jj*sdd+ps*3];
-			pC[1+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[1+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[2+jj*sdd+ps*0];
-			d1 = pD[2+jj*sdd+ps*1];
-			d2 = pD[2+jj*sdd+ps*2];
-			d3 = pD[2+jj*sdd+ps*3];
-			pC[2+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[2+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			//
-			d0 = pD[3+jj*sdd+ps*0];
-			d1 = pD[3+jj*sdd+ps*1];
-			d2 = pD[3+jj*sdd+ps*2];
-			d3 = pD[3+jj*sdd+ps*3];
-			pC[3+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[3+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pD[ll+jj*sdd+ps*0];
-			d1 = pD[ll+jj*sdd+ps*1];
-			d2 = pD[ll+jj*sdd+ps*2];
-			d3 = pD[ll+jj*sdd+ps*3];
-			pC[ll+jj*sdc+ps*0] -= d0*pW[0+ldw*0] + d1*pW[0+ldw*1] + d2*pW[0+ldw*2] + d3*pW[0+ldw*3];
-			pC[ll+jj*sdc+ps*1] -= d0*pW[1+ldw*0] + d1*pW[1+ldw*1] + d2*pW[1+ldw*2] + d3*pW[1+ldw*3];
-			}
-		}
-	for( ; ii<n; ii++)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		if(m>1)
-			{
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += tmp * pD[1+ps*0];
-			pW[0+ldw*1] = tmp;
-			if(m>2)
-				{
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += tmp * pD[2+ps*0];
-				pW[0+ldw*1] += tmp * pD[2+ps*1];
-				pW[0+ldw*2] = tmp;
-				if(m>3)
-					{
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += tmp * pD[3+ps*0];
-					pW[0+ldw*1] += tmp * pD[3+ps*1];
-					pW[0+ldw*2] += tmp * pD[3+ps*2];
-					pW[0+ldw*3] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[0+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[0+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[0+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[0+jj*sdd+ps*3];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[1+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[1+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[1+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[1+jj*sdd+ps*3];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[2+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[2+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[2+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[2+jj*sdd+ps*3];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[3+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[3+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[3+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[3+jj*sdd+ps*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += tmp * pD[ll+jj*sdd+ps*0];
-			pW[0+ldw*1] += tmp * pD[ll+jj*sdd+ps*1];
-			pW[0+ldw*2] += tmp * pD[ll+jj*sdd+ps*2];
-			pW[0+ldw*3] += tmp * pD[ll+jj*sdd+ps*3];
-			}
-		// compute W^T *= T
-		pW[0+ldw*3] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[0+ldw*1] + pT[3+ldt*2]*pW[0+ldw*2] + pT[3+ldt*3]*pW[0+ldw*3];
-		pW[0+ldw*2] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[0+ldw*1] + pT[2+ldt*2]*pW[0+ldw*2];
-		pW[0+ldw*1] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[0+ldw*1];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		// compute C -= V * W^T
-		pC[0+ps*0] -= pW[0+ldw*0];
-		if(m>1)
-			{
-			pC[1+ps*0] -= pD[1+ps*0]*pW[0+ldw*0] + pW[0+ldw*1];
-			if(m>2)
-				{
-				pC[2+ps*0] -= pD[2+ps*0]*pW[0+ldw*0] + pD[2+ps*1]*pW[0+ldw*1] + pW[0+ldw*2];
-				if(m>3)
-					{
-					pC[3+ps*0] -= pD[3+ps*0]*pW[0+ldw*0] + pD[3+ps*1]*pW[0+ldw*1] + pD[3+ps*2]*pW[0+ldw*2] + pW[0+ldw*3];
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			pC[0+jj*sdc+ps*0] -= pD[0+jj*sdd+ps*0]*pW[0+ldw*0] + pD[0+jj*sdd+ps*1]*pW[0+ldw*1] + pD[0+jj*sdd+ps*2]*pW[0+ldw*2] + pD[0+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[1+jj*sdc+ps*0] -= pD[1+jj*sdd+ps*0]*pW[0+ldw*0] + pD[1+jj*sdd+ps*1]*pW[0+ldw*1] + pD[1+jj*sdd+ps*2]*pW[0+ldw*2] + pD[1+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[2+jj*sdc+ps*0] -= pD[2+jj*sdd+ps*0]*pW[0+ldw*0] + pD[2+jj*sdd+ps*1]*pW[0+ldw*1] + pD[2+jj*sdd+ps*2]*pW[0+ldw*2] + pD[2+jj*sdd+ps*3]*pW[0+ldw*3];
-			pC[3+jj*sdc+ps*0] -= pD[3+jj*sdd+ps*0]*pW[0+ldw*0] + pD[3+jj*sdd+ps*1]*pW[0+ldw*1] + pD[3+jj*sdd+ps*2]*pW[0+ldw*2] + pD[3+jj*sdd+ps*3]*pW[0+ldw*3];
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			pC[ll+jj*sdc+ps*0] -= pD[ll+jj*sdd+ps*0]*pW[0+ldw*0] + pD[ll+jj*sdd+ps*1]*pW[0+ldw*1] + pD[ll+jj*sdd+ps*2]*pW[0+ldw*2] + pD[ll+jj*sdd+ps*3]*pW[0+ldw*3];
-			}
-		}
-
-	return;
-	}
-
-
-
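-// same update as kernel_dlarf_4_lib4, but the reflector block is additionally
-// passed transposed in pVt (pVt[j+ps*i] holds element (i,j) of V), so the four
-// reflector entries needed for each row of C are contiguous in memory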
-void kernel_dlarf_t_4_lib4(int m, int n, double *pD, int sdd, double *pVt, double *dD, double *pC0, int sdc)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, ll;
-	const int ps = 4;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	double c00, c01,
-	       c10, c11,
-	       c20, c21,
-	       c30, c31;
-	double a0, a1, a2, a3, b0, b1;
-	double tmp, d0, d1, d2, d3;
-	double *pC;
-	double pT[16];// = {};
-	int ldt = 4;
-	double pW[8];// = {};
-	int ldw = 4;
-	// dot product of v
-	v10 = 0.0;
-	v20 = 0.0;
-	v30 = 0.0;
-	v21 = 0.0;
-	v31 = 0.0;
-	v32 = 0.0;
-	if(m>1)
-		{
-		v10 = 1.0 * pD[1+ps*0];
-		if(m>2)
-			{
-			v10 += pD[2+ps*1] * pD[2+ps*0];
-			v20 = 1.0 * pD[2+ps*0];
-			v21 = 1.0 * pD[2+ps*1];
-			if(m>3)
-				{
-				v10 += pD[3+ps*1] * pD[3+ps*0];
-				v20 += pD[3+ps*2] * pD[3+ps*0];
-				v21 += pD[3+ps*2] * pD[3+ps*1];
-				v30 = 1.0 * pD[3+ps*0];
-				v31 = 1.0 * pD[3+ps*1];
-				v32 = 1.0 * pD[3+ps*2];
-				}
-			}
-		}
-	for(ii=4; ii<m-3; ii+=4)
-		{
-		v10 += pD[0+ii*sdd+ps*1] * pD[0+ii*sdd+ps*0];
-		v20 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*0];
-		v21 += pD[0+ii*sdd+ps*2] * pD[0+ii*sdd+ps*1];
-		v30 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*0];
-		v31 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*1];
-		v32 += pD[0+ii*sdd+ps*3] * pD[0+ii*sdd+ps*2];
-		v10 += pD[1+ii*sdd+ps*1] * pD[1+ii*sdd+ps*0];
-		v20 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*0];
-		v21 += pD[1+ii*sdd+ps*2] * pD[1+ii*sdd+ps*1];
-		v30 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*0];
-		v31 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*1];
-		v32 += pD[1+ii*sdd+ps*3] * pD[1+ii*sdd+ps*2];
-		v10 += pD[2+ii*sdd+ps*1] * pD[2+ii*sdd+ps*0];
-		v20 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*0];
-		v21 += pD[2+ii*sdd+ps*2] * pD[2+ii*sdd+ps*1];
-		v30 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*0];
-		v31 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*1];
-		v32 += pD[2+ii*sdd+ps*3] * pD[2+ii*sdd+ps*2];
-		v10 += pD[3+ii*sdd+ps*1] * pD[3+ii*sdd+ps*0];
-		v20 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*0];
-		v21 += pD[3+ii*sdd+ps*2] * pD[3+ii*sdd+ps*1];
-		v30 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*0];
-		v31 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*1];
-		v32 += pD[3+ii*sdd+ps*3] * pD[3+ii*sdd+ps*2];
-		}
-	for(ll=0; ll<m-ii; ll++)
-		{
-		v10 += pD[ll+ii*sdd+ps*1] * pD[ll+ii*sdd+ps*0];
-		v20 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*0];
-		v21 += pD[ll+ii*sdd+ps*2] * pD[ll+ii*sdd+ps*1];
-		v30 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*0];
-		v31 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*1];
-		v32 += pD[ll+ii*sdd+ps*3] * pD[ll+ii*sdd+ps*2];
-		}
-	// compute lower triangular T containing tau for matrix update
-	pT[0+ldt*0] = dD[0];
-	pT[1+ldt*1] = dD[1];
-	pT[2+ldt*2] = dD[2];
-	pT[3+ldt*3] = dD[3];
-	pT[1+ldt*0] = - dD[1] * (v10*pT[0+ldt*0]);
-	pT[2+ldt*1] = - dD[2] * (v21*pT[1+ldt*1]);
-	pT[3+ldt*2] = - dD[3] * (v32*pT[2+ldt*2]);
-	pT[2+ldt*0] = - dD[2] * (v20*pT[0+ldt*0] + v21*pT[1+ldt*0]);
-	pT[3+ldt*1] = - dD[3] * (v31*pT[1+ldt*1] + v32*pT[2+ldt*1]);
-	pT[3+ldt*0] = - dD[3] * (v30*pT[0+ldt*0] + v31*pT[1+ldt*0] + v32*pT[2+ldt*0]);
-	// downgrade matrix
-	pW[0] = 0.0;
-	pW[1] = 0.0;
-	pW[2] = 0.0;
-	pW[3] = 0.0;
-	pW[4] = 0.0;
-	pW[5] = 0.0;
-	pW[6] = 0.0;
-	pW[7] = 0.0;
-	ii = 0;
-	for( ; ii<n-1; ii+=2)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		tmp = pC[0+ps*1];
-		pW[0+ldw*1] = tmp;
-		if(m>1)
-			{
-			d0 = pVt[0+ps*1];
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] = tmp;
-			tmp = pC[1+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] = tmp;
-			if(m>2)
-				{
-				d0 = pVt[0+ps*2];
-				d1 = pVt[1+ps*2];
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += d0 * tmp;
-				pW[1+ldw*0] += d1 * tmp;
-				pW[2+ldw*0] = tmp;
-				tmp = pC[2+ps*1];
-				pW[0+ldw*1] += d0 * tmp;
-				pW[1+ldw*1] += d1 * tmp;
-				pW[2+ldw*1] = tmp;
-				if(m>3)
-					{
-					d0 = pVt[0+ps*3];
-					d1 = pVt[1+ps*3];
-					d2 = pVt[2+ps*3];
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += d0 * tmp;
-					pW[1+ldw*0] += d1 * tmp;
-					pW[2+ldw*0] += d2 * tmp;
-					pW[3+ldw*0] = tmp;
-					tmp = pC[3+ps*1];
-					pW[0+ldw*1] += d0 * tmp;
-					pW[1+ldw*1] += d1 * tmp;
-					pW[2+ldw*1] += d2 * tmp;
-					pW[3+ldw*1] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pVt[0+ps*(0+jj)];
-			d1 = pVt[1+ps*(0+jj)];
-			d2 = pVt[2+ps*(0+jj)];
-			d3 = pVt[3+ps*(0+jj)];
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			tmp = pC[0+jj*sdc+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] += d1 * tmp;
-			pW[2+ldw*1] += d2 * tmp;
-			pW[3+ldw*1] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(1+jj)];
-			d1 = pVt[1+ps*(1+jj)];
-			d2 = pVt[2+ps*(1+jj)];
-			d3 = pVt[3+ps*(1+jj)];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			tmp = pC[1+jj*sdc+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] += d1 * tmp;
-			pW[2+ldw*1] += d2 * tmp;
-			pW[3+ldw*1] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(2+jj)];
-			d1 = pVt[1+ps*(2+jj)];
-			d2 = pVt[2+ps*(2+jj)];
-			d3 = pVt[3+ps*(2+jj)];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			tmp = pC[2+jj*sdc+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] += d1 * tmp;
-			pW[2+ldw*1] += d2 * tmp;
-			pW[3+ldw*1] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(3+jj)];
-			d1 = pVt[1+ps*(3+jj)];
-			d2 = pVt[2+ps*(3+jj)];
-			d3 = pVt[3+ps*(3+jj)];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			tmp = pC[3+jj*sdc+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] += d1 * tmp;
-			pW[2+ldw*1] += d2 * tmp;
-			pW[3+ldw*1] += d3 * tmp;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pVt[0+ps*(ll+jj)];
-			d1 = pVt[1+ps*(ll+jj)];
-			d2 = pVt[2+ps*(ll+jj)];
-			d3 = pVt[3+ps*(ll+jj)];
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			tmp = pC[ll+jj*sdc+ps*1];
-			pW[0+ldw*1] += d0 * tmp;
-			pW[1+ldw*1] += d1 * tmp;
-			pW[2+ldw*1] += d2 * tmp;
-			pW[3+ldw*1] += d3 * tmp;
-			}
-		// compute W^T *= T
-		pW[3+ldw*0] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[1+ldw*0] + pT[3+ldt*2]*pW[2+ldw*0] + pT[3+ldt*3]*pW[3+ldw*0];
-		pW[3+ldw*1] = pT[3+ldt*0]*pW[0+ldw*1] + pT[3+ldt*1]*pW[1+ldw*1] + pT[3+ldt*2]*pW[2+ldw*1] + pT[3+ldt*3]*pW[3+ldw*1];
-		pW[2+ldw*0] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[1+ldw*0] + pT[2+ldt*2]*pW[2+ldw*0];
-		pW[2+ldw*1] = pT[2+ldt*0]*pW[0+ldw*1] + pT[2+ldt*1]*pW[1+ldw*1] + pT[2+ldt*2]*pW[2+ldw*1];
-		pW[1+ldw*0] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[1+ldw*0];
-		pW[1+ldw*1] = pT[1+ldt*0]*pW[0+ldw*1] + pT[1+ldt*1]*pW[1+ldw*1];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		pW[0+ldw*1] = pT[0+ldt*0]*pW[0+ldw*1];
-		// compute C -= V * W^T
-		jj = 0;
-		// load
-		c00 = pC[0+jj*sdc+ps*0];
-		c10 = pC[1+jj*sdc+ps*0];
-		c20 = pC[2+jj*sdc+ps*0];
-		c30 = pC[3+jj*sdc+ps*0];
-		c01 = pC[0+jj*sdc+ps*1];
-		c11 = pC[1+jj*sdc+ps*1];
-		c21 = pC[2+jj*sdc+ps*1];
-		c31 = pC[3+jj*sdc+ps*1];
-		// rank1
-		a1 = pD[1+jj*sdd+ps*0];
-		a2 = pD[2+jj*sdd+ps*0];
-		a3 = pD[3+jj*sdd+ps*0];
-		b0 = pW[0+ldw*0];
-		c00 -= b0;
-		c10 -= a1*b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[0+ldw*1];
-		c01 -= b1;
-		c11 -= a1*b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank2
-		a2 = pD[2+jj*sdd+ps*1];
-		a3 = pD[3+jj*sdd+ps*1];
-		b0 = pW[1+ldw*0];
-		c10 -= b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		b1 = pW[1+ldw*1];
-		c11 -= b1;
-		c21 -= a2*b1;
-		c31 -= a3*b1;
-		// rank3
-		a3 = pD[3+jj*sdd+ps*2];
-		b0 = pW[2+ldw*0];
-		c20 -= b0;
-		c30 -= a3*b0;
-		b1 = pW[2+ldw*1];
-		c21 -= b1;
-		c31 -= a3*b1;
-		// rank4
-		a3 = pD[3+jj*sdd+ps*3];
-		b0 = pW[3+ldw*0];
-		c30 -= b0;
-		b1 = pW[3+ldw*1];
-		c31 -= b1;
-		// store
-		pC[0+jj*sdc+ps*0] = c00;
-		pC[0+jj*sdc+ps*1] = c01;
-		if(m>1)
-			{
-			pC[1+jj*sdc+ps*0] = c10;
-			pC[1+jj*sdc+ps*1] = c11;
-			if(m>2)
-				{
-				pC[2+jj*sdc+ps*0] = c20;
-				pC[2+jj*sdc+ps*1] = c21;
-				if(m>3)
-					{
-					pC[3+jj*sdc+ps*0] = c30;
-					pC[3+jj*sdc+ps*1] = c31;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			// load
-			c00 = pC[0+jj*sdc+ps*0];
-			c10 = pC[1+jj*sdc+ps*0];
-			c20 = pC[2+jj*sdc+ps*0];
-			c30 = pC[3+jj*sdc+ps*0];
-			c01 = pC[0+jj*sdc+ps*1];
-			c11 = pC[1+jj*sdc+ps*1];
-			c21 = pC[2+jj*sdc+ps*1];
-			c31 = pC[3+jj*sdc+ps*1];
-			//
-			a0 = pD[0+jj*sdd+ps*0];
-			a1 = pD[1+jj*sdd+ps*0];
-			a2 = pD[2+jj*sdd+ps*0];
-			a3 = pD[3+jj*sdd+ps*0];
-			b0 = pW[0+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			b1 = pW[0+ldw*1];
-			c01 -= a0*b1;
-			c11 -= a1*b1;
-			c21 -= a2*b1;
-			c31 -= a3*b1;
-			//
-			a0 = pD[0+jj*sdd+ps*1];
-			a1 = pD[1+jj*sdd+ps*1];
-			a2 = pD[2+jj*sdd+ps*1];
-			a3 = pD[3+jj*sdd+ps*1];
-			b0 = pW[1+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			b1 = pW[1+ldw*1];
-			c01 -= a0*b1;
-			c11 -= a1*b1;
-			c21 -= a2*b1;
-			c31 -= a3*b1;
-			//
-			a0 = pD[0+jj*sdd+ps*2];
-			a1 = pD[1+jj*sdd+ps*2];
-			a2 = pD[2+jj*sdd+ps*2];
-			a3 = pD[3+jj*sdd+ps*2];
-			b0 = pW[2+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			b1 = pW[2+ldw*1];
-			c01 -= a0*b1;
-			c11 -= a1*b1;
-			c21 -= a2*b1;
-			c31 -= a3*b1;
-			//
-			a0 = pD[0+jj*sdd+ps*3];
-			a1 = pD[1+jj*sdd+ps*3];
-			a2 = pD[2+jj*sdd+ps*3];
-			a3 = pD[3+jj*sdd+ps*3];
-			b0 = pW[3+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			b1 = pW[3+ldw*1];
-			c01 -= a0*b1;
-			c11 -= a1*b1;
-			c21 -= a2*b1;
-			c31 -= a3*b1;
-			// store
-			pC[0+jj*sdc+ps*0] = c00;
-			pC[1+jj*sdc+ps*0] = c10;
-			pC[2+jj*sdc+ps*0] = c20;
-			pC[3+jj*sdc+ps*0] = c30;
-			pC[0+jj*sdc+ps*1] = c01;
-			pC[1+jj*sdc+ps*1] = c11;
-			pC[2+jj*sdc+ps*1] = c21;
-			pC[3+jj*sdc+ps*1] = c31;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			// load
-			c00 = pC[ll+jj*sdc+ps*0];
-			c01 = pC[ll+jj*sdc+ps*1];
-			//
-			a0 = pD[ll+jj*sdd+ps*0];
-			b0 = pW[0+ldw*0];
-			c00 -= a0*b0;
-			b1 = pW[0+ldw*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*1];
-			b0 = pW[1+ldw*0];
-			c00 -= a0*b0;
-			b1 = pW[1+ldw*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*2];
-			b0 = pW[2+ldw*0];
-			c00 -= a0*b0;
-			b1 = pW[2+ldw*1];
-			c01 -= a0*b1;
-			//
-			a0 = pD[ll+jj*sdd+ps*3];
-			b0 = pW[3+ldw*0];
-			c00 -= a0*b0;
-			b1 = pW[3+ldw*1];
-			c01 -= a0*b1;
-			// store
-			pC[ll+jj*sdc+ps*0] = c00;
-			pC[ll+jj*sdc+ps*1] = c01;
-			}
-		}
-	for( ; ii<n; ii++)
-		{
-		pC = pC0+ii*ps;
-		// compute W^T = C^T * V
-		tmp = pC[0+ps*0];
-		pW[0+ldw*0] = tmp;
-		if(m>1)
-			{
-			d0 = pVt[0+ps*1];
-			tmp = pC[1+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] = tmp;
-			if(m>2)
-				{
-				d0 = pVt[0+ps*2];
-				d1 = pVt[1+ps*2];
-				tmp = pC[2+ps*0];
-				pW[0+ldw*0] += d0 * tmp;
-				pW[1+ldw*0] += d1 * tmp;
-				pW[2+ldw*0] = tmp;
-				if(m>3)
-					{
-					d0 = pVt[0+ps*3];
-					d1 = pVt[1+ps*3];
-					d2 = pVt[2+ps*3];
-					tmp = pC[3+ps*0];
-					pW[0+ldw*0] += d0 * tmp;
-					pW[1+ldw*0] += d1 * tmp;
-					pW[2+ldw*0] += d2 * tmp;
-					pW[3+ldw*0] = tmp;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			//
-			d0 = pVt[0+ps*(0+jj)];
-			d1 = pVt[1+ps*(0+jj)];
-			d2 = pVt[2+ps*(0+jj)];
-			d3 = pVt[3+ps*(0+jj)];
-			tmp = pC[0+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(1+jj)];
-			d1 = pVt[1+ps*(1+jj)];
-			d2 = pVt[2+ps*(1+jj)];
-			d3 = pVt[3+ps*(1+jj)];
-			tmp = pC[1+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(2+jj)];
-			d1 = pVt[1+ps*(2+jj)];
-			d2 = pVt[2+ps*(2+jj)];
-			d3 = pVt[3+ps*(2+jj)];
-			tmp = pC[2+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			//
-			d0 = pVt[0+ps*(3+jj)];
-			d1 = pVt[1+ps*(3+jj)];
-			d2 = pVt[2+ps*(3+jj)];
-			d3 = pVt[3+ps*(3+jj)];
-			tmp = pC[3+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			d0 = pVt[0+ps*(ll+jj)];
-			d1 = pVt[1+ps*(ll+jj)];
-			d2 = pVt[2+ps*(ll+jj)];
-			d3 = pVt[3+ps*(ll+jj)];
-			tmp = pC[ll+jj*sdc+ps*0];
-			pW[0+ldw*0] += d0 * tmp;
-			pW[1+ldw*0] += d1 * tmp;
-			pW[2+ldw*0] += d2 * tmp;
-			pW[3+ldw*0] += d3 * tmp;
-			}
-		// compute W^T *= T
-		pW[3+ldw*0] = pT[3+ldt*0]*pW[0+ldw*0] + pT[3+ldt*1]*pW[1+ldw*0] + pT[3+ldt*2]*pW[2+ldw*0] + pT[3+ldt*3]*pW[3+ldw*0];
-		pW[2+ldw*0] = pT[2+ldt*0]*pW[0+ldw*0] + pT[2+ldt*1]*pW[1+ldw*0] + pT[2+ldt*2]*pW[2+ldw*0];
-		pW[1+ldw*0] = pT[1+ldt*0]*pW[0+ldw*0] + pT[1+ldt*1]*pW[1+ldw*0];
-		pW[0+ldw*0] = pT[0+ldt*0]*pW[0+ldw*0];
-		// compute C -= V * W^T
-		jj = 0;
-		// load
-		c00 = pC[0+jj*sdc+ps*0];
-		c10 = pC[1+jj*sdc+ps*0];
-		c20 = pC[2+jj*sdc+ps*0];
-		c30 = pC[3+jj*sdc+ps*0];
-		// rank1
-		a1 = pD[1+jj*sdd+ps*0];
-		a2 = pD[2+jj*sdd+ps*0];
-		a3 = pD[3+jj*sdd+ps*0];
-		b0 = pW[0+ldw*0];
-		c00 -= b0;
-		c10 -= a1*b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		// rank2
-		a2 = pD[2+jj*sdd+ps*1];
-		a3 = pD[3+jj*sdd+ps*1];
-		b0 = pW[1+ldw*0];
-		c10 -= b0;
-		c20 -= a2*b0;
-		c30 -= a3*b0;
-		// rank3
-		a3 = pD[3+jj*sdd+ps*2];
-		b0 = pW[2+ldw*0];
-		c20 -= b0;
-		c30 -= a3*b0;
-		// rank4
-		a3 = pD[3+jj*sdd+ps*3];
-		b0 = pW[3+ldw*0];
-		c30 -= b0;
-		// store
-		pC[0+jj*sdc+ps*0] = c00;
-		if(m>1)
-			{
-			pC[1+jj*sdc+ps*0] = c10;
-			if(m>2)
-				{
-				pC[2+jj*sdc+ps*0] = c20;
-				if(m>3)
-					{
-					pC[3+jj*sdc+ps*0] = c30;
-					}
-				}
-			}
-		for(jj=4; jj<m-3; jj+=4)
-			{
-			// load
-			c00 = pC[0+jj*sdc+ps*0];
-			c10 = pC[1+jj*sdc+ps*0];
-			c20 = pC[2+jj*sdc+ps*0];
-			c30 = pC[3+jj*sdc+ps*0];
-			//
-			a0 = pD[0+jj*sdd+ps*0];
-			a1 = pD[1+jj*sdd+ps*0];
-			a2 = pD[2+jj*sdd+ps*0];
-			a3 = pD[3+jj*sdd+ps*0];
-			b0 = pW[0+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*1];
-			a1 = pD[1+jj*sdd+ps*1];
-			a2 = pD[2+jj*sdd+ps*1];
-			a3 = pD[3+jj*sdd+ps*1];
-			b0 = pW[1+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*2];
-			a1 = pD[1+jj*sdd+ps*2];
-			a2 = pD[2+jj*sdd+ps*2];
-			a3 = pD[3+jj*sdd+ps*2];
-			b0 = pW[2+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			//
-			a0 = pD[0+jj*sdd+ps*3];
-			a1 = pD[1+jj*sdd+ps*3];
-			a2 = pD[2+jj*sdd+ps*3];
-			a3 = pD[3+jj*sdd+ps*3];
-			b0 = pW[3+ldw*0];
-			c00 -= a0*b0;
-			c10 -= a1*b0;
-			c20 -= a2*b0;
-			c30 -= a3*b0;
-			// store
-			pC[0+jj*sdc+ps*0] = c00;
-			pC[1+jj*sdc+ps*0] = c10;
-			pC[2+jj*sdc+ps*0] = c20;
-			pC[3+jj*sdc+ps*0] = c30;
-			}
-		for(ll=0; ll<m-jj; ll++)
-			{
-			// load
-			c00 = pC[ll+jj*sdc+ps*0];
-			//
-			a0 = pD[ll+jj*sdd+ps*0];
-			b0 = pW[0+ldw*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*1];
-			b0 = pW[1+ldw*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*2];
-			b0 = pW[2+ldw*0];
-			c00 -= a0*b0;
-			//
-			a0 = pD[ll+jj*sdd+ps*3];
-			b0 = pW[3+ldw*0];
-			c00 -= a0*b0;
-			// store
-			pC[ll+jj*sdc+ps*0] = c00;
-			}
-		}
-
-	return;
-	}
-
-
-
-// assume n>=4
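-// unblocked LQ factorization of a 4 x n block: the reflectors are generated
-// along the rows of pD (scalar factors in dD) and applied to the rows below,
-// mirroring kernel_dgeqrf_4_lib4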
-void kernel_dgelqf_4_lib4(int n, double *pD, double *dD)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w1, w2, w3;
-	const int ps = 4;
-	// first column
-	beta = 0.0;
-	for(ii=1; ii<n; ii++)
-		{
-		tmp = pD[0+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[0] = 0.0;
-		}
-	else
-		{
-		alpha = pD[0+ps*0];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau0
-		dD[0] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v0
-		pD[0+ps*0] = beta;
-		for(ii=1; ii<n; ii++)
-			{
-			pD[0+ps*ii] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w1 = pD[1+ps*0];
-	w2 = pD[2+ps*0];
-	w3 = pD[3+ps*0];
-	w1 += pD[1+ps*1] * pD[0+ps*1];
-	w2 += pD[2+ps*1] * pD[0+ps*1];
-	w3 += pD[3+ps*1] * pD[0+ps*1];
-	w1 += pD[1+ps*2] * pD[0+ps*2];
-	w2 += pD[2+ps*2] * pD[0+ps*2];
-	w3 += pD[3+ps*2] * pD[0+ps*2];
-	w1 += pD[1+ps*3] * pD[0+ps*3];
-	w2 += pD[2+ps*3] * pD[0+ps*3];
-	w3 += pD[3+ps*3] * pD[0+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		w1 += pD[1+ps*ii] * pD[0+ps*ii];
-		w2 += pD[2+ps*ii] * pD[0+ps*ii];
-		w3 += pD[3+ps*ii] * pD[0+ps*ii];
-		}
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	pD[1+ps*0] += w1;
-	pD[2+ps*0] += w2;
-	pD[3+ps*0] += w3;
-	pD[1+ps*1] += w1 * pD[0+ps*1];
-	pD[2+ps*1] += w2 * pD[0+ps*1];
-	pD[3+ps*1] += w3 * pD[0+ps*1];
-	pD[1+ps*2] += w1 * pD[0+ps*2];
-	pD[2+ps*2] += w2 * pD[0+ps*2];
-	pD[3+ps*2] += w3 * pD[0+ps*2];
-	pD[1+ps*3] += w1 * pD[0+ps*3];
-	pD[2+ps*3] += w2 * pD[0+ps*3];
-	pD[3+ps*3] += w3 * pD[0+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] += w1 * pD[0+ps*ii];
-		pD[2+ps*ii] += w2 * pD[0+ps*ii];
-		pD[3+ps*ii] += w3 * pD[0+ps*ii];
-		}
-	// second column
-	beta = 0.0;
-	for(ii=2; ii<n; ii++)
-		{
-		tmp = pD[1+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[1] = 0.0;
-		}
-	else
-		{
-		alpha = pD[1+ps*1];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau1
-		dD[1] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v1
-		pD[1+ps*1] = beta;
-		for(ii=2; ii<n; ii++)
-			{
-			pD[1+ps*ii] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w2 = pD[2+ps*1];
-	w3 = pD[3+ps*1];
-	w2 += pD[2+ps*2] * pD[1+ps*2];
-	w3 += pD[3+ps*2] * pD[1+ps*2];
-	w2 += pD[2+ps*3] * pD[1+ps*3];
-	w3 += pD[3+ps*3] * pD[1+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		w2 += pD[2+ps*ii] * pD[1+ps*ii];
-		w3 += pD[3+ps*ii] * pD[1+ps*ii];
-		}
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	pD[2+ps*1] += w2;
-	pD[3+ps*1] += w3;
-	pD[2+ps*2] += w2 * pD[1+ps*2];
-	pD[3+ps*2] += w3 * pD[1+ps*2];
-	pD[2+ps*3] += w2 * pD[1+ps*3];
-	pD[3+ps*3] += w3 * pD[1+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] += w2 * pD[1+ps*ii];
-		pD[3+ps*ii] += w3 * pD[1+ps*ii];
-		}
-	// third column
-	beta = 0.0;
-	for(ii=3; ii<n; ii++)
-		{
-		tmp = pD[2+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[2] = 0.0;
-		}
-	else
-		{
-		alpha = pD[2+ps*2];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau2
-		dD[2] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v2
-		pD[2+ps*2] = beta;
-		for(ii=3; ii<n; ii++)
-			{
-			pD[2+ps*ii] *= tmp;
-			}
-		}
-	// gemv_t & ger
-	w3 = pD[3+ps*2];
-	w3 += pD[3+ps*3] * pD[2+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		w3 += pD[3+ps*ii] * pD[2+ps*ii];
-		}
-	w3 = - dD[2] * w3;
-	pD[3+ps*2] += w3;
-	pD[3+ps*3] += w3 * pD[2+ps*3];
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] += w3 * pD[2+ps*ii];
-		}
-	// fourth column
-	beta = 0.0;
-	for(ii=4; ii<n; ii++)
-		{
-		tmp = pD[3+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		// tau
-		dD[3] = 0.0;
-		}
-	else
-		{
-		alpha = pD[3+ps*3];
-		beta += alpha*alpha;
-		beta = sqrt(beta);
-		if(alpha>0)
-			beta = -beta;
-		// tau3
-		dD[3] = (beta-alpha) / beta;
-		tmp = 1.0 / (alpha-beta);
-		// compute v3
-		pD[3+ps*3] = beta;
-		for(ii=4; ii<n; ii++)
-			{
-			pD[3+ps*ii] *= tmp;
-			}
-		}
-	return;
-	}
-
-
-
-// unblocked algorithm
-void kernel_dgelqf_vs_lib4(int m, int n, int k, int offD, double *pD, int sdd, double *dD)
-	{
-	if(m<=0 | n<=0)
-		return;
-	int ii, jj, kk, ll, imax, jmax, jmax0, kmax, kmax0;
-	const int ps = 4;
-	imax = k;//m<n ? m : n;
-	double alpha, beta, tmp;
-	double w00, w01,
-		   w10, w11,
-		   w20, w21,
-		   w30, w31;
-	double *pC00, *pC10, *pC10a, *pC20, *pC20a, *pC01, *pC11;
-	double pT[4];
-	int ldt = 2;
-	double *pD0 = pD-offD;
-	ii = 0;
-#if 1
-	for(; ii<imax-1; ii+=2)
-		{
-		// first row
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC00[0] = beta;
-			for(jj=1; jj<n-ii; jj++)
-				pC00[0+ps*jj] *= tmp;
-			}
-		pC10 = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-		kmax = n-ii;
-		w00 = pC10[0+ps*0]; // pC00[0+ps*0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-			}
-		w00 = - w00*dD[ii];
-		pC10[0+ps*0] += w00; // pC00[0+ps*0] = 1.0
-		for(kk=1; kk<kmax; kk++)
-			{
-			pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-			}
-		// second row
-		pC11 = pC10+ps*1;
-		beta = 0.0;
-		for(jj=1; jj<n-(ii+1); jj++)
-			{
-			tmp = pC11[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[(ii+1)] = 0.0;
-			}
-		else
-			{
-			alpha = pC11[0+ps*0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[(ii+1)] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC11[0+ps*0] = beta;
-			for(jj=1; jj<n-(ii+1); jj++)
-				pC11[0+ps*jj] *= tmp;
-			}
-		// compute T
-		kmax = n-ii;
-		tmp = 1.0*0.0 + pC00[0+ps*1]*1.0;
-		for(kk=2; kk<kmax; kk++)
-			tmp += pC00[0+ps*kk]*pC10[0+ps*kk];
-		pT[0+ldt*0] = dD[ii+0];
-		pT[0+ldt*1] = - dD[ii+1] * tmp * dD[ii+0];
-		pT[1+ldt*1] = dD[ii+1];
-		// downgrade
-		kmax = n-ii;
-		jmax = m-ii-2;
-		jmax0 = (ps-((ii+2+offD)&(ps-1)))&(ps-1);
-		jmax0 = jmax<jmax0 ? jmax : jmax0;
-		jj = 0;
-		pC20a = &pD0[((offD+ii+2)&(ps-1))+((offD+ii+2)-((offD+ii+2)&(ps-1)))*sdd+ii*ps];
-		pC20 = pC20a;
-		if(jmax0>0)
-			{
-			for( ; jj<jmax0; jj++)
-				{
-				w00 = pC20[0+ps*0]*1.0 + pC20[0+ps*1]*pC00[0+ps*1];
-				w01 = pC20[0+ps*0]*0.0 + pC20[0+ps*1]*1.0;
-				for(kk=2; kk<kmax; kk++)
-					{
-					w00 += pC20[0+ps*kk]*pC00[0+ps*kk];
-					w01 += pC20[0+ps*kk]*pC10[0+ps*kk];
-					}
-				w01 = - w00*pT[0+ldt*1] - w01*pT[1+ldt*1];
-				w00 = - w00*pT[0+ldt*0];
-				pC20[0+ps*0] += w00*1.0          + w01*0.0;
-				pC20[0+ps*1] += w00*pC00[0+ps*1] + w01*1.0;
-				for(kk=2; kk<kmax; kk++)
-					{
-					pC20[0+ps*kk] += w00*pC00[0+ps*kk] + w01*pC10[0+ps*kk];
-					}
-				pC20 += 1;
-				}
-			pC20 += -ps+ps*sdd;
-			}
-		for( ; jj<jmax-3; jj+=4)
-			{
-			w00 = pC20[0+ps*0]*1.0 + pC20[0+ps*1]*pC00[0+ps*1];
-			w10 = pC20[1+ps*0]*1.0 + pC20[1+ps*1]*pC00[0+ps*1];
-			w20 = pC20[2+ps*0]*1.0 + pC20[2+ps*1]*pC00[0+ps*1];
-			w30 = pC20[3+ps*0]*1.0 + pC20[3+ps*1]*pC00[0+ps*1];
-			w01 = pC20[0+ps*0]*0.0 + pC20[0+ps*1]*1.0;
-			w11 = pC20[1+ps*0]*0.0 + pC20[1+ps*1]*1.0;
-			w21 = pC20[2+ps*0]*0.0 + pC20[2+ps*1]*1.0;
-			w31 = pC20[3+ps*0]*0.0 + pC20[3+ps*1]*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				w00 += pC20[0+ps*kk]*pC00[0+ps*kk];
-				w10 += pC20[1+ps*kk]*pC00[0+ps*kk];
-				w20 += pC20[2+ps*kk]*pC00[0+ps*kk];
-				w30 += pC20[3+ps*kk]*pC00[0+ps*kk];
-				w01 += pC20[0+ps*kk]*pC10[0+ps*kk];
-				w11 += pC20[1+ps*kk]*pC10[0+ps*kk];
-				w21 += pC20[2+ps*kk]*pC10[0+ps*kk];
-				w31 += pC20[3+ps*kk]*pC10[0+ps*kk];
-				}
-			w01 = - w00*pT[0+ldt*1] - w01*pT[1+ldt*1];
-			w11 = - w10*pT[0+ldt*1] - w11*pT[1+ldt*1];
-			w21 = - w20*pT[0+ldt*1] - w21*pT[1+ldt*1];
-			w31 = - w30*pT[0+ldt*1] - w31*pT[1+ldt*1];
-			w00 = - w00*pT[0+ldt*0];
-			w10 = - w10*pT[0+ldt*0];
-			w20 = - w20*pT[0+ldt*0];
-			w30 = - w30*pT[0+ldt*0];
-			pC20[0+ps*0] += w00*1.0          + w01*0.0;
-			pC20[1+ps*0] += w10*1.0          + w11*0.0;
-			pC20[2+ps*0] += w20*1.0          + w21*0.0;
-			pC20[3+ps*0] += w30*1.0          + w31*0.0;
-			pC20[0+ps*1] += w00*pC00[0+ps*1] + w01*1.0;
-			pC20[1+ps*1] += w10*pC00[0+ps*1] + w11*1.0;
-			pC20[2+ps*1] += w20*pC00[0+ps*1] + w21*1.0;
-			pC20[3+ps*1] += w30*pC00[0+ps*1] + w31*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				pC20[0+ps*kk] += w00*pC00[0+ps*kk] + w01*pC10[0+ps*kk];
-				pC20[1+ps*kk] += w10*pC00[0+ps*kk] + w11*pC10[0+ps*kk];
-				pC20[2+ps*kk] += w20*pC00[0+ps*kk] + w21*pC10[0+ps*kk];
-				pC20[3+ps*kk] += w30*pC00[0+ps*kk] + w31*pC10[0+ps*kk];
-				}
-			pC20 += ps*sdd;
-			}
-		for(ll=0; ll<jmax-jj; ll++)
-			{
-			w00 = pC20[0+ps*0]*1.0 + pC20[0+ps*1]*pC00[0+ps*1];
-			w01 = pC20[0+ps*0]*0.0 + pC20[0+ps*1]*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				w00 += pC20[0+ps*kk]*pC00[0+ps*kk];
-				w01 += pC20[0+ps*kk]*pC10[0+ps*kk];
-				}
-			w01 = - w00*pT[0+ldt*1] - w01*pT[1+ldt*1];
-			w00 = - w00*pT[0+ldt*0];
-			pC20[0+ps*0] += w00*1.0          + w01*0.0;
-			pC20[0+ps*1] += w00*pC00[0+ps*1] + w01*1.0;
-			for(kk=2; kk<kmax; kk++)
-				{
-				pC20[0+ps*kk] += w00*pC00[0+ps*kk] + w01*pC10[0+ps*kk];
-				}
-			pC20 += 1;
-			}
-		}
-#endif
-	for(; ii<imax; ii++)
-		{
-		pC00 = &pD0[((offD+ii)&(ps-1))+((offD+ii)-((offD+ii)&(ps-1)))*sdd+ii*ps];
-		beta = 0.0;
-		for(jj=1; jj<n-ii; jj++)
-			{
-			tmp = pC00[0+ps*jj];
-			beta += tmp*tmp;
-			}
-		if(beta==0.0)
-			{
-			dD[ii] = 0.0;
-			}
-		else
-			{
-			alpha = pC00[0];
-			beta += alpha*alpha;
-			beta = sqrt(beta);
-			if(alpha>0)
-				beta = -beta;
-			dD[ii] = (beta-alpha) / beta;
-			tmp = 1.0 / (alpha-beta);
-			pC00[0] = beta;
-			for(jj=1; jj<n-ii; jj++)
-				pC00[0+ps*jj] *= tmp;
-			}
-		if(ii<n)
-			{
-			kmax = n-ii;
-			jmax = m-ii-1;
-			jmax0 = (ps-((ii+1+offD)&(ps-1)))&(ps-1);
-			jmax0 = jmax<jmax0 ? jmax : jmax0;
-			jj = 0;
-			pC10a = &pD0[((offD+ii+1)&(ps-1))+((offD+ii+1)-((offD+ii+1)&(ps-1)))*sdd+ii*ps];
-			pC10 = pC10a;
-			if(jmax0>0)
-				{
-				for( ; jj<jmax0; jj++)
-					{
-					w00 = pC10[0+ps*0];
-					for(kk=1; kk<kmax; kk++)
-						{
-						w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-						}
-					w00 = - w00*dD[ii];
-					pC10[0+ps*0] += w00;
-					for(kk=1; kk<kmax; kk++)
-						{
-						pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-						}
-					pC10 += 1;
-					}
-				pC10 += -ps+ps*sdd;
-				}
-			for( ; jj<jmax-3; jj+=4)
-				{
-				w00 = pC10[0+ps*0];
-				w10 = pC10[1+ps*0];
-				w20 = pC10[2+ps*0];
-				w30 = pC10[3+ps*0];
-				for(kk=1; kk<kmax; kk++)
-					{
-					w00 += pC10[0+ps*kk]*pC00[0+ps*kk];
-					w10 += pC10[1+ps*kk]*pC00[0+ps*kk];
-					w20 += pC10[2+ps*kk]*pC00[0+ps*kk];
-					w30 += pC10[3+ps*kk]*pC00[0+ps*kk];
-					}
-				w00 = - w00*dD[ii];
-				w10 = - w10*dD[ii];
-				w20 = - w20*dD[ii];
-				w30 = - w30*dD[ii];
-				pC10[0+ps*0] += w00;
-				pC10[1+ps*0] += w10;
-				pC10[2+ps*0] += w20;
-				pC10[3+ps*0] += w30;
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC10[0+ps*kk] += w00*pC00[0+ps*kk];
-					pC10[1+ps*kk] += w10*pC00[0+ps*kk];
-					pC10[2+ps*kk] += w20*pC00[0+ps*kk];
-					pC10[3+ps*kk] += w30*pC00[0+ps*kk];
-					}
-				pC10 += ps*sdd;
-				}
-			for(ll=0; ll<jmax-jj; ll++)
-				{
-				w00 = pC10[0+ps*0];
-				for(kk=1; kk<kmax; kk++)
-					{
-					w00 += pC10[0+ps*kk] * pC00[0+ps*kk];
-					}
-				w00 = - w00*dD[ii];
-				pC10[0+ps*0] += w00;
-				for(kk=1; kk<kmax; kk++)
-					{
-					pC10[0+ps*kk] += w00 * pC00[0+ps*kk];
-					}
-				pC10 += 1;
-				}
-			}
-		}
-	return;
-	}
-
-
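The per-row loop above generates each Householder reflector in the usual dlarfg style: accumulate the squared norm of the trailing row, choose beta with the sign opposite to alpha to avoid cancellation, store tau = (beta-alpha)/beta, and scale the tail by 1/(alpha-beta). A minimal scalar sketch of that step, using illustrative names and a plain contiguous row instead of the panel-major layout, could look like this:

#include <math.h>

/* Sketch only: generate one Householder reflector for a contiguous row x
 * of length n, mirroring the formulas in the loop above.  On return x[0]
 * holds beta, x[1..n-1] holds the reflector tail (implicit leading 1),
 * and the return value is tau (stored in dD[] by the kernel). */
static double householder_row_sketch(int n, double *x)
	{
	int jj;
	double alpha, beta, tmp;
	beta = 0.0;
	for(jj=1; jj<n; jj++)
		beta += x[jj]*x[jj];
	if(beta==0.0)
		return 0.0;                /* nothing to annihilate: tau = 0 */
	alpha = x[0];
	beta += alpha*alpha;
	beta = sqrt(beta);
	if(alpha>0)
		beta = -beta;              /* avoid cancellation in alpha-beta */
	tmp = 1.0 / (alpha-beta);
	x[0] = beta;
	for(jj=1; jj<n; jj++)
		x[jj] *= tmp;
	return (beta-alpha) / beta;
	}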
-
-// assume kmax>=4
-void kernel_dlarft_4_lib4(int kmax, double *pD, double *dD, double *pT)
-	{
-	const int ps = 4;
-	int kk;
-	double v10,
-	       v20, v21,
-		   v30, v31, v32;
-	// 0
-	// 1
-	v10 =  pD[0+ps*1];
-	// 2
-	v10 += pD[1+ps*2]*pD[0+ps*2];
-	v20 =  pD[0+ps*2];
-	v21 =  pD[1+ps*2];
-	// 3
-	v10 += pD[1+ps*3]*pD[0+ps*3];
-	v20 += pD[2+ps*3]*pD[0+ps*3];
-	v21 += pD[2+ps*3]*pD[1+ps*3];
-	v30 =  pD[0+ps*3];
-	v31 =  pD[1+ps*3];
-	v32 =  pD[2+ps*3];
-	//
-	for(kk=4; kk<kmax; kk++)
-		{
-		v10 += pD[1+ps*kk]*pD[0+ps*kk];
-		v20 += pD[2+ps*kk]*pD[0+ps*kk];
-		v30 += pD[3+ps*kk]*pD[0+ps*kk];
-		v21 += pD[2+ps*kk]*pD[1+ps*kk];
-		v31 += pD[3+ps*kk]*pD[1+ps*kk];
-		v32 += pD[3+ps*kk]*pD[2+ps*kk];
-		}
-	pT[0+ps*0] = - dD[0];
-	pT[1+ps*1] = - dD[1];
-	pT[2+ps*2] = - dD[2];
-	pT[3+ps*3] = - dD[3];
-	pT[0+ps*1] = - dD[1] * (v10*pT[0+ps*0]);
-	pT[1+ps*2] = - dD[2] * (v21*pT[1+ps*1]);
-	pT[2+ps*3] = - dD[3] * (v32*pT[2+ps*2]);
-	pT[0+ps*2] = - dD[2] * (v20*pT[0+ps*0] + v21*pT[0+ps*1]);
-	pT[1+ps*3] = - dD[3] * (v31*pT[1+ps*1] + v32*pT[1+ps*2]);
-	pT[0+ps*3] = - dD[3] * (v30*pT[0+ps*0] + v31*pT[0+ps*1] + v32*pT[0+ps*2]);
-	return;
-	}
-
-
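kernel_dlarft_4_lib4 above accumulates the 4x4 triangular factor of the compact-WY representation from the row-stored reflectors in pD and the scalars in dD; note that it stores the negation of the LAPACK-convention T, so the block reflector is applied as I + V^T T V (as the dlarfb4 kernels further below do). A reference sketch of the same recurrence for k reflectors, in plain row-major storage with illustrative names, is:

/* Sketch only: build the k-by-k upper triangular factor T for k
 * row-stored reflectors V (k x n, unit entry at V[j][j] implicit,
 * zeros to its left), in the LAPACK dlarft convention T[j][j] = tau[j].
 * The kernel above produces the negated T for its I + V^T T V form. */
static void dlarft_rowmajor_sketch(int k, int n, const double *V, int ldv,
		const double *tau, double *T, int ldt)
	{
	int ii, jj, kk;
	for(jj=0; jj<k; jj++)
		{
		/* T[0:jj-1][jj] = -tau[jj] * V[0:jj-1][:] * v_jj^T */
		for(ii=0; ii<jj; ii++)
			{
			double w = V[ii*ldv+jj];              /* v_jj[jj] = 1 */
			for(kk=jj+1; kk<n; kk++)
				w += V[ii*ldv+kk] * V[jj*ldv+kk];
			T[ii*ldt+jj] = -tau[jj] * w;
			}
		/* T[0:jj-1][jj] = T[0:jj-1][0:jj-1] * T[0:jj-1][jj], in place
		 * (top-down is safe for an upper triangular matrix) */
		for(ii=0; ii<jj; ii++)
			{
			double t = 0.0;
			for(kk=ii; kk<jj; kk++)
				t += T[ii*ldt+kk] * T[kk*ldt+jj];
			T[ii*ldt+jj] = t;
			}
		T[jj*ldt+jj] = tau[jj];
		}
	}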
-
-// assume n>=4
-void kernel_dgelqf_dlarft4_4_lib4(int n, double *pD, double *dD, double *pT)
-	{
-	int ii, jj, ll;
-	double alpha, beta, tmp, w0, w1, w2, w3;
-	const int ps = 4;
-	// zero tau matrix
-	for(ii=0; ii<16; ii++)
-		pT[ii] = 0.0;
-	// first column
-	beta = 0.0;
-	for(ii=1; ii<n; ii++)
-		{
-		tmp = pD[0+ps*ii];
-		beta += tmp*tmp;
-		}
-	if(beta==0.0)
-		{
-		dD[0] = 0.0;
-		tmp = 0.0;
-		goto col2;
-		}
-	alpha = pD[0+ps*0];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[0] = (beta-alpha) / beta;
-	pT[0+ps*0] = - dD[0];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[0+ps*0] = beta;
-	w1 = pD[1+ps*0];
-	w2 = pD[2+ps*0];
-	w3 = pD[3+ps*0];
-	//
-	pD[0+ps*1] *= tmp;
-	w1 += pD[1+ps*1] * pD[0+ps*1];
-	w2 += pD[2+ps*1] * pD[0+ps*1];
-	w3 += pD[3+ps*1] * pD[0+ps*1];
-	//
-	pD[0+ps*2] *= tmp;
-	w1 += pD[1+ps*2] * pD[0+ps*2];
-	w2 += pD[2+ps*2] * pD[0+ps*2];
-	w3 += pD[3+ps*2] * pD[0+ps*2];
-	//
-	pD[0+ps*3] *= tmp;
-	w1 += pD[1+ps*3] * pD[0+ps*3];
-	w2 += pD[2+ps*3] * pD[0+ps*3];
-	w3 += pD[3+ps*3] * pD[0+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[0+ps*ii] *= tmp;
-		w1 += pD[1+ps*ii] * pD[0+ps*ii];
-		w2 += pD[2+ps*ii] * pD[0+ps*ii];
-		w3 += pD[3+ps*ii] * pD[0+ps*ii];
-		}
-	//
-	w1 = - dD[0] * w1;
-	w2 = - dD[0] * w2;
-	w3 = - dD[0] * w3;
-	//
-	pD[1+ps*0] += w1;
-	pD[2+ps*0] += w2;
-	pD[3+ps*0] += w3;
-	//
-	pD[1+ps*1] += w1 * pD[0+ps*1];
-	pD[2+ps*1] += w2 * pD[0+ps*1];
-	pD[3+ps*1] += w3 * pD[0+ps*1];
-	//
-	pD[1+ps*2] += w1 * pD[0+ps*2];
-	pD[2+ps*2] += w2 * pD[0+ps*2];
-	pD[3+ps*2] += w3 * pD[0+ps*2];
-	beta = pD[1+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] += w1 * pD[0+ps*3];
-	pD[2+ps*3] += w2 * pD[0+ps*3];
-	pD[3+ps*3] += w3 * pD[0+ps*3];
-	beta += pD[1+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] += w1 * pD[0+ps*ii];
-		pD[2+ps*ii] += w2 * pD[0+ps*ii];
-		pD[3+ps*ii] += w3 * pD[0+ps*ii];
-		beta += pD[1+ps*ii] * pD[1+ps*ii];
-		}
-	// second column
-col2:
-	if(beta==0.0)
-		{
-		dD[1] = 0.0;
-		tmp = 0.0;
-		goto col3;
-		}
-	alpha = pD[1+ps*1];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[1] = (beta-alpha) / beta;
-	pT[1+ps*1] = - dD[1];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[1+ps*1] = beta;
-	w0 = pD[0+ps*1]; //
-	w2 = pD[2+ps*1];
-	w3 = pD[3+ps*1];
-	//
-	pD[1+ps*2] *= tmp;
-	w0 += pD[0+ps*2] * pD[1+ps*2]; //
-	w2 += pD[2+ps*2] * pD[1+ps*2];
-	w3 += pD[3+ps*2] * pD[1+ps*2];
-	//
-	pD[1+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[1+ps*3]; //
-	w2 += pD[2+ps*3] * pD[1+ps*3];
-	w3 += pD[3+ps*3] * pD[1+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[1+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[1+ps*ii]; //
-		w2 += pD[2+ps*ii] * pD[1+ps*ii];
-		w3 += pD[3+ps*ii] * pD[1+ps*ii];
-		}
-	//
-	pT[0+ps*1] = - dD[1] * (w0*pT[0+ps*0]);
-	w2 = - dD[1] * w2;
-	w3 = - dD[1] * w3;
-	//
-	pD[2+ps*1] += w2;
-	pD[3+ps*1] += w3;
-	//
-	pD[2+ps*2] += w2 * pD[1+ps*2];
-	pD[3+ps*2] += w3 * pD[1+ps*2];
-	//
-	pD[2+ps*3] += w2 * pD[1+ps*3];
-	pD[3+ps*3] += w3 * pD[1+ps*3];
-	beta = pD[2+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] += w2 * pD[1+ps*ii];
-		pD[3+ps*ii] += w3 * pD[1+ps*ii];
-		beta += pD[2+ps*ii] * pD[2+ps*ii];
-		}
-	// third column
-col3:
-	if(beta==0.0)
-		{
-		dD[2] = 0.0;
-		tmp = 0.0;
-		goto col4;
-		}
-	alpha = pD[2+ps*2];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[2] = (beta-alpha) / beta;
-	pT[2+ps*2] = - dD[2];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[2+ps*2] = beta;
-	w0 = pD[0+ps*2];
-	w1 = pD[1+ps*2];
-	w3 = pD[3+ps*2];
-	//
-	pD[2+ps*3] *= tmp;
-	w0 += pD[0+ps*3] * pD[2+ps*3];
-	w1 += pD[1+ps*3] * pD[2+ps*3];
-	w3 += pD[3+ps*3] * pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[2+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[2+ps*ii];
-		w1 += pD[1+ps*ii] * pD[2+ps*ii];
-		w3 += pD[3+ps*ii] * pD[2+ps*ii];
-		}
-	//
-	pT[1+ps*2] = - dD[2] * (w1*pT[1+ps*1]);
-	pT[0+ps*2] = - dD[2] * (w0*pT[0+ps*0] + w1*pT[0+ps*1]);
-	w3 = - dD[2] * w3;
-	//
-	pD[3+ps*2] += w3;
-	//
-	pD[3+ps*3] += w3 * pD[2+ps*3];
-	//
-	beta = 0.0;
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] += w3 * pD[2+ps*ii];
-		beta += pD[3+ps*ii] * pD[3+ps*ii];
-		}
-	// fourth column
-col4:
-	if(beta==0.0)
-		{
-		dD[3] = 0.0;
-		tmp = 0.0;
-		return;
-		}
-	alpha = pD[3+ps*3];
-	beta += alpha*alpha;
-	beta = sqrt(beta);
-	if(alpha>0)
-		beta = -beta;
-	dD[3] = (beta-alpha) / beta;
-	pT[3+ps*3] = - dD[3];
-	tmp = 1.0 / (alpha-beta);
-	//
-	pD[3+ps*3] = beta;
-	w0 =  pD[0+ps*3];
-	w1 =  pD[1+ps*3];
-	w2 =  pD[2+ps*3];
-	//
-	for(ii=4; ii<n; ii++)
-		{
-		pD[3+ps*ii] *= tmp;
-		w0 += pD[0+ps*ii] * pD[3+ps*ii];
-		w1 += pD[1+ps*ii] * pD[3+ps*ii];
-		w2 += pD[2+ps*ii] * pD[3+ps*ii];
-		}
-	//
-	pT[2+ps*3] = - dD[3] * (w2*pT[2+ps*2]);
-	pT[1+ps*3] = - dD[3] * (w1*pT[1+ps*1] + w2*pT[1+ps*2]);
-	pT[0+ps*3] = - dD[3] * (w0*pT[0+ps*0] + w1*pT[0+ps*1] + w2*pT[0+ps*2]);
-	return;
-	}
-
-
-
-void kernel_dlarfb4_r_4_lib4(int kmax, double *pV, double *pT, double *pD)
-	{
-	const int ps = 4;
-	double pW[16];
-	int kk;
-	// 0
-	pW[0+ps*0] = pD[0+ps*0];
-	pW[1+ps*0] = pD[1+ps*0];
-	pW[2+ps*0] = pD[2+ps*0];
-	pW[3+ps*0] = pD[3+ps*0];
-	// 1
-	pW[0+ps*0] += pD[0+ps*1]*pV[0+ps*1];
-	pW[1+ps*0] += pD[1+ps*1]*pV[0+ps*1];
-	pW[2+ps*0] += pD[2+ps*1]*pV[0+ps*1];
-	pW[3+ps*0] += pD[3+ps*1]*pV[0+ps*1];
-	pW[0+ps*1] = pD[0+ps*1];
-	pW[1+ps*1] = pD[1+ps*1];
-	pW[2+ps*1] = pD[2+ps*1];
-	pW[3+ps*1] = pD[3+ps*1];
-	// 2
-	pW[0+ps*0] += pD[0+ps*2]*pV[0+ps*2];
-	pW[1+ps*0] += pD[1+ps*2]*pV[0+ps*2];
-	pW[2+ps*0] += pD[2+ps*2]*pV[0+ps*2];
-	pW[3+ps*0] += pD[3+ps*2]*pV[0+ps*2];
-	pW[0+ps*1] += pD[0+ps*2]*pV[1+ps*2];
-	pW[1+ps*1] += pD[1+ps*2]*pV[1+ps*2];
-	pW[2+ps*1] += pD[2+ps*2]*pV[1+ps*2];
-	pW[3+ps*1] += pD[3+ps*2]*pV[1+ps*2];
-	pW[0+ps*2] = pD[0+ps*2];
-	pW[1+ps*2] = pD[1+ps*2];
-	pW[2+ps*2] = pD[2+ps*2];
-	pW[3+ps*2] = pD[3+ps*2];
-	// 3
-	pW[0+ps*0] += pD[0+ps*3]*pV[0+ps*3];
-	pW[1+ps*0] += pD[1+ps*3]*pV[0+ps*3];
-	pW[2+ps*0] += pD[2+ps*3]*pV[0+ps*3];
-	pW[3+ps*0] += pD[3+ps*3]*pV[0+ps*3];
-	pW[0+ps*1] += pD[0+ps*3]*pV[1+ps*3];
-	pW[1+ps*1] += pD[1+ps*3]*pV[1+ps*3];
-	pW[2+ps*1] += pD[2+ps*3]*pV[1+ps*3];
-	pW[3+ps*1] += pD[3+ps*3]*pV[1+ps*3];
-	pW[0+ps*2] += pD[0+ps*3]*pV[2+ps*3];
-	pW[1+ps*2] += pD[1+ps*3]*pV[2+ps*3];
-	pW[2+ps*2] += pD[2+ps*3]*pV[2+ps*3];
-	pW[3+ps*2] += pD[3+ps*3]*pV[2+ps*3];
-	pW[0+ps*3] = pD[0+ps*3];
-	pW[1+ps*3] = pD[1+ps*3];
-	pW[2+ps*3] = pD[2+ps*3];
-	pW[3+ps*3] = pD[3+ps*3];
-	//
-	for(kk=4; kk<kmax; kk++)
-		{
-		pW[0+ps*0] += pD[0+ps*kk]*pV[0+ps*kk];
-		pW[1+ps*0] += pD[1+ps*kk]*pV[0+ps*kk];
-		pW[2+ps*0] += pD[2+ps*kk]*pV[0+ps*kk];
-		pW[3+ps*0] += pD[3+ps*kk]*pV[0+ps*kk];
-		pW[0+ps*1] += pD[0+ps*kk]*pV[1+ps*kk];
-		pW[1+ps*1] += pD[1+ps*kk]*pV[1+ps*kk];
-		pW[2+ps*1] += pD[2+ps*kk]*pV[1+ps*kk];
-		pW[3+ps*1] += pD[3+ps*kk]*pV[1+ps*kk];
-		pW[0+ps*2] += pD[0+ps*kk]*pV[2+ps*kk];
-		pW[1+ps*2] += pD[1+ps*kk]*pV[2+ps*kk];
-		pW[2+ps*2] += pD[2+ps*kk]*pV[2+ps*kk];
-		pW[3+ps*2] += pD[3+ps*kk]*pV[2+ps*kk];
-		pW[0+ps*3] += pD[0+ps*kk]*pV[3+ps*kk];
-		pW[1+ps*3] += pD[1+ps*kk]*pV[3+ps*kk];
-		pW[2+ps*3] += pD[2+ps*kk]*pV[3+ps*kk];
-		pW[3+ps*3] += pD[3+ps*kk]*pV[3+ps*kk];
-		}
-	//
-	pW[0+ps*3] = pW[0+ps*0]*pT[0+ps*3] + pW[0+ps*1]*pT[1+ps*3] + pW[0+ps*2]*pT[2+ps*3] + pW[0+ps*3]*pT[3+ps*3];
-	pW[1+ps*3] = pW[1+ps*0]*pT[0+ps*3] + pW[1+ps*1]*pT[1+ps*3] + pW[1+ps*2]*pT[2+ps*3] + pW[1+ps*3]*pT[3+ps*3];
-	pW[2+ps*3] = pW[2+ps*0]*pT[0+ps*3] + pW[2+ps*1]*pT[1+ps*3] + pW[2+ps*2]*pT[2+ps*3] + pW[2+ps*3]*pT[3+ps*3];
-	pW[3+ps*3] = pW[3+ps*0]*pT[0+ps*3] + pW[3+ps*1]*pT[1+ps*3] + pW[3+ps*2]*pT[2+ps*3] + pW[3+ps*3]*pT[3+ps*3];
-	//
-	pW[0+ps*2] = pW[0+ps*0]*pT[0+ps*2] + pW[0+ps*1]*pT[1+ps*2] + pW[0+ps*2]*pT[2+ps*2];
-	pW[1+ps*2] = pW[1+ps*0]*pT[0+ps*2] + pW[1+ps*1]*pT[1+ps*2] + pW[1+ps*2]*pT[2+ps*2];
-	pW[2+ps*2] = pW[2+ps*0]*pT[0+ps*2] + pW[2+ps*1]*pT[1+ps*2] + pW[2+ps*2]*pT[2+ps*2];
-	pW[3+ps*2] = pW[3+ps*0]*pT[0+ps*2] + pW[3+ps*1]*pT[1+ps*2] + pW[3+ps*2]*pT[2+ps*2];
-	//
-	pW[0+ps*1] = pW[0+ps*0]*pT[0+ps*1] + pW[0+ps*1]*pT[1+ps*1];
-	pW[1+ps*1] = pW[1+ps*0]*pT[0+ps*1] + pW[1+ps*1]*pT[1+ps*1];
-	pW[2+ps*1] = pW[2+ps*0]*pT[0+ps*1] + pW[2+ps*1]*pT[1+ps*1];
-	pW[3+ps*1] = pW[3+ps*0]*pT[0+ps*1] + pW[3+ps*1]*pT[1+ps*1];
-	//
-	pW[0+ps*0] = pW[0+ps*0]*pT[0+ps*0];
-	pW[1+ps*0] = pW[1+ps*0]*pT[0+ps*0];
-	pW[2+ps*0] = pW[2+ps*0]*pT[0+ps*0];
-	pW[3+ps*0] = pW[3+ps*0]*pT[0+ps*0];
-	//
-	pD[0+ps*0] += pW[0+ps*0];
-	pD[1+ps*0] += pW[1+ps*0];
-	pD[2+ps*0] += pW[2+ps*0];
-	pD[3+ps*0] += pW[3+ps*0];
-	//
-	pD[0+ps*1] += pW[0+ps*0]*pV[0+ps*1] + pW[0+ps*1];
-	pD[1+ps*1] += pW[1+ps*0]*pV[0+ps*1] + pW[1+ps*1];
-	pD[2+ps*1] += pW[2+ps*0]*pV[0+ps*1] + pW[2+ps*1];
-	pD[3+ps*1] += pW[3+ps*0]*pV[0+ps*1] + pW[3+ps*1];
-	//
-	pD[0+ps*2] += pW[0+ps*0]*pV[0+ps*2] + pW[0+ps*1]*pV[1+ps*2] + pW[0+ps*2];
-	pD[1+ps*2] += pW[1+ps*0]*pV[0+ps*2] + pW[1+ps*1]*pV[1+ps*2] + pW[1+ps*2];
-	pD[2+ps*2] += pW[2+ps*0]*pV[0+ps*2] + pW[2+ps*1]*pV[1+ps*2] + pW[2+ps*2];
-	pD[3+ps*2] += pW[3+ps*0]*pV[0+ps*2] + pW[3+ps*1]*pV[1+ps*2] + pW[3+ps*2];
-	//
-	pD[0+ps*3] += pW[0+ps*0]*pV[0+ps*3] + pW[0+ps*1]*pV[1+ps*3] + pW[0+ps*2]*pV[2+ps*3] + pW[0+ps*3];
-	pD[1+ps*3] += pW[1+ps*0]*pV[0+ps*3] + pW[1+ps*1]*pV[1+ps*3] + pW[1+ps*2]*pV[2+ps*3] + pW[1+ps*3];
-	pD[2+ps*3] += pW[2+ps*0]*pV[0+ps*3] + pW[2+ps*1]*pV[1+ps*3] + pW[2+ps*2]*pV[2+ps*3] + pW[2+ps*3];
-	pD[3+ps*3] += pW[3+ps*0]*pV[0+ps*3] + pW[3+ps*1]*pV[1+ps*3] + pW[3+ps*2]*pV[2+ps*3] + pW[3+ps*3];
-	for(kk=4; kk<kmax; kk++)
-		{
-		pD[0+ps*kk] += pW[0+ps*0]*pV[0+ps*kk] + pW[0+ps*1]*pV[1+ps*kk] + pW[0+ps*2]*pV[2+ps*kk] + pW[0+ps*3]*pV[3+ps*kk];
-		pD[1+ps*kk] += pW[1+ps*0]*pV[0+ps*kk] + pW[1+ps*1]*pV[1+ps*kk] + pW[1+ps*2]*pV[2+ps*kk] + pW[1+ps*3]*pV[3+ps*kk];
-		pD[2+ps*kk] += pW[2+ps*0]*pV[0+ps*kk] + pW[2+ps*1]*pV[1+ps*kk] + pW[2+ps*2]*pV[2+ps*kk] + pW[2+ps*3]*pV[3+ps*kk];
-		pD[3+ps*kk] += pW[3+ps*0]*pV[0+ps*kk] + pW[3+ps*1]*pV[1+ps*kk] + pW[3+ps*2]*pV[2+ps*kk] + pW[3+ps*3]*pV[3+ps*kk];
-		}
-	return;
-	}
-
-
-
-void kernel_dlarfb4_r_1_lib4(int kmax, double *pV, double *pT, double *pD)
-	{
-	const int ps = 4;
-	double pW[16];
-	int kk;
-	// 0
-	pW[0+ps*0] = pD[0+ps*0];
-	// 1
-	pW[0+ps*0] += pD[0+ps*1]*pV[0+ps*1];
-	pW[0+ps*1] = pD[0+ps*1];
-	// 2
-	pW[0+ps*0] += pD[0+ps*2]*pV[0+ps*2];
-	pW[0+ps*1] += pD[0+ps*2]*pV[1+ps*2];
-	pW[0+ps*2] = pD[0+ps*2];
-	// 3
-	pW[0+ps*0] += pD[0+ps*3]*pV[0+ps*3];
-	pW[0+ps*1] += pD[0+ps*3]*pV[1+ps*3];
-	pW[0+ps*2] += pD[0+ps*3]*pV[2+ps*3];
-	pW[0+ps*3] = pD[0+ps*3];
-	//
-	for(kk=4; kk<kmax; kk++)
-		{
-		pW[0+ps*0] += pD[0+ps*kk]*pV[0+ps*kk];
-		pW[0+ps*1] += pD[0+ps*kk]*pV[1+ps*kk];
-		pW[0+ps*2] += pD[0+ps*kk]*pV[2+ps*kk];
-		pW[0+ps*3] += pD[0+ps*kk]*pV[3+ps*kk];
-		}
-	//
-	pW[0+ps*3] = pW[0+ps*0]*pT[0+ps*3] + pW[0+ps*1]*pT[1+ps*3] + pW[0+ps*2]*pT[2+ps*3] + pW[0+ps*3]*pT[3+ps*3];
-	//
-	pW[0+ps*2] = pW[0+ps*0]*pT[0+ps*2] + pW[0+ps*1]*pT[1+ps*2] + pW[0+ps*2]*pT[2+ps*2];
-	//
-	pW[0+ps*1] = pW[0+ps*0]*pT[0+ps*1] + pW[0+ps*1]*pT[1+ps*1];
-	//
-	pW[0+ps*0] = pW[0+ps*0]*pT[0+ps*0];
-	//
-	pD[0+ps*0] += pW[0+ps*0];
-	//
-	pD[0+ps*1] += pW[0+ps*0]*pV[0+ps*1] + pW[0+ps*1];
-	//
-	pD[0+ps*2] += pW[0+ps*0]*pV[0+ps*2] + pW[0+ps*1]*pV[1+ps*2] + pW[0+ps*2];
-	//
-	pD[0+ps*3] += pW[0+ps*0]*pV[0+ps*3] + pW[0+ps*1]*pV[1+ps*3] + pW[0+ps*2]*pV[2+ps*3] + pW[0+ps*3];
-	for(kk=4; kk<kmax; kk++)
-		{
-		pD[0+ps*kk] += pW[0+ps*0]*pV[0+ps*kk] + pW[0+ps*1]*pV[1+ps*kk] + pW[0+ps*2]*pV[2+ps*kk] + pW[0+ps*3]*pV[3+ps*kk];
-		}
-	return;
-	}
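The two dlarfb4_r kernels above apply one 4-reflector block transform from the right, D <- D * (I + V^T T V), to four rows and to one row of D respectively: they form W = D V^T, multiply by the triangular T in place, and add W V back into D. A plain row-major sketch of that sequence for a single row, with illustrative names, is:

/* Sketch only: apply the block reflector I + V^T * T * V from the right
 * to one row d of length n.  V is k x n with a unit entry at V[j][j]
 * (implicit) and zeros to its left; T is k x k upper triangular, stored
 * as the kernels above expect (already negated w.r.t. LAPACK). */
static void dlarfb_right_row_sketch(int k, int n, const double *V, int ldv,
		const double *T, int ldt, double *d)
	{
	int jj, kk;
	double w[4];                        /* this sketch assumes k <= 4 */
	/* w = d * V^T */
	for(jj=0; jj<k; jj++)
		{
		w[jj] = d[jj];                  /* V[jj][jj] = 1 */
		for(kk=jj+1; kk<n; kk++)
			w[jj] += d[kk] * V[jj*ldv+kk];
		}
	/* w = w * T, right to left so the update stays in place */
	for(jj=k-1; jj>=0; jj--)
		{
		double t = 0.0;
		for(kk=0; kk<=jj; kk++)
			t += w[kk] * T[kk*ldt+jj];
		w[jj] = t;
		}
	/* d += w * V */
	for(jj=0; jj<k; jj++)
		{
		d[jj] += w[jj];                 /* unit diagonal term */
		for(kk=jj+1; kk<n; kk++)
			d[kk] += w[jj] * V[jj*ldv+kk];
		}
	}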
diff --git a/third_party/blasfeo/kernel/c99/kernel_dgetrf_pivot_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
deleted file mode 100644
index 787322e..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dgetrf_pivot_4_lib4.c
+++ /dev/null
@@ -1,779 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-#include <stdio.h>
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_d_aux.h"
-
-
-
-// C numbering, starting from 0
-void didamax_lib4(int n, int offset, double *pA, int sda, int *p_idamax, double *p_amax)
-	{
-
-	int idamax, ii;
-	double tmp, amax;
-		
-	p_idamax[0] = -1;
-	if(n<1)
-		return;
-
-	const int bs = 4;
-
-	int na = (bs - offset%bs)%bs;
-	na = n<na ? n : na;
-
-	amax = -1.0;
-	ii = 0;
-	if(na>0)
-		{
-		for( ; ii<na; ii++)
-			{
-			tmp = fabs(pA[0]);
-			if(tmp>amax)
-				{
-				idamax = ii+0;
-				amax = tmp;
-				}
-			pA += 1;
-			}
-		pA += bs*(sda-1);
-		}
-	for( ; ii<n-3; ii+=4)
-		{
-		tmp = fabs(pA[0]);
-		if(tmp>amax)
-			{
-			idamax = ii+0;
-			amax = tmp;
-			}
-		tmp = fabs(pA[1]);
-		if(tmp>amax)
-			{
-			idamax = ii+1;
-			amax = tmp;
-			}
-		tmp = fabs(pA[2]);
-		if(tmp>amax)
-			{
-			idamax = ii+2;
-			amax = tmp;
-			}
-		tmp = fabs(pA[3]);
-		if(tmp>amax)
-			{
-			idamax = ii+3;
-			amax = tmp;
-			}
-		pA += bs*sda;
-		}
-	for( ; ii<n; ii++)
-		{
-		tmp = fabs(pA[0]);
-		if(tmp>amax)
-			{
-			idamax = ii+0;
-			amax = tmp;
-			}
-		pA += 1;
-		}
-	
-	p_amax[0] = amax;
-	p_idamax[0] = idamax;
-
-	return;
-
-	}
-
-
-
-// C numbering (starting from zero) in the ipiv
-// it processes m>=4 rows and 4 cols
-void kernel_dgetrf_pivot_4_lib4(int m, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	double
-		tmp0, tmp1, tmp2, tmp3,
-		u_00, u_01, u_02, u_03,
-		      u_11, u_12, u_13,
-		            u_22, u_23,
-		                  u_33;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	// first column
-	didamax_lib4(m-0, 0, &pA[0+bs*0], sda, &idamax, &tmp0);
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		tmp0 = 1.0 / pA[0+bs*0];
-		inv_diag_A[0] = tmp0;
-		pA[1+bs*0] *= tmp0;
-		pA[2+bs*0] *= tmp0;
-		pA[3+bs*0] *= tmp0;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*0] *= tmp0;
-			pB[1+bs*0] *= tmp0;
-			pB[2+bs*0] *= tmp0;
-			pB[3+bs*0] *= tmp0;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*0] *= tmp0;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[0] = 0.0;
-		}
-
-	// second column
-	u_01  = pA[0+bs*1];
-	tmp1  = pA[1+bs*1];
-	tmp2  = pA[2+bs*1];
-	tmp3  = pA[3+bs*1];
-	tmp1 -= pA[1+bs*0] * u_01;
-	tmp2 -= pA[2+bs*0] * u_01;
-	tmp3 -= pA[3+bs*0] * u_01;
-	pA[1+bs*1] = tmp1;
-	pA[2+bs*1] = tmp2;
-	pA[3+bs*1] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*1];
-		tmp1  = pB[1+bs*1];
-		tmp2  = pB[2+bs*1];
-		tmp3  = pB[3+bs*1];
-		tmp0 -= pB[0+bs*0] * u_01;
-		tmp1 -= pB[1+bs*0] * u_01;
-		tmp2 -= pB[2+bs*0] * u_01;
-		tmp3 -= pB[3+bs*0] * u_01;
-		pB[0+bs*1] = tmp0;
-		pB[1+bs*1] = tmp1;
-		pB[2+bs*1] = tmp2;
-		pB[3+bs*1] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0 = pB[0+bs*1];
-		tmp0 -= pB[0+bs*0] * u_01;
-		pB[0+bs*1] = tmp0;
-		pB += 1;
-		}
-
-	didamax_lib4(m-1, 1, &pA[1+bs*1], sda, &idamax, &tmp1);
-	ipiv[1] = idamax+1;
-	if(tmp1!=0)
-		{
-		if(ipiv[1]!=1)
-			drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		tmp1 = 1.0 / pA[1+bs*1];
-		inv_diag_A[1] = tmp1;
-		pA[2+bs*1] *= tmp1;
-		pA[3+bs*1] *= tmp1;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*1] *= tmp1;
-			pB[1+bs*1] *= tmp1;
-			pB[2+bs*1] *= tmp1;
-			pB[3+bs*1] *= tmp1;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*1] *= tmp1;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[1] = 0.0;
-		}
-
-	// third column
-	u_02  = pA[0+bs*2];
-	u_12  = pA[1+bs*2];
-	u_12 -= pA[1+bs*0] * u_02;
-	pA[1+bs*2] = u_12;
-	tmp2  = pA[2+bs*2];
-	tmp3  = pA[3+bs*2];
-	tmp2 -= pA[2+bs*0] * u_02;
-	tmp3 -= pA[3+bs*0] * u_02;
-	tmp2 -= pA[2+bs*1] * u_12;
-	tmp3 -= pA[3+bs*1] * u_12;
-	pA[2+bs*2] = tmp2;
-	pA[3+bs*2] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*2];
-		tmp1  = pB[1+bs*2];
-		tmp2  = pB[2+bs*2];
-		tmp3  = pB[3+bs*2];
-		tmp0 -= pB[0+bs*0] * u_02;
-		tmp1 -= pB[1+bs*0] * u_02;
-		tmp2 -= pB[2+bs*0] * u_02;
-		tmp3 -= pB[3+bs*0] * u_02;
-		tmp0 -= pB[0+bs*1] * u_12;
-		tmp1 -= pB[1+bs*1] * u_12;
-		tmp2 -= pB[2+bs*1] * u_12;
-		tmp3 -= pB[3+bs*1] * u_12;
-		pB[0+bs*2] = tmp0;
-		pB[1+bs*2] = tmp1;
-		pB[2+bs*2] = tmp2;
-		pB[3+bs*2] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0  = pB[0+bs*2];
-		tmp0 -= pB[0+bs*0] * u_02;
-		tmp0 -= pB[0+bs*1] * u_12;
-		pB[0+bs*2] = tmp0;
-		pB += 1;
-		}
-
-	didamax_lib4(m-2, 2, &pA[2+bs*2], sda, &idamax, &tmp2);
-	ipiv[2] = idamax+2;
-	if(tmp2!=0)
-		{
-		if(ipiv[2]!=2)
-			drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-		tmp2 = 1.0 / pA[2+bs*2];
-		inv_diag_A[2] = tmp2;
-		pA[3+bs*2] *= tmp2;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*2] *= tmp2;
-			pB[1+bs*2] *= tmp2;
-			pB[2+bs*2] *= tmp2;
-			pB[3+bs*2] *= tmp2;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*2] *= tmp2;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[2] = 0.0;
-		}
-
-	// fourth column
-	u_03  = pA[0+bs*3];
-	u_13  = pA[1+bs*3];
-	u_13 -= pA[1+bs*0] * u_03;
-	pA[1+bs*3] = u_13;
-	u_23  = pA[2+bs*3];
-	u_23 -= pA[2+bs*0] * u_03;
-	u_23 -= pA[2+bs*1] * u_13;
-	pA[2+bs*3] = u_23;
-	tmp3  = pA[3+bs*3];
-	tmp3 -= pA[3+bs*0] * u_03;
-	tmp3 -= pA[3+bs*1] * u_13;
-	tmp3 -= pA[3+bs*2] * u_23;
-	pA[3+bs*3] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*3];
-		tmp1  = pB[1+bs*3];
-		tmp2  = pB[2+bs*3];
-		tmp3  = pB[3+bs*3];
-		tmp0 -= pB[0+bs*0] * u_03;
-		tmp1 -= pB[1+bs*0] * u_03;
-		tmp2 -= pB[2+bs*0] * u_03;
-		tmp3 -= pB[3+bs*0] * u_03;
-		tmp0 -= pB[0+bs*1] * u_13;
-		tmp1 -= pB[1+bs*1] * u_13;
-		tmp2 -= pB[2+bs*1] * u_13;
-		tmp3 -= pB[3+bs*1] * u_13;
-		tmp0 -= pB[0+bs*2] * u_23;
-		tmp1 -= pB[1+bs*2] * u_23;
-		tmp2 -= pB[2+bs*2] * u_23;
-		tmp3 -= pB[3+bs*2] * u_23;
-		pB[0+bs*3] = tmp0;
-		pB[1+bs*3] = tmp1;
-		pB[2+bs*3] = tmp2;
-		pB[3+bs*3] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0  = pB[0+bs*3];
-		tmp0 -= pB[0+bs*0] * u_03;
-		tmp0 -= pB[0+bs*1] * u_13;
-		tmp0 -= pB[0+bs*2] * u_23;
-		pB[0+bs*3] = tmp0;
-		pB += 1;
-		}
-
-	didamax_lib4(m-3, 3, &pA[3+bs*3], sda, &idamax, &tmp3);
-	ipiv[3] = idamax+3;
-	if(tmp3!=0)
-		{
-		if(ipiv[3]!=3)
-			drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-		tmp3 = 1.0 / pA[3+bs*3];
-		inv_diag_A[3] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*3] *= tmp3;
-			pB[1+bs*3] *= tmp3;
-			pB[2+bs*3] *= tmp3;
-			pB[3+bs*3] *= tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*3] *= tmp3;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[3] = 0.0;
-		}
-	
-	return;
-
-	}
-
-
-
-// it processes m>0 rows and 0<n<=4 cols
-void kernel_dgetrf_pivot_4_vs_lib4(int m, int n, double *pA, int sda, double *inv_diag_A, int* ipiv)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	double
-		tmp0, tmp1, tmp2, tmp3,
-		u_00, u_01, u_02, u_03,
-		      u_11, u_12, u_13,
-		            u_22, u_23,
-		                  u_33;
-	
-	double
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	// first column
-
-	// find pivot & scale
-	didamax_lib4(m-0, 0, &pA[0+bs*0], sda, &idamax, &tmp0);
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			drowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		tmp0 = 1.0 / pA[0+bs*0];
-		inv_diag_A[0] = tmp0;
-		if(m>=4)
-			{
-			pA[1+bs*0] *= tmp0;
-			pA[2+bs*0] *= tmp0;
-			pA[3+bs*0] *= tmp0;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*0] *= tmp0;
-				pB[1+bs*0] *= tmp0;
-				pB[2+bs*0] *= tmp0;
-				pB[3+bs*0] *= tmp0;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*0] *= tmp0;
-				pB += 1;
-				}
-			}
-		else // m = {1,2,3}
-			{
-			if(m>1)
-				{
-				pA[1+bs*0] *= tmp0;
-				if(m>2)
-					pA[2+bs*0] *= tmp0;
-				}
-			}
-		}
-	else
-		{
-		inv_diag_A[0] = 0.0;
-		}
-	
-	if(n==1 || m==1) // XXX for the first row there is nothing to do, so we can return here
-		return;
-
-	// second column
-
-	// correct
-	if(m>=4)
-		{
-		u_01  = pA[0+bs*1];
-		tmp1  = pA[1+bs*1];
-		tmp2  = pA[2+bs*1];
-		tmp3  = pA[3+bs*1];
-		tmp1 -= pA[1+bs*0] * u_01;
-		tmp2 -= pA[2+bs*0] * u_01;
-		tmp3 -= pA[3+bs*0] * u_01;
-		pA[1+bs*1] = tmp1;
-		pA[2+bs*1] = tmp2;
-		pA[3+bs*1] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*1];
-			tmp1  = pB[1+bs*1];
-			tmp2  = pB[2+bs*1];
-			tmp3  = pB[3+bs*1];
-			tmp0 -= pB[0+bs*0] * u_01;
-			tmp1 -= pB[1+bs*0] * u_01;
-			tmp2 -= pB[2+bs*0] * u_01;
-			tmp3 -= pB[3+bs*0] * u_01;
-			pB[0+bs*1] = tmp0;
-			pB[1+bs*1] = tmp1;
-			pB[2+bs*1] = tmp2;
-			pB[3+bs*1] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0 = pB[0+bs*1];
-			tmp0 -= pB[0+bs*0] * u_01;
-			pB[0+bs*1] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_01  = pA[0+bs*1];
-		tmp1  = pA[1+bs*1];
-		tmp1 -= pA[1+bs*0] * u_01;
-		pA[1+bs*1] = tmp1;
-		if(m>2)
-			{
-			tmp2  = pA[2+bs*1];
-			tmp2 -= pA[2+bs*0] * u_01;
-			pA[2+bs*1] = tmp2;
-			}
-		}
-
-	// find pivot & scale
-	didamax_lib4(m-1, 1, &pA[1+bs*1], sda, &idamax, &tmp1);
-	ipiv[1] = idamax+1;
-	if(tmp1!=0)
-		{
-		if(ipiv[1]!=1)
-			drowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		tmp1 = 1.0 / pA[1+bs*1];
-		inv_diag_A[1] = tmp1;
-		if(m>=4)
-			{
-			pA[2+bs*1] *= tmp1;
-			pA[3+bs*1] *= tmp1;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*1] *= tmp1;
-				pB[1+bs*1] *= tmp1;
-				pB[2+bs*1] *= tmp1;
-				pB[3+bs*1] *= tmp1;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*1] *= tmp1;
-				pB += 1;
-				}
-			}
-		else // m = {2,3}
-			{
-			if(m>2)
-				pA[2+bs*1] *= tmp1;
-			}
-		}
-	else
-		{
-		inv_diag_A[1] = 0.0;
-		}
-
-	if(n==2)
-		return;
-
-	// third column
-
-	// correct
-	if(m>=4)
-		{
-		u_02  = pA[0+bs*2];
-		u_12  = pA[1+bs*2];
-		u_12 -= pA[1+bs*0] * u_02;
-		pA[1+bs*2] = u_12;
-		tmp2  = pA[2+bs*2];
-		tmp3  = pA[3+bs*2];
-		tmp2 -= pA[2+bs*0] * u_02;
-		tmp3 -= pA[3+bs*0] * u_02;
-		tmp2 -= pA[2+bs*1] * u_12;
-		tmp3 -= pA[3+bs*1] * u_12;
-		pA[2+bs*2] = tmp2;
-		pA[3+bs*2] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*2];
-			tmp1  = pB[1+bs*2];
-			tmp2  = pB[2+bs*2];
-			tmp3  = pB[3+bs*2];
-			tmp0 -= pB[0+bs*0] * u_02;
-			tmp1 -= pB[1+bs*0] * u_02;
-			tmp2 -= pB[2+bs*0] * u_02;
-			tmp3 -= pB[3+bs*0] * u_02;
-			tmp0 -= pB[0+bs*1] * u_12;
-			tmp1 -= pB[1+bs*1] * u_12;
-			tmp2 -= pB[2+bs*1] * u_12;
-			tmp3 -= pB[3+bs*1] * u_12;
-			pB[0+bs*2] = tmp0;
-			pB[1+bs*2] = tmp1;
-			pB[2+bs*2] = tmp2;
-			pB[3+bs*2] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0  = pB[0+bs*2];
-			tmp0 -= pB[0+bs*0] * u_02;
-			tmp0 -= pB[0+bs*1] * u_12;
-			pB[0+bs*2] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_02  = pA[0+bs*2];
-		u_12  = pA[1+bs*2];
-		u_12 -= pA[1+bs*0] * u_02;
-		pA[1+bs*2] = u_12;
-		if(m>2)
-			{
-			tmp2  = pA[2+bs*2];
-			tmp2 -= pA[2+bs*0] * u_02;
-			tmp2 -= pA[2+bs*1] * u_12;
-			pA[2+bs*2] = tmp2;
-			}
-		}
-
-	// find pivot & scale
-	if(m>2)
-		{
-		didamax_lib4(m-2, 2, &pA[2+bs*2], sda, &idamax, &tmp2);
-		ipiv[2] = idamax+2;
-		if(tmp2!=0)
-			{
-			if(ipiv[2]!=2)
-				drowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-			tmp2 = 1.0 / pA[2+bs*2];
-			inv_diag_A[2] = tmp2;
-			if(m>=4)
-				{
-				pA[3+bs*2] *= tmp2;
-				pB = pA + bs*sda;
-				for(k=0; k<ma-3; k+=4)
-					{
-					pB[0+bs*2] *= tmp2;
-					pB[1+bs*2] *= tmp2;
-					pB[2+bs*2] *= tmp2;
-					pB[3+bs*2] *= tmp2;
-					pB += bs*sda;
-					}
-				for( ; k<ma; k++)
-					{
-					pB[0+bs*2] *= tmp2;
-					pB += 1;
-					}
-				}
-			}
-		else
-			{
-			inv_diag_A[2] = 0.0;
-			}
-		}
-
-	if(n<4)
-		return;
-
-	// fourth column
-
-	// correct
-	if(m>=4)
-		{
-		u_03  = pA[0+bs*3];
-		u_13  = pA[1+bs*3];
-		u_13 -= pA[1+bs*0] * u_03;
-		pA[1+bs*3] = u_13;
-		u_23  = pA[2+bs*3];
-		u_23 -= pA[2+bs*0] * u_03;
-		u_23 -= pA[2+bs*1] * u_13;
-		pA[2+bs*3] = u_23;
-		tmp3  = pA[3+bs*3];
-		tmp3 -= pA[3+bs*0] * u_03;
-		tmp3 -= pA[3+bs*1] * u_13;
-		tmp3 -= pA[3+bs*2] * u_23;
-		pA[3+bs*3] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*3];
-			tmp1  = pB[1+bs*3];
-			tmp2  = pB[2+bs*3];
-			tmp3  = pB[3+bs*3];
-			tmp0 -= pB[0+bs*0] * u_03;
-			tmp1 -= pB[1+bs*0] * u_03;
-			tmp2 -= pB[2+bs*0] * u_03;
-			tmp3 -= pB[3+bs*0] * u_03;
-			tmp0 -= pB[0+bs*1] * u_13;
-			tmp1 -= pB[1+bs*1] * u_13;
-			tmp2 -= pB[2+bs*1] * u_13;
-			tmp3 -= pB[3+bs*1] * u_13;
-			tmp0 -= pB[0+bs*2] * u_23;
-			tmp1 -= pB[1+bs*2] * u_23;
-			tmp2 -= pB[2+bs*2] * u_23;
-			tmp3 -= pB[3+bs*2] * u_23;
-			pB[0+bs*3] = tmp0;
-			pB[1+bs*3] = tmp1;
-			pB[2+bs*3] = tmp2;
-			pB[3+bs*3] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0  = pB[0+bs*3];
-			tmp0 -= pB[0+bs*0] * u_03;
-			tmp0 -= pB[0+bs*1] * u_13;
-			tmp0 -= pB[0+bs*2] * u_23;
-			pB[0+bs*3] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_03  = pA[0+bs*3];
-		u_13  = pA[1+bs*3];
-		u_13 -= pA[1+bs*0] * u_03;
-		pA[1+bs*3] = u_13;
-		if(m>2)
-			{
-			u_23  = pA[2+bs*3];
-			u_23 -= pA[2+bs*0] * u_03;
-			u_23 -= pA[2+bs*1] * u_13;
-			pA[2+bs*3] = u_23;
-			}
-		}
-
-	if(m>3)
-		{
-		// find pivot & scale
-		didamax_lib4(m-3, 3, &pA[3+bs*3], sda, &idamax, &tmp3);
-		ipiv[3] = idamax+3;
-		if(tmp3!=0)
-			{
-			if(ipiv[3]!=3)
-				drowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-			tmp3 = 1.0 / pA[3+bs*3];
-			inv_diag_A[3] = tmp3;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*3] *= tmp3;
-				pB[1+bs*3] *= tmp3;
-				pB[2+bs*3] *= tmp3;
-				pB[3+bs*3] *= tmp3;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*3] *= tmp3;
-				pB += 1;
-				}
-			}
-		else
-			{
-			inv_diag_A[3] = 0.0;
-			}
-		}
-	
-	return;
-
-	}
-
-
-	
-
-
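The two pivoting kernels above factor a 4-column, panel-major block: each column is first corrected against the already factored columns, then didamax_lib4 locates the pivot, drowsw_lib swaps the rows, and the column below the pivot is scaled by the stored reciprocal of the diagonal. The textbook unblocked equivalent on a plain row-major matrix, with illustrative names and a right-looking trailing update instead of the kernels' left-looking corrections, might look like this:

#include <math.h>

/* Sketch only: unblocked LU with partial pivoting, A = P*L*U, on a plain
 * row-major m x n matrix.  Right-looking variant; the kernels above do
 * the same work four panel-major columns at a time and record 1/pivot
 * in inv_diag_A instead of dividing repeatedly. */
static void dgetf2_sketch(int m, int n, double *A, int lda, int *ipiv)
	{
	int ii, jj, kk, ip;
	for(jj=0; jj<n && jj<m; jj++)
		{
		/* find the row with the largest |A[ii][jj]|, ii >= jj */
		ip = jj;
		for(ii=jj+1; ii<m; ii++)
			if(fabs(A[ii*lda+jj]) > fabs(A[ip*lda+jj]))
				ip = ii;
		ipiv[jj] = ip;                   /* C numbering, as in the kernels */
		if(A[ip*lda+jj]==0.0)
			continue;                    /* singular column: skip, like inv_diag_A[jj]=0 */
		if(ip!=jj)                       /* swap rows jj and ip */
			for(kk=0; kk<n; kk++)
				{
				double t = A[jj*lda+kk];
				A[jj*lda+kk] = A[ip*lda+kk];
				A[ip*lda+kk] = t;
				}
		/* scale below the pivot and update the trailing submatrix */
		for(ii=jj+1; ii<m; ii++)
			{
			A[ii*lda+jj] /= A[jj*lda+jj];
			for(kk=jj+1; kk<n; kk++)
				A[ii*lda+kk] -= A[ii*lda+jj] * A[jj*lda+kk];
			}
		}
	}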
diff --git a/third_party/blasfeo/kernel/c99/kernel_dsymv_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_dsymv_4_lib4.c
deleted file mode 100644
index bed4300..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_dsymv_4_lib4.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if ! ( defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL) )
-void kernel_dgemv_nt_4_vs_lib4(int kmax, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t, int km)
-	{
-
-	if(kmax<=0) 
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	double
-		a_00, a_01, a_02, a_03,
-		x_n_0, x_n_1, x_n_2, x_n_3, y_n_0,
-		x_t_0, y_t_0, y_t_1, y_t_2, y_t_3;
-	
-	x_n_0 = 0;
-	x_n_1 = 0;
-	x_n_2 = 0;
-	x_n_3 = 0;
-
-	x_n_0 = alpha_n[0]*x_n[0];
-	if(km>1)
-		{
-		x_n_1 = alpha_n[0]*x_n[1];
-		if(km>2)
-			{
-			x_n_2 = alpha_n[0]*x_n[2];
-			if(km>3)
-				{
-				x_n_3 = alpha_n[0]*x_n[3];
-				}
-			}
-		}
-
-	y_t_0 = 0;
-	y_t_1 = 0;
-	y_t_2 = 0;
-	y_t_3 = 0;
-
-	k = 0;
-	for(; k<kmax-3; k+=bs)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-
-		// 1
-
-		y_n_0 = z_n[1]; 
-		x_t_0 = x_t[1];
-
-		a_00 = A[1+bs*0];
-		a_01 = A[1+bs*1];
-		a_02 = A[1+bs*2];
-		a_03 = A[1+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[1] = y_n_0;
-
-
-		// 2
-
-		y_n_0 = z_n[2]; 
-		x_t_0 = x_t[2];
-
-		a_00 = A[2+bs*0];
-		a_01 = A[2+bs*1];
-		a_02 = A[2+bs*2];
-		a_03 = A[2+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[2] = y_n_0;
-
-
-		// 3
-
-		y_n_0 = z_n[3]; 
-		x_t_0 = x_t[3];
-
-		a_00 = A[3+bs*0];
-		a_01 = A[3+bs*1];
-		a_02 = A[3+bs*2];
-		a_03 = A[3+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[3] = y_n_0;
-
-
-		A += sda*bs;
-		z_n += 4;
-		x_t += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		}
-	
-	// store t
-	z_t[0] = alpha_t[0]*y_t_0 + beta_t[0]*y_t[0];
-	if(km>1)
-		{
-		z_t[1] = alpha_t[0]*y_t_1 + beta_t[0]*y_t[1];
-		if(km>2)
-			{
-			z_t[2] = alpha_t[0]*y_t_2 + beta_t[0]*y_t[2];
-			if(km>3)
-				{
-				z_t[3] = alpha_t[0]*y_t_3 + beta_t[0]*y_t[3];
-				}
-			}
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if ! ( defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL) )
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-void kernel_dgemv_nt_4_lib4(int kmax, double *alpha_n, double *alpha_t, double *A, int sda, double *x_n, double *x_t, double *beta_t, double *y_t, double *z_n, double *z_t)
-	{
-
-	kernel_dgemv_nt_4_vs_lib4(kmax, alpha_n, alpha_t, A, sda, x_n, x_t, beta_t, y_t, z_n, z_t, 4);
-
-	return;
-
-	}
-#endif
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if ! ( defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL) )
-void kernel_dsymv_l_4_gen_lib4(int kmax, double *alpha, int offA, double *A, int sda, double *x_n, double *z_n, int km)
-	{
-
-	if(kmax<=0) 
-		return;
-	
-	double *x_t = x_n;
-	double *z_t = z_n;
-
-	const int bs = 4;
-
-	int k;
-
-	double
-		a_00, a_01, a_02, a_03,
-		x_n_0, x_n_1, x_n_2, x_n_3, y_n_0,
-		x_t_0, y_t_0, y_t_1, y_t_2, y_t_3;
-	
-	x_n_0 = 0;
-	x_n_1 = 0;
-	x_n_2 = 0;
-	x_n_3 = 0;
-
-	x_n_0 = alpha[0]*x_n[0];
-	if(km>1)
-		{
-		x_n_1 = alpha[0]*x_n[1];
-		if(km>2)
-			{
-			x_n_2 = alpha[0]*x_n[2];
-			if(km>3)
-				{
-				x_n_3 = alpha[0]*x_n[3];
-				}
-			}
-		}
-
-	y_t_0 = 0;
-	y_t_1 = 0;
-	y_t_2 = 0;
-	y_t_3 = 0;
-
-	k = 0;
-	if(offA==0)
-		{
-		if(kmax<4)
-			{
-			// 0
-
-			x_t_0 = x_t[0];
-
-			a_00 = A[0+bs*0];
-			
-			y_t_0 += a_00 * x_t_0;
-
-			if(kmax==1)
-				goto store_t;
-
-			// 1
-
-			y_n_0 = z_n[1]; 
-			x_t_0 = x_t[1];
-
-			a_00 = A[1+bs*0];
-			a_01 = A[1+bs*1];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_t_1 += a_01 * x_t_0;
-
-			z_n[1] = y_n_0;
-
-			if(kmax==2)
-				goto store_t;
-
-			// 2
-
-			y_n_0 = z_n[2]; 
-			x_t_0 = x_t[2];
-
-			a_00 = A[2+bs*0];
-			a_01 = A[2+bs*1];
-			a_02 = A[2+bs*2];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_t_2 += a_02 * x_t_0;
-
-			z_n[2] = y_n_0;
-
-			goto store_t;
-			}
-		else
-			{
-
-			// 0
-
-			x_t_0 = x_t[0];
-
-			a_00 = A[0+bs*0];
-			
-			y_t_0 += a_00 * x_t_0;
-
-
-			// 1
-
-			y_n_0 = z_n[1]; 
-			x_t_0 = x_t[1];
-
-			a_00 = A[1+bs*0];
-			a_01 = A[1+bs*1];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_t_1 += a_01 * x_t_0;
-
-			z_n[1] = y_n_0;
-
-
-			// 2
-
-			y_n_0 = z_n[2]; 
-			x_t_0 = x_t[2];
-
-			a_00 = A[2+bs*0];
-			a_01 = A[2+bs*1];
-			a_02 = A[2+bs*2];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_t_2 += a_02 * x_t_0;
-
-			z_n[2] = y_n_0;
-
-
-			// 3
-
-			y_n_0 = z_n[3]; 
-			x_t_0 = x_t[3];
-
-			a_00 = A[3+bs*0];
-			a_01 = A[3+bs*1];
-			a_02 = A[3+bs*2];
-			a_03 = A[3+bs*3];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_n_0 += a_02 * x_n_2;
-			y_t_2 += a_02 * x_t_0;
-			y_t_3 += a_03 * x_t_0;
-
-			z_n[3] = y_n_0;
-
-			k += 4;
-			A += sda*bs;
-			z_n += 4;
-			x_t += 4;
-
-			}
-		}
-	else if(offA==1)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==5)
-			goto store_t;
-
-		// 5
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==6)
-			goto store_t;
-
-		// 6
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==7)
-			goto store_t;
-
-		k += 7;
-
-		}
-	else if(offA==2)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==5)
-			goto store_t;
-
-		// 5
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==6)
-			goto store_t;
-
-		k += 6;
-
-		}
-	else // if(offA==3)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==5)
-			goto store_t;
-
-		k += 5;
-
-		}
-	for(; k<kmax-3; k+=bs)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-
-		// 1
-
-		y_n_0 = z_n[1]; 
-		x_t_0 = x_t[1];
-
-		a_00 = A[1+bs*0];
-		a_01 = A[1+bs*1];
-		a_02 = A[1+bs*2];
-		a_03 = A[1+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[1] = y_n_0;
-
-
-		// 2
-
-		y_n_0 = z_n[2]; 
-		x_t_0 = x_t[2];
-
-		a_00 = A[2+bs*0];
-		a_01 = A[2+bs*1];
-		a_02 = A[2+bs*2];
-		a_03 = A[2+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[2] = y_n_0;
-
-
-		// 3
-
-		y_n_0 = z_n[3]; 
-		x_t_0 = x_t[3];
-
-		a_00 = A[3+bs*0];
-		a_01 = A[3+bs*1];
-		a_02 = A[3+bs*2];
-		a_03 = A[3+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[3] = y_n_0;
-
-
-		A += sda*bs;
-		z_n += 4;
-		x_t += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		}
-	
-	store_t:
-	z_t[0] += alpha[0]*y_t_0;
-	if(km>1)
-		{
-		z_t[1] += alpha[0]*y_t_1;
-		if(km>2)
-			{
-			z_t[2] += alpha[0]*y_t_2;
-			if(km>3)
-				{
-				z_t[3] += alpha[0]*y_t_3;
-				}
-			}
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if ! ( defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_HASWELL) )
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-void kernel_dsymv_l_4_lib4(int kmax, double *alpha, double *A, int sda, double *x_n, double *z_n)
-	{
-
-	kernel_dsymv_l_4_gen_lib4(kmax, alpha, 0, A, sda, x_n, z_n, 4);
-
-	return;
-
-	}
-#endif
-
-
-
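kernel_dgemv_nt_4_vs_lib4 above streams one 4-column panel of A a single time while accumulating both products, z_n += alpha_n*A*x_n and z_t = alpha_t*A^T*x_t + beta_t*y_t; kernel_dsymv_l_4_gen_lib4 reuses the same pattern on the lower triangle. A plain row-major sketch of the fused operation, with illustrative names, is:

/* Sketch only: fused non-transposed / transposed matrix-vector product
 * on a plain row-major m x n matrix, reading A once:
 *   z_n += alpha_n * A   * x_n
 *   z_t  = alpha_t * A^T * x_t + beta_t * y_t
 * As in the kernels above, any scaled copy of y_n into z_n is assumed to
 * have been done by the caller. */
static void dgemv_nt_sketch(int m, int n, double alpha_n, double alpha_t,
		const double *A, int lda, const double *x_n, const double *x_t,
		double beta_t, const double *y_t, double *z_n, double *z_t)
	{
	int ii, jj;
	for(jj=0; jj<n; jj++)
		z_t[jj] = beta_t * y_t[jj];
	for(ii=0; ii<m; ii++)
		{
		double a_0, y_n_0 = 0.0;
		for(jj=0; jj<n; jj++)
			{
			a_0 = A[ii*lda+jj];
			y_n_0   += a_0 * x_n[jj];            /* row of A    times x_n */
			z_t[jj] += alpha_t * a_0 * x_t[ii];  /* column of A times x_t */
			}
		z_n[ii] += alpha_n * y_n_0;
		}
	}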
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_sgecp_lib4.c b/third_party/blasfeo/kernel/c99/kernel_sgecp_lib4.c
deleted file mode 100644
index de5b704..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_sgecp_lib4.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-
-// A is aligned to 256-bit boundaries
-void kernel_sgesc_4_lib4(int kmax, float *alphap, float *A)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		A[0+bs*0] *= alpha;
-		A[1+bs*0] *= alpha;
-		A[2+bs*0] *= alpha;
-		A[3+bs*0] *= alpha;
-
-		A += 4;
-
-		}
-	
-	}
-
-
-
-void kernel_sgesc_3_lib4(int kmax, float *alphap, float *A)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		A[0+bs*0] *= alpha;
-		A[1+bs*0] *= alpha;
-		A[2+bs*0] *= alpha;
-
-		A += 4;
-
-		}
-	
-	}
-
-
-
-void kernel_sgesc_2_lib4(int kmax, float *alphap, float *A)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		A[0+bs*0] *= alpha;
-		A[1+bs*0] *= alpha;
-
-		A += 4;
-
-		}
-	
-	}
-
-
-
-void kernel_sgesc_1_lib4(int kmax, float *alphap, float *A)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		A[0+bs*0] *= alpha;
-
-		A += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_sgecp_4_0_lib4(int kmax, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-		B[2+bs*0] = A[2+bs*0];
-		B[3+bs*0] = A[3+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_sgecp_4_1_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[1+bs*0];
-		B[1+bs*0] = A0[2+bs*0];
-		B[2+bs*0] = A0[3+bs*0];
-		B[3+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_sgecp_4_2_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[2+bs*0];
-		B[1+bs*0] = A0[3+bs*0];
-		B[2+bs*0] = A1[0+bs*0];
-		B[3+bs*0] = A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_sgecp_4_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-		B[2+bs*0] = A1[1+bs*0];
-		B[3+bs*0] = A1[2+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgecp_3_0_lib4(int kmax, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-		B[2+bs*0] = A[2+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_sgecp_3_2_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[2+bs*0];
-		B[1+bs*0] = A0[3+bs*0];
-		B[2+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_sgecp_3_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-		B[2+bs*0] = A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgecp_2_0_lib4(int kmax, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_sgecp_2_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgecp_1_0_lib4(int kmax, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
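
The kernel_sgecp_* variants above only differ in how many rows they copy and in the row offset of the source inside its panel (the _4_1, _4_2, _4_3 suffixes), which is what forces the split A0/A1 pointers. Here is a small sketch of the same copy written against an explicit panel-major index map; pm_index and ref_sgecp_4_lib4 are illustrative names, and the bs=4 layout with panel stride sda is the BLASFEO convention assumed here.

#include <stdio.h>

// Index of element (i,j) in the panel-major layout: 4-row panels,
// column-major inside each panel, consecutive panels bs*sda floats apart.
static int pm_index(int i, int j, int sda)
	{
	const int bs = 4;
	return (i/bs)*bs*sda + j*bs + i%bs;
	}

// Reference for the kernel_sgecp_4_x_lib4 family: copy a 4 x kmax block whose
// first source row sits `offset` rows into its panel (offset 1..3 is what the
// hand-written kernels handle with the A0/A1 pointer pair).
static void ref_sgecp_4_lib4(int kmax, int offset, const float *A, int sda, float *B)
	{
	for(int j=0; j<kmax; j++)
		for(int i=0; i<4; i++)
			B[i + 4*j] = A[pm_index(offset+i, j, sda)];
	}

int main(void)
	{
	float A[2*4*8];                       // two 4-row panels, sda = 8
	for(int i=0; i<64; i++) A[i] = (float) i;
	float B[4*8];
	ref_sgecp_4_lib4(8, 1, A, 8, B);      // same job as kernel_sgecp_4_1_lib4
	printf("%g %g\n", B[0], B[3]);        // rows 1 and 4 of column 0
	return 0;
	}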
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_strcp_l_4_0_lib4(int kmax, float *A, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 4-wide + end 3x3 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-		B[2+bs*0] = A[2+bs*0];
-		B[3+bs*0] = A[3+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	// 3x3 triangle
-
-	B[1+bs*0] = A[1+bs*0];
-	B[2+bs*0] = A[2+bs*0];
-	B[3+bs*0] = A[3+bs*0];
-
-	B[2+bs*1] = A[2+bs*1];
-	B[3+bs*1] = A[3+bs*1];
-
-	B[3+bs*2] = A[3+bs*2];
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_strcp_l_4_1_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 4-wide + end 3x3 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[1+bs*0];
-		B[1+bs*0] = A0[2+bs*0];
-		B[2+bs*0] = A0[3+bs*0];
-		B[3+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 3x3 triangle
-
-	B[1+0*bs] = A0[2+0*bs];
-	B[2+0*bs] = A0[3+0*bs];
-	B[3+0*bs] = A1[0+0*bs];
-
-	B[2+1*bs] = A0[3+1*bs];
-	B[3+1*bs] = A1[0+1*bs];
-
-	B[3+2*bs] = A1[0+2*bs];
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_strcp_l_4_2_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 4-wide + end 3x3 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[2+bs*0];
-		B[1+bs*0] = A0[3+bs*0];
-		B[2+bs*0] = A1[0+bs*0];
-		B[3+bs*0] = A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 3x3 triangle
-
-	B[1+bs*0] = A0[3+bs*0];
-	B[2+bs*0] = A1[0+bs*0];
-	B[3+bs*0] = A1[1+bs*0];
-
-	B[2+bs*1] = A1[0+bs*1];
-	B[3+bs*1] = A1[1+bs*1];
-
-	B[3+bs*2] = A1[1+bs*2];
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_strcp_l_4_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 4-wide + end 3x3 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-		B[2+bs*0] = A1[1+bs*0];
-		B[3+bs*0] = A1[2+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 3x3 triangle
-
-	B[1+bs*0] = A1[0+bs*0];
-	B[2+bs*0] = A1[1+bs*0];
-	B[3+bs*0] = A1[2+bs*0];
-
-	B[2+bs*1] = A1[1+bs*1];
-	B[3+bs*1] = A1[2+bs*1];
-
-	B[3+bs*2] = A1[2+bs*2];
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_strcp_l_3_0_lib4(int kmax, float *A, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 3-wide + end 2x2 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-		B[2+bs*0] = A[2+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	// 2x2 triangle
-
-	B[1+bs*0] = A[1+bs*0];
-	B[2+bs*0] = A[2+bs*0];
-
-	B[2+bs*1] = A[2+bs*1];
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_strcp_l_3_2_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 3-wide + end 2x2 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[2+bs*0];
-		B[1+bs*0] = A0[3+bs*0];
-		B[2+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 2x2 triangle
-
-	B[1+bs*0] = A0[3+bs*0];
-	B[2+bs*0] = A1[0+bs*0];
-
-	B[2+bs*1] = A1[0+bs*1];
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_strcp_l_3_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 3-wide + end 2x2 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-		B[2+bs*0] = A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 2x2 triangle
-
-	B[1+bs*0] = A1[0+bs*0];
-	B[2+bs*0] = A1[1+bs*0];
-
-	B[2+bs*1] = A1[1+bs*1];
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_strcp_l_2_0_lib4(int kmax, float *A, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 2-wide + end 1x1 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-		B[1+bs*0] = A[1+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	// 1x1 triangle
-
-	B[1+bs*0] = A[1+bs*0];
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_strcp_l_2_3_lib4(int kmax, float *A0, int sda, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 2-wide + end 1x1 triangle
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A0[3+bs*0];
-		B[1+bs*0] = A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-	
-	// 1x1 triangle
-
-	B[1+bs*0] = A1[0+bs*0];
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_strcp_l_1_0_lib4(int kmax, float *A, float *B)
-	{
-
-	// A and C are lower triangular
-	// kmax+1 1-wide
-
-	kmax += 1;
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] = A[0+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
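
The kernel_strcp_l_* group copies a lower-triangular block: kmax+1 full panel columns inside the loop, then the trailing triangle written out explicitly after it. As a reference for what that decomposition adds up to on each 4-row slab, here is a plain column-major lower-triangular copy; ref_strcp_l, lda/ldb and the 4x4 sizes in main are illustrative only.

#include <stdio.h>

// Copy only the lower triangle (i >= j) of an n x n matrix, leaving the
// strictly upper part of B untouched. The kernels do the same on 4-row
// panels, hence the "kmax+1 full columns + trailing triangle" structure.
static void ref_strcp_l(int n, const float *A, int lda, float *B, int ldb)
	{
	for(int j=0; j<n; j++)
		for(int i=j; i<n; i++)
			B[i + ldb*j] = A[i + lda*j];
	}

int main(void)
	{
	float A[4*4], B[4*4];
	for(int i=0; i<16; i++) { A[i] = (float) i; B[i] = -1.0f; }
	ref_strcp_l(4, A, 4, B, 4);
	printf("%g %g\n", B[0], B[4]);    // B[4], element (0,1), stays -1
	return 0;
	}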
-
-
-
-
-// both A and B are aligned to 256-bit boundaries
-void kernel_sgead_4_0_lib4(int kmax, float *alphap, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-		B[3+bs*0] += alpha * A[3+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
-void kernel_sgead_4_1_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[1+bs*0];
-		B[1+bs*0] += alpha * A0[2+bs*0];
-		B[2+bs*0] += alpha * A0[3+bs*0];
-		B[3+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_sgead_4_2_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-		B[3+bs*0] += alpha * A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_sgead_4_3_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-		B[3+bs*0] += alpha * A1[2+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgead_3_0_lib4(int kmax, float *alphap, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-		B[2+bs*0] += alpha * A[2+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
-void kernel_sgead_3_2_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[2+bs*0];
-		B[1+bs*0] += alpha * A0[3+bs*0];
-		B[2+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
-void kernel_sgead_3_3_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-		B[2+bs*0] += alpha * A1[1+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgead_2_0_lib4(int kmax, float *alphap, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-		B[1+bs*0] += alpha * A[1+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
-void kernel_sgead_2_3_lib4(int kmax, float *alphap, float *A0, int sda, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	float *A1 = A0 + bs*sda;
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A0[3+bs*0];
-		B[1+bs*0] += alpha * A1[0+bs*0];
-
-		A0 += 4;
-		A1 += 4;
-		B  += 4;
-
-		}
-
-	}
-
-
-
-// both A and B are aligned to 64-bit boundaries
-void kernel_sgead_1_0_lib4(int kmax, float *alphap, float *A, float *B)
-	{
-
-	if(kmax<=0)
-		return;
-
-	const int bs = 4;
-
-	float alpha = alphap[0];
-
-	int k;
-
-	for(k=0; k<kmax; k++)
-		{
-
-		B[0+bs*0] += alpha * A[0+bs*0];
-
-		A += 4;
-		B += 4;
-
-		}
-
-	}
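
kernel_sgead_* ("general add") accumulates B += alpha*A on a panel, with the same row-count and source-offset specializations as the copy kernels above. A minimal reference follows, assuming the bs=4 panel layout; ref_sgead_lib4 is an illustrative name.

#include <stdio.h>

// Reference for the kernel_sgead_*_lib4 tails: B += alpha*A on the top `rows`
// rows of a 4-row panel holding kmax columns. The _x_1/_x_2/_x_3 variants only
// differ in reading A from an offset inside its panel.
static void ref_sgead_lib4(int rows, int kmax, const float *alphap,
		const float *A, float *B)
	{
	const int bs = 4;
	float alpha = alphap[0];
	for(int k=0; k<kmax; k++)
		for(int i=0; i<rows; i++)
			B[i + bs*k] += alpha * A[i + bs*k];
	}

int main(void)
	{
	float A[4*2], B[4*2], alpha = 0.5f;
	for(int i=0; i<8; i++) { A[i] = (float) i; B[i] = 1.0f; }
	ref_sgead_lib4(4, 2, &alpha, A, B);
	printf("%g %g\n", B[0], B[7]);    // 1 + 0.5*0 and 1 + 0.5*7
	return 0;
	}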
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_sgemm_4x4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_sgemm_4x4_lib4.c
deleted file mode 100644
index 243d559..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_sgemm_4x4_lib4.c
+++ /dev/null
@@ -1,6094 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_nt_4x4_gen_lib4(int kmax, float *alpha, float *A, float *B, float *beta, int offsetC, float *C0, int sdc, int offsetD, float *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	float
-		*C1, *D1;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	if(offsetC==0)
-		{
-		c_00 = beta[0]*C0[0+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[1+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[2+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C0[3+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[0+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[1+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[2+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C0[3+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[0+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[1+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[2+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C0[3+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[0+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[1+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[2+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C0[3+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==1)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[1+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[2+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C0[3+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[0+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[1+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[2+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C0[3+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[0+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[1+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[2+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C0[3+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[0+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[1+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[2+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C0[3+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[0+bs*3] + alpha[0]*c_33;
-		}
-	else if(offsetC==2)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[2+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C0[3+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[0+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[1+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[2+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C0[3+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[0+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[1+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[2+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C0[3+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[0+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[1+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[2+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C0[3+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[0+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[1+bs*3] + alpha[0]*c_33;
-		}
-	else //if(offsetC==3)
-		{
-		C1 = C0 + sdc*bs;
-
-		c_00 = beta[0]*C0[3+bs*0] + alpha[0]*c_00;
-		c_10 = beta[0]*C1[0+bs*0] + alpha[0]*c_10;
-		c_20 = beta[0]*C1[1+bs*0] + alpha[0]*c_20;
-		c_30 = beta[0]*C1[2+bs*0] + alpha[0]*c_30;
-
-		c_01 = beta[0]*C0[3+bs*1] + alpha[0]*c_01;
-		c_11 = beta[0]*C1[0+bs*1] + alpha[0]*c_11;
-		c_21 = beta[0]*C1[1+bs*1] + alpha[0]*c_21;
-		c_31 = beta[0]*C1[2+bs*1] + alpha[0]*c_31;
-
-		c_02 = beta[0]*C0[3+bs*2] + alpha[0]*c_02;
-		c_12 = beta[0]*C1[0+bs*2] + alpha[0]*c_12;
-		c_22 = beta[0]*C1[1+bs*2] + alpha[0]*c_22;
-		c_32 = beta[0]*C1[2+bs*2] + alpha[0]*c_32;
-
-		c_03 = beta[0]*C0[3+bs*3] + alpha[0]*c_03;
-		c_13 = beta[0]*C1[0+bs*3] + alpha[0]*c_13;
-		c_23 = beta[0]*C1[1+bs*3] + alpha[0]*c_23;
-		c_33 = beta[0]*C1[2+bs*3] + alpha[0]*c_33;
-		}
-	
-	// shift sol for cols
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_00 = c_01;
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_01 = c_02;
-			c_11 = c_12;
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_02 = c_03;
-			c_12 = c_13;
-			c_22 = c_23;
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_00 = c_02;
-			c_10 = c_12;
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_01 = c_03;
-			c_11 = c_13;
-			c_21 = c_23;
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_00 = c_03;
-			c_10 = c_13;
-			c_20 = c_23;
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[1+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[1+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[2+bs*3] = c_23;
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[2+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[2+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[3+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[3+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[3+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[0+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*1] = c_01;
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*2] = c_02;
-		if(m0<=1 & m1>1) D1[0+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*3] = c_03;
-		if(m0<=1 & m1>1) D1[0+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[1+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-
-	return;
-
-	}
-#endif
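
Before the offsetC/offsetD and m0/m1/n0/n1 edge handling, the core of this "nt" kernel is a rank-kmax update of a 4x4 tile: A and B are both 4 x kmax panels and the accumulators hold A * B^T. Below is a sketch of just that accumulation, leaving out the alpha/beta scaling and offset logic; ref_sgemm_nt_4x4 is an illustrative name.

#include <stdio.h>

// Accumulation core of the 4x4 "nt" kernels: c[i][j] = sum_k A(i,k) * B(j,k),
// with A and B stored as 4 x kmax panels (column-major, leading dimension 4).
static void ref_sgemm_nt_4x4(int kmax, const float *A, const float *B, float *c)
	{
	for(int i=0; i<16; i++) c[i] = 0.0f;
	for(int k=0; k<kmax; k++)
		for(int j=0; j<4; j++)
			for(int i=0; i<4; i++)
				c[i + 4*j] += A[i + 4*k] * B[j + 4*k];
	}

int main(void)
	{
	float A[4*3], B[4*3], c[16];
	for(int i=0; i<12; i++) { A[i] = (float)(i%5); B[i] = (float)(i%3); }
	ref_sgemm_nt_4x4(3, A, B, c);
	printf("c[0][0] = %g\n", c[0]);
	return 0;
	}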
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_nt_4x4_vs_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
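
The _vs_ ("variable size") kernels compute the full 4x4 tile but clip the store with the km/kn branches, so edge tiles smaller than 4x4 never write outside the output matrix. Here is a sketch of that masking step in isolation; store_vs_4x4 is an illustrative name.

#include <stdio.h>

// Write back only the top-left km x kn corner of a computed 4x4 tile,
// which is what the km/kn branch ladders in the *_vs_ kernels implement.
static void store_vs_4x4(const float *c, float *D, int km, int kn)
	{
	const int bs = 4;
	for(int j=0; j<kn; j++)
		for(int i=0; i<km; i++)
			D[i + bs*j] = c[i + bs*j];
	}

int main(void)
	{
	float c[16], D[16];
	for(int i=0; i<16; i++) { c[i] = (float) i; D[i] = -1.0f; }
	store_vs_4x4(c, D, 3, 2);         // a 3x2 edge tile
	printf("%g %g\n", D[2], D[3]);    // D[3] (row 3) is left at -1
	return 0;
	}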
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER)
-void kernel_sgemm_nt_4x4_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	kernel_sgemm_nt_4x4_vs_lib4(kmax, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_nn_4x4_vs_lib4(int kmax, float *alpha, float *A, float *B, int sdb, float *beta, float *C, float *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[1];
-		b_1 = B[5];
-		b_2 = B[9];
-		b_3 = B[13];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[2];
-		b_1 = B[6];
-		b_2 = B[10];
-		b_3 = B[14];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[3];
-		b_1 = B[7];
-		b_2 = B[11];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 4*sdb;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_nn_4x4_lib4(int kmax, float *alpha, float *A, float *B, int sdb, float *beta, float *C, float *D)
-	{
-	kernel_sgemm_nn_4x4_vs_lib4(kmax, alpha, A, B, sdb, beta, C, D, 4, 4);
-	}
-#endif
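
The "nn" variant reads B non-transposed out of panel-major storage: inside a 4-row panel of B the kernel steps by 1 per k and picks columns at stride 4 (B[0], B[4], B[8], B[12]), and it advances by 4*sdb when it crosses into the next panel. Below is a reference for the same address pattern; ref_sgemm_nn_4x4 is an illustrative name and the sizes in main are arbitrary.

#include <stdio.h>

// Accumulation core of the 4x4 "nn" kernels: A is a 4 x kmax panel, B is a
// kmax x 4 block in panel-major storage with panel stride sdb.
static void ref_sgemm_nn_4x4(int kmax, const float *A, const float *B, int sdb,
		float *c)
	{
	const int bs = 4;
	for(int i=0; i<16; i++) c[i] = 0.0f;
	for(int k=0; k<kmax; k++)
		{
		const float *Bp = B + (k/bs)*bs*sdb + k%bs;   // element (k,0) of B
		for(int j=0; j<4; j++)
			for(int i=0; i<4; i++)
				c[i + bs*j] += A[i + bs*k] * Bp[bs*j];
		}
	}

int main(void)
	{
	float A[4*6], B[2*4*4], c[16];     // kmax = 6, so B spans two panels
	for(int i=0; i<24; i++) A[i] = (float)(i%7);
	for(int i=0; i<32; i++) B[i] = (float)(i%5);
	ref_sgemm_nn_4x4(6, A, B, 4, c);   // sdb = 4: next B panel 16 floats later
	printf("c[0][0] = %g\n", c[0]);
	return 0;
	}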
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_ssyrk_nt_l_4x4_vs_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, //c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, //c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, //c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-//		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-//		c_02 += a_0 * b_2;
-//		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-//		c_03 += a_0 * b_3;
-//		c_13 += a_1 * b_3;
-//		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-//		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-//		c_02 += a_0 * b_2;
-//		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-//		c_03 += a_0 * b_3;
-//		c_13 += a_1 * b_3;
-//		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-//		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-//		c_02 += a_0 * b_2;
-//		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-//		c_03 += a_0 * b_3;
-//		c_13 += a_1 * b_3;
-//		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-//		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-//		c_02 += a_0 * b_2;
-//		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-//		c_03 += a_0 * b_3;
-//		c_13 += a_1 * b_3;
-//		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-//		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-//		c_02 += a_0 * b_2;
-//		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-//		c_03 += a_0 * b_3;
-//		c_13 += a_1 * b_3;
-//		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-//	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-//	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-//	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-//	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-//	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-//	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-//		if(kn==1)
-//			return;
-
-//		D[0+bs*1] = c_01;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_ssyrk_nt_l_4x4_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	kernel_ssyrk_nt_l_4x4_vs_lib4(kmax, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
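
The ssyrk "nt, lower" kernel is the sgemm nt kernel with the upper-triangle accumulators and stores removed (all the commented-out c_0x/c_1x lines above). The reference below makes the i >= j restriction explicit; it takes plain scalar alpha/beta for brevity where the kernels take pointers, and ref_ssyrk_nt_l_4x4 is an illustrative name.

#include <stdio.h>

// Lower-triangle-only version of the 4x4 nt update:
// D(i,j) = beta*C(i,j) + alpha * sum_k A(i,k)*B(j,k), for i >= j only.
static void ref_ssyrk_nt_l_4x4(int kmax, float alpha, const float *A,
		const float *B, float beta, const float *C, float *D)
	{
	const int bs = 4;
	for(int j=0; j<4; j++)
		for(int i=j; i<4; i++)
			{
			float acc = 0.0f;
			for(int k=0; k<kmax; k++)
				acc += A[i + bs*k] * B[j + bs*k];
			D[i + bs*j] = beta*C[i + bs*j] + alpha*acc;
			}
	}

int main(void)
	{
	float A[4*2], C[16], D[16];
	for(int i=0; i<8; i++) A[i] = (float)(i+1);
	for(int i=0; i<16; i++) { C[i] = 1.0f; D[i] = -1.0f; }
	ref_ssyrk_nt_l_4x4(2, 1.0f, A, A, 0.0f, C, D);   // B = A gives A*A^T
	printf("%g %g\n", D[0], D[1]);                   // D[4] stays -1
	return 0;
	}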
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmm_nt_ru_4x4_vs_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	k = 0;
-
-	// k = 0
-	if(kmax>0)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	// k = 1
-	if(kmax>1)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	// k = 2
-	if(kmax>2)
-		{
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 4;
-		k++;
-		}
-
-	for(; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = beta[0]*C[0+bs*0] + alpha[0]*c_00;
-	c_10 = beta[0]*C[1+bs*0] + alpha[0]*c_10;
-	c_20 = beta[0]*C[2+bs*0] + alpha[0]*c_20;
-	c_30 = beta[0]*C[3+bs*0] + alpha[0]*c_30;
-
-	c_01 = beta[0]*C[0+bs*1] + alpha[0]*c_01;
-	c_11 = beta[0]*C[1+bs*1] + alpha[0]*c_11;
-	c_21 = beta[0]*C[2+bs*1] + alpha[0]*c_21;
-	c_31 = beta[0]*C[3+bs*1] + alpha[0]*c_31;
-
-	c_02 = beta[0]*C[0+bs*2] + alpha[0]*c_02;
-	c_12 = beta[0]*C[1+bs*2] + alpha[0]*c_12;
-	c_22 = beta[0]*C[2+bs*2] + alpha[0]*c_22;
-	c_32 = beta[0]*C[3+bs*2] + alpha[0]*c_32;
-
-	c_03 = beta[0]*C[0+bs*3] + alpha[0]*c_03;
-	c_13 = beta[0]*C[1+bs*3] + alpha[0]*c_13;
-	c_23 = beta[0]*C[2+bs*3] + alpha[0]*c_23;
-	c_33 = beta[0]*C[3+bs*3] + alpha[0]*c_33;
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmm_nt_ru_4x4_lib4(int k, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	kernel_strmm_nt_ru_4x4_vs_lib4(k, alpha, A, B, beta, C, D, 4, 4);
-	}
-#endif
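
For the strmm "nt, right, upper" kernels, B is upper triangular, so output column j only accumulates over k >= j; that is why the hand-written kernel peels the first three k iterations and touches one additional column of B in each. Below is a reference for the triangular accumulation, omitting the beta*C scaling and edge handling, which match the gemm kernels; ref_strmm_nt_ru_4x4 is an illustrative name.

#include <stdio.h>

// D = alpha * A * B^T on a 4x4 tile with B upper triangular:
// D(i,j) = alpha * sum_{k>=j} A(i,k) * B(j,k), panels with leading dimension 4.
static void ref_strmm_nt_ru_4x4(int kmax, float alpha, const float *A,
		const float *B, float *D)
	{
	const int bs = 4;
	for(int j=0; j<4; j++)
		for(int i=0; i<4; i++)
			{
			float acc = 0.0f;
			for(int k=j; k<kmax; k++)            // only the upper part of B
				acc += A[i + bs*k] * B[j + bs*k];
			D[i + bs*j] = alpha*acc;
			}
	}

int main(void)
	{
	float A[4*4], B[4*4], D[16];
	for(int i=0; i<16; i++) { A[i] = (float)(i+1); B[i] = (float)(16-i); }
	ref_strmm_nt_ru_4x4(4, 1.0f, A, B, D);
	printf("%g %g\n", D[0], D[12]);
	return 0;
	}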
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmm_nn_rl_4x4_gen_lib4(int kmax, float *alpha, float *A, int offsetB, float *B, int sdb, int offsetD, float *D0, int sdd, int m0, int m1, int n0, int n1)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	float *D1;
-	
-	int k;
-
-	B += offsetB;
-
-	k = 0;
-
-	if(offsetB==0)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else if(offsetB==1)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else if(offsetB==2)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 4
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 5
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-	else // if(offsetB==3)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 1
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 2
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 3
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-		k += 1;
-
-		if(k>=kmax)
-			goto store;
-
-		// k = 4
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		b_1 = B[4];
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		b_2 = B[8];
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		b_3 = B[12];
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 4*sdb-3;
-		k += 1;
-
-		}
-
-	for(; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[1];
-		b_1 = B[5];
-		b_2 = B[9];
-		b_3 = B[13];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[2];
-		b_1 = B[6];
-		b_2 = B[10];
-		b_3 = B[14];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[3];
-		b_1 = B[7];
-		b_2 = B[11];
-		b_3 = B[15];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 16;
-		B += 4*sdb;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[4];
-		b_2 = B[8];
-		b_3 = B[12];
-
-		c_00 += a_0 * b_0;
-		c_10 += a_1 * b_0;
-		c_20 += a_2 * b_0;
-		c_30 += a_3 * b_0;
-
-		c_01 += a_0 * b_1;
-		c_11 += a_1 * b_1;
-		c_21 += a_2 * b_1;
-		c_31 += a_3 * b_1;
-
-		c_02 += a_0 * b_2;
-		c_12 += a_1 * b_2;
-		c_22 += a_2 * b_2;
-		c_32 += a_3 * b_2;
-
-		c_03 += a_0 * b_3;
-		c_13 += a_1 * b_3;
-		c_23 += a_2 * b_3;
-		c_33 += a_3 * b_3;
-
-		A += 4;
-		B += 1;
-
-		}
-	
-	store:
-	
-	c_00 = alpha[0]*c_00;
-	c_10 = alpha[0]*c_10;
-	c_20 = alpha[0]*c_20;
-	c_30 = alpha[0]*c_30;
-
-	c_01 = alpha[0]*c_01;
-	c_11 = alpha[0]*c_11;
-	c_21 = alpha[0]*c_21;
-	c_31 = alpha[0]*c_31;
-
-	c_02 = alpha[0]*c_02;
-	c_12 = alpha[0]*c_12;
-	c_22 = alpha[0]*c_22;
-	c_32 = alpha[0]*c_32;
-
-	c_03 = alpha[0]*c_03;
-	c_13 = alpha[0]*c_13;
-	c_23 = alpha[0]*c_23;
-	c_33 = alpha[0]*c_33;
-
-	// shift sol for cols
-	if(n0>0)
-		{
-		if(n0==1)
-			{
-			c_00 = c_01;
-			c_10 = c_11;
-			c_20 = c_21;
-			c_30 = c_31;
-
-			c_01 = c_02;
-			c_11 = c_12;
-			c_21 = c_22;
-			c_31 = c_32;
-
-			c_02 = c_03;
-			c_12 = c_13;
-			c_22 = c_23;
-			c_32 = c_33;
-
-			D0 += 1*bs;
-			}
-		else if(n0==2)
-			{
-			c_00 = c_02;
-			c_10 = c_12;
-			c_20 = c_22;
-			c_30 = c_32;
-
-			c_01 = c_03;
-			c_11 = c_13;
-			c_21 = c_23;
-			c_31 = c_33;
-
-			D0 += 2*bs;
-			}
-		else //if(n0==3)
-			{
-			c_00 = c_03;
-			c_10 = c_13;
-			c_20 = c_23;
-			c_30 = c_33;
-
-			D0 += 3*bs;
-			}
-		}
-
-	int kn = n1 - n0;
-
-	if(offsetD==0)
-		{
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[1+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[2+bs*0] = c_20;
-		if(m0<=3 & m1>3) D0[3+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[1+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[2+bs*1] = c_21;
-		if(m0<=3 & m1>3) D0[3+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[1+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[2+bs*2] = c_22;
-		if(m0<=3 & m1>3) D0[3+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[0+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[1+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[2+bs*3] = c_23;
-		if(m0<=3 & m1>3) D0[3+bs*3] = c_33;
-		}
-	else if(offsetD==1)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[2+bs*0] = c_10;
-		if(m0<=2 & m1>2) D0[3+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[0+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[2+bs*1] = c_11;
-		if(m0<=2 & m1>2) D0[3+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[0+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[2+bs*2] = c_12;
-		if(m0<=2 & m1>2) D0[3+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[0+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[1+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[2+bs*3] = c_13;
-		if(m0<=2 & m1>2) D0[3+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[0+bs*3] = c_33;
-		}
-	else if(offsetD==2)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*0] = c_00;
-		if(m0<=1 & m1>1) D0[3+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[0+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[1+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*1] = c_01;
-		if(m0<=1 & m1>1) D0[3+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[0+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[1+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*2] = c_02;
-		if(m0<=1 & m1>1) D0[3+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[0+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[1+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[2+bs*3] = c_03;
-		if(m0<=1 & m1>1) D0[3+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[0+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[1+bs*3] = c_33;
-		}
-	else //if(offsetD==3)
-		{
-		D1 = D0 + sdd*bs;
-
-		if(kn<=0)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*0] = c_00;
-		if(m0<=1 & m1>1) D1[0+bs*0] = c_10;
-		if(m0<=2 & m1>2) D1[1+bs*0] = c_20;
-		if(m0<=3 & m1>3) D1[2+bs*0] = c_30;
-
-		if(kn<=1)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*1] = c_01;
-		if(m0<=1 & m1>1) D1[0+bs*1] = c_11;
-		if(m0<=2 & m1>2) D1[1+bs*1] = c_21;
-		if(m0<=3 & m1>3) D1[2+bs*1] = c_31;
-
-		if(kn<=2)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*2] = c_02;
-		if(m0<=1 & m1>1) D1[0+bs*2] = c_12;
-		if(m0<=2 & m1>2) D1[1+bs*2] = c_22;
-		if(m0<=3 & m1>3) D1[2+bs*2] = c_32;
-
-		if(kn<=3)
-			return;
-
-		if(m0<=0 & m1>0) D0[3+bs*3] = c_03;
-		if(m0<=1 & m1>1) D1[0+bs*3] = c_13;
-		if(m0<=2 & m1>2) D1[1+bs*3] = c_23;
-		if(m0<=3 & m1>3) D1[2+bs*3] = c_33;
-		}
-	
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmm_nn_rl_4x4_lib4(int kmax, float *alpha, float *A, int offsetB, float *B, int sdb, float *D)
-	{
-	kernel_strmm_nn_rl_4x4_gen_lib4(kmax, alpha, A, offsetB, B, sdb, 0, D, 0, 0, 4, 0, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_spotrf_nt_l_4x4_vs_lib4(int kmax, float *A, float *B, float *C, float *D, float *inv_diag_D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, //c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, //c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, //c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-//		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-//		c_02 -= a_0 * b_2;
-//		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-//		c_03 -= a_0 * b_3;
-//		c_13 -= a_1 * b_3;
-//		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-//	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-//	c_02 = C[0+bs*2] + c_02;
-//	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-//	c_03 = C[0+bs*3] + c_03;
-//	c_13 = C[1+bs*3] + c_13;
-//	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	if(c_00>0)
-		{
-		c_00 = sqrt(c_00);
-		tmp = 1.0/c_00;
-		}
-	else
-		{
-		c_00 = 0.0;
-		tmp = 0.0;
-		}
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-	inv_diag_D[0] = tmp;
-
-	if(kn==1)
-		goto store;
-	
-	c_11 -= c_10 * c_10;
-	c_21 -= c_20 * c_10;
-	c_31 -= c_30 * c_10;
-	if(c_11>0)
-		{
-		c_11 = sqrt(c_11);
-		tmp = 1.0/c_11;
-		}
-	else
-		{
-		c_11 = 0.0;
-		tmp = 0.0;
-		}
-	c_21 *= tmp;
-	c_31 *= tmp;
-	inv_diag_D[1] = tmp;
-
-	if(kn==2)
-		goto store;
-	
-	c_22 -= c_20 * c_20;
-	c_32 -= c_30 * c_20;
-	c_22 -= c_21 * c_21;
-	c_32 -= c_31 * c_21;
-	if(c_22>0)
-		{
-		c_22 = sqrt(c_22);
-		tmp = 1.0/c_22;
-		}
-	else
-		{
-		c_22 = 0.0;
-		tmp = 0.0;
-		}
-	c_32 *= tmp;
-	inv_diag_D[2] = tmp;
-
-	if(kn==3)
-		goto store;
-	
-	c_33 -= c_30 * c_30;
-	c_33 -= c_31 * c_31;
-	c_33 -= c_32 * c_32;
-	if(c_33>0)
-		{
-		c_33 = sqrt(c_33);
-		tmp = 1.0/c_33;
-		}
-	else
-		{
-		c_33 = 0.0;
-		tmp = 0.0;
-		}
-	inv_diag_D[3] = tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-//		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-//		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-//		D[1+bs*2] = c_12;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-//		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-//		if(kn==1)
-//			return;
-
-//		D[0+bs*1] = c_01;
-
-//		if(kn==2)
-//			return;
-
-//		D[0+bs*2] = c_02;
-
-//		if(kn==3)
-//			return;
-
-//		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_spotrf_nt_l_4x4_lib4(int kmax, float *A, float *B, float *C, float *D, float *inv_diag_D)
-	{
-	kernel_spotrf_nt_l_4x4_vs_lib4(kmax, A, B, C, D, inv_diag_D, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_ssyrk_spotrf_nt_l_4x4_vs_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D, int km, int kn)
-	{
-	float alpha = 1.0;
-	float beta = 1.0;
-	kernel_ssyrk_nt_l_4x4_vs_lib4(kp, &alpha, Ap, Bp, &beta, C, D, km, kn);
-	kernel_spotrf_nt_l_4x4_vs_lib4(km_, Am, Bm, D, D, inv_diag_D, km, kn);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_ssyrk_spotrf_nt_l_4x4_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *inv_diag_D)
-	{
-	float alpha = 1.0;
-	float beta = 1.0;
-	kernel_ssyrk_nt_l_4x4_lib4(kp, &alpha, Ap, Bp, &beta, C, D);
-	kernel_spotrf_nt_l_4x4_lib4(km_, Am, Bm, D, D, inv_diag_D);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_rl_inv_4x4_vs_lib4(int kmax, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	tmp = inv_diag_E[0];
-	c_00 *= tmp;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-	if(kn==1)
-		goto store;
-	
-	tmp = E[1+bs*0];
-	c_01 -= c_00 * tmp;
-	c_11 -= c_10 * tmp;
-	c_21 -= c_20 * tmp;
-	c_31 -= c_30 * tmp;
-	tmp = inv_diag_E[1];
-	c_01 *= tmp;
-	c_11 *= tmp;
-	c_21 *= tmp;
-	c_31 *= tmp;
-
-	if(kn==2)
-		goto store;
-	
-	tmp = E[2+bs*0];
-	c_02 -= c_00 * tmp;
-	c_12 -= c_10 * tmp;
-	c_22 -= c_20 * tmp;
-	c_32 -= c_30 * tmp;
-	tmp = E[2+bs*1];
-	c_02 -= c_01 * tmp;
-	c_12 -= c_11 * tmp;
-	c_22 -= c_21 * tmp;
-	c_32 -= c_31 * tmp;
-	tmp = inv_diag_E[2];
-	c_02 *= tmp;
-	c_12 *= tmp;
-	c_22 *= tmp;
-	c_32 *= tmp;
-
-	if(kn==3)
-		goto store;
-	
-	tmp = E[3+bs*0];
-	c_03 -= c_00 * tmp;
-	c_13 -= c_10 * tmp;
-	c_23 -= c_20 * tmp;
-	c_33 -= c_30 * tmp;
-	tmp = E[3+bs*1];
-	c_03 -= c_01 * tmp;
-	c_13 -= c_11 * tmp;
-	c_23 -= c_21 * tmp;
-	c_33 -= c_31 * tmp;
-	tmp = E[3+bs*2];
-	c_03 -= c_02 * tmp;
-	c_13 -= c_12 * tmp;
-	c_23 -= c_22 * tmp;
-	c_33 -= c_32 * tmp;
-	tmp = inv_diag_E[3];
-	c_03 *= tmp;
-	c_13 *= tmp;
-	c_23 *= tmp;
-	c_33 *= tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_rl_inv_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E)
-	{
-	kernel_strsm_nt_rl_inv_4x4_vs_lib4(k, A, B, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_strsm_nt_rl_inv_4x4_vs_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E, int km, int kn)
-	{
-	float alpha = 1.0;
-	float beta  = 1.0;
-	kernel_sgemm_nt_4x4_vs_lib4(kp, &alpha, Ap, Bp, &beta, C, D, km, kn);
-	kernel_strsm_nt_rl_inv_4x4_vs_lib4(km_, Am, Bm, D, D, E, inv_diag_E, km, kn);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_strsm_nt_rl_inv_4x4_lib4(int kp, float *Ap, float *Bp, int km_, float *Am, float *Bm, float *C, float *D, float *E, float *inv_diag_E)
-	{
-	float alpha = 1.0;
-	float beta  = 1.0;
-	kernel_sgemm_nt_4x4_lib4(kp, &alpha, Ap, Bp, &beta, C, D);
-	kernel_strsm_nt_rl_inv_4x4_lib4(km_, Am, Bm, D, D, E, inv_diag_E);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_rl_one_4x4_vs_lib4(int kmax, float *A, float *B, float *C, float *D, float *E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-	if(kn==1)
-		goto store;
-	
-	tmp = E[1+bs*0];
-	c_01 -= c_00 * tmp;
-	c_11 -= c_10 * tmp;
-	c_21 -= c_20 * tmp;
-	c_31 -= c_30 * tmp;
-
-	if(kn==2)
-		goto store;
-	
-	tmp = E[2+bs*0];
-	c_02 -= c_00 * tmp;
-	c_12 -= c_10 * tmp;
-	c_22 -= c_20 * tmp;
-	c_32 -= c_30 * tmp;
-	tmp = E[2+bs*1];
-	c_02 -= c_01 * tmp;
-	c_12 -= c_11 * tmp;
-	c_22 -= c_21 * tmp;
-	c_32 -= c_31 * tmp;
-
-	if(kn==3)
-		goto store;
-	
-	tmp = E[3+bs*0];
-	c_03 -= c_00 * tmp;
-	c_13 -= c_10 * tmp;
-	c_23 -= c_20 * tmp;
-	c_33 -= c_30 * tmp;
-	tmp = E[3+bs*1];
-	c_03 -= c_01 * tmp;
-	c_13 -= c_11 * tmp;
-	c_23 -= c_21 * tmp;
-	c_33 -= c_31 * tmp;
-	tmp = E[3+bs*2];
-	c_03 -= c_02 * tmp;
-	c_13 -= c_12 * tmp;
-	c_23 -= c_22 * tmp;
-	c_33 -= c_32 * tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_rl_one_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E)
-	{
-	kernel_strsm_nt_rl_one_4x4_vs_lib4(k, A, B, C, D, E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_ru_inv_4x4_vs_lib4(int kmax, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	float
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		tmp,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-	
-	int k;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 1
-
-		a_0 = A[4];
-		a_1 = A[5];
-		a_2 = A[6];
-		a_3 = A[7];
-
-		b_0 = B[4];
-		b_1 = B[5];
-		b_2 = B[6];
-		b_3 = B[7];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 2
-
-		a_0 = A[8];
-		a_1 = A[9];
-		a_2 = A[10];
-		a_3 = A[11];
-
-		b_0 = B[8];
-		b_1 = B[9];
-		b_2 = B[10];
-		b_3 = B[11];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		// k = 3
-
-		a_0 = A[12];
-		a_1 = A[13];
-		a_2 = A[14];
-		a_3 = A[15];
-
-		b_0 = B[12];
-		b_1 = B[13];
-		b_2 = B[14];
-		b_3 = B[15];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 16;
-		B += 16;
-
-		}
-	
-	for(; k<kmax; k++)
-		{
-
-		// k = 0
-
-		a_0 = A[0];
-		a_1 = A[1];
-		a_2 = A[2];
-		a_3 = A[3];
-
-		b_0 = B[0];
-		b_1 = B[1];
-		b_2 = B[2];
-		b_3 = B[3];
-
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-		A += 4;
-		B += 4;
-
-		}
-	
-	c_00 = C[0+bs*0] + c_00;
-	c_10 = C[1+bs*0] + c_10;
-	c_20 = C[2+bs*0] + c_20;
-	c_30 = C[3+bs*0] + c_30;
-
-	c_01 = C[0+bs*1] + c_01;
-	c_11 = C[1+bs*1] + c_11;
-	c_21 = C[2+bs*1] + c_21;
-	c_31 = C[3+bs*1] + c_31;
-
-	c_02 = C[0+bs*2] + c_02;
-	c_12 = C[1+bs*2] + c_12;
-	c_22 = C[2+bs*2] + c_22;
-	c_32 = C[3+bs*2] + c_32;
-
-	c_03 = C[0+bs*3] + c_03;
-	c_13 = C[1+bs*3] + c_13;
-	c_23 = C[2+bs*3] + c_23;
-	c_33 = C[3+bs*3] + c_33;
-
-
-	if(kn>3)
-		{
-		tmp = inv_diag_E[3];
-		c_03 *= tmp;
-		c_13 *= tmp;
-		c_23 *= tmp;
-		c_33 *= tmp;
-		tmp = E[2+bs*3];
-		c_02 -= c_03 * tmp;
-		c_12 -= c_13 * tmp;
-		c_22 -= c_23 * tmp;
-		c_32 -= c_33 * tmp;
-		tmp = E[1+bs*3];
-		c_01 -= c_03 * tmp;
-		c_11 -= c_13 * tmp;
-		c_21 -= c_23 * tmp;
-		c_31 -= c_33 * tmp;
-		tmp = E[0+bs*3];
-		c_00 -= c_03 * tmp;
-		c_10 -= c_13 * tmp;
-		c_20 -= c_23 * tmp;
-		c_30 -= c_33 * tmp;
-		}
-
-	if(kn>2)
-		{
-		tmp = inv_diag_E[2];
-		c_02 *= tmp;
-		c_12 *= tmp;
-		c_22 *= tmp;
-		c_32 *= tmp;
-		tmp = E[1+bs*2];
-		c_01 -= c_02 * tmp;
-		c_11 -= c_12 * tmp;
-		c_21 -= c_22 * tmp;
-		c_31 -= c_32 * tmp;
-		tmp = E[0+bs*2];
-		c_00 -= c_02 * tmp;
-		c_10 -= c_12 * tmp;
-		c_20 -= c_22 * tmp;
-		c_30 -= c_32 * tmp;
-		}
-
-	if(kn>1)
-		{
-		tmp = inv_diag_E[1];
-		c_01 *= tmp;
-		c_11 *= tmp;
-		c_21 *= tmp;
-		c_31 *= tmp;
-		tmp = E[0+bs*1];
-		c_00 -= c_01 * tmp;
-		c_10 -= c_11 * tmp;
-		c_20 -= c_21 * tmp;
-		c_30 -= c_31 * tmp;
-		}
-
-	tmp = inv_diag_E[0];
-	c_00 *= tmp;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nt_ru_inv_4x4_lib4(int k, float *A, float *B, float *C, float *D, float *E, float *inv_diag_E)
-	{
-	kernel_strsm_nt_ru_inv_4x4_vs_lib4(k, A, B, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgetrf_nn_4x4_vs_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *inv_diag_D, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-	// factorization
-
-	// first column
-	tmp = 1.0 / c_00;
-	c_10 *= tmp;
-	c_20 *= tmp;
-	c_30 *= tmp;
-
-	inv_diag_D[0] = tmp;
-
-	if(kn==1)
-		goto store;
-
-	// second column
-	c_11 -= c_10 * c_01;
-	c_21 -= c_20 * c_01;
-	c_31 -= c_30 * c_01;
-
-	tmp = 1.0 / c_11;
-	c_21 *= tmp;
-	c_31 *= tmp;
-	
-	inv_diag_D[1] = tmp;
-
-	if(kn==2)
-		goto store;
-
-	// third column
-	c_12 -= c_10 * c_02;
-	c_22 -= c_20 * c_02;
-	c_32 -= c_30 * c_02;
-
-	c_22 -= c_21 * c_12;
-	c_32 -= c_31 * c_12;
-
-	tmp = 1.0 / c_22;
-	c_32 *= tmp;
-
-	inv_diag_D[2] = tmp;
-
-	if(kn==3)
-		goto store;
-
-	// fourth column
-	c_13 -= c_10 * c_03;
-	c_23 -= c_20 * c_03;
-	c_33 -= c_30 * c_03;
-
-	c_23 -= c_21 * c_13;
-	c_33 -= c_31 * c_13;
-
-	c_33 -= c_32 * c_23;
-
-	tmp = 1.0 / c_33;
-
-	inv_diag_D[3] = tmp;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgetrf_nn_4x4_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *inv_diag_D)
-	{
-	kernel_sgetrf_nn_4x4_vs_lib4(kmax, A, B, sdb, C, D, inv_diag_D, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_ll_one_4x4_vs_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_1, e_2, e_3,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-	// solution
-
-	if(km==1)
-		goto store;
-	
-	e_1 = E[1+bs*0];
-	e_2 = E[2+bs*0];
-	e_3 = E[3+bs*0];
-	c_10 -= e_1 * c_00;
-	c_20 -= e_2 * c_00;
-	c_30 -= e_3 * c_00;
-	c_11 -= e_1 * c_01;
-	c_21 -= e_2 * c_01;
-	c_31 -= e_3 * c_01;
-	c_12 -= e_1 * c_02;
-	c_22 -= e_2 * c_02;
-	c_32 -= e_3 * c_02;
-	c_13 -= e_1 * c_03;
-	c_23 -= e_2 * c_03;
-	c_33 -= e_3 * c_03;
-
-	if(km==2)
-		goto store;
-	
-	e_2 = E[2+bs*1];
-	e_3 = E[3+bs*1];
-	c_20 -= e_2 * c_10;
-	c_30 -= e_3 * c_10;
-	c_21 -= e_2 * c_11;
-	c_31 -= e_3 * c_11;
-	c_22 -= e_2 * c_12;
-	c_32 -= e_3 * c_12;
-	c_23 -= e_2 * c_13;
-	c_33 -= e_3 * c_13;
-
-	if(km==3)
-		goto store;
-	
-	e_3 = E[3+bs*2];
-	c_30 -= e_3 * c_20;
-	c_31 -= e_3 * c_21;
-	c_32 -= e_3 * c_22;
-	c_33 -= e_3 * c_23;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_ll_one_4x4_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E)
-	{
-	kernel_strsm_nn_ll_one_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_ru_inv_4x4_vs_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_00, e_01, e_02, e_03,
-		      e_11, e_12, e_13,
-			        e_22, e_23,
-					      e_33,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-	
-	// solve
-
-	e_00 = inv_diag_E[0];
-	c_00 *= e_00;
-	c_10 *= e_00;
-	c_20 *= e_00;
-	c_30 *= e_00;
-
-	if(kn==1)
-		goto store;
-	
-	e_01 = E[0+bs*1];
-	e_11 = inv_diag_E[1];
-	c_01 -= c_00 * e_01;
-	c_11 -= c_10 * e_01;
-	c_21 -= c_20 * e_01;
-	c_31 -= c_30 * e_01;
-	c_01 *= e_11;
-	c_11 *= e_11;
-	c_21 *= e_11;
-	c_31 *= e_11;
-
-	if(kn==2)
-		goto store;
-	
-	e_02 = E[0+bs*2];
-	e_12 = E[1+bs*2];
-	e_22 = inv_diag_E[2];
-	c_02 -= c_00 * e_02;
-	c_12 -= c_10 * e_02;
-	c_22 -= c_20 * e_02;
-	c_32 -= c_30 * e_02;
-	c_02 -= c_01 * e_12;
-	c_12 -= c_11 * e_12;
-	c_22 -= c_21 * e_12;
-	c_32 -= c_31 * e_12;
-	c_02 *= e_22;
-	c_12 *= e_22;
-	c_22 *= e_22;
-	c_32 *= e_22;
-
-	if(kn==3)
-		goto store;
-	
-	e_03 = E[0+bs*3];
-	e_13 = E[1+bs*3];
-	e_23 = E[2+bs*3];
-	e_33 = inv_diag_E[3];
-	c_03 -= c_00 * e_03;
-	c_13 -= c_10 * e_03;
-	c_23 -= c_20 * e_03;
-	c_33 -= c_30 * e_03;
-	c_03 -= c_01 * e_13;
-	c_13 -= c_11 * e_13;
-	c_23 -= c_21 * e_13;
-	c_33 -= c_31 * e_13;
-	c_03 -= c_02 * e_23;
-	c_13 -= c_12 * e_23;
-	c_23 -= c_22 * e_23;
-	c_33 -= c_32 * e_23;
-	c_03 *= e_33;
-	c_13 *= e_33;
-	c_23 *= e_33;
-	c_33 *= e_33;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_ru_inv_4x4_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E)
-	{
-	kernel_strsm_nn_ru_inv_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_lu_inv_4x4_vs_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E, int km, int kn)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		tmp,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		e_00, e_01, e_02, e_03,
-		      e_11, e_12, e_13,
-			        e_22, e_23,
-					      e_33,
-		c_00=0, c_01=0, c_02=0, c_03=0,
-		c_10=0, c_11=0, c_12=0, c_13=0,
-		c_20=0, c_21=0, c_22=0, c_23=0,
-		c_30=0, c_31=0, c_32=0, c_33=0;
-		
-	if(kmax<=0)
-		goto add;
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		b_0 = B[1+bs*0];
-		b_1 = B[1+bs*1];
-		b_2 = B[1+bs*2];
-		b_3 = B[1+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		b_0 = B[2+bs*0];
-		b_1 = B[2+bs*1];
-		b_2 = B[2+bs*2];
-		b_3 = B[2+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		b_0 = B[3+bs*0];
-		b_1 = B[3+bs*1];
-		b_2 = B[3+bs*2];
-		b_3 = B[3+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-		
-		
-		A += 16;
-		B += 4*sdb;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[0+bs*1];
-		b_2 = B[0+bs*2];
-		b_3 = B[0+bs*3];
-		
-		c_00 -= a_0 * b_0;
-		c_10 -= a_1 * b_0;
-		c_20 -= a_2 * b_0;
-		c_30 -= a_3 * b_0;
-
-		c_01 -= a_0 * b_1;
-		c_11 -= a_1 * b_1;
-		c_21 -= a_2 * b_1;
-		c_31 -= a_3 * b_1;
-
-		c_02 -= a_0 * b_2;
-		c_12 -= a_1 * b_2;
-		c_22 -= a_2 * b_2;
-		c_32 -= a_3 * b_2;
-
-		c_03 -= a_0 * b_3;
-		c_13 -= a_1 * b_3;
-		c_23 -= a_2 * b_3;
-		c_33 -= a_3 * b_3;
-
-
-		A += 4;
-		B += 1;
-
-		}
-		
-	add:
-
-	c_00 += C[0+bs*0];
-	c_10 += C[1+bs*0];
-	c_20 += C[2+bs*0];
-	c_30 += C[3+bs*0];
-
-	c_01 += C[0+bs*1];
-	c_11 += C[1+bs*1];
-	c_21 += C[2+bs*1];
-	c_31 += C[3+bs*1];
-
-	c_02 += C[0+bs*2];
-	c_12 += C[1+bs*2];
-	c_22 += C[2+bs*2];
-	c_32 += C[3+bs*2];
-
-	c_03 += C[0+bs*3];
-	c_13 += C[1+bs*3];
-	c_23 += C[2+bs*3];
-	c_33 += C[3+bs*3];
-
-//	printf("\n%f %f %f %f\n", c_00, c_01, c_02, c_03);
-//	printf("\n%f %f %f %f\n", c_10, c_11, c_12, c_13);
-//	printf("\n%f %f %f %f\n", c_20, c_21, c_22, c_23);
-//	printf("\n%f %f %f %f\n", c_30, c_31, c_32, c_33);
-	
-	// solve
-
-	if(km>3)
-		{
-		e_03 = E[0+bs*3];
-		e_13 = E[1+bs*3];
-		e_23 = E[2+bs*3];
-		e_33 = inv_diag_E[3];
-		c_30 *= e_33;
-		c_31 *= e_33;
-		c_32 *= e_33;
-		c_33 *= e_33;
-		c_00 -= e_03 * c_30;
-		c_01 -= e_03 * c_31;
-		c_02 -= e_03 * c_32;
-		c_03 -= e_03 * c_33;
-		c_10 -= e_13 * c_30;
-		c_11 -= e_13 * c_31;
-		c_12 -= e_13 * c_32;
-		c_13 -= e_13 * c_33;
-		c_20 -= e_23 * c_30;
-		c_21 -= e_23 * c_31;
-		c_22 -= e_23 * c_32;
-		c_23 -= e_23 * c_33;
-		}
-	
-	if(km>2)
-		{
-		e_02 = E[0+bs*2];
-		e_12 = E[1+bs*2];
-		e_22 = inv_diag_E[2];
-		c_20 *= e_22;
-		c_21 *= e_22;
-		c_22 *= e_22;
-		c_23 *= e_22;
-		c_00 -= e_02 * c_20;
-		c_01 -= e_02 * c_21;
-		c_02 -= e_02 * c_22;
-		c_03 -= e_02 * c_23;
-		c_10 -= e_12 * c_20;
-		c_11 -= e_12 * c_21;
-		c_12 -= e_12 * c_22;
-		c_13 -= e_12 * c_23;
-		}
-	
-	if(km>1)
-		{
-		e_01 = E[0+bs*1];
-		e_11 = inv_diag_E[1];
-		c_10 *= e_11;
-		c_11 *= e_11;
-		c_12 *= e_11;
-		c_13 *= e_11;
-		c_00 -= e_01 * c_10;
-		c_01 -= e_01 * c_11;
-		c_02 -= e_01 * c_12;
-		c_03 -= e_01 * c_13;
-		}
-	
-	e_00 = inv_diag_E[0];
-	c_00 *= e_00;
-	c_01 *= e_00;
-	c_02 *= e_00;
-	c_03 *= e_00;
-
-	store:
-
-	if(km>=4)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-		D[3+bs*0] = c_30;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-		D[3+bs*1] = c_31;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-		D[3+bs*2] = c_32;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		D[3+bs*3] = c_33;
-		}
-	else if(km>=3)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-		D[2+bs*0] = c_20;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-		D[2+bs*1] = c_21;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-		D[2+bs*2] = c_22;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		D[2+bs*3] = c_23;
-		}
-	else if(km>=2)
-		{
-		D[0+bs*0] = c_00;
-		D[1+bs*0] = c_10;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-		D[1+bs*1] = c_11;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-		D[1+bs*2] = c_12;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		D[1+bs*3] = c_13;
-		}
-	else //if(km>=1)
-		{
-		D[0+bs*0] = c_00;
-
-		if(kn==1)
-			return;
-
-		D[0+bs*1] = c_01;
-
-		if(kn==2)
-			return;
-
-		D[0+bs*2] = c_02;
-
-		if(kn==3)
-			return;
-
-		D[0+bs*3] = c_03;
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsm_nn_lu_inv_4x4_lib4(int kmax, float *A, float *B, int sdb, float *C, float *D, float *E, float *inv_diag_E)
-	{
-	kernel_strsm_nn_lu_inv_4x4_vs_lib4(kmax, A, B, sdb, C, D, E, inv_diag_E, 4, 4);
-	}
-#endif
-
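For reference, the solve step in the kernel above (after it has accumulated C - A*B over kmax columns) is a left-sided back substitution against an upper-triangular E whose diagonal reciprocals are supplied in inv_diag_E. A minimal scalar sketch of that step, ignoring the 4x4 register blocking and panel-major (lib4) storage; plain column-major layout and illustrative names, not part of the BLASFEO API:

static void ref_trsm_left_upper_inv(int m, int n, const float *E, int lde,
		const float *inv_diag_E, float *C, int ldc)
	{
	// overwrite the m x n block C with X such that E * X = C,
	// proceeding from the last row of each column upwards
	for(int j = 0; j < n; j++)
		{
		for(int i = m - 1; i >= 0; i--)
			{
			float tmp = C[i + ldc * j];
			for(int k = i + 1; k < m; k++)
				tmp -= E[i + lde * k] * C[k + ldc * j];
			C[i + ldc * j] = tmp * inv_diag_E[i];   // multiply by 1/E[i][i]
			}
		}
	}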
diff --git a/third_party/blasfeo/kernel/c99/kernel_sgemm_diag_lib4.c b/third_party/blasfeo/kernel/c99/kernel_sgemm_diag_lib4.c
deleted file mode 100644
index 93df707..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_sgemm_diag_lib4.c
+++ /dev/null
@@ -1,1112 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// B is the diagonal of a matrix, case beta=0.0
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_right_4_a0_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-	
-	alpha0 = alpha[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	b_3 = alpha0 * B[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_0;
-		c_2 = a_2 * b_0;
-		c_3 = a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = a_0 * b_1;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_1;
-		c_3 = a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = a_0 * b_2;
-		c_1 = a_1 * b_2;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		c_0 = a_0 * b_3;
-		c_1 = a_1 * b_3;
-		c_2 = a_2 * b_3;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		A += 4*sda;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		a_0 = A[0+bs*3];
-		
-		c_0 = a_0 * b_3;
-
-		D[0+bs*3] = c_0;
-
-
-		A += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
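Ignoring the 4-wide panel-major (lib4) layout, the kernel above computes D = alpha * A * diag(B) for a 4-column slice, i.e. each column of A is scaled by alpha times the corresponding diagonal entry of B. A minimal scalar sketch with plain column-major storage and illustrative names (not the BLASFEO API):

static void ref_gemm_diag_right_a0(int m, int n, float alpha,
		const float *A, int lda, const float *B, float *D, int ldd)
	{
	// B holds the diagonal of an n x n matrix: D = alpha * A * diag(B)
	for(int j = 0; j < n; j++)
		{
		float bj = alpha * B[j];              // per-column scale factor
		for(int i = 0; i < m; i++)
			D[i + ldd * j] = A[i + lda * j] * bj;
		}
	}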
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_right_4_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-	
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	b_3 = alpha0 * B[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_2;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		a_0 = A[0+bs*3];
-		a_1 = A[1+bs*3];
-		a_2 = A[2+bs*3];
-		a_3 = A[3+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_3;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_3;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_3;
-		c_3 = beta0 * C[3+bs*3] + a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		a_0 = A[0+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_3;
-
-		D[0+bs*3] = c_0;
-
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_right_3_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-	b_2 = alpha0 * B[2];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		a_0 = A[0+bs*2];
-		a_1 = A[1+bs*2];
-		a_2 = A[2+bs*2];
-		a_3 = A[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_2;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		a_0 = A[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_2;
-
-		D[0+bs*2] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_right_2_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	b_1 = alpha0 * B[1];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		a_0 = A[0+bs*1];
-		a_1 = A[1+bs*1];
-		a_2 = A[2+bs*1];
-		a_3 = A[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_1;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		a_0 = A[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_1;
-
-		D[0+bs*1] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// B is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_right_1_lib4(int kmax, float *alpha, float *A, int sda, float *B, float *beta, float *C, int sdc, float *D, int sdd)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	b_0 = alpha0 * B[0];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		a_0 = A[0+bs*0];
-		a_1 = A[1+bs*0];
-		a_2 = A[2+bs*0];
-		a_3 = A[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_0;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_0;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_0;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		A += 4*sda;
-		C += 4*sdc;
-		D += 4*sdd;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		a_0 = A[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		A += 1;
-		C += 1;
-		D += 1;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix, case beta=0.0
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_left_4_a0_lib4(int kmax, float *alpha, float *A, float *B, float *D, int alg)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-	a_3 = alpha0 * A[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		b_3 = B[3+bs*1];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		b_3 = B[3+bs*2];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		b_3 = B[3+bs*3];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		B += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = a_0 * b_0;
-		c_1 = a_1 * b_1;
-		c_2 = a_2 * b_2;
-		c_3 = a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-	
-		B += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
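The left-diagonal counterpart above scales rows instead of columns: D = alpha * diag(A) * B for a 4-row slice. A scalar sketch under the same assumptions (plain column-major storage, illustrative names):

static void ref_gemm_diag_left_a0(int m, int n, float alpha,
		const float *A, const float *B, int ldb, float *D, int ldd)
	{
	// A holds the diagonal of an m x m matrix: D = alpha * diag(A) * B
	for(int j = 0; j < n; j++)
		{
		for(int i = 0; i < m; i++)
			D[i + ldd * j] = alpha * A[i] * B[i + ldb * j];   // row i scaled by A[i]
		}
	}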
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_left_4_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D, int alg)
-	{
-
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2, a_3,
-		b_0, b_1, b_2, b_3,
-		c_0, c_1, c_2, c_3;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-	a_3 = alpha0 * A[3];
-	
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		b_3 = B[3+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*1] + a_3 * b_3;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		D[3+bs*1] = c_3;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		b_3 = B[3+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*2] + a_3 * b_3;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		D[3+bs*2] = c_3;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		b_3 = B[3+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*3] + a_3 * b_3;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-		D[3+bs*3] = c_3;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		b_3 = B[3+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-		c_3 = beta0 * C[3+bs*0] + a_3 * b_3;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		D[3+bs*0] = c_3;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_left_3_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1, a_2,
-		b_0, b_1, b_2,
-		c_0, c_1, c_2;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-	a_2 = alpha0 * A[2];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		b_2 = B[2+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*1] + a_2 * b_2;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		D[2+bs*1] = c_2;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		b_2 = B[2+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*2] + a_2 * b_2;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		D[2+bs*2] = c_2;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		b_2 = B[2+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*3] + a_2 * b_2;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-		D[2+bs*3] = c_2;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		b_2 = B[2+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-		c_2 = beta0 * C[2+bs*0] + a_2 * b_2;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		D[2+bs*0] = c_2;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_left_2_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0, a_1,
-		b_0, b_1,
-		c_0, c_1;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-	a_1 = alpha0 * A[1];
-
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-		
-
-		b_0 = B[0+bs*1];
-		b_1 = B[1+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*1] + a_1 * b_1;
-
-		D[0+bs*1] = c_0;
-		D[1+bs*1] = c_1;
-		
-
-		b_0 = B[0+bs*2];
-		b_1 = B[1+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*2] + a_1 * b_1;
-
-		D[0+bs*2] = c_0;
-		D[1+bs*2] = c_1;
-		
-
-		b_0 = B[0+bs*3];
-		b_1 = B[1+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*3] + a_1 * b_1;
-
-		D[0+bs*3] = c_0;
-		D[1+bs*3] = c_1;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		b_1 = B[1+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-		c_1 = beta0 * C[1+bs*0] + a_1 * b_1;
-
-		D[0+bs*0] = c_0;
-		D[1+bs*0] = c_1;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-	
-	}
-#endif
-
-
-
-// A is the diagonal of a matrix
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemm_diag_left_1_lib4(int kmax, float *alpha, float *A, float *B, float *beta, float *C, float *D)
-	{
-	
-	if(kmax<=0)
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		alpha0, beta0,
-		a_0,
-		b_0,
-		c_0;
-		
-	alpha0 = alpha[0];
-	beta0  = beta[0];
-		
-	a_0 = alpha0 * A[0];
-		
-	for(k=0; k<kmax-3; k+=4)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-		
-
-		b_0 = B[0+bs*1];
-		
-		c_0 = beta0 * C[0+bs*1] + a_0 * b_0;
-
-		D[0+bs*1] = c_0;
-		
-
-		b_0 = B[0+bs*2];
-		
-		c_0 = beta0 * C[0+bs*2] + a_0 * b_0;
-
-		D[0+bs*2] = c_0;
-		
-
-		b_0 = B[0+bs*3];
-		
-		c_0 = beta0 * C[0+bs*3] + a_0 * b_0;
-
-		D[0+bs*3] = c_0;
-
-		B += 16;
-		C += 16;
-		D += 16;
-		
-		}
-	for(; k<kmax; k++)
-		{
-		
-		b_0 = B[0+bs*0];
-		
-		c_0 = beta0 * C[0+bs*0] + a_0 * b_0;
-
-		D[0+bs*0] = c_0;
-	
-		B += 4;
-		C += 4;
-		D += 4;
-		
-		}
-		
-	}
-#endif
-
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_sgemv_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_sgemv_4_lib4.c
deleted file mode 100644
index 03975f4..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_sgemv_4_lib4.c
+++ /dev/null
@@ -1,1010 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_n_4_gen_lib4(int kmax, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k0, int k1)
-	{
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		x_0,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		x_0 = x[1];
-
-		y_0 += A[0+bs*1] * x_0;
-		y_1 += A[1+bs*1] * x_0;
-		y_2 += A[2+bs*1] * x_0;
-		y_3 += A[3+bs*1] * x_0;
-		
-		x_0 = x[2];
-
-		y_0 += A[0+bs*2] * x_0;
-		y_1 += A[1+bs*2] * x_0;
-		y_2 += A[2+bs*2] * x_0;
-		y_3 += A[3+bs*2] * x_0;
-		
-		x_0 = x[3];
-
-		y_0 += A[0+bs*3] * x_0;
-		y_1 += A[1+bs*3] * x_0;
-		y_2 += A[2+bs*3] * x_0;
-		y_3 += A[3+bs*3] * x_0;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	for(; k<kmax; k++)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		A += 1*bs;
-		x += 1;
-
-		}
-
-	y_0 = alpha[0]*y_0 + beta[0]*y[0];
-	y_1 = alpha[0]*y_1 + beta[0]*y[1];
-	y_2 = alpha[0]*y_2 + beta[0]*y[2];
-	y_3 = alpha[0]*y_3 + beta[0]*y[3];
-
-	if(k0<=0 & k1>3)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		if(k0<=0 & k1>0) z[0] = y_0;
-		if(k0<=1 & k1>1) z[1] = y_1;
-		if(k0<=2 & k1>2) z[2] = y_2;
-		if(k0<=3 & k1>3) z[3] = y_3;
-		}
-
-	}
-#endif
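The gemv kernels in this file tile the ordinary matrix-vector product z = alpha * A * x + beta * y over 4-row (or, for the transposed variants, 4-column) slices; the _gen_ variant above additionally masks which of its four outputs are stored via k0 and k1. A scalar sketch of the untiled operation (plain column-major storage, illustrative names, not the BLASFEO API):

static void ref_gemv_n(int m, int n, float alpha, const float *A, int lda,
		const float *x, float beta, const float *y, float *z)
	{
	for(int i = 0; i < m; i++)
		{
		float acc = 0.0f;
		for(int j = 0; j < n; j++)
			acc += A[i + lda * j] * x[j];     // row i of A dotted with x
		z[i] = alpha * acc + beta * y[i];
		}
	}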
-	
-	
-	
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_n_4_lib4(int kmax, float *alpha, float *A, float *x, float *beta, float *y, float *z)
-	{
-
-	kernel_sgemv_n_4_gen_lib4(kmax, alpha, A, x, beta, y, z, 0, 4);
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_n_4_vs_lib4(int kmax, float *alpha, float *A, float *x, float *beta, float *y, float *z, int k1)
-	{
-
-	kernel_sgemv_n_4_gen_lib4(kmax, alpha, A, x, beta, y, z, 0, k1);
-
-	}
-#endif
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_t_4_gen_lib4(int kmax, float *alpha, int offA, float *A, int sda, float *x, float *beta, float *y, float *z, int km)
-	{
-
-	const int bs  = 4;
-	
-	int k, kend;
-	
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	if(offA!=0) // 1, 2, 3
-		{
-		kend = 4-offA<kmax ? 4-offA : kmax;
-		for(; k<kend; k++)
-			{
-			
-			x_0 = x[0];
-		
-			y_0 += A[0+bs*0] * x_0;
-			y_1 += A[0+bs*1] * x_0;
-			y_2 += A[0+bs*2] * x_0;
-			y_3 += A[0+bs*3] * x_0;
-		
-			A += 1;
-			x += 1;
-			
-			}
-		A += bs*(sda-1);
-		}
-	for(; k<kmax-bs+1; k+=bs)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-
-		y_0 += A[1+bs*0] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[1+bs*2] * x_1;
-		y_3 += A[1+bs*3] * x_1;
-		
-		y_0 += A[2+bs*0] * x_2;
-		y_1 += A[2+bs*1] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[2+bs*3] * x_2;
-
-		y_0 += A[3+bs*0] * x_3;
-		y_1 += A[3+bs*1] * x_3;
-		y_2 += A[3+bs*2] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-	
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-	
-		A += 1;
-		x += 1;
-		
-		}
-
-	y_0 = alpha[0]*y_0 + beta[0]*y[0];
-	y_1 = alpha[0]*y_1 + beta[0]*y[1];
-	y_2 = alpha[0]*y_2 + beta[0]*y[2];
-	y_3 = alpha[0]*y_3 + beta[0]*y[3];
-
-	if(km>=4)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		z[0] = y_0;
-		if(km>=2)
-			{
-			z[1] = y_1;
-			if(km>2)
-				{
-				z[2] = y_2;
-				}
-			}
-		}
-
-	}
-#endif
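The transposed variant above produces one output per column of A: z = alpha * A^T * x + beta * y for a 4-column slice, with km masking the stores. Scalar sketch under the same assumptions:

static void ref_gemv_t(int m, int n, float alpha, const float *A, int lda,
		const float *x, float beta, const float *y, float *z)
	{
	for(int j = 0; j < n; j++)
		{
		float acc = 0.0f;
		for(int i = 0; i < m; i++)
			acc += A[i + lda * j] * x[i];     // column j of A dotted with x
		z[j] = alpha * acc + beta * y[j];
		}
	}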
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_t_4_lib4(int kmax, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z)
-	{
-
-	kernel_sgemv_t_4_gen_lib4(kmax, alpha, 0, A, sda, x, beta, y, z, 4);
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgemv_t_4_vs_lib4(int kmax, float *alpha, float *A, int sda, float *x, float *beta, float *y, float *z, int k1)
-	{
-
-	kernel_sgemv_t_4_gen_lib4(kmax, alpha, 0, A, sda, x, beta, y, z, k1);
-
-	}
-#endif
-
-
-
-
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_ln_inv_4_vs_lib4(int kmax, float *A, float *inv_diag_A, float *x, float *y, float *z, int km, int kn)
-	{
-
-	const int bs = 4;
-	
-	int k;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[1+bs*0] * x_0;
-		y_2 -= A[2+bs*0] * x_0;
-		y_3 -= A[3+bs*0] * x_0;
-
-		y_0 -= A[0+bs*1] * x_1;
-		y_1 -= A[1+bs*1] * x_1;
-		y_2 -= A[2+bs*1] * x_1;
-		y_3 -= A[3+bs*1] * x_1;
-
-		y_0 -= A[0+bs*2] * x_2;
-		y_1 -= A[1+bs*2] * x_2;
-		y_2 -= A[2+bs*2] * x_2;
-		y_3 -= A[3+bs*2] * x_2;
-
-		y_0 -= A[0+bs*3] * x_3;
-		y_1 -= A[1+bs*3] * x_3;
-		y_2 -= A[2+bs*3] * x_3;
-		y_3 -= A[3+bs*3] * x_3;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-	y_3 = y[3] + y_3;
-
-	float
-		a_00, a_10, a_20, a_30,
-		a_11, a_21, a_31;
-	
-	// a_00
-	a_00 = inv_diag_A[0];
-	a_10 = A[1+bs*0];
-	a_20 = A[2+bs*0];
-	a_30 = A[3+bs*0];
-	y_0 *= a_00;
-	z[0] = y_0;
-	y_1 -= a_10 * y_0;
-	y_2 -= a_20 * y_0;
-	y_3 -= a_30 * y_0;
-
-	if(kn==1)
-		{
-		if(km==1)
-			return;
-		y[1] = y_1;
-		if(km==2)
-			return;
-		y[2] = y_2;
-		if(km==3)
-			return;
-		y[3] = y_3;
-		return;
-		}
-
-	// a_11
-	a_11 = inv_diag_A[1];
-	a_21 = A[2+bs*1];
-	a_31 = A[3+bs*1];
-	y_1 *= a_11;	
-	z[1] = y_1;
-	y_2 -= a_21 * y_1;
-	y_3 -= a_31 * y_1;
-
-	if(kn==2)
-		{
-		if(km==2)
-			return;
-		y[2] = y_2;
-		if(km==3)
-			return;
-		y[3] = y_3;
-		return;
-		}
-
-	// a_22
-	a_00 = inv_diag_A[2];
-	a_10 = A[3+bs*2];
-	y_2 *= a_00;
-	z[2] = y_2;
-	y_3 -= a_10 * y_2;
-
-	if(kn==3)
-		{
-		if(km==3)
-			return;
-		y[3] = y_3;
-
-		return;
-		}
-
-	// a_33
-	a_11 = inv_diag_A[3];
-	y_3 *= a_11;	
-	z[3] = y_3;
-
-	}
-#endif
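After the gemv-style accumulation and the addition of y, the solve step of the kernel above is a forward substitution against a lower triangle whose diagonal reciprocals are precomputed in inv_diag_A. A scalar sketch of that solve for a general size (plain column-major storage, illustrative names):

static void ref_trsv_lower_inv(int n, const float *L, int ldl,
		const float *inv_diag, const float *b, float *z)
	{
	// solve L * z = b with L lower triangular, inv_diag[i] == 1 / L[i][i]
	for(int i = 0; i < n; i++)
		{
		float tmp = b[i];
		for(int j = 0; j < i; j++)
			tmp -= L[i + ldl * j] * z[j];     // subtract already-solved terms
		z[i] = tmp * inv_diag[i];             // multiply instead of divide
		}
	}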
-	
-
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_ln_inv_4_lib4(int kmax, float *A, float *inv_diag_A, float *x, float *y, float *z)
-	{
-
-	kernel_strsv_ln_inv_4_vs_lib4(kmax, A, inv_diag_A, x, y, z, 4, 4);
-
-
-	}
-#endif
-	
-	
-		
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_lt_inv_4_lib4(int kmax, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	float *tA, *tx;
-	tA = A;
-	tx = x;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=4;
-	A += 4 + (sda-1)*bs;
-	x += 4;
-	for(; k<kmax-3; k+=4)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		y_3 -= A[0+bs*3] * x_0;
-
-		y_0 -= A[1+bs*0] * x_1;
-		y_1 -= A[1+bs*1] * x_1;
-		y_2 -= A[1+bs*2] * x_1;
-		y_3 -= A[1+bs*3] * x_1;
-		
-		y_0 -= A[2+bs*0] * x_2;
-		y_1 -= A[2+bs*1] * x_2;
-		y_2 -= A[2+bs*2] * x_2;
-		y_3 -= A[2+bs*3] * x_2;
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-		y_2 -= A[3+bs*2] * x_3;
-		y_3 -= A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		y_3 -= A[0+bs*3] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-	
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-	y_3 = y[3] + y_3;
-
-	A = tA;
-	x = tx;
-
-	// bottom triangle
-	y_3 *= inv_diag_A[3];
-	z[3] = y_3;
-
-	y_2 -= A[3+bs*2] * y_3;
-	y_2 *= inv_diag_A[2];
-	z[2] = y_2;
-
-	// square
-	y_0 -= A[2+bs*0]*y_2 + A[3+bs*0]*y_3;
-	y_1 -= A[2+bs*1]*y_2 + A[3+bs*1]*y_3;
-		
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
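The lt variants solve with the transpose of a lower-triangular matrix, so substitution runs from the last unknown backwards, reading columns of L as rows of L^T. Scalar sketch, same assumptions as above:

static void ref_trsv_lower_trans_inv(int n, const float *L, int ldl,
		const float *inv_diag, const float *b, float *z)
	{
	// solve L^T * z = b, with inv_diag[i] == 1 / L[i][i]
	for(int i = n - 1; i >= 0; i--)
		{
		float tmp = b[i];
		for(int j = i + 1; j < n; j++)
			tmp -= L[j + ldl * i] * z[j];     // (L^T)[i][j] == L[j][i]
		z[i] = tmp * inv_diag[i];
		}
	}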
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_lt_inv_3_lib4(int kmax, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	float *tA, *tx;
-	tA = A;
-	tx = x;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0;
-	
-	k = 3;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_3 = x[3];
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-		y_2 -= A[3+bs*2] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_1 -= A[0+bs*1] * x_0;
-			y_2 -= A[0+bs*2] * x_0;
-
-			y_0 -= A[1+bs*0] * x_1;
-			y_1 -= A[1+bs*1] * x_1;
-			y_2 -= A[1+bs*2] * x_1;
-			
-			y_0 -= A[2+bs*0] * x_2;
-			y_1 -= A[2+bs*1] * x_2;
-			y_2 -= A[2+bs*2] * x_2;
-
-			y_0 -= A[3+bs*0] * x_3;
-			y_1 -= A[3+bs*1] * x_3;
-			y_2 -= A[3+bs*2] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 3;
-		x += 1;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		y_2 -= A[0+bs*2] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-	y_2 = y[2] + y_2;
-
-	A = tA;
-	x = tx;
-
-	// bottom triangle
-	y_2 *= inv_diag_A[2];
-	z[2] = y_2;
-
-	// square
-	y_0 -= A[2+bs*0]*y_2;
-	y_1 -= A[2+bs*1]*y_2;
-		
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_lt_inv_2_lib4(int kmax, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	float *tA, *tx;
-	tA = A;
-	tx = x;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0;
-	
-	k = 2;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[2+bs*0] * x_2;
-		y_1 -= A[2+bs*1] * x_2;
-
-		y_0 -= A[3+bs*0] * x_3;
-		y_1 -= A[3+bs*1] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_1 -= A[0+bs*1] * x_0;
-
-			y_0 -= A[1+bs*0] * x_1;
-			y_1 -= A[1+bs*1] * x_1;
-			
-			y_0 -= A[2+bs*0] * x_2;
-			y_1 -= A[2+bs*1] * x_2;
-
-			y_0 -= A[3+bs*0] * x_3;
-			y_1 -= A[3+bs*1] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 2;
-		x += 2;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		y_1 -= A[0+bs*1] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-	y_1 = y[1] + y_1;
-
-	A = tA;
-	x = tx;
-
-	// top triangle
-	y_1 *= inv_diag_A[1];
-	z[1] = y_1;
-
-	y_0 -= A[1+bs*0] * y_1;
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strsv_lt_inv_1_lib4(int kmax, float *A, int sda, float *inv_diag_A, float *x, float *y, float *z)
-	{
-
-	const int bs = 4;
-	
-	int
-		k;
-	
-	float *tA, *tx;
-	tA = A;
-	tx = x;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0;
-	
-	k = 1;
-	if(kmax>4)
-		{
-		// clean up at the beginning
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 -= A[1+bs*0] * x_1;
-		y_0 -= A[2+bs*0] * x_2;
-		y_0 -= A[3+bs*0] * x_3;
-
-		k=4;
-		A += 4 + (sda-1)*bs;
-		x += 4;
-		for(; k<kmax-3; k+=4)
-			{
-			
-			x_0 = x[0];
-			x_1 = x[1];
-			x_2 = x[2];
-			x_3 = x[3];
-			
-			y_0 -= A[0+bs*0] * x_0;
-			y_0 -= A[1+bs*0] * x_1;
-			y_0 -= A[2+bs*0] * x_2;
-			y_0 -= A[3+bs*0] * x_3;
-			
-			A += sda*bs;
-			x += 4;
-
-			}
-		}
-	else
-		{
-		A += 1;
-		x += 1;
-		}
-	for(; k<kmax; k++)
-		{
-		
-		x_0 = x[0];
-		
-		y_0 -= A[0+bs*0] * x_0;
-		
-		A += 1;//sda*bs;
-		x += 1;
-
-		}
-
-	y_0 = y[0] + y_0;
-
-	A = tA;
-	x = tx;
-
-	// top triangle
-	y_0 *= inv_diag_A[0];
-	z[0] = y_0;
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmv_un_4_lib4(int kmax, float *A, float *x, float *z)
-	{
-
-	const int bs = 4;
-	
-	int k;
-
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	x_0 = x[0];
-	x_1 = x[1];
-	x_2 = x[2];
-	x_3 = x[3];
-
-	y_0 += A[0+bs*0] * x_0;
-/*	y_1 += A[1+bs*0] * x_0;*/
-/*	y_2 += A[2+bs*0] * x_0;*/
-/*	y_3 += A[3+bs*0] * x_0;*/
-
-	y_0 += A[0+bs*1] * x_1;
-	y_1 += A[1+bs*1] * x_1;
-/*	y_2 += A[2+bs*1] * x_1;*/
-/*	y_3 += A[3+bs*1] * x_1;*/
-
-	y_0 += A[0+bs*2] * x_2;
-	y_1 += A[1+bs*2] * x_2;
-	y_2 += A[2+bs*2] * x_2;
-/*	y_3 += A[3+bs*2] * x_2;*/
-
-	y_0 += A[0+bs*3] * x_3;
-	y_1 += A[1+bs*3] * x_3;
-	y_2 += A[2+bs*3] * x_3;
-	y_3 += A[3+bs*3] * x_3;
-	
-	A += 4*bs;
-	x += 4;
-
-	k=4;
-	for(; k<kmax-3; k+=4)
-		{
-
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-
-		y_0 += A[0+bs*1] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[2+bs*1] * x_1;
-		y_3 += A[3+bs*1] * x_1;
-
-		y_0 += A[0+bs*2] * x_2;
-		y_1 += A[1+bs*2] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[3+bs*2] * x_2;
-
-		y_0 += A[0+bs*3] * x_3;
-		y_1 += A[1+bs*3] * x_3;
-		y_2 += A[2+bs*3] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += 4*bs;
-		x += 4;
-
-		}
-
-	for(; k<kmax; k++)
-		{
-
-		x_0 = x[0];
-
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[1+bs*0] * x_0;
-		y_2 += A[2+bs*0] * x_0;
-		y_3 += A[3+bs*0] * x_0;
-		
-		A += 1*bs;
-		x += 1;
-
-		}
-
-	z[0] = y_0;
-	z[1] = y_1;
-	z[2] = y_2;
-	z[3] = y_3;
-
-	}
-#endif
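The kernel above computes one 4-row slice of z = U * x with U upper triangular: only the leading 4x4 triangle is partial, every later column contributes to all four outputs. A scalar sketch of the full product (plain column-major storage, illustrative names):

static void ref_trmv_upper_n(int n, const float *U, int ldu,
		const float *x, float *z)
	{
	// z = U * x with U upper triangular
	for(int i = 0; i < n; i++)
		{
		float acc = 0.0f;
		for(int j = i; j < n; j++)            // entries below the diagonal are zero
			acc += U[i + ldu * j] * x[j];
		z[i] = acc;
		}
	}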
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmv_ut_4_vs_lib4(int kmax, float *A, int sda, float *x, float *z, int km)
-	{
-
-	const int bs  = 4;
-	
-	int
-		k;
-	
-	float
-		x_0, x_1, x_2, x_3,
-		y_0=0, y_1=0, y_2=0, y_3=0;
-	
-	k=0;
-	for(; k<kmax-4; k+=4)
-		{
-		
-		x_0 = x[0];
-		x_1 = x[1];
-		x_2 = x[2];
-		x_3 = x[3];
-		
-		y_0 += A[0+bs*0] * x_0;
-		y_1 += A[0+bs*1] * x_0;
-		y_2 += A[0+bs*2] * x_0;
-		y_3 += A[0+bs*3] * x_0;
-
-		y_0 += A[1+bs*0] * x_1;
-		y_1 += A[1+bs*1] * x_1;
-		y_2 += A[1+bs*2] * x_1;
-		y_3 += A[1+bs*3] * x_1;
-		
-		y_0 += A[2+bs*0] * x_2;
-		y_1 += A[2+bs*1] * x_2;
-		y_2 += A[2+bs*2] * x_2;
-		y_3 += A[2+bs*3] * x_2;
-
-		y_0 += A[3+bs*0] * x_3;
-		y_1 += A[3+bs*1] * x_3;
-		y_2 += A[3+bs*2] * x_3;
-		y_3 += A[3+bs*3] * x_3;
-		
-		A += sda*bs;
-		x += 4;
-
-		}
-
-	x_0 = x[0];
-	x_1 = x[1];
-	x_2 = x[2];
-	x_3 = x[3];
-	
-	y_0 += A[0+bs*0] * x_0;
-	y_1 += A[0+bs*1] * x_0;
-	y_2 += A[0+bs*2] * x_0;
-	y_3 += A[0+bs*3] * x_0;
-
-/*	y_0 += A[1+bs*0] * x_1;*/
-	y_1 += A[1+bs*1] * x_1;
-	y_2 += A[1+bs*2] * x_1;
-	y_3 += A[1+bs*3] * x_1;
-	
-/*	y_0 += A[2+bs*0] * x_2;*/
-/*	y_1 += A[2+bs*1] * x_2;*/
-	y_2 += A[2+bs*2] * x_2;
-	y_3 += A[2+bs*3] * x_2;
-
-/*	y_0 += A[3+bs*0] * x_3;*/
-/*	y_1 += A[3+bs*1] * x_3;*/
-/*	y_2 += A[3+bs*2] * x_3;*/
-	y_3 += A[3+bs*3] * x_3;
-	
-//	A += sda*bs;
-//	x += 4;
-
-	// store_vs
-	store:
-	if(km>=4)
-		{
-		z[0] = y_0;
-		z[1] = y_1;
-		z[2] = y_2;
-		z[3] = y_3;
-		}
-	else
-		{
-		z[0] = y_0;
-		if(km>=2)
-			{
-			z[1] = y_1;
-			if(km>2)
-				{
-				z[2] = y_2;
-				}
-			}
-		}
-
-	}
-#endif
-	
-	
-	
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_strmv_ut_4_lib4(int kmax, float *A, int sda, float *x, float *z)
-	{
-	
-	kernel_strmv_ut_4_vs_lib4(kmax, A, sda, x, z, 4);
-
-	}
-#endif
-
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_sgetrf_pivot_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
deleted file mode 100644
index fdec8de..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_sgetrf_pivot_4_lib4.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <math.h>
-#include <stdio.h>
-
-#include "../../include/blasfeo_common.h"
-#include "../../include/blasfeo_s_aux.h"
-
-
-
-// C numbering, starting from 0
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void sidamax_lib4(int n, int offset, float *pA, int sda, int *p_idamax, float *p_amax)
-	{
-
-	int idamax, ii;
-	float tmp, amax;
-		
-	p_idamax[0] = -1;
-	if(n<1)
-		return;
-
-	const int bs = 4;
-
-	int na = (bs - offset%bs)%bs;
-	na = n<na ? n : na;
-
-	amax = -1.0;
-	ii = 0;
-	if(na>0)
-		{
-		for( ; ii<na; ii++)
-			{
-			tmp = fabs(pA[0]);
-			if(tmp>amax)
-				{
-				idamax = ii+0;
-				amax = tmp;
-				}
-			pA += 1;
-			}
-		pA += bs*(sda-1);
-		}
-	for( ; ii<n-3; ii+=4)
-		{
-		tmp = fabs(pA[0]);
-		if(tmp>amax)
-			{
-			idamax = ii+0;
-			amax = tmp;
-			}
-		tmp = fabs(pA[1]);
-		if(tmp>amax)
-			{
-			idamax = ii+1;
-			amax = tmp;
-			}
-		tmp = fabs(pA[2]);
-		if(tmp>amax)
-			{
-			idamax = ii+2;
-			amax = tmp;
-			}
-		tmp = fabs(pA[3]);
-		if(tmp>amax)
-			{
-			idamax = ii+3;
-			amax = tmp;
-			}
-		pA += bs*sda;
-		}
-	for( ; ii<n; ii++)
-		{
-		tmp = fabs(pA[0]);
-		if(tmp>amax)
-			{
-			idamax = ii+0;
-			amax = tmp;
-			}
-		pA += 1;
-		}
-	
-	p_amax[0] = amax;
-	p_idamax[0] = idamax;
-
-	return;
-
-	}
-#endif
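sidamax_lib4 is the pivot search: it returns the index of the entry with the largest absolute value in a panel-major column, as in BLAS isamax but 0-based. A plain-array sketch of the same search (fabsf comes from <math.h>, which this file already includes; names are illustrative):

static int ref_isamax(int n, const float *x)
	{
	if(n < 1)
		return -1;                            // matches the kernel's empty-input convention
	int imax = 0;
	float amax = fabsf(x[0]);
	for(int i = 1; i < n; i++)
		{
		float t = fabsf(x[i]);
		if(t > amax)                          // strict '>' keeps the first maximum
			{
			amax = t;
			imax = i;
			}
		}
	return imax;
	}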
-
-
-
-// C numbering (starting from zero) in the ipiv
-// it processes m>=4 rows and 4 cols
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgetrf_pivot_4_lib4(int m, float *pA, int sda, float *inv_diag_A, int* ipiv)
-	{
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	float
-		tmp0, tmp1, tmp2, tmp3,
-		u_00, u_01, u_02, u_03,
-		      u_11, u_12, u_13,
-		            u_22, u_23,
-		                  u_33;
-	
-	float
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	// first column
-	sidamax_lib4(m-0, 0, &pA[0+bs*0], sda, &idamax, &tmp0);
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			srowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		tmp0 = 1.0 / pA[0+bs*0];
-		inv_diag_A[0] = tmp0;
-		pA[1+bs*0] *= tmp0;
-		pA[2+bs*0] *= tmp0;
-		pA[3+bs*0] *= tmp0;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*0] *= tmp0;
-			pB[1+bs*0] *= tmp0;
-			pB[2+bs*0] *= tmp0;
-			pB[3+bs*0] *= tmp0;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*0] *= tmp0;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[0] = 0.0;
-		}
-
-	// second column
-	u_01  = pA[0+bs*1];
-	tmp1  = pA[1+bs*1];
-	tmp2  = pA[2+bs*1];
-	tmp3  = pA[3+bs*1];
-	tmp1 -= pA[1+bs*0] * u_01;
-	tmp2 -= pA[2+bs*0] * u_01;
-	tmp3 -= pA[3+bs*0] * u_01;
-	pA[1+bs*1] = tmp1;
-	pA[2+bs*1] = tmp2;
-	pA[3+bs*1] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*1];
-		tmp1  = pB[1+bs*1];
-		tmp2  = pB[2+bs*1];
-		tmp3  = pB[3+bs*1];
-		tmp0 -= pB[0+bs*0] * u_01;
-		tmp1 -= pB[1+bs*0] * u_01;
-		tmp2 -= pB[2+bs*0] * u_01;
-		tmp3 -= pB[3+bs*0] * u_01;
-		pB[0+bs*1] = tmp0;
-		pB[1+bs*1] = tmp1;
-		pB[2+bs*1] = tmp2;
-		pB[3+bs*1] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0 = pB[0+bs*1];
-		tmp0 -= pB[0+bs*0] * u_01;
-		pB[0+bs*1] = tmp0;
-		pB += 1;
-		}
-
-	sidamax_lib4(m-1, 1, &pA[1+bs*1], sda, &idamax, &tmp1);
-	ipiv[1] = idamax+1;
-	if(tmp1!=0)
-		{
-		if(ipiv[1]!=1)
-			srowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		tmp1 = 1.0 / pA[1+bs*1];
-		inv_diag_A[1] = tmp1;
-		pA[2+bs*1] *= tmp1;
-		pA[3+bs*1] *= tmp1;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*1] *= tmp1;
-			pB[1+bs*1] *= tmp1;
-			pB[2+bs*1] *= tmp1;
-			pB[3+bs*1] *= tmp1;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*1] *= tmp1;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[1] = 0.0;
-		}
-
-	// third column
-	u_02  = pA[0+bs*2];
-	u_12  = pA[1+bs*2];
-	u_12 -= pA[1+bs*0] * u_02;
-	pA[1+bs*2] = u_12;
-	tmp2  = pA[2+bs*2];
-	tmp3  = pA[3+bs*2];
-	tmp2 -= pA[2+bs*0] * u_02;
-	tmp3 -= pA[3+bs*0] * u_02;
-	tmp2 -= pA[2+bs*1] * u_12;
-	tmp3 -= pA[3+bs*1] * u_12;
-	pA[2+bs*2] = tmp2;
-	pA[3+bs*2] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*2];
-		tmp1  = pB[1+bs*2];
-		tmp2  = pB[2+bs*2];
-		tmp3  = pB[3+bs*2];
-		tmp0 -= pB[0+bs*0] * u_02;
-		tmp1 -= pB[1+bs*0] * u_02;
-		tmp2 -= pB[2+bs*0] * u_02;
-		tmp3 -= pB[3+bs*0] * u_02;
-		tmp0 -= pB[0+bs*1] * u_12;
-		tmp1 -= pB[1+bs*1] * u_12;
-		tmp2 -= pB[2+bs*1] * u_12;
-		tmp3 -= pB[3+bs*1] * u_12;
-		pB[0+bs*2] = tmp0;
-		pB[1+bs*2] = tmp1;
-		pB[2+bs*2] = tmp2;
-		pB[3+bs*2] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0  = pB[0+bs*2];
-		tmp0 -= pB[0+bs*0] * u_02;
-		tmp0 -= pB[0+bs*1] * u_12;
-		pB[0+bs*2] = tmp0;
-		pB += 1;
-		}
-
-	sidamax_lib4(m-2, 2, &pA[2+bs*2], sda, &idamax, &tmp2);
-	ipiv[2] = idamax+2;
-	if(tmp2!=0)
-		{
-		if(ipiv[2]!=2)
-			srowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-		tmp2 = 1.0 / pA[2+bs*2];
-		inv_diag_A[2] = tmp2;
-		pA[3+bs*2] *= tmp2;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*2] *= tmp2;
-			pB[1+bs*2] *= tmp2;
-			pB[2+bs*2] *= tmp2;
-			pB[3+bs*2] *= tmp2;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*2] *= tmp2;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[2] = 0.0;
-		}
-
-	// fourth column
-	u_03  = pA[0+bs*3];
-	u_13  = pA[1+bs*3];
-	u_13 -= pA[1+bs*0] * u_03;
-	pA[1+bs*3] = u_13;
-	u_23  = pA[2+bs*3];
-	u_23 -= pA[2+bs*0] * u_03;
-	u_23 -= pA[2+bs*1] * u_13;
-	pA[2+bs*3] = u_23;
-	tmp3  = pA[3+bs*3];
-	tmp3 -= pA[3+bs*0] * u_03;
-	tmp3 -= pA[3+bs*1] * u_13;
-	tmp3 -= pA[3+bs*2] * u_23;
-	pA[3+bs*3] = tmp3;
-	pB = pA + bs*sda;
-	for(k=0; k<ma-3; k+=4)
-		{
-		tmp0  = pB[0+bs*3];
-		tmp1  = pB[1+bs*3];
-		tmp2  = pB[2+bs*3];
-		tmp3  = pB[3+bs*3];
-		tmp0 -= pB[0+bs*0] * u_03;
-		tmp1 -= pB[1+bs*0] * u_03;
-		tmp2 -= pB[2+bs*0] * u_03;
-		tmp3 -= pB[3+bs*0] * u_03;
-		tmp0 -= pB[0+bs*1] * u_13;
-		tmp1 -= pB[1+bs*1] * u_13;
-		tmp2 -= pB[2+bs*1] * u_13;
-		tmp3 -= pB[3+bs*1] * u_13;
-		tmp0 -= pB[0+bs*2] * u_23;
-		tmp1 -= pB[1+bs*2] * u_23;
-		tmp2 -= pB[2+bs*2] * u_23;
-		tmp3 -= pB[3+bs*2] * u_23;
-		pB[0+bs*3] = tmp0;
-		pB[1+bs*3] = tmp1;
-		pB[2+bs*3] = tmp2;
-		pB[3+bs*3] = tmp3;
-		pB += bs*sda;
-		}
-	for( ; k<ma; k++)
-		{
-		tmp0  = pB[0+bs*3];
-		tmp0 -= pB[0+bs*0] * u_03;
-		tmp0 -= pB[0+bs*1] * u_13;
-		tmp0 -= pB[0+bs*2] * u_23;
-		pB[0+bs*3] = tmp0;
-		pB += 1;
-		}
-
-	sidamax_lib4(m-3, 3, &pA[3+bs*3], sda, &idamax, &tmp3);
-	ipiv[3] = idamax+3;
-	if(tmp3!=0)
-		{
-		if(ipiv[3]!=3)
-			srowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-		tmp3 = 1.0 / pA[3+bs*3];
-		inv_diag_A[3] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			pB[0+bs*3] *= tmp3;
-			pB[1+bs*3] *= tmp3;
-			pB[2+bs*3] *= tmp3;
-			pB[3+bs*3] *= tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			pB[0+bs*3] *= tmp3;
-			pB += 1;
-			}
-		}
-	else
-		{
-		inv_diag_A[3] = 0.0;
-		}
-	
-	return;
-
-	}
-#endif
-
-
-
-// it processes m>0 rows and 0<n<=4 cols
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_HASWELL) || defined(TARGET_X64_INTEL_SANDY_BRIDGE) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15) || defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-void kernel_sgetrf_pivot_4_vs_lib4(int m, int n, float *pA, int sda, float *inv_diag_A, int* ipiv)
-	{
-
-	if(m<=0 || n<=0)
-		return;
-
-	const int bs = 4;
-
-	// assume m>=4
-	int ma = m-4;
-
-	float
-		tmp0, tmp1, tmp2, tmp3,
-		u_00, u_01, u_02, u_03,
-		      u_11, u_12, u_13,
-		            u_22, u_23,
-		                  u_33;
-	
-	float
-		*pB;
-	
-	int 
-		k, idamax;
-	
-	// first column
-
-	// find pivot & scale
-	sidamax_lib4(m-0, 0, &pA[0+bs*0], sda, &idamax, &tmp0);
-	ipiv[0] = idamax;
-	if(tmp0!=0.0)
-		{
-		if(ipiv[0]!=0)
-			srowsw_lib(4, pA+0, pA+ipiv[0]/bs*bs*sda+ipiv[0]%bs);
-
-		tmp0 = 1.0 / pA[0+bs*0];
-		inv_diag_A[0] = tmp0;
-		if(m>=4)
-			{
-			pA[1+bs*0] *= tmp0;
-			pA[2+bs*0] *= tmp0;
-			pA[3+bs*0] *= tmp0;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*0] *= tmp0;
-				pB[1+bs*0] *= tmp0;
-				pB[2+bs*0] *= tmp0;
-				pB[3+bs*0] *= tmp0;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*0] *= tmp0;
-				pB += 1;
-				}
-			}
-		else // m = {1,2,3}
-			{
-			if(m>1)
-				{
-				pA[1+bs*0] *= tmp0;
-				if(m>2)
-					pA[2+bs*0] *= tmp0;
-				}
-			}
-		}
-	else
-		{
-		inv_diag_A[0] = 0.0;
-		}
-	
-	if(n==1 || m==1) // XXX for the first row there is nothing to do, so we can return here
-		return;
-
-	// second column
-
-	// correct
-	if(m>=4)
-		{
-		u_01  = pA[0+bs*1];
-		tmp1  = pA[1+bs*1];
-		tmp2  = pA[2+bs*1];
-		tmp3  = pA[3+bs*1];
-		tmp1 -= pA[1+bs*0] * u_01;
-		tmp2 -= pA[2+bs*0] * u_01;
-		tmp3 -= pA[3+bs*0] * u_01;
-		pA[1+bs*1] = tmp1;
-		pA[2+bs*1] = tmp2;
-		pA[3+bs*1] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*1];
-			tmp1  = pB[1+bs*1];
-			tmp2  = pB[2+bs*1];
-			tmp3  = pB[3+bs*1];
-			tmp0 -= pB[0+bs*0] * u_01;
-			tmp1 -= pB[1+bs*0] * u_01;
-			tmp2 -= pB[2+bs*0] * u_01;
-			tmp3 -= pB[3+bs*0] * u_01;
-			pB[0+bs*1] = tmp0;
-			pB[1+bs*1] = tmp1;
-			pB[2+bs*1] = tmp2;
-			pB[3+bs*1] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0 = pB[0+bs*1];
-			tmp0 -= pB[0+bs*0] * u_01;
-			pB[0+bs*1] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_01  = pA[0+bs*1];
-		tmp1  = pA[1+bs*1];
-		tmp1 -= pA[1+bs*0] * u_01;
-		pA[1+bs*1] = tmp1;
-		if(m>2)
-			{
-			tmp2  = pA[2+bs*1];
-			tmp2 -= pA[2+bs*0] * u_01;
-			pA[2+bs*1] = tmp2;
-			}
-		}
-
-	// find pivot & scale
-	sidamax_lib4(m-1, 1, &pA[1+bs*1], sda, &idamax, &tmp1);
-	ipiv[1] = idamax+1;
-	if(tmp1!=0)
-		{
-		if(ipiv[1]!=1)
-			srowsw_lib(4, pA+1, pA+ipiv[1]/bs*bs*sda+ipiv[1]%bs);
-
-		tmp1 = 1.0 / pA[1+bs*1];
-		inv_diag_A[1] = tmp1;
-		if(m>=4)
-			{
-			pA[2+bs*1] *= tmp1;
-			pA[3+bs*1] *= tmp1;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*1] *= tmp1;
-				pB[1+bs*1] *= tmp1;
-				pB[2+bs*1] *= tmp1;
-				pB[3+bs*1] *= tmp1;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*1] *= tmp1;
-				pB += 1;
-				}
-			}
-		else // m = {2,3}
-			{
-			if(m>2)
-				pA[2+bs*1] *= tmp1;
-			}
-		}
-	else
-		{
-		inv_diag_A[1] = 0.0;
-		}
-
-	if(n==2)
-		return;
-
-	// third column
-
-	// correct
-	if(m>=4)
-		{
-		u_02  = pA[0+bs*2];
-		u_12  = pA[1+bs*2];
-		u_12 -= pA[1+bs*0] * u_02;
-		pA[1+bs*2] = u_12;
-		tmp2  = pA[2+bs*2];
-		tmp3  = pA[3+bs*2];
-		tmp2 -= pA[2+bs*0] * u_02;
-		tmp3 -= pA[3+bs*0] * u_02;
-		tmp2 -= pA[2+bs*1] * u_12;
-		tmp3 -= pA[3+bs*1] * u_12;
-		pA[2+bs*2] = tmp2;
-		pA[3+bs*2] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*2];
-			tmp1  = pB[1+bs*2];
-			tmp2  = pB[2+bs*2];
-			tmp3  = pB[3+bs*2];
-			tmp0 -= pB[0+bs*0] * u_02;
-			tmp1 -= pB[1+bs*0] * u_02;
-			tmp2 -= pB[2+bs*0] * u_02;
-			tmp3 -= pB[3+bs*0] * u_02;
-			tmp0 -= pB[0+bs*1] * u_12;
-			tmp1 -= pB[1+bs*1] * u_12;
-			tmp2 -= pB[2+bs*1] * u_12;
-			tmp3 -= pB[3+bs*1] * u_12;
-			pB[0+bs*2] = tmp0;
-			pB[1+bs*2] = tmp1;
-			pB[2+bs*2] = tmp2;
-			pB[3+bs*2] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0  = pB[0+bs*2];
-			tmp0 -= pB[0+bs*0] * u_02;
-			tmp0 -= pB[0+bs*1] * u_12;
-			pB[0+bs*2] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_02  = pA[0+bs*2];
-		u_12  = pA[1+bs*2];
-		u_12 -= pA[1+bs*0] * u_02;
-		pA[1+bs*2] = u_12;
-		if(m>2)
-			{
-			tmp2  = pA[2+bs*2];
-			tmp2 -= pA[2+bs*0] * u_02;
-			tmp2 -= pA[2+bs*1] * u_12;
-			pA[2+bs*2] = tmp2;
-			}
-		}
-
-	// find pivot & scale
-	if(m>2)
-		{
-		sidamax_lib4(m-2, 2, &pA[2+bs*2], sda, &idamax, &tmp2);
-		ipiv[2] = idamax+2;
-		if(tmp2!=0)
-			{
-			if(ipiv[2]!=2)
-				srowsw_lib(4, pA+2, pA+ipiv[2]/bs*bs*sda+ipiv[2]%bs);
-
-			tmp2 = 1.0 / pA[2+bs*2];
-			inv_diag_A[2] = tmp2;
-			if(m>=4)
-				{
-				pA[3+bs*2] *= tmp2;
-				pB = pA + bs*sda;
-				for(k=0; k<ma-3; k+=4)
-					{
-					pB[0+bs*2] *= tmp2;
-					pB[1+bs*2] *= tmp2;
-					pB[2+bs*2] *= tmp2;
-					pB[3+bs*2] *= tmp2;
-					pB += bs*sda;
-					}
-				for( ; k<ma; k++)
-					{
-					pB[0+bs*2] *= tmp2;
-					pB += 1;
-					}
-				}
-			}
-		else
-			{
-			inv_diag_A[2] = 0.0;
-			}
-		}
-
-	if(n<4)
-		return;
-
-	// fourth column
-
-	// correct
-	if(m>=4)
-		{
-		u_03  = pA[0+bs*3];
-		u_13  = pA[1+bs*3];
-		u_13 -= pA[1+bs*0] * u_03;
-		pA[1+bs*3] = u_13;
-		u_23  = pA[2+bs*3];
-		u_23 -= pA[2+bs*0] * u_03;
-		u_23 -= pA[2+bs*1] * u_13;
-		pA[2+bs*3] = u_23;
-		tmp3  = pA[3+bs*3];
-		tmp3 -= pA[3+bs*0] * u_03;
-		tmp3 -= pA[3+bs*1] * u_13;
-		tmp3 -= pA[3+bs*2] * u_23;
-		pA[3+bs*3] = tmp3;
-		pB = pA + bs*sda;
-		for(k=0; k<ma-3; k+=4)
-			{
-			tmp0  = pB[0+bs*3];
-			tmp1  = pB[1+bs*3];
-			tmp2  = pB[2+bs*3];
-			tmp3  = pB[3+bs*3];
-			tmp0 -= pB[0+bs*0] * u_03;
-			tmp1 -= pB[1+bs*0] * u_03;
-			tmp2 -= pB[2+bs*0] * u_03;
-			tmp3 -= pB[3+bs*0] * u_03;
-			tmp0 -= pB[0+bs*1] * u_13;
-			tmp1 -= pB[1+bs*1] * u_13;
-			tmp2 -= pB[2+bs*1] * u_13;
-			tmp3 -= pB[3+bs*1] * u_13;
-			tmp0 -= pB[0+bs*2] * u_23;
-			tmp1 -= pB[1+bs*2] * u_23;
-			tmp2 -= pB[2+bs*2] * u_23;
-			tmp3 -= pB[3+bs*2] * u_23;
-			pB[0+bs*3] = tmp0;
-			pB[1+bs*3] = tmp1;
-			pB[2+bs*3] = tmp2;
-			pB[3+bs*3] = tmp3;
-			pB += bs*sda;
-			}
-		for( ; k<ma; k++)
-			{
-			tmp0  = pB[0+bs*3];
-			tmp0 -= pB[0+bs*0] * u_03;
-			tmp0 -= pB[0+bs*1] * u_13;
-			tmp0 -= pB[0+bs*2] * u_23;
-			pB[0+bs*3] = tmp0;
-			pB += 1;
-			}
-		}
-	else // m = {2,3}
-		{
-		u_03  = pA[0+bs*3];
-		u_13  = pA[1+bs*3];
-		u_13 -= pA[1+bs*0] * u_03;
-		pA[1+bs*3] = u_13;
-		if(m>2)
-			{
-			u_23  = pA[2+bs*3];
-			u_23 -= pA[2+bs*0] * u_03;
-			u_23 -= pA[2+bs*1] * u_13;
-			pA[2+bs*3] = u_23;
-			}
-		}
-
-	if(m>3)
-		{
-		// find pivot & scale
-		sidamax_lib4(m-3, 3, &pA[3+bs*3], sda, &idamax, &tmp3);
-		ipiv[3] = idamax+3;
-		if(tmp3!=0)
-			{
-			if(ipiv[3]!=3)
-				srowsw_lib(4, pA+3, pA+ipiv[3]/bs*bs*sda+ipiv[3]%bs);
-
-			tmp3 = 1.0 / pA[3+bs*3];
-			inv_diag_A[3] = tmp3;
-			pB = pA + bs*sda;
-			for(k=0; k<ma-3; k+=4)
-				{
-				pB[0+bs*3] *= tmp3;
-				pB[1+bs*3] *= tmp3;
-				pB[2+bs*3] *= tmp3;
-				pB[3+bs*3] *= tmp3;
-				pB += bs*sda;
-				}
-			for( ; k<ma; k++)
-				{
-				pB[0+bs*3] *= tmp3;
-				pB += 1;
-				}
-			}
-		else
-			{
-			inv_diag_A[3] = 0.0;
-			}
-		}
-	
-	return;
-
-	}
-#endif
-
-
-	
-
-
-
diff --git a/third_party/blasfeo/kernel/c99/kernel_ssymv_4_lib4.c b/third_party/blasfeo/kernel/c99/kernel_ssymv_4_lib4.c
deleted file mode 100644
index 5512154..0000000
--- a/third_party/blasfeo/kernel/c99/kernel_ssymv_4_lib4.c
+++ /dev/null
@@ -1,1025 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-void kernel_sgemv_nt_4_vs_lib4(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t, int km)
-	{
-
-	if(kmax<=0) 
-		return;
-	
-	const int bs = 4;
-
-	int k;
-
-	float
-		a_00, a_01, a_02, a_03,
-		x_n_0, x_n_1, x_n_2, x_n_3, y_n_0,
-		x_t_0, y_t_0, y_t_1, y_t_2, y_t_3;
-	
-	x_n_0 = 0;
-	x_n_1 = 0;
-	x_n_2 = 0;
-	x_n_3 = 0;
-
-	x_n_0 = alpha_n[0]*x_n[0];
-	if(km>1)
-		{
-		x_n_1 = alpha_n[0]*x_n[1];
-		if(km>2)
-			{
-			x_n_2 = alpha_n[0]*x_n[2];
-			if(km>3)
-				{
-				x_n_3 = alpha_n[0]*x_n[3];
-				}
-			}
-		}
-
-	y_t_0 = 0;
-	y_t_1 = 0;
-	y_t_2 = 0;
-	y_t_3 = 0;
-
-	k = 0;
-	for(; k<kmax-3; k+=bs)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-
-		// 1
-
-		y_n_0 = z_n[1]; 
-		x_t_0 = x_t[1];
-
-		a_00 = A[1+bs*0];
-		a_01 = A[1+bs*1];
-		a_02 = A[1+bs*2];
-		a_03 = A[1+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[1] = y_n_0;
-
-
-		// 2
-
-		y_n_0 = z_n[2]; 
-		x_t_0 = x_t[2];
-
-		a_00 = A[2+bs*0];
-		a_01 = A[2+bs*1];
-		a_02 = A[2+bs*2];
-		a_03 = A[2+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[2] = y_n_0;
-
-
-		// 3
-
-		y_n_0 = z_n[3]; 
-		x_t_0 = x_t[3];
-
-		a_00 = A[3+bs*0];
-		a_01 = A[3+bs*1];
-		a_02 = A[3+bs*2];
-		a_03 = A[3+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[3] = y_n_0;
-
-
-		A += sda*bs;
-		z_n += 4;
-		x_t += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		}
-	
-	// store t
-	z_t[0] = alpha_t[0]*y_t_0 + beta_t[0]*y_t[0];
-	if(km>1)
-		{
-		z_t[1] = alpha_t[0]*y_t_1 + beta_t[0]*y_t[1];
-		if(km>2)
-			{
-			z_t[2] = alpha_t[0]*y_t_2 + beta_t[0]*y_t[2];
-			if(km>3)
-				{
-				z_t[3] = alpha_t[0]*y_t_3 + beta_t[0]*y_t[3];
-				}
-			}
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-void kernel_sgemv_nt_4_lib4(int kmax, float *alpha_n, float *alpha_t, float *A, int sda, float *x_n, float *x_t, float *beta_t, float *y_t, float *z_n, float *z_t)
-	{
-
-	kernel_sgemv_nt_4_vs_lib4(kmax, alpha_n, alpha_t, A, sda, x_n, x_t, beta_t, y_t, z_n, z_t, 4);
-
-	return;
-
-	}
-#endif
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-void kernel_ssymv_l_4_gen_lib4(int kmax, float *alpha, int offA, float *A, int sda, float *x_n, float *z_n, int km)
-	{
-
-	if(kmax<=0) 
-		return;
-	
-	float *x_t = x_n;
-	float *z_t = z_n;
-
-	const int bs = 4;
-
-	int k;
-
-	float
-		a_00, a_01, a_02, a_03,
-		x_n_0, x_n_1, x_n_2, x_n_3, y_n_0,
-		x_t_0, y_t_0, y_t_1, y_t_2, y_t_3;
-	
-	x_n_0 = 0;
-	x_n_1 = 0;
-	x_n_2 = 0;
-	x_n_3 = 0;
-
-	x_n_0 = alpha[0]*x_n[0];
-	if(km>1)
-		{
-		x_n_1 = alpha[0]*x_n[1];
-		if(km>2)
-			{
-			x_n_2 = alpha[0]*x_n[2];
-			if(km>3)
-				{
-				x_n_3 = alpha[0]*x_n[3];
-				}
-			}
-		}
-
-	y_t_0 = 0;
-	y_t_1 = 0;
-	y_t_2 = 0;
-	y_t_3 = 0;
-
-	k = 0;
-	if(offA==0)
-		{
-		if(kmax<4)
-			{
-			// 0
-
-			x_t_0 = x_t[0];
-
-			a_00 = A[0+bs*0];
-			
-			y_t_0 += a_00 * x_t_0;
-
-			if(kmax==1)
-				goto store_t;
-
-			// 1
-
-			y_n_0 = z_n[1]; 
-			x_t_0 = x_t[1];
-
-			a_00 = A[1+bs*0];
-			a_01 = A[1+bs*1];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_t_1 += a_01 * x_t_0;
-
-			z_n[1] = y_n_0;
-
-			if(kmax==2)
-				goto store_t;
-
-			// 2
-
-			y_n_0 = z_n[2]; 
-			x_t_0 = x_t[2];
-
-			a_00 = A[2+bs*0];
-			a_01 = A[2+bs*1];
-			a_02 = A[2+bs*2];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_t_2 += a_02 * x_t_0;
-
-			z_n[2] = y_n_0;
-
-			goto store_t;
-			}
-		else
-			{
-
-			// 0
-
-			x_t_0 = x_t[0];
-
-			a_00 = A[0+bs*0];
-			
-			y_t_0 += a_00 * x_t_0;
-
-
-			// 1
-
-			y_n_0 = z_n[1]; 
-			x_t_0 = x_t[1];
-
-			a_00 = A[1+bs*0];
-			a_01 = A[1+bs*1];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_t_1 += a_01 * x_t_0;
-
-			z_n[1] = y_n_0;
-
-
-			// 2
-
-			y_n_0 = z_n[2]; 
-			x_t_0 = x_t[2];
-
-			a_00 = A[2+bs*0];
-			a_01 = A[2+bs*1];
-			a_02 = A[2+bs*2];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_t_2 += a_02 * x_t_0;
-
-			z_n[2] = y_n_0;
-
-
-			// 3
-
-			y_n_0 = z_n[3]; 
-			x_t_0 = x_t[3];
-
-			a_00 = A[3+bs*0];
-			a_01 = A[3+bs*1];
-			a_02 = A[3+bs*2];
-			a_03 = A[3+bs*3];
-			
-			y_n_0 += a_00 * x_n_0;
-			y_t_0 += a_00 * x_t_0;
-			y_n_0 += a_01 * x_n_1;
-			y_t_1 += a_01 * x_t_0;
-			y_n_0 += a_02 * x_n_2;
-			y_t_2 += a_02 * x_t_0;
-			y_t_3 += a_03 * x_t_0;
-
-			z_n[3] = y_n_0;
-
-			k += 4;
-			A += sda*bs;
-			z_n += 4;
-			x_t += 4;
-
-			}
-		}
-	else if(offA==1)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==5)
-			goto store_t;
-
-		// 5
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==6)
-			goto store_t;
-
-		// 6
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==7)
-			goto store_t;
-
-		k += 7;
-
-		}
-	else if(offA==2)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==5)
-			goto store_t;
-
-		// 5
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==6)
-			goto store_t;
-
-		k += 6;
-
-		}
-	else // if(offA==3)
-		{
-
-		// 0
-
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		
-		y_t_0 += a_00 * x_t_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==1)
-			goto store_t;
-
-		// 1
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_t_1 += a_01 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==2)
-			goto store_t;
-
-		// 2
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_t_2 += a_02 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==3)
-			goto store_t;
-
-		// 3
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		if(kmax==4)
-			goto store_t;
-
-		// 4
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		A += (sda-1)*bs; // new panel
-
-		if(kmax==5)
-			goto store_t;
-
-		k += 5;
-
-		}
-	for(; k<kmax-3; k+=bs)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-
-		// 1
-
-		y_n_0 = z_n[1]; 
-		x_t_0 = x_t[1];
-
-		a_00 = A[1+bs*0];
-		a_01 = A[1+bs*1];
-		a_02 = A[1+bs*2];
-		a_03 = A[1+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[1] = y_n_0;
-
-
-		// 2
-
-		y_n_0 = z_n[2]; 
-		x_t_0 = x_t[2];
-
-		a_00 = A[2+bs*0];
-		a_01 = A[2+bs*1];
-		a_02 = A[2+bs*2];
-		a_03 = A[2+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[2] = y_n_0;
-
-
-		// 3
-
-		y_n_0 = z_n[3]; 
-		x_t_0 = x_t[3];
-
-		a_00 = A[3+bs*0];
-		a_01 = A[3+bs*1];
-		a_02 = A[3+bs*2];
-		a_03 = A[3+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[3] = y_n_0;
-
-
-		A += sda*bs;
-		z_n += 4;
-		x_t += 4;
-
-		}
-	for(; k<kmax; k++)
-		{
-
-		// 0
-
-		y_n_0 = z_n[0]; 
-		x_t_0 = x_t[0];
-
-		a_00 = A[0+bs*0];
-		a_01 = A[0+bs*1];
-		a_02 = A[0+bs*2];
-		a_03 = A[0+bs*3];
-		
-		y_n_0 += a_00 * x_n_0;
-		y_t_0 += a_00 * x_t_0;
-		y_n_0 += a_01 * x_n_1;
-		y_t_1 += a_01 * x_t_0;
-		y_n_0 += a_02 * x_n_2;
-		y_t_2 += a_02 * x_t_0;
-		y_n_0 += a_03 * x_n_3;
-		y_t_3 += a_03 * x_t_0;
-
-		z_n[0] = y_n_0;
-
-		A += 1;
-		z_n += 1;
-		x_t += 1;
-
-		}
-	
-	store_t:
-	z_t[0] += alpha[0]*y_t_0;
-	if(km>1)
-		{
-		z_t[1] += alpha[0]*y_t_1;
-		if(km>2)
-			{
-			z_t[2] += alpha[0]*y_t_2;
-			if(km>3)
-				{
-				z_t[3] += alpha[0]*y_t_3;
-				}
-			}
-		}
-
-	return;
-
-	}
-#endif
-
-
-
-// XXX copy and scale y_n into z_n outside the kernel !!!!!
-#if defined(TARGET_GENERIC) || defined(TARGET_X64_INTEL_CORE) || defined(TARGET_X64_AMD_BULLDOZER) || defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-void kernel_ssymv_l_4_lib4(int kmax, float *alpha, float *A, int sda, float *x_n, float *z_n)
-	{
-
-	kernel_ssymv_l_4_gen_lib4(kmax, alpha, 0, A, sda, x_n, z_n, 4);
-
-	return;
-
-	}
-#endif
-
-
-
-
-
diff --git a/third_party/blasfeo/kernel/fma/Makefile b/third_party/blasfeo/kernel/fma/Makefile
deleted file mode 100644
index d7be280..0000000
--- a/third_party/blasfeo/kernel/fma/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_AMD_BULLDOZER)
-OBJS += kernel_dgemm_4x4_lib4.o
-OBJS +=
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
diff --git a/third_party/blasfeo/kernel/fma/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/fma/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index a02f37d..0000000
--- a/third_party/blasfeo/kernel/fma/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,3895 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp);
-#define EPILOGUE \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp);
-#define EPILOGUE \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, @function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	subl	$4, %r10d
-
-
-	// unroll 1
-	vmovapd 		32(%r11), %xmm8 // A[4]
-	vmovapd 		48(%r11), %xmm9 // A[6]
-
-	vmovddup		32(%r12), %xmm12 // B[4]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		40(%r12), %xmm12 // B[5]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		48(%r12), %xmm12 // B[6]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		56(%r12), %xmm12 // B[7]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	// unroll 2
-	vmovapd 		64(%r11), %xmm8 // A[8]
-	vmovapd 		80(%r11), %xmm9 // A[10]
-
-	vmovddup		64(%r12), %xmm12 // B[8]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		72(%r12), %xmm12 // B[9]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		80(%r12), %xmm12 // B[10]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		88(%r12), %xmm12 // B[11]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	// unroll 3
-	vmovapd 		96(%r11), %xmm8 // A[12]
-	vmovapd 		112(%r11), %xmm9 // A[14]
-
-	vmovddup		96(%r12), %xmm12 // B[12]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		104(%r12), %xmm12 // B[13]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		112(%r12), %xmm12 // B[14]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		120(%r12), %xmm12 // B[15]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	addq	$128, %r11
-	addq	$128, %r12
-
-
-	cmpl	$4, %r10d
-
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	// unroll 1
-	vmovapd 		32(%r11), %xmm8 // A[4]
-	vmovapd 		48(%r11), %xmm9 // A[6]
-
-	vmovddup		32(%r12), %xmm12 // B[4]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		40(%r12), %xmm12 // B[5]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		48(%r12), %xmm12 // B[6]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		56(%r12), %xmm12 // B[7]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	// unroll 2
-	vmovapd 		64(%r11), %xmm8 // A[8]
-	vmovapd 		80(%r11), %xmm9 // A[10]
-
-	vmovddup		64(%r12), %xmm12 // B[8]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		72(%r12), %xmm12 // B[9]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		80(%r12), %xmm12 // B[10]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		88(%r12), %xmm12 // B[11]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	// unroll 3
-	vmovapd 		96(%r11), %xmm8 // A[12]
-	vmovapd 		112(%r11), %xmm9 // A[14]
-
-	vmovddup		96(%r12), %xmm12 // B[12]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovddup		104(%r12), %xmm12 // B[13]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		112(%r12), %xmm12 // B[14]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		120(%r12), %xmm12 // B[15]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-
-	addq	$128, %r12
-	addq	$128, %r11
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	subl	$1, %r10d
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd	%xmm9, %xmm12, %xmm7
-
-	addq	$32, %r11
-	addq	$32, %r12
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_4x4_lib4, @function
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	subl	$4, %r10d
-
-
-	// unroll 1
-	vmovapd 		32(%r11), %xmm8 // A[4]
-	vmovapd 		48(%r11), %xmm9 // A[6]
-
-	vmovddup		32(%r12), %xmm12 // B[4]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		40(%r12), %xmm12 // B[5]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		48(%r12), %xmm12 // B[6]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		56(%r12), %xmm12 // B[7]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	// unroll 2
-	vmovapd 		64(%r11), %xmm8 // A[8]
-	vmovapd 		80(%r11), %xmm9 // A[10]
-
-	vmovddup		64(%r12), %xmm12 // B[8]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		72(%r12), %xmm12 // B[9]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		80(%r12), %xmm12 // B[10]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		88(%r12), %xmm12 // B[11]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	// unroll 3
-	vmovapd 		96(%r11), %xmm8 // A[12]
-	vmovapd 		112(%r11), %xmm9 // A[14]
-
-	vmovddup		96(%r12), %xmm12 // B[12]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		104(%r12), %xmm12 // B[13]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		112(%r12), %xmm12 // B[14]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		120(%r12), %xmm12 // B[15]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	addq	$128, %r12
-	addq	$128, %r11
-
-
-	cmpl	$4, %r10d
-
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	// unroll 1
-	vmovapd 		32(%r11), %xmm8 // A[4]
-	vmovapd 		48(%r11), %xmm9 // A[6]
-
-	vmovddup		32(%r12), %xmm12 // B[4]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		40(%r12), %xmm12 // B[5]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		48(%r12), %xmm12 // B[6]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		56(%r12), %xmm12 // B[7]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	// unroll 2
-	vmovapd 		64(%r11), %xmm8 // A[8]
-	vmovapd 		80(%r11), %xmm9 // A[10]
-
-	vmovddup		64(%r12), %xmm12 // B[8]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		72(%r12), %xmm12 // B[9]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		80(%r12), %xmm12 // B[10]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		88(%r12), %xmm12 // B[11]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	// unroll 3
-	vmovapd 		96(%r11), %xmm8 // A[12]
-	vmovapd 		112(%r11), %xmm9 // A[14]
-
-	vmovddup		96(%r12), %xmm12 // B[12]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	vmovddup		104(%r12), %xmm12 // B[13]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		112(%r12), %xmm12 // B[14]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		120(%r12), %xmm12 // B[15]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-
-	addq	$128, %r12
-	addq	$128, %r11
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	// unroll 0
-	vmovapd 		0(%r11), %xmm8 // A[0]
-	vmovapd 		16(%r11), %xmm9 // A[2]
-
-	vmovddup		0(%r12), %xmm12 // B[0]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm0
-	vfnmadd231pd	%xmm9, %xmm12, %xmm1
-
-	subl	$1, %r10d
-
-	vmovddup		8(%r12), %xmm12 // B[1]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm2
-	vfnmadd231pd	%xmm9, %xmm12, %xmm3
-
-	vmovddup		16(%r12), %xmm12 // B[2]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm4
-	vfnmadd231pd	%xmm9, %xmm12, %xmm5
-
-	vmovddup		24(%r12), %xmm12 // B[3]
-	vfnmadd231pd	%xmm8, %xmm12, %xmm6
-	vfnmadd231pd	%xmm9, %xmm12, %xmm7
-
-	addq	$32, %r12
-	addq	$32, %r11
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_4x4_lib4, .-inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r10), %xmm8
-	vmovapd			16(%r10), %xmm9
-	vmovddup		0(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-
-	vmovapd			32(%r10), %xmm8
-	vmovapd			48(%r10), %xmm9
-	vmovddup		32(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	vmovddup		40(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-
-	vmovapd			64(%r10), %xmm8
-	vmovapd			80(%r10), %xmm9
-	vmovddup		64(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	vmovddup		72(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-	vmovddup		80(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-
-	vmovapd			96(%r10), %xmm8
-	vmovapd			112(%r10), %xmm9
-	vmovddup		96(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	vmovddup		104(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-	vmovddup		112(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-	vmovddup		120(%r11), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_lib4, .-inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-#endif
-	
-	vmovapd			0(%r11), %xmm8
-	vmovapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	vmovddup		0(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %xmm8
-	vmovapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	vmovddup		0(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	addq			$32, %r11
-	vmovddup		8(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %xmm8
-	vmovapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	vmovddup		0(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	vmovddup		8(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-	addq			$32, %r11
-	vmovddup		16(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	vmovapd			0(%r11), %xmm8
-	vmovapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	vmovddup		0(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm0
-	vfmadd231pd		%xmm9, %xmm12, %xmm1
-	vmovddup		8(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm2
-	vfmadd231pd		%xmm9, %xmm12, %xmm3
-	vmovddup		16(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm4
-	vfmadd231pd		%xmm9, %xmm12, %xmm5
-	addq			$32, %r11
-	vmovddup		24(%r12), %xmm12
-	vfmadd231pd		%xmm8, %xmm12, %xmm6
-	vfmadd231pd		%xmm9, %xmm12, %xmm7
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_4x4_lib4, @function
-inner_blend_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_4x4_lib4:
-#endif
-#endif
-	
-	// XXX nothing to blend
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_4x4_lib4, .-inner_blend_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, @function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	movddup	0(%r10), %xmm15
-
-	mulpd	%xmm15, %xmm0
-	mulpd	%xmm15, %xmm1
-	mulpd	%xmm15, %xmm2
-	mulpd	%xmm15, %xmm3
-
-
-	// beta
-	movddup	0(%r11), %xmm14
-
-
-	vmovapd		0(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm0
-	vmovapd		16(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm1
-	vmovapd		32(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm2
-	vmovapd		48(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm3
-	vmovapd		64(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm4
-	vmovapd		80(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm5
-	vmovapd		96(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm6
-	vmovapd		112(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
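
For reference, a minimal C model of what this scaling step appears to compute, assuming the layout implied by the code: xmm0..xmm7 hold the 4x4 accumulator two rows at a time, column by column (written here as d[i][j]), alpha and beta are scalars, and C is a 4x4 block stored as four contiguous columns of 4 doubles. The name ref_scale_ab_4x4 is made up for illustration.

// d[i][j] corresponds to the [dij] entries listed in the register comments.
static void ref_scale_ab_4x4(double alpha, double beta, const double *C,
                             double d[4][4]) {
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 4; i++)
            d[i][j] = alpha * d[i][j] + beta * C[i + 4 * j];
}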
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_lib4, @function
-inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	movddup	0(%r10), %xmm15
-
-	mulpd	%xmm15, %xmm0
-	mulpd	%xmm15, %xmm1
-	mulpd	%xmm15, %xmm2
-	mulpd	%xmm15, %xmm3
-
-
-	// beta
-	movddup	0(%r11), %xmm14
-
-
-	vmovapd		0(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm0
-	vmovapd		16(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm1
-	vmovapd		32(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm2
-	vmovapd		48(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm3
-	vmovapd		64(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm4
-	vmovapd		80(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm5
-	vmovapd		96(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm6
-	vmovapd		112(%r12), %xmm15
-	vfmadd231pd	%xmm14, %xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_lib4, .-inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_4x4_lib4, @function
-inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_4x4_lib4:
-#endif
-#endif	
-	
-	vmovapd		0(%r10), %xmm15
-	vaddpd		%xmm0, %xmm15, %xmm0
-	vmovapd		16(%r10), %xmm15
-	vaddpd		%xmm1, %xmm15, %xmm1
-	vmovapd		32(%r10), %xmm15
-	vaddpd		%xmm2, %xmm15, %xmm2
-	vmovapd		48(%r10), %xmm15
-	vaddpd		%xmm3, %xmm15, %xmm3
-	vmovapd		64(%r10), %xmm15
-	vaddpd		%xmm4, %xmm15, %xmm4
-	vmovapd		80(%r10), %xmm15
-	vaddpd		%xmm5, %xmm15, %xmm5
-	vmovapd		96(%r10), %xmm15
-	vaddpd		%xmm6, %xmm15, %xmm6
-	vmovapd		112(%r10), %xmm15
-	vaddpd		%xmm7, %xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_4x4_lib4, .-inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_vs_lib4, @function
-inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_4x4_vs_lib4:
-#endif
-#endif
-	
-	vxorpd			%xmm15, %xmm15, %xmm15 // 0.0
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	vmovsd			.LC04(%rip), %xmm14 // 1.0
-#elif defined(OS_MAC)
-	vmovsd			LC04(%rip), %xmm14 // 1.0
-#endif
-
-	vmovsd			%xmm0, %xmm0, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-2:
-	cmpl			$2, %r11d
-	vmovsd			%xmm13, 0(%r10)
-	vmovddup		%xmm13, %xmm13
-	vmulpd			%xmm0, %xmm13, %xmm0
-	vmulpd			%xmm1, %xmm13, %xmm1
-
-	jl				0f // ret
-
-	vpermilpd		$0x3, %xmm0, %xmm13
-	vfnmadd231pd	%xmm0, %xmm13, %xmm2
-	vfnmadd231pd	%xmm1, %xmm13, %xmm3
-	vpermilpd		$0x3, %xmm2, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-4:
-	cmpl			$3, %r11d
-	vmovsd			%xmm13, 8(%r10)
-	vmovddup		%xmm13, %xmm13
-	vmulpd			%xmm2, %xmm13, %xmm2
-	vmulpd			%xmm3, %xmm13, %xmm3
-
-	jl				0f // ret
-
-	vpermilpd		$0x0, %xmm1, %xmm13
-//	vfnmadd231pd	%xmm0, %xmm13, %xmm4
-	vfnmadd231pd	%xmm1, %xmm13, %xmm5
-	vpermilpd		$0x0, %xmm3, %xmm13
-//	vfnmadd231pd	%xmm2, %xmm13, %xmm4
-	vfnmadd231pd	%xmm3, %xmm13, %xmm5
-	vmovaps			%xmm5, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-6:
-	cmpl			$4, %r11d
-	vmovsd			%xmm13, 16(%r10)
-	vmovddup		%xmm13, %xmm13
-//	vmulpd			%xmm4, %xmm13, %xmm4
-	vmulpd			%xmm5, %xmm13, %xmm5
-
-	jl				0f // ret
-
-	vpermilpd		$0x3, %xmm1, %xmm13
-//	vfnmadd231pd	%xmm0, %xmm13, %xmm6
-	vfnmadd231pd	%xmm1, %xmm13, %xmm7
-	vpermilpd		$0x3, %xmm3, %xmm13
-//	vfnmadd231pd	%xmm2, %xmm13, %xmm6
-	vfnmadd231pd	%xmm3, %xmm13, %xmm7
-	vpermilpd		$0x3, %xmm5, %xmm13
-//	vfnmadd231pd	%xmm4, %xmm13, %xmm6
-	vfnmadd231pd	%xmm5, %xmm13, %xmm7
-	vpermilpd		$0x3, %xmm7, %xmm13
-	vucomisd		%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	vsqrtsd			%xmm13, %xmm13, %xmm13
-	vdivsd			%xmm13, %xmm14, %xmm13
-8:
-	vmovsd			%xmm13, 24(%r10)
-	vmovddup		%xmm13, %xmm13
-//	vmulpd			%xmm6, %xmm13, %xmm6
-	vmulpd			%xmm7, %xmm13, %xmm7
-
-	jmp		0f
-	
-1:
-	vxorpd	%xmm13, %xmm13, %xmm13
-	jmp		2b
-
-3:
-	vxorpd	%xmm13, %xmm13, %xmm13
-	jmp		4b
-
-5:
-	vxorpd	%xmm13, %xmm13, %xmm13
-	jmp		6b
-
-7:
-	vxorpd	%xmm13, %xmm13, %xmm13
-	jmp		8b
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_vs_lib4, .-inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
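
A scalar sketch of the factorization performed here, under the same register-to-block mapping as above (d[i][j] for the accumulator): kn clips how many columns are factorized, and each reciprocal pivot written to inv_diag is set to 0.0 when the pivot is not strictly positive, matching the branches to the zeroing labels. The function name and the left-looking formulation are illustrative, not the library's reference code.

#include <math.h>

static void ref_potrf_4x4_vs(double d[4][4], double *inv_diag, int kn) {
    for (int k = 0; k < kn && k < 4; k++) {
        // subtract the contributions of the already factorized columns
        for (int l = 0; l < k; l++)
            for (int i = k; i < 4; i++)
                d[i][k] -= d[i][l] * d[k][l];
        double pivot = d[k][k];
        double s = pivot > 0.0 ? 1.0 / sqrt(pivot) : 0.0;
        inv_diag[k] = s;
        for (int i = k; i < 4; i++)   // scale column k of the factor
            d[i][k] *= s;
    }
}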
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#endif
-#endif
-	
-	vmovddup		0(%r11), %xmm13
-	vmulpd			%xmm0, %xmm13, %xmm0
-	vmulpd			%xmm1, %xmm13, %xmm1
-
-	vmovddup		8(%r10), %xmm13
-	vfnmadd231pd	%xmm0, %xmm13, %xmm2
-	vfnmadd231pd	%xmm1, %xmm13, %xmm3
-	vmovddup		8(%r11), %xmm13
-	vmulpd			%xmm2, %xmm13, %xmm2
-	vmulpd			%xmm3, %xmm13, %xmm3
-
-	vmovddup		16(%r10), %xmm13
-	vfnmadd231pd	%xmm0, %xmm13, %xmm4
-	vfnmadd231pd	%xmm1, %xmm13, %xmm5
-	vmovddup		48(%r10), %xmm13
-	vfnmadd231pd	%xmm2, %xmm13, %xmm4
-	vfnmadd231pd	%xmm3, %xmm13, %xmm5
-	vmovddup		16(%r11), %xmm13
-	vmulpd			%xmm4, %xmm13, %xmm4
-	vmulpd			%xmm5, %xmm13, %xmm5
-
-	vmovddup		24(%r10), %xmm13
-	vfnmadd231pd	%xmm0, %xmm13, %xmm6
-	vfnmadd231pd	%xmm1, %xmm13, %xmm7
-	vmovddup		56(%r10), %xmm13
-	vfnmadd231pd	%xmm2, %xmm13, %xmm6
-	vfnmadd231pd	%xmm3, %xmm13, %xmm7
-	vmovddup		88(%r10), %xmm13
-	vfnmadd231pd	%xmm4, %xmm13, %xmm6
-	vfnmadd231pd	%xmm5, %xmm13, %xmm7
-	vmovddup		24(%r11), %xmm13
-	vmulpd			%xmm6, %xmm13, %xmm6
-	vmulpd			%xmm7, %xmm13, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
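
A scalar sketch of this solve, assuming E is the 4x4 lower-triangular block stored as four contiguous columns of 4 doubles (so E(j,l) sits at byte offset 8*(j + 4*l), which matches the 8/16/48/24/56/88 displacements above) and inv_diag_E[j] holds the precomputed 1/E(j,j). Names are illustrative.

// d (the 4x4 accumulator) is overwritten with the solution X of X * E^T = d.
static void ref_trsm_rlt_inv_4x4(double d[4][4], const double *E,
                                 const double *inv_diag_E) {
    for (int j = 0; j < 4; j++) {
        for (int l = 0; l < j; l++)
            for (int i = 0; i < 4; i++)
                d[i][j] -= d[i][l] * E[j + 4 * l];  // E(j,l), column-major
        for (int i = 0; i < 4; i++)
            d[i][j] *= inv_diag_E[j];               // divide by E(j,j)
    }
}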
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	vmovddup		0(%r11), %xmm13
-	cmpl			$2, %r12d
-	vmulpd			%xmm0, %xmm13, %xmm0
-	vmulpd			%xmm1, %xmm13, %xmm1
-
-	jl				0f // ret
-
-	vmovddup		8(%r10), %xmm13
-	cmpl			$3, %r12d
-	vfnmadd231pd	%xmm0, %xmm13, %xmm2
-	vfnmadd231pd	%xmm1, %xmm13, %xmm3
-	vmovddup		8(%r11), %xmm13
-	vmulpd			%xmm2, %xmm13, %xmm2
-	vmulpd			%xmm3, %xmm13, %xmm3
-
-	jl				0f // ret
-
-	vmovddup		16(%r10), %xmm13
-	cmpl			$4, %r12d
-	vfnmadd231pd	%xmm0, %xmm13, %xmm4
-	vfnmadd231pd	%xmm1, %xmm13, %xmm5
-	vmovddup		48(%r10), %xmm13
-	vfnmadd231pd	%xmm2, %xmm13, %xmm4
-	vfnmadd231pd	%xmm3, %xmm13, %xmm5
-	vmovddup		16(%r11), %xmm13
-	vmulpd			%xmm4, %xmm13, %xmm4
-	vmulpd			%xmm5, %xmm13, %xmm5
-
-	jl				0f // ret
-
-	vmovddup		24(%r10), %xmm13
-	vfnmadd231pd	%xmm0, %xmm13, %xmm6
-	vfnmadd231pd	%xmm1, %xmm13, %xmm7
-	vmovddup		56(%r10), %xmm13
-	vfnmadd231pd	%xmm2, %xmm13, %xmm6
-	vfnmadd231pd	%xmm3, %xmm13, %xmm7
-	vmovddup		88(%r10), %xmm13
-	vfnmadd231pd	%xmm4, %xmm13, %xmm6
-	vfnmadd231pd	%xmm5, %xmm13, %xmm7
-	vmovddup		24(%r11), %xmm13
-	vmulpd			%xmm6, %xmm13, %xmm6
-	vmulpd			%xmm7, %xmm13, %xmm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, @function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd %xmm0,   0(%r10)
-	vmovapd %xmm1,  16(%r10)
-	vmovapd %xmm2,  32(%r10)
-	vmovapd %xmm3,  48(%r10)
-	vmovapd %xmm4,  64(%r10)
-	vmovapd %xmm5,  80(%r10)
-	vmovapd %xmm6,  96(%r10)
-	vmovapd %xmm7, 112(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// TODO use blendv instead
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_vs_lib4, @function
-inner_store_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl		$2, %r11d
-	jg			1f
-	je			0f
-
-	// km==1
-	cmpl		$2, %r12d
-	vmovsd		%xmm0,  0(%r10)
-	jl			4f // end
-	cmpl		$3, %r12d
-	vmovsd		%xmm2, 32(%r10)
-	jl			4f // end
-	vmovsd		%xmm4, 64(%r10)
-	je			4f // end
-	vmovsd		%xmm6, 96(%r10)
-
-	jmp		4f
-
-0:
-	// km==2
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,  0(%r10)
-	jl			4f // end
-	cmpl		$3, %r12d
-	vmovapd		%xmm2, 32(%r10)
-	jl			4f // end
-	vmovapd		%xmm4, 64(%r10)
-	je			4f // end
-	vmovapd		%xmm6, 96(%r10)
-
-	jmp		4f
-
-1:
-	cmpl		$3, %r11d
-	jg			2f
-
-	// km==3
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,   0(%r10)
-	vmovsd		%xmm1,  16(%r10)
-	jl			4f // end
-	cmpl		$3, %r12d
-	vmovapd		%xmm2,  32(%r10)
-	vmovsd		%xmm3,  48(%r10)
-	jl			4f // end
-	vmovapd		%xmm4,  64(%r10)
-	vmovsd		%xmm5,  80(%r10)
-	je			4f // end
-	vmovapd		%xmm6,  96(%r10)
-	vmovsd		%xmm7, 112(%r10)
-
-	jmp		4f
-
-2:
-	// km==4
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,   0(%r10)
-	vmovapd		%xmm1,  16(%r10)
-	jl			4f // end
-	cmpl		$3, %r12d
-	vmovapd		%xmm2,  32(%r10)
-	vmovapd		%xmm3,  48(%r10)
-	jl			4f // end
-	vmovapd		%xmm4,  64(%r10)
-	vmovapd		%xmm5,  80(%r10)
-	je			4f // end
-	vmovapd		%xmm6,  96(%r10)
-	vmovapd		%xmm7, 112(%r10)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_vs_lib4, .-inner_store_4x4_vs_lib4
-#endif
-#endif
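
The branching above implements a clipped store; a compact way to state its effect (and what a blendv-based version per the TODO above would have to reproduce) is the following sketch, with D a 4x4 block stored as four contiguous columns of 4 doubles. The name is illustrative.

// Only the top-left km x kn corner of the accumulator is written to D;
// the remaining entries of the block in memory are left untouched.
static void ref_store_4x4_vs(const double d[4][4], double *D, int km, int kn) {
    for (int j = 0; j < kn && j < 4; j++)
        for (int i = 0; i < km && i < 4; i++)
            D[i + 4 * j] = d[i][j];
}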
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n lower triangular
-//
-// input arguments:
-// r10   <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_lib4, @function
-inner_store_l_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_lib4:
-#endif
-#endif
-	
-	vmovapd		%xmm0,   0(%r10)
-	vmovapd		%xmm1,  16(%r10)
-	vmovsd		32(%r10), %xmm15
-	vmovsd		%xmm15, %xmm2, %xmm2
-	vmovapd		%xmm2,  32(%r10)
-	vmovapd		%xmm3,  48(%r10)
-//	vmovapd		%xmm4,  64(%r10)
-	vmovapd		%xmm5,  80(%r10)
-//	vmovapd		%xmm6,  96(%r10)
-	vmovsd		112(%r10), %xmm15
-	vmovsd		%xmm15, %xmm7, %xmm7
-	vmovapd		%xmm7, 112(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_lib4, .-inner_store_l_4x4_lib4
-#endif
-#endif
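
The effect of this store, in a short C sketch under the same block layout as above (illustrative name): only entries on or below the diagonal are written, so the strictly upper part of D in memory is preserved, which is what the reload-and-insert of elements (0,1) and (2,3) above achieves for the mixed registers.

static void ref_store_l_4x4(const double d[4][4], double *D) {
    for (int j = 0; j < 4; j++)
        for (int i = j; i < 4; i++)   // lower triangle only
            D[i + 4 * j] = d[i][j];
}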
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs lower triangular
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm4  <- [d02 d12]
-// xmm5  <- [d22 d32]
-// xmm6  <- [d03 d13]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_vs_lib4, @function
-inner_store_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl		$2, %r11d
-	jg			1f
-	je			0f
-
-	// km==1
-	vmovsd		%xmm0,  0(%r10)
-
-	jmp		3f
-
-0:
-	// km==2
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,  0(%r10)
-	jl			3f // end
-	vmovsd		32(%r10), %xmm15
-	vmovsd		%xmm15, %xmm2, %xmm2
-	vmovapd		%xmm2, 32(%r10)
-
-	jmp		3f
-
-1:
-	cmpl		$3, %r11d
-	jg			2f
-
-	// km==3
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,   0(%r10)
-	vmovsd		%xmm1,  16(%r10)
-	jl			3f // end
-	cmpl		$3, %r12d
-	vmovsd		32(%r10), %xmm15
-	vmovsd		%xmm15, %xmm2, %xmm2
-	vmovapd		%xmm2,  32(%r10)
-	vmovsd		%xmm3,  48(%r10)
-	jl			3f // end
-//	vmovapd		%xmm4,  64(%r10)
-	vmovsd		%xmm5,  80(%r10)
-
-	jmp		3f
-
-2:
-	// km==4
-	cmpl		$2, %r12d
-	vmovapd		%xmm0,   0(%r10)
-	vmovapd		%xmm1,  16(%r10)
-	jl			3f // end
-	cmpl		$3, %r12d
-	vmovsd		32(%r10), %xmm15
-	vmovsd		%xmm15, %xmm2, %xmm2
-	vmovapd		%xmm2,  32(%r10)
-	vmovapd		%xmm3,  48(%r10)
-	jl			3f // end
-//	vmovapd		%xmm4,  64(%r10)
-	vmovapd		%xmm5,  80(%r10)
-	je			3f // end
-//	vmovapd		%xmm6,  96(%r10)
-	vmovsd		112(%r10), %xmm15
-	vmovsd		%xmm15, %xmm7, %xmm7
-	vmovapd		%xmm7, 112(%r10)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_vs_lib4, .-inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.type kernel_dgemm_nt_4x4_lib4, @function
-kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_lib4
-_kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.def kernel_dgemm_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4, .-kernel_dgemm_nt_4x4_lib4
-#endif
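
For review purposes, a plain-C model of what this kernel appears to compute, assuming the lib4 panel layout implied by the 32-byte (4-double) column stride used throughout: A and B are 4 x k panels with element (i,l) at index i + 4*l, and C and D are 4x4 blocks stored as four contiguous columns of 4 doubles. The reference function name is made up; this is a sketch, not the library's definition.

// D = alpha * A * B^T + beta * C, on a 4x4 block.
static void ref_dgemm_nt_4x4_lib4(int k, double alpha, const double *A,
                                  const double *B, double beta,
                                  const double *C, double *D) {
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 4; i++) {
            double acc = 0.0;
            for (int l = 0; l < k; l++)
                acc += A[i + 4 * l] * B[j + 4 * l];
            D[i + 4 * j] = alpha * acc + beta * C[i + 4 * j];
        }
    }
}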
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dgemm_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.type kernel_dgemm_nt_4x4_vs_lib4, @function
-kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_vs_lib4
-_kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.def kernel_dgemm_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_vs_lib4, .-kernel_dgemm_nt_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                 rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dsyrk_nt_l_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.type kernel_dsyrk_nt_l_4x4_lib4, @function
-kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_lib4
-_kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.def kernel_dsyrk_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_lib4, .-kernel_dsyrk_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                    rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dsyrk_nt_l_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_vs_lib4
-_kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_vs_lib4, .-kernel_dsyrk_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.type kernel_dtrmm_nt_ru_4x4_lib4, @function
-kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_lib4
-_kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.def kernel_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10
-	movq	ARG4, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_lib4, .-kernel_dtrmm_nt_ru_4x4_lib4
-#endif
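
Under the same layout assumptions as the dgemm sketch above, this kernel appears to correspond to the following reference, where the 4 x k operand B is taken as upper trapezoidal (entries B(j,l) with l < j are ignored), which is why only its leading four columns need the dedicated edge routine. Sketch only; the name is illustrative.

// D = alpha * A * B^T + beta * C, with B upper trapezoidal.
static void ref_dtrmm_nt_ru_4x4_lib4(int k, double alpha, const double *A,
                                     const double *B, double beta,
                                     const double *C, double *D) {
    for (int j = 0; j < 4; j++) {
        for (int i = 0; i < 4; i++) {
            double acc = 0.0;
            for (int l = j; l < k; l++)   // skip l < j: below B's diagonal
                acc += A[i + 4 * l] * B[j + 4 * l];
            D[i + 4 * j] = alpha * acc + beta * C[i + 4 * j];
        }
    }
}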
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dtrmm_nt_ru_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_4x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_vs_lib4
-_kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_vs_lib4, .-kernel_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  edi    rsi        rdx        ecx        r8         r9
-// void kernel_dpotrf_nt_l_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.type kernel_dpotrf_nt_l_4x4_lib4, @function
-kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.def kernel_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movl	$4, %r11d // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_lib4, .-kernel_dpotrf_nt_l_4x4_lib4
-#endif
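
A scalar reference for the fused operation, under the same layout assumptions as above: the kernel forms C - A*B^T on the 4x4 block, Cholesky-factorizes it, stores the lower factor into D and the reciprocal pivots into inv_diag_D (0.0 for a non-positive pivot). This is a sketch with an illustrative name, not the library's reference implementation.

#include <math.h>

static void ref_dpotrf_nt_l_4x4_lib4(int k, const double *A, const double *B,
                                     const double *C, double *D,
                                     double *inv_diag_D) {
    double d[4][4];
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 4; i++) {
            d[i][j] = C[i + 4 * j];
            for (int l = 0; l < k; l++)
                d[i][j] -= A[i + 4 * l] * B[j + 4 * l];
        }
    for (int p = 0; p < 4; p++) {                 // Cholesky on the 4x4 block
        double s = d[p][p] > 0.0 ? 1.0 / sqrt(d[p][p]) : 0.0;
        inv_diag_D[p] = s;
        for (int i = p; i < 4; i++)
            d[i][p] *= s;
        for (int j = p + 1; j < 4; j++)           // update trailing columns
            for (int i = j; i < 4; i++)
                d[i][j] -= d[i][p] * d[j][p];
    }
    for (int j = 0; j < 4; j++)                   // lower-triangular store
        for (int i = j; i < 4; i++)
            D[i + 4 * j] = d[i][j];
}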
-
-
-
-
-
-//                                     edi    rsi        rdx        ecx        r8         r9                  rsp+8   rsp+16
-// void kernel_dpotrf_nt_l_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24
-// void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-#endif
-
-
-
-
-
-//                                           edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24              rsp+32  rsp+40
-// void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8  
-// void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_lib4
-#endif
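
Likewise, a scalar reference sketch for this kernel under the same assumptions: it forms C - A*B^T and solves X * E^T = C - A*B^T for X, with E a 4x4 lower-triangular block (four contiguous columns of 4 doubles) and inv_diag_E the precomputed reciprocals of its diagonal. Names are illustrative.

static void ref_dtrsm_nt_rl_inv_4x4_lib4(int k, const double *A,
                                         const double *B, const double *C,
                                         double *D, const double *E,
                                         const double *inv_diag_E) {
    double d[4][4];
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 4; i++) {
            d[i][j] = C[i + 4 * j];
            for (int l = 0; l < k; l++)
                d[i][j] -= A[i + 4 * l] * B[j + 4 * l];
        }
    for (int j = 0; j < 4; j++) {                 // forward substitution
        for (int l = 0; l < j; l++)
            for (int i = 0; i < 4; i++)
                d[i][j] -= d[i][l] * E[j + 4 * l];
        for (int i = 0; i < 4; i++)
            d[i][j] *= inv_diag_E[j];
    }
    for (int j = 0; j < 4; j++)                   // full 4x4 store
        for (int i = 0; i < 4; i++)
            D[i + 4 * j] = d[i][j];
}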
-
-
-
-
-
-//                                            edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8       r9           rsp+8               rsp+16  rsp+24
-// void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                               edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/kernel/sse3/Makefile b/third_party/blasfeo/kernel/sse3/Makefile
deleted file mode 100644
index dbc07d1..0000000
--- a/third_party/blasfeo/kernel/sse3/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../../Makefile.rule
-
-OBJS = 
-
-ifeq ($(LA), HIGH_PERFORMANCE)
-
-ifeq ($(TARGET), X64_INTEL_CORE)
-OBJS += kernel_dgemm_4x4_lib4.o
-OBJS +=
-endif
-
-else # LA_REFERENCE | LA_BLAS
-
-endif # LA choice
-
-obj: $(OBJS)
-
-clean:
-	rm -f *.o
-	rm -f *.s
-
diff --git a/third_party/blasfeo/kernel/sse3/kernel_dgemm_4x4_lib4.S b/third_party/blasfeo/kernel/sse3/kernel_dgemm_4x4_lib4.S
deleted file mode 100644
index 26f35b6..0000000
--- a/third_party/blasfeo/kernel/sse3/kernel_dgemm_4x4_lib4.S
+++ /dev/null
@@ -1,6235 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp);
-#define EPILOGUE \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	addq	$STACKSIZE, %rsp;
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-#define PROLOGUE \
-	subq	$STACKSIZE, %rsp; \
-	movq	%rbx,   (%rsp); \
-	movq	%rbp,  8(%rsp); \
-	movq	%r12, 16(%rsp); \
-	movq	%r13, 24(%rsp); \
-	movq	%r14, 32(%rsp); \
-	movq	%r15, 40(%rsp); \
-	movq	%rdi, 48(%rsp); \
-	movq	%rsi, 56(%rsp); \
-	vmovups	%xmm6, 64(%rsp); \
-	vmovups	%xmm7, 80(%rsp); \
-	vmovups	%xmm8, 96(%rsp); \
-	vmovups	%xmm9, 112(%rsp); \
-	vmovups	%xmm10, 128(%rsp); \
-	vmovups	%xmm11, 144(%rsp); \
-	vmovups	%xmm12, 160(%rsp); \
-	vmovups	%xmm13, 176(%rsp); \
-	vmovups	%xmm14, 192(%rsp); \
-	vmovups	%xmm15, 208(%rsp);
-#define EPILOGUE \
-	movq	  (%rsp), %rbx; \
-	movq	 8(%rsp), %rbp; \
-	movq	16(%rsp), %r12; \
-	movq	24(%rsp), %r13; \
-	movq	32(%rsp), %r14; \
-	movq	40(%rsp), %r15; \
-	movq	48(%rsp), %rdi; \
-	movq	56(%rsp), %rsi; \
-	vmovups	64(%rsp), %xmm6; \
-	vmovups	80(%rsp), %xmm7; \
-	vmovups	96(%rsp), %xmm8; \
-	vmovups	112(%rsp), %xmm9; \
-	vmovups	128(%rsp), %xmm10; \
-	vmovups	144(%rsp), %xmm11; \
-	vmovups	160(%rsp), %xmm12; \
-	vmovups	176(%rsp), %xmm13; \
-	vmovups	192(%rsp), %xmm14; \
-	vmovups	208(%rsp), %xmm15; \
-	addq	$STACKSIZE, %rsp;
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, @function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	movapd		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-	movapd		0(%r12), %xmm10 // B[0]
-
-	xorpd		%xmm11, %xmm11
-	movapd		%xmm11, %xmm12
-	movapd		%xmm11, %xmm13
-	movapd		%xmm11, %xmm14
-	movapd		%xmm11, %xmm15
-
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	addpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[4]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	addpd		%xmm14, %xmm3
-	movapd		48(%r12), %xmm14 // B[6]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		64(%r12), %xmm10 // B[8]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	addpd		%xmm14, %xmm3
-	movapd		80(%r12), %xmm14 // B[10]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		96(%r12), %xmm10 // B[12]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	addpd		%xmm14, %xmm3
-	movapd		112(%r12), %xmm14 // B[14]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addq		$128, %r12 // B += 16
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	addpd		%xmm10, %xmm1
-	movapd		0(%r12), %xmm10 // B[0]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	cmpl		$4, %r10d
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-	movapd 		16(%r11), %xmm9 // A[2]
-
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	addpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[4]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	addpd		%xmm14, %xmm3
-	movapd		48(%r12), %xmm14 // B[6]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		64(%r12), %xmm10 // B[8]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	addpd		%xmm14, %xmm3
-	movapd		80(%r12), %xmm14 // B[10]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	addpd		%xmm10, %xmm1
-	movapd		96(%r12), %xmm10 // B[12]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	addpd		%xmm14, %xmm3
-	movapd		112(%r12), %xmm14 // B[14]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addq		$128, %r12 // B += 16
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	addpd		%xmm10, %xmm1
-//	movapd		0(%r12), %xmm10 // B[0]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-//	cmpl		$4, %r10d
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-//	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-//	movapd 		16(%r11), %xmm9 // A[2]
-
-
-	// clean accumulators
-	addpd		%xmm14, %xmm3
-	addpd		%xmm11, %xmm7
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-
-	// unroll 0
-	addpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	addpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl	$1, %r10d
-
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq	$32, %r12
-
-	addpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[0]
-	addpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addq	$32, %r11
-
-	addpd		%xmm15, %xmm0
-	addpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-	// clean accumulators
-	addpd		%xmm14, %xmm3
-	addpd		%xmm11, %xmm7
-	addpd		%xmm12, %xmm2
-	addpd		%xmm13, %xmm6
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_sub_nt_4x4_lib4, @function
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_sub_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_sub_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_sub_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	movapd		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-	movapd		0(%r12), %xmm10 // B[0]
-
-	xorpd		%xmm11, %xmm11
-	movapd		%xmm11, %xmm12
-	movapd		%xmm11, %xmm13
-	movapd		%xmm11, %xmm14
-	movapd		%xmm11, %xmm15
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	// unroll 0
-	subpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[4]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	subpd		%xmm14, %xmm3
-	movapd		48(%r12), %xmm14 // B[6]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		64(%r12), %xmm10 // B[8]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	subpd		%xmm14, %xmm3
-	movapd		80(%r12), %xmm14 // B[10]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		96(%r12), %xmm10 // B[12]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	subpd		%xmm14, %xmm3
-	movapd		112(%r12), %xmm14 // B[14]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addq		$128, %r12 // B += 16
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	subpd		%xmm10, %xmm1
-	movapd		0(%r12), %xmm10 // B[0]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	cmpl		$4, %r10d
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-	movapd 		16(%r11), %xmm9 // A[2]
-
-
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	subpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[4]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	subpd		%xmm14, %xmm3
-	movapd		48(%r12), %xmm14 // B[6]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		64(%r12), %xmm10 // B[8]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	subpd		%xmm14, %xmm3
-	movapd		80(%r12), %xmm14 // B[10]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	subpd		%xmm10, %xmm1
-	movapd		96(%r12), %xmm10 // B[12]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	subpd		%xmm14, %xmm3
-	movapd		112(%r12), %xmm14 // B[14]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addq		$128, %r12 // B += 16
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	subpd		%xmm10, %xmm1
-//	movapd		0(%r12), %xmm10 // B[0]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-//	cmpl		$4, %r10d
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-//	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-//	movapd 		16(%r11), %xmm9 // A[2]
-
-
-	// update accumulators
-	subpd		%xmm14, %xmm3
-	subpd		%xmm11, %xmm7
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-
-	// unroll 0
-	subpd		%xmm14, %xmm3
-	movapd		16(%r12), %xmm14 // B[2]
-	subpd		%xmm11, %xmm7
-	movapd		%xmm10, %xmm11
-	pshufd		$0x4e, %xmm10, %xmm15
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl	$1, %r10d
-
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq	$32, %r12
-
-	subpd		%xmm10, %xmm1
-	movapd		32(%r12), %xmm10 // B[0]
-	subpd		%xmm11, %xmm5
-	movapd		%xmm14, %xmm11
-	pshufd		$0x4e, %xmm14, %xmm12
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addq	$32, %r11
-
-	subpd		%xmm15, %xmm0
-	subpd		%xmm13, %xmm4
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	cmpl	$0, %r10d
-
-	jg		3b // clean up loop 
-
-
-	// update accumulators
-	subpd		%xmm14, %xmm3
-	subpd		%xmm11, %xmm7
-	subpd		%xmm12, %xmm2
-	subpd		%xmm13, %xmm6
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_sub_nt_4x4_lib4, .-inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- 4*sdb*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+(k/4)*sdb*sizeof(double)+(k%4)
-// r13   <- 4*sdb*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nn_4x4_lib4, @function
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	movapd		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	xorpd		%xmm11, %xmm11
-	movapd		%xmm11, %xmm12
-	movapd		%xmm11, %xmm13
-	movapd		%xmm11, %xmm14
-	movapd		%xmm11, %xmm15
-
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-
-	prefetcht0	0(%r12, %r13, 2) // software prefetch
-	prefetcht0	64(%r12, %r13, 2) // software prefetch
-
-	// unroll 0
-	movddup		0(%r12), %xmm10 // B[0]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		32(%r12), %xmm15 // B[4]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		64(%r12), %xmm14 // B[8]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		96(%r12), %xmm12 // B[12]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	movddup		8(%r12), %xmm10 // B[1]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		40(%r12), %xmm15 // B[5]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		72(%r12), %xmm14 // B[9]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		104(%r12), %xmm12 // B[13]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	movddup		16(%r12), %xmm10 // B[2]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	movddup		48(%r12), %xmm15 // B[6]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		80(%r12), %xmm14 // B[10]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		112(%r12), %xmm12 // B[14]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	movddup		24(%r12), %xmm10 // B[3]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		56(%r12), %xmm15 // B[7]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	movddup		88(%r12), %xmm14 // B[11]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		120(%r12), %xmm12 // B[15]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-	movapd 		16(%r11), %xmm9 // A[2]
-	addq		%r13, %r12 // B += ...
-
-
-	cmpl		$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-
-	// unroll 0
-	movddup		0(%r12), %xmm10 // B[0]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		32(%r12), %xmm15 // B[4]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		64(%r12), %xmm14 // B[8]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		96(%r12), %xmm12 // B[12]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		32(%r11), %xmm8 // A[4]
-	mulpd		%xmm9, %xmm13
-	movapd 		48(%r11), %xmm9 // A[6]
-
-
-	// unroll 1
-	movddup		8(%r12), %xmm10 // B[1]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		40(%r12), %xmm15 // B[5]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		72(%r12), %xmm14 // B[9]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		104(%r12), %xmm12 // B[13]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		64(%r11), %xmm8 // A[8]
-	mulpd		%xmm9, %xmm13
-	movapd 		80(%r11), %xmm9 // A[10]
-
-
-	// unroll 2
-	movddup		16(%r12), %xmm10 // B[2]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl		$4, %r10d
-
-	movddup		48(%r12), %xmm15 // B[6]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		80(%r12), %xmm14 // B[10]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		112(%r12), %xmm12 // B[14]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	movapd 		96(%r11), %xmm8 // A[12]
-	mulpd		%xmm9, %xmm13
-	movapd 		112(%r11), %xmm9 // A[14]
-	
-
-	// unroll 3
-	movddup		24(%r12), %xmm10 // B[3]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-
-	movddup		56(%r12), %xmm15 // B[7]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addq		$128, %r11 // A += 16
-
-	movddup		88(%r12), %xmm14 // B[11]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-
-	movddup		120(%r12), %xmm12 // B[15]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-//	movapd 		0(%r11), %xmm8 // A[0]
-	mulpd		%xmm9, %xmm13
-//	movapd 		16(%r11), %xmm9 // A[2]
-	addq		%r13, %r12 // B += ...
-
-
-	// clean accumulators
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-
-	jmp		2f
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-
-	// unroll 0
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	subl	$1, %r10d
-
-	movddup		32(%r12), %xmm15 // B[4]
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-
-	movddup		64(%r12), %xmm14 // B[8]
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addq	$32, %r11
-
-	movddup		96(%r12), %xmm12 // B[12]
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addq	$8, %r12
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-	// clean accumulators
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nn_4x4_lib4, .-inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B unaligned
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dgemm_add_nn_4x4_lib4, @function
-inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dgemm_add_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dgemm_add_nn_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dgemm_add_nn_4x4_lib4:
-#endif
-#endif
-	
-	cmpl			$0, %r14d // offset==0
-	jle				2f // end
-
-	cmpl			$0, %r10d // k==0
-	jle				2f // end
-
-	movl			$4, %r15d
-	subl			%r14d, %r15d // 4-offsetB
-	cmpl			%r10d, %r15d
-//	jle				0f
-//	movl			%r10d, %r15d // kend=min(k,4-offsetB)
-//0:
-	cmovgl			%r10d, %r15d // kend=min(k,4-offsetB)
-
-	movl			%r14d, %eax
-	sall			$3, %eax // offsetB*sizeof(double)
-	addq			%rax, %r12 // B+offsetB*sizeof(double)
-
-1:
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		32(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		64(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		96(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	subl			$1, %r10d // k-1
-	subl			$1, %r15d // kend-1
-	addq			$32, %r11 // A+1*bs*sizeof(double)
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	cmpl			$0, %r15d
-	jg				1b
-
-	cmpl			$0, %r10d
-	jle				2f // end
-
-	addq			%r13, %r12
-	subq			$32, %r12 // B+bs*(sdb-1)*sizeof(double)
-
-2:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dgemm_add_nn_4x4_lib4, .-inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B lower triangular
-//
-// input arguments:
-// r10   <- k
-// r11   <- A
-// r12   <- B
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10   <- k-(4-offB)
-// r11   <- A+(4-offB)*bs*sizeof(double)
-// r12   <- B-offB+bs*sdb*sizeof(double)
-// r13   <- bs*sdb*sizeof(double)
-// r14   <- offB
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// ymm8  <- dirty
-// ymm12 <- dirty
-// ymm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nn_rl_4x4_lib4, @function
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nn_rl_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r14d
-	jg		0f
-
-	// offB==0
-
-	// unroll 0
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	// unroll 1
-	movapd 		32(%r11), %xmm8 // A[0]
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	movddup		8(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		40(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	// unroll 2
-	movapd 		64(%r11), %xmm8 // A[0]
-	movapd 		80(%r11), %xmm9 // A[2]
-
-	movddup		16(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		48(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		80(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	// unroll 3
-	movapd 		96(%r11), %xmm8 // A[0]
-	movapd 		112(%r11), %xmm9 // A[2]
-
-	movddup		24(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		56(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		88(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		120(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	subl	$4, %r10d // k-4
-	addq	$128, %r11 // A+4*bs*sizeof(double)
-	addq	%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-0:
-	cmpl	$1, %r14d
-	jg		1f
-
-	// offB==1
-
-	addq			$8, %r12 // B+1*sizeof(double)
-
-	// unroll 0
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	// unroll 1
-	movapd 		32(%r11), %xmm8 // A[0]
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	movddup		8(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		40(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	// unroll 2
-	movapd 		64(%r11), %xmm8 // A[0]
-	movapd 		80(%r11), %xmm9 // A[2]
-
-	movddup		16(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		48(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		80(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	subl	$3, %r10d // k-3
-	addq	$96, %r11 // A+3*bs*sizeof(double)
-	addq	%r13, %r12
-	subq			$8, %r12 // B+bs*sdb*sizeof(double)-1
-
-	jmp		3f
-
-1:
-	cmpl	$2, %r14d
-	jg		2f
-
-	// offB==2
-
-	addq	$16, %r12 // B+2*sizeof(double)
-
-	// unroll 0
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	// unroll 1
-	movapd 		32(%r11), %xmm8 // A[0]
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	movddup		8(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		40(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	subl	$2, %r10d // k-2
-	addq	$64, %r11 // A+2*bs*sizeof(double)
-	addq	%r13, %r12
-	subq	$16, %r12 // B+bs*sdb*sizeof(double)-2
-
-	// unroll 2
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		32(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		64(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	// unroll 3
-	movapd 		32(%r11), %xmm8 // A[0]
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	movddup		8(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		40(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		72(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		104(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	// unroll 4
-	movapd 		64(%r11), %xmm8 // A[0]
-	movapd 		80(%r11), %xmm9 // A[2]
-
-	movddup		16(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		48(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		80(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		112(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	// unroll 5
-	movapd 		96(%r11), %xmm8 // A[0]
-	movapd 		112(%r11), %xmm9 // A[2]
-
-	movddup		24(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		56(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		88(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		120(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	subl	$4, %r10d // k-4
-	addq	$128, %r11 // A+4*bs*sizeof(double)
-	addq	%r13, %r12 // B+bs*sdb*sizeof(double)
-
-	jmp		3f
-
-2:
-	// offB==3
-
-	addq	$24, %r12 // B+3*sizeof(double)
-
-	// unroll 0
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	subl	$1, %r10d // k-1
-	addq	$32, %r11 // A+1*bs*sizeof(double)
-	addq	%r13, %r12
-	subq	$24, %r12 // B+bs*sdb*sizeof(double)-3
-
-	// unroll 1
-	movapd 		0(%r11), %xmm8 // A[0]
-	movapd 		16(%r11), %xmm9 // A[2]
-
-	movddup		0(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		32(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	// unroll 2
-	movapd 		32(%r11), %xmm8 // A[0]
-	movapd 		48(%r11), %xmm9 // A[2]
-
-	movddup		8(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		40(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		72(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	// unroll 3
-	movapd 		64(%r11), %xmm8 // A[0]
-	movapd 		80(%r11), %xmm9 // A[2]
-
-	movddup		16(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		48(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		80(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		112(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	// unroll 4
-	movapd 		96(%r11), %xmm8 // A[0]
-	movapd 		112(%r11), %xmm9 // A[2]
-
-	movddup		24(%r12), %xmm10 // B[0]
-	movapd		%xmm10, %xmm11
-	mulpd		%xmm8, %xmm10
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm10, %xmm0
-	addpd		%xmm11, %xmm4
-
-	movddup		56(%r12), %xmm15 // B[4]
-	movapd		%xmm15, %xmm13
-	mulpd		%xmm8, %xmm15
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm15, %xmm1
-	addpd		%xmm13, %xmm5
-
-	movddup		88(%r12), %xmm14 // B[8]
-	movapd		%xmm14, %xmm11
-	mulpd		%xmm8, %xmm14
-	mulpd		%xmm9, %xmm11
-	addpd		%xmm14, %xmm2
-	addpd		%xmm11, %xmm6
-
-	movddup		120(%r12), %xmm12 // B[12]
-	movapd		%xmm12, %xmm13
-	mulpd		%xmm8, %xmm12
-	mulpd		%xmm9, %xmm13
-	addpd		%xmm12, %xmm3
-	addpd		%xmm13, %xmm7
-
-	subl	$4, %r10d // k-4
-	addq	$128, %r11 // A+4*bs*sizeof(double)
-	addq	%r13, %r12 // B+bs*sdb*sizeof(double)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nn_rl_4x4_lib4, .-inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10   <- A
-// r11   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10   <- A+4*4*sizeof(double)
-// r11   <- B+4*4*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_lib4:
-#endif
-#endif
-	
-	movapd			0(%r10), %xmm8
-	movapd			16(%r10), %xmm9
-	movddup			0(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-
-	movapd			32(%r10), %xmm8
-	movapd			48(%r10), %xmm9
-	movddup			32(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	movddup			40(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-
-	movapd			64(%r10), %xmm8
-	movapd			80(%r10), %xmm9
-	movddup			64(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	movddup			72(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-	movddup			80(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm2
-	addpd			%xmm13, %xmm6
-
-	movapd			96(%r10), %xmm8
-	movapd			112(%r10), %xmm9
-	movddup			96(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	movddup			104(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-	movddup			112(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm2
-	addpd			%xmm13, %xmm6
-	movddup			120(%r11), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm3
-	addpd			%xmm13, %xmm7
-
-	addq			$128, %r10
-	addq			$128, %r11
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_lib4, .-inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// edge for B upper triangular
-//
-// input arguments:
-// r10d  <- k
-// r11   <- A
-// r12   <- B
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- max(k-4,0)
-// r11   <- A+4*4*sizeof(double)
-// r12   <- B+4*4*sizeof(double)
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrmm_nt_ru_4x4_vs_lib4, @function
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-#endif
-	
-	movapd			0(%r11), %xmm8
-	movapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	movddup			0(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	addq			$32, %r11
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	movapd			0(%r11), %xmm8
-	movapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	movddup			0(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	addq			$32, %r11
-	movddup			8(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	movapd			0(%r11), %xmm8
-	movapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	movddup			0(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	movddup			8(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-	addq			$32, %r11
-	movddup			16(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm2
-	addpd			%xmm13, %xmm6
-	addq			$32, %r12
-
-	cmpl	$0, %r10d
-	jle		0f
-
-	movapd			0(%r11), %xmm8
-	movapd			16(%r11), %xmm9
-	subl			$1, %r10d
-	movddup			0(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm0
-	addpd			%xmm13, %xmm4
-	movddup			8(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm1
-	addpd			%xmm13, %xmm5
-	movddup			16(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm2
-	addpd			%xmm13, %xmm6
-	addq			$32, %r11
-	movddup			24(%r12), %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm8, %xmm12
-	mulpd			%xmm9, %xmm13
-	addpd			%xmm12, %xmm3
-	addpd			%xmm13, %xmm7
-	addq			$32, %r12
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrmm_nt_ru_4x4_vs_lib4, .-inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend
-//
-// input arguments:
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_4x4_lib4, @function
-inner_blend_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_4x4_lib4:
-#endif
-#endif
-	
-	movapd	%xmm0, %xmm8
-	movsd	%xmm1, %xmm0
-	movsd	%xmm8, %xmm1
-
-	movapd	%xmm2, %xmm8
-	movsd	%xmm3, %xmm2
-	movsd	%xmm8, %xmm3
-
-	movapd	%xmm4, %xmm8
-	movsd	%xmm5, %xmm4
-	movsd	%xmm8, %xmm5
-
-	movapd	%xmm6, %xmm8
-	movsd	%xmm7, %xmm6
-	movsd	%xmm8, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_4x4_lib4, .-inner_blend_4x4_lib4
-#endif
-#endif
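
// A minimal C sketch of the blend above (an illustration, not taken from the removed
// file): the step swaps the low double of each accumulator pair (xmm0,xmm1),
// (xmm2,xmm3), (xmm4,xmm5), (xmm6,xmm7), undoing the interleaved order in which the
// NT inner-product loop accumulates the 4x4 block. Each c0/c1 stands for one xmm
// register holding two doubles.
static void ref_blend_pair(double c0[2], double c1[2])
{
	double tmp = c0[0];   // movapd %xmm0, %xmm8
	c0[0] = c1[0];        // movsd  %xmm1, %xmm0
	c1[0] = tmp;          // movsd  %xmm8, %xmm1
}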
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_ab_4x4_lib4, @function
-inner_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	movddup	0(%r10), %xmm15
-
-	mulpd	%xmm15, %xmm0
-	mulpd	%xmm15, %xmm1
-	mulpd	%xmm15, %xmm2
-	mulpd	%xmm15, %xmm3
-	mulpd	%xmm15, %xmm4
-	mulpd	%xmm15, %xmm5
-	mulpd	%xmm15, %xmm6
-	mulpd	%xmm15, %xmm7
-
-
-	// beta
-	movddup	0(%r11), %xmm14
-
-	movapd		0(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm0
-	movapd		16(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm4
-	movapd		32(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm1
-	movapd		48(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm5
-	movapd		64(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm2
-	movapd		80(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm6
-	movapd		96(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm3
-	movapd		112(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_ab_4x4_lib4, .-inner_scale_ab_4x4_lib4
-#endif
-#endif
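
// A minimal C sketch of the scale step above: the 4x4 accumulator is scaled by
// alpha and beta*C is added, element by element over the panel-major block
// (element (i,j) at index i + 4*j); alpha and beta are passed by pointer, as in
// the assembly (r10/r11).
static void ref_scale_ab_4x4(double acc[16], const double *alpha,
                             const double *beta, const double *C)
{
	for (int idx = 0; idx < 16; idx++)
		acc[idx] = alpha[0] * acc[idx] + beta[0] * C[idx];
}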
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta=0.0
-//
-// input arguments:
-// r10   <- alpha
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- alpha
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_SCALE_A0_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_scale_a0_4x4_lib4, @function
-inner_scale_a0_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_scale_a0_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_scale_a0_4x4_lib4; .scl 2; .type 32; .endef
-inner_scale_a0_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	movddup	0(%r10), %xmm15
-
-	mulpd	%xmm15, %xmm0
-	mulpd	%xmm15, %xmm1
-	mulpd	%xmm15, %xmm2
-	mulpd	%xmm15, %xmm3
-	mulpd	%xmm15, %xmm4
-	mulpd	%xmm15, %xmm5
-	mulpd	%xmm15, %xmm6
-	mulpd	%xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_scale_a0_4x4_lib4, .-inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// scale for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_lib4, @function
-inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	movapd	%xmm0, %xmm8
-	movsd	%xmm1, %xmm0
-	movsd	%xmm8, %xmm1
-
-	movapd	%xmm2, %xmm8
-	movsd	%xmm3, %xmm2
-	movsd	%xmm8, %xmm3
-
-	movapd	%xmm4, %xmm8
-	movsd	%xmm5, %xmm4
-	movsd	%xmm8, %xmm5
-
-	movapd	%xmm6, %xmm8
-	movsd	%xmm7, %xmm6
-	movsd	%xmm8, %xmm7
-
-	// alpha
-	movddup	0(%r10), %xmm15
-
-	mulpd	%xmm15, %xmm0
-	mulpd	%xmm15, %xmm1
-	mulpd	%xmm15, %xmm2
-	mulpd	%xmm15, %xmm3
-	mulpd	%xmm15, %xmm4
-	mulpd	%xmm15, %xmm5
-	mulpd	%xmm15, %xmm6
-	mulpd	%xmm15, %xmm7
-
-
-	// beta
-	movddup	0(%r11), %xmm14
-
-	movapd		0(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm0
-	movapd		16(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm4
-	movapd		32(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm1
-	movapd		48(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm5
-	movapd		64(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm2
-	movapd		80(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm6
-	movapd		96(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm3
-	movapd		112(%r12), %xmm15
-	mulpd		%xmm14, %xmm15
-	addpd		%xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_lib4, .-inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blender for alpha = 1.0 and beta = 1.0
-//
-// input arguments:
-// r10   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-// output arguments:
-// r10   <- C
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_11_4x4_lib4, @function
-inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_11_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_11_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_11_4x4_lib4:
-#endif
-#endif	
-	
-	movapd	%xmm0, %xmm8
-	movsd	%xmm1, %xmm0
-	movsd	%xmm8, %xmm1
-
-	movapd	%xmm2, %xmm8
-	movsd	%xmm3, %xmm2
-	movsd	%xmm8, %xmm3
-
-	movapd	%xmm4, %xmm8
-	movsd	%xmm5, %xmm4
-	movsd	%xmm8, %xmm5
-
-	movapd	%xmm6, %xmm8
-	movsd	%xmm7, %xmm6
-	movsd	%xmm8, %xmm7
-
-
-	movapd		0(%r10), %xmm15
-	addpd		%xmm15, %xmm0
-	movapd		16(%r10), %xmm15
-	addpd		%xmm15, %xmm4
-	movapd		32(%r10), %xmm15
-	addpd		%xmm15, %xmm1
-	movapd		48(%r10), %xmm15
-	addpd		%xmm15, %xmm5
-	movapd		64(%r10), %xmm15
-	addpd		%xmm15, %xmm2
-	movapd		80(%r10), %xmm15
-	addpd		%xmm15, %xmm6
-	movapd		96(%r10), %xmm15
-	addpd		%xmm15, %xmm3
-	movapd		112(%r10), %xmm15
-	addpd		%xmm15, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_11_4x4_lib4, .-inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// cholesky factorization 
-//
-// input arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- inv_diag_E
-// r11d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dpotrf_4x4_vs_lib4, @function
-inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dpotrf_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dpotrf_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dpotrf_4x4_vs_lib4:
-#endif
-#endif
-	
-	xorpd			%xmm15, %xmm15 // 0.0
-
-	movsd			%xmm0, %xmm13
-	ucomisd			%xmm15, %xmm13 // d_00 > 0.0 ?
-	jbe				1f
-	sqrtsd			%xmm13, %xmm13
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	movsd			.LC04(%rip), %xmm12 // 1.0
-#elif defined(OS_MAC)
-	movsd			LC04(%rip), %xmm12 // 1.0
-#endif
-	divsd			%xmm13, %xmm12
-2:
-	cmpl			$2, %r11d
-	movsd			%xmm12, 0(%r10)
-	movddup			%xmm12, %xmm12
-	mulpd			%xmm12, %xmm0
-	mulpd			%xmm12, %xmm4
-
-	jl				0f // ret
-
-	movapd			%xmm0, %xmm12
-	shufpd			$0x3, %xmm12, %xmm12
-	movapd			%xmm12, %xmm13
-	mulpd			%xmm0, %xmm12
-	mulpd			%xmm4, %xmm13
-	subpd			%xmm12, %xmm1
-	subpd			%xmm13, %xmm5
-	movapd			%xmm1, %xmm13
-	shufpd			$0x3, %xmm13, %xmm13 // 0x1 ???
-	ucomisd			%xmm15, %xmm13 // d_11 > 0.0 ?
-	jbe				3f
-	sqrtsd			%xmm13, %xmm13
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	movsd			.LC04(%rip), %xmm12 // 1.0
-#elif defined(OS_MAC)
-	movsd			LC04(%rip), %xmm12 // 1.0
-#endif
-	divsd			%xmm13, %xmm12
-4:
-	cmpl			$3, %r11d
-	movsd			%xmm12, 8(%r10)
-	movddup			%xmm12, %xmm12
-	mulpd			%xmm12, %xmm1
-	mulpd			%xmm12, %xmm5
-
-	jl				0f // ret
-
-	movddup			%xmm4, %xmm12
-	movddup			%xmm5, %xmm13
-	mulpd			%xmm4, %xmm12
-	mulpd			%xmm5, %xmm13
-	subpd			%xmm12, %xmm6
-	subpd			%xmm13, %xmm6
-	movsd			%xmm6, %xmm13
-	ucomisd			%xmm15, %xmm13 // d_22 > 0.0 ?
-	jbe				5f
-	sqrtsd			%xmm13, %xmm13
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	movsd			.LC04(%rip), %xmm12 // 1.0
-#elif defined(OS_MAC)
-	movsd			LC04(%rip), %xmm12 // 1.0
-#endif
-	divsd			%xmm13, %xmm12
-6:
-	cmpl			$4, %r11d
-	movsd			%xmm12, 16(%r10)
-	movddup			%xmm12, %xmm12
-	mulpd			%xmm12, %xmm6
-
-	jl				0f // ret
-
-	movapd			%xmm4, %xmm12
-	movapd			%xmm5, %xmm13
-	movapd			%xmm6, %xmm14
-	shufpd			$0x3, %xmm12, %xmm12
-	shufpd			$0x3, %xmm13, %xmm13
-	shufpd			$0x3, %xmm14, %xmm14
-	mulpd			%xmm4, %xmm12
-	mulpd			%xmm5, %xmm13
-	mulpd			%xmm6, %xmm14
-	subpd			%xmm12, %xmm7
-	subpd			%xmm13, %xmm7
-	subpd			%xmm14, %xmm7
-	movapd			%xmm7, %xmm13
-	shufpd			$0x3, %xmm13, %xmm13
-	ucomisd			%xmm15, %xmm13 // d_33 > 0.0 ?
-	jbe				7f
-	sqrtsd			%xmm13, %xmm13
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	movsd			.LC04(%rip), %xmm12 // 1.0
-#elif defined(OS_MAC)
-	movsd			LC04(%rip), %xmm12 // 1.0
-#endif
-	divsd			%xmm13, %xmm12
-8:
-	movsd			%xmm12, 24(%r10)
-	movddup			%xmm12, %xmm12
-	mulpd			%xmm12, %xmm7
-
-	jmp		0f
-	
-1:
-	xorpd	%xmm12, %xmm12
-	jmp		2b
-
-3:
-	xorpd	%xmm12, %xmm12
-	jmp		4b
-
-5:
-	xorpd	%xmm12, %xmm12
-	jmp		6b
-
-7:
-	xorpd	%xmm12, %xmm12
-	jmp		8b
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dpotrf_4x4_vs_lib4, .-inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
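
// A minimal C sketch of the Cholesky edge above: factor the already-accumulated
// lower 4x4 block in place, writing the reciprocal square root of each pivot to
// inv_diag; a non-positive pivot falls back to a zero scale factor, mirroring the
// branches to labels 1/3/5/7. Roughly, only the first kn columns are factored, as
// in the _vs variant.
#include <math.h>
static void ref_dpotrf_4x4_vs(double L[4][4], double *inv_diag, int kn)
{
	for (int j = 0; j < 4 && j < kn; j++) {
		for (int i = j; i < 4; i++) {
			double v = L[i][j];
			for (int l = 0; l < j; l++)
				v -= L[i][l] * L[j][l];       // subtract previous columns
			if (i == j) {
				inv_diag[j] = v > 0.0 ? 1.0 / sqrt(v) : 0.0;
				L[j][j] = v * inv_diag[j];    // = sqrt(v) when v > 0
			} else {
				L[i][j] = v * inv_diag[j];
			}
		}
	}
}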
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- E
-// r11  <- inv_diag_E
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_lib4:
-#endif
-#endif
-	
-	movddup			0(%r11), %xmm13
-	mulpd			%xmm13, %xmm0
-	mulpd			%xmm13, %xmm4
-
-	movddup			8(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm13
-	mulpd			%xmm4, %xmm12
-	subpd			%xmm13, %xmm1
-	subpd			%xmm12, %xmm5
-	movddup			8(%r11), %xmm13
-	mulpd			%xmm13, %xmm1
-	mulpd			%xmm13, %xmm5
-
-	movddup			16(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm12
-	mulpd			%xmm4, %xmm13
-	subpd			%xmm12, %xmm2
-	subpd			%xmm13, %xmm6
-	movddup			48(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm1, %xmm12
-	mulpd			%xmm5, %xmm13
-	subpd			%xmm12, %xmm2
-	subpd			%xmm13, %xmm6
-	movddup			16(%r11), %xmm13
-	mulpd			%xmm13, %xmm2
-	mulpd			%xmm13, %xmm6
-
-	movddup			24(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm12
-	mulpd			%xmm4, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			56(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm1, %xmm12
-	mulpd			%xmm5, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			88(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm2, %xmm12
-	mulpd			%xmm6, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			24(%r11), %xmm13
-	mulpd			%xmm13, %xmm3
-	mulpd			%xmm13, %xmm7
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
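
// A minimal C sketch of the triangular-substitution edge above: given the 4x4
// block X and a lower-triangular E (panel-major, element (i,j) at E[i + 4*j])
// whose reciprocal diagonal is stored in inv_diag_E, compute X <- X * E^{-T}
// one column at a time.
static void ref_dtrsm_rlt_inv_4x4(double X[4][4], const double *E,
                                  const double *inv_diag_E)
{
	for (int j = 0; j < 4; j++) {
		for (int l = 0; l < j; l++)          // subtract already-solved columns
			for (int i = 0; i < 4; i++)
				X[i][j] -= E[j + 4 * l] * X[i][l];
		for (int i = 0; i < 4; i++)          // scale by 1/E(j,j)
			X[i][j] *= inv_diag_E[j];
	}
}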
-
-
-
-
-
-// common inner routine with file scope
-//
-// triangular substitution for cholesky factorization 
-//
-// input arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// r11  <- inv_diag_D
-// r12d <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, @function
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_edge_dtrsm_rlt_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4:
-#endif
-#endif
-	
-	movddup			0(%r11), %xmm13
-	cmpl			$2, %r12d
-	mulpd			%xmm13, %xmm0
-	mulpd			%xmm13, %xmm4
-
-	jl				0f // ret
-
-	movddup			8(%r10), %xmm13
-	cmpl			$3, %r12d
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm13
-	mulpd			%xmm4, %xmm12
-	subpd			%xmm13, %xmm1
-	subpd			%xmm12, %xmm5
-	movddup			8(%r11), %xmm13
-	mulpd			%xmm13, %xmm1
-	mulpd			%xmm13, %xmm5
-
-	jl				0f // ret
-
-	movddup			16(%r10), %xmm13
-	cmpl			$4, %r12d
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm12
-	mulpd			%xmm4, %xmm13
-	subpd			%xmm12, %xmm2
-	subpd			%xmm13, %xmm6
-	movddup			48(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm1, %xmm12
-	mulpd			%xmm5, %xmm13
-	subpd			%xmm12, %xmm2
-	subpd			%xmm13, %xmm6
-	movddup			16(%r11), %xmm13
-	mulpd			%xmm13, %xmm2
-	mulpd			%xmm13, %xmm6
-
-	jl				0f // ret
-
-	movddup			24(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm0, %xmm12
-	mulpd			%xmm4, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			56(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm1, %xmm12
-	mulpd			%xmm5, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			88(%r10), %xmm13
-	movapd			%xmm13, %xmm12
-	mulpd			%xmm2, %xmm12
-	mulpd			%xmm6, %xmm13
-	subpd			%xmm12, %xmm3
-	subpd			%xmm13, %xmm7
-	movddup			24(%r11), %xmm13
-	mulpd			%xmm13, %xmm3
-	mulpd			%xmm13, %xmm7
-
-0:
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_edge_dtrsm_rlt_inv_4x4_vs_lib4, .-inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10  <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d01 d11]
-// xmm2  <- [d02 d12]
-// xmm3  <- [d03 d13]
-// xmm4  <- [d20 d30]
-// xmm5  <- [d21 d31]
-// xmm6  <- [d22 d32]
-// xmm7  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, @function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_lib4:
-#endif
-#endif
-	
-	movapd %xmm0,   0(%r10)
-	movapd %xmm4,  16(%r10)
-	movapd %xmm1,  32(%r10)
-	movapd %xmm5,  48(%r10)
-	movapd %xmm2,  64(%r10)
-	movapd %xmm6,  80(%r10)
-	movapd %xmm3,  96(%r10)
-	movapd %xmm7, 112(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_vs_lib4, @function
-inner_store_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl		$2, %r11d
-	jg			1f
-	je			0f
-
-	// km==1
-	movsd		%xmm0,  0(%r10)
-	cmpl		$2, %r12d
-	jl			4f // end
-	movsd		%xmm1, 32(%r10)
-	cmpl		$3, %r12d
-	jl			4f // end
-	movsd		%xmm2, 64(%r10)
-	je			4f // end
-	movsd		%xmm3, 96(%r10)
-
-	jmp		4f
-
-0:
-	// km==2
-	movapd		%xmm0,  0(%r10)
-	cmpl		$2, %r12d
-	jl			4f // end
-	movapd		%xmm1, 32(%r10)
-	cmpl		$3, %r12d
-	jl			4f // end
-	movapd		%xmm2, 64(%r10)
-	je			4f // end
-	movapd		%xmm3, 96(%r10)
-
-	jmp		4f
-
-1:
-	cmpl		$3, %r11d
-	jg			2f
-
-	// km==3
-	movapd		%xmm0,   0(%r10)
-	movsd		%xmm4,  16(%r10)
-	cmpl		$2, %r12d
-	jl			4f // end
-	movapd		%xmm1,  32(%r10)
-	movsd		%xmm5,  48(%r10)
-	cmpl		$3, %r12d
-	jl			4f // end
-	movapd		%xmm2,  64(%r10)
-	movsd		%xmm6,  80(%r10)
-	je			4f // end
-	movapd		%xmm3,  96(%r10)
-	movsd		%xmm7, 112(%r10)
-
-	jmp		4f
-
-2:
-	// km==4
-	movapd		%xmm0,   0(%r10)
-	movapd		%xmm4,  16(%r10)
-	cmpl		$2, %r12d
-	jl			4f // end
-	movapd		%xmm1,  32(%r10)
-	movapd		%xmm5,  48(%r10)
-	cmpl		$3, %r12d
-	jl			4f // end
-	movapd		%xmm2,  64(%r10)
-	movapd		%xmm6,  80(%r10)
-	je			4f // end
-	movapd		%xmm3,  96(%r10)
-	movapd		%xmm7, 112(%r10)
-
-4:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_vs_lib4, .-inner_store_4x4_vs_lib4
-#endif
-#endif
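
// A minimal C sketch of the masked store above: only the top-left km x kn corner
// of the 4x4 result is written back to D (panel-major, element (i,j) at index
// i + 4*j).
static void ref_store_4x4_vs(const double acc[16], double *D, int km, int kn)
{
	for (int j = 0; j < kn; j++)
		for (int i = 0; i < km; i++)
			D[i + 4 * j] = acc[i + 4 * j];
}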
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n generalized
-//
-// input arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n0 // col index: start from (inc)
-// rax  <- n1 // col index: up to (exc)
-// rbx  <- dirty
-// xmm0 <-
-//
-// output arguments:
-// r10  <- offset
-// r11  <- D
-// r12  <- 4*sdd*sizeof(double)
-// r13  <- m0 // row index: start from (inc)
-// r14  <- m1 // row index: up to (exc)
-// r15  <- n1-n0
-// rax  <- n1-n0
-// rbx  <- dirty
-// xmm0 <-
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_GEN_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_gen_lib4, @function
-inner_store_4x4_gen_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_gen_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_gen_lib4:
-#endif
-#endif
-	
-	// masks computation ???
-
-	// shift D and sol for cols
-	cmpl	$0, %r15d
-	jle		0f
-
-	vmovapd		%xmm1, %xmm0
-	vmovapd		%xmm5, %xmm4
-	vmovapd		%xmm2, %xmm1
-	vmovapd		%xmm6, %xmm5
-	vmovapd		%xmm3, %xmm2
-	vmovapd		%xmm7, %xmm6
-	addq		$32, %r11
-
-	cmpl	$1, %r15d
-	jle		0f
-
-	vmovapd		%xmm1, %xmm0
-	vmovapd		%xmm5, %xmm4
-	vmovapd		%xmm2, %xmm1
-	vmovapd		%xmm6, %xmm5
-	addq		$32, %r11
-
-	cmpl	$2, %r15d
-	jle		0f
-
-	vmovapd		%xmm1, %xmm0
-	vmovapd		%xmm5, %xmm4
-	addq		$32, %r11
-
-0:
-
-	// compute number of cols
-	cmpl	$4, %eax
-	jle		0f
-	movl	$4, %eax
-0:
-	subl	%r15d, %eax
-	movl	%eax, %r15d
-
-
-	cmpl	$0, %r10d
-	jg		0f
-
-	///////////////
-	// offset==0 //
-	///////////////
-
-	cmpl	$0, %r13d
-	jle		4f
-
-	cmpl	$1, %r13d
-	jg		5f
-
-	movsd	0(%r11), %xmm8
-	movsd	%xmm8, %xmm0
-	movsd	32(%r11), %xmm8
-	movsd	%xmm8, %xmm1
-	movsd	64(%r11), %xmm8
-	movsd	%xmm8, %xmm2
-	movsd	96(%r11), %xmm8
-	movsd	%xmm8, %xmm3
-
-	jmp		4f
-
-5:
-
-	cmpl	$2, %r13d
-	jg		5f
-
-	movapd	0(%r11), %xmm0
-	movapd	32(%r11), %xmm1
-	movapd	64(%r11), %xmm2
-	movapd	96(%r11), %xmm3
-
-	jmp		4f
-
-5:
-
-	cmpl	$3, %r13d
-	jg		5f
-
-	movapd	0(%r11), %xmm0
-	movsd	16(%r11), %xmm8
-	movsd	%xmm8, %xmm4
-	movapd	32(%r11), %xmm1
-	movsd	48(%r11), %xmm8
-	movsd	%xmm8, %xmm5
-	movapd	64(%r11), %xmm2
-	movsd	80(%r11), %xmm8
-	movsd	%xmm8, %xmm6
-	movapd	96(%r11), %xmm3
-	movsd	112(%r11), %xmm8
-	movsd	%xmm8, %xmm7
-
-	jmp		4f
-
-5:
-
-	movapd	0(%r11), %xmm0
-	movapd	16(%r11), %xmm4
-	movapd	32(%r11), %xmm1
-	movapd	48(%r11), %xmm5
-	movapd	64(%r11), %xmm2
-	movapd	80(%r11), %xmm6
-	movapd	96(%r11), %xmm3
-	movapd	112(%r11), %xmm7
-
-4:
-	cmpl		$2, %r14d
-	jg			5f
-	je			4f
-
-	// km==1
-	movsd		%xmm0,  0(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	movsd		%xmm1, 32(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	movsd		%xmm2, 64(%r11)
-	je			3f // end
-	movsd		%xmm3, 96(%r11)
-
-	jmp		3f
-
-4:
-	// km==2
-	movapd		%xmm0,  0(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	movapd		%xmm1, 32(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	movapd		%xmm2, 64(%r11)
-	je			3f // end
-	movapd		%xmm3, 96(%r11)
-
-	jmp		3f
-
-5:
-	cmpl		$3, %r14d
-	jg			6f
-
-	// km==3
-	movapd		%xmm0,   0(%r11)
-	movsd		%xmm4,  16(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	movapd		%xmm1,  32(%r11)
-	movsd		%xmm5,  48(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	movapd		%xmm2,  64(%r11)
-	movsd		%xmm6,  80(%r11)
-	je			3f // end
-	movapd		%xmm3,  96(%r11)
-	movsd		%xmm7, 112(%r11)
-
-	jmp		3f
-
-6:
-	// km==4
-	movapd		%xmm0,   0(%r11)
-	movapd		%xmm4,  16(%r11)
-	cmpl		$2, %r15d
-	jl			3f // end
-	movapd		%xmm1,  32(%r11)
-	movapd		%xmm5,  48(%r11)
-	cmpl		$3, %r15d
-	jl			3f // end
-	movapd		%xmm2,  64(%r11)
-	movapd		%xmm6,  80(%r11)
-	je			3f // end
-	movapd		%xmm3,  96(%r11)
-	movapd		%xmm7, 112(%r11)
-
-	jmp		3f
-
-0:
-	
-	movq	%r11, %rbx // D0
-	addq	%r12, %rbx // D1 <- D0 + 4*sdd*sizeof(double)
-
-	cmpl	$1, %r10d
-	jg		1f
-
-	///////////////
-	// offset==1 //
-	///////////////
-
-	// TODO
-
-	jmp		3f
-
-1:
-
-	cmpl	$2, %r10d
-	jg		2f
-
-	///////////////
-	// offset==2 //
-	///////////////
-
-	// TODO
-
-	jmp		3f
-
-2:
-
-	///////////////
-	// offset==3 //
-	///////////////
-
-	// TODO
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_gen_lib4, .-inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n lower triangular
-//
-// input arguments:
-// r10   <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_lib4, @function
-inner_store_l_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_lib4:
-#endif
-#endif
-	
-	movapd		%xmm0,   0(%r10)
-	movapd		%xmm4,  16(%r10)
-	movsd		32(%r10), %xmm15
-	movsd		%xmm15, %xmm1
-	movapd		%xmm1,  32(%r10)
-	movapd		%xmm5,  48(%r10)
-//	movapd		%xmm2,  64(%r10)
-	movapd		%xmm6,  80(%r10)
-//	movapd		%xmm3,  96(%r10)
-	movsd		112(%r10), %xmm15
-	movsd		%xmm15, %xmm7
-	movapd		%xmm7, 112(%r10)
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_lib4, .-inner_store_l_4x4_lib4
-#endif
-#endif
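
// A minimal C sketch of the lower-triangular store above: only elements on or
// below the diagonal are written; the strictly upper entries of D keep the values
// already in memory, which the assembly achieves by merging them back into the
// registers before the 16-byte stores.
static void ref_store_l_4x4(const double acc[16], double *D)
{
	for (int j = 0; j < 4; j++)
		for (int i = j; i < 4; i++)
			D[i + 4 * j] = acc[i + 4 * j];
}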
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n vs lower triangular
-//
-// input arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-//
-// output arguments:
-// r10   <- D
-// r11d   <- km
-// r12d   <- kn
-// xmm0  <- [d00 d10]
-// xmm1  <- [d20 d30]
-// xmm2  <- [d01 d11]
-// xmm3  <- [d21 d31]
-// xmm0  <- [d02 d12]
-// xmm1  <- [d22 d32]
-// xmm2  <- [d03 d13]
-// xmm3  <- [d23 d33]
-// xmm8  <- dirty
-// xmm9  <- dirty
-// xmm10 <- dirty
-// xmm11 <- dirty
-// xmm12 <- dirty
-// xmm13 <- dirty
-// xmm14 <- dirty
-// xmm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_L_4X4_VS_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_l_4x4_vs_lib4, @function
-inner_store_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-_inner_store_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-inner_store_l_4x4_vs_lib4:
-#endif
-#endif
-	
-	cmpl		$2, %r11d
-	jg			1f
-	je			0f
-
-	// km==1
-	movsd		%xmm0,  0(%r10)
-
-	jmp		3f
-
-0:
-	// km==2
-	cmpl		$2, %r12d
-	movapd		%xmm0,  0(%r10)
-	jl			3f // end
-	movsd		32(%r10), %xmm15
-	movsd		%xmm15, %xmm1
-	movapd		%xmm1, 32(%r10)
-
-	jmp		3f
-
-1:
-	cmpl		$3, %r11d
-	jg			2f
-
-	// km==3
-	cmpl		$2, %r12d
-	movapd		%xmm0,   0(%r10)
-	movsd		%xmm4,  16(%r10)
-	jl			3f // end
-	cmpl		$3, %r12d
-	movsd		32(%r10), %xmm15
-	movsd		%xmm15, %xmm1
-	movapd		%xmm1,  32(%r10)
-	movsd		%xmm5,  48(%r10)
-	jl			3f // end
-//	movapd		%xmm2,  64(%r10)
-	movsd		%xmm6,  80(%r10)
-
-	jmp		3f
-
-2:
-	// km==4
-	cmpl		$2, %r12d
-	movapd		%xmm0,   0(%r10)
-	movapd		%xmm4,  16(%r10)
-	jl			3f // end
-	cmpl		$3, %r12d
-	movsd		32(%r10), %xmm15
-	movsd		%xmm15, %xmm1
-	movapd		%xmm1,  32(%r10)
-	movapd		%xmm5,  48(%r10)
-	jl			3f // end
-//	movapd		%xmm2,  64(%r10)
-	movapd		%xmm6,  80(%r10)
-	je			3f // end
-//	movapd		%xmm3,  96(%r10)
-	movsd		112(%r10), %xmm15
-	movsd		%xmm15, %xmm7
-	movapd		%xmm7, 112(%r10)
-
-3:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_l_4x4_vs_lib4, .-inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-
-
-//                               rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.type kernel_dgemm_nt_4x4_lib4, @function
-kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_lib4
-_kernel_dgemm_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_lib4
-	.def kernel_dgemm_nt_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4, .-kernel_dgemm_nt_4x4_lib4
-#endif
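
// A minimal C reference for the kernel above (an illustration, not taken from the
// BLASFEO sources): A and B are 4 x k blocks stored in a single 4-row panel
// (element (i,l) at index i + 4*l), and the kernel computes
// D = alpha * A * B^T + beta * C on 4x4 panel-major blocks C and D.
static void ref_kernel_dgemm_nt_4x4_lib4(int k, const double *alpha,
                                         const double *A, const double *B,
                                         const double *beta, const double *C,
                                         double *D)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[i + 4 * l] * B[j + 4 * l];      // A * B^T
			D[i + 4 * j] = alpha[0] * acc + beta[0] * C[i + 4 * j];
		}
}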
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dgemm_nt_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.type kernel_dgemm_nt_4x4_vs_lib4, @function
-kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_vs_lib4
-_kernel_dgemm_nt_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_vs_lib4
-	.def kernel_dgemm_nt_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_vs_lib4, .-kernel_dgemm_nt_4x4_vs_lib4
-#endif
-
-
-
-
-
-#if 0
-
-//                                   1      2              3          4          5             6            7          8        9            10         11       12      13      14      15
-// void kernel_dgemm_nt_4x4_gen_lib4(int k, double *alpha, double *A, double *B, double *beta, int offsetC, double *C, int sdc, int offsetD, double *D, int sdd, int m0, int m1, int n0, int n1);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.type kernel_dgemm_nt_4x4_gen_lib4, @function
-kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_gen_lib4
-_kernel_dgemm_nt_4x4_gen_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_gen_lib4
-	.def kernel_dgemm_nt_4x4_gen_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nt_4x4_gen_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-#if 0 //
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // offsetC
-	movq	ARG7, %r13 // C
-	movq	ARG8, %r14 // sdc
-	sall	$5, %r14d // 4*sdc*sizeof(double)
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_gen_lib4
-#endif
-#endif
-
-#else //
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG7, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-#endif //
-
-	// store n gen
-
-	movq	ARG9, %r10 // offsetD
-	movq	ARG10, %r11 // D
-	movq	ARG11, %r12 // sdd
-	sall	$5, %r12d // 4*sdd*sizeof(double)
-	movq	ARG12, %r13 // m0
-	movq	ARG13, %r14 // m1
-	movq	ARG14, %r15 // n0
-	movq	ARG15, %rax // n1
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_GEN_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_gen_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_gen_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_gen_lib4, .-kernel_dgemm_nt_4x4_gen_lib4
-#endif
-
-#endif
-
-
-
-
-
-//                               1      2              3          4            5          6        7             8          9
-// void kernel_dgemm_nn_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.type kernel_dgemm_nn_4x4_lib4, @function
-kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_lib4
-_kernel_dgemm_nn_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_lib4
-	.def kernel_dgemm_nn_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_lib4, .-kernel_dgemm_nn_4x4_lib4
-#endif
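
// A minimal C reference for the NN kernel above, restricted to the simple case
// offsetB == 0 (the edge routine handles the general alignment): B is k x 4,
// stored in 4-row panels with panel stride sdb, so element B(l,j) is assumed to
// sit at index (l/4)*4*sdb + l%4 + 4*j; the result is D = alpha*A*B + beta*C.
static void ref_kernel_dgemm_nn_4x4_lib4(int k, const double *alpha,
                                         const double *A, const double *B,
                                         int sdb, const double *beta,
                                         const double *C, double *D)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[i + 4 * l] * B[(l / 4) * 4 * sdb + l % 4 + 4 * j];
			D[i + 4 * j] = alpha[0] * acc + beta[0] * C[i + 4 * j];
		}
}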
-
-
-
-
-
-//                                  1      2              3          4            5          6        7             8          9          10      11
-// void kernel_dgemm_nn_4x4_vs_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_nn_4x4_vs_lib4
-	.type kernel_dgemm_nn_4x4_vs_lib4, @function
-kernel_dgemm_nn_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nn_4x4_vs_lib4
-_kernel_dgemm_nn_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_nn_4x4_vs_lib4
-	.def kernel_dgemm_nn_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_nn_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nn
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG5, %r12  // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG7, %r11 // beta
-	movq	ARG8, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG9, %r10 // D
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nn_4x4_vs_lib4, .-kernel_dgemm_nn_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                 rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dsyrk_nt_l_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.type kernel_dsyrk_nt_l_4x4_lib4, @function
-kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_lib4
-_kernel_dsyrk_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_lib4
-	.def kernel_dsyrk_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_lib4, .-kernel_dsyrk_nt_l_4x4_lib4
-#endif
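
// A minimal C reference for the kernel above: the same NT product as the gemm
// kernel, but only the lower triangle of the 4x4 result reaches D (the strictly
// upper part of D is left untouched, as in the lower store routine earlier in
// this file).
static void ref_kernel_dsyrk_nt_l_4x4_lib4(int k, const double *alpha,
                                           const double *A, const double *B,
                                           const double *beta, const double *C,
                                           double *D)
{
	for (int j = 0; j < 4; j++)
		for (int i = j; i < 4; i++) {          // lower triangle only
			double acc = 0.0;
			for (int l = 0; l < k; l++)
				acc += A[i + 4 * l] * B[j + 4 * l];
			D[i + 4 * j] = alpha[0] * acc + beta[0] * C[i + 4 * j];
		}
}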
-
-
-
-
-
-//                                    rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dsyrk_nt_l_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_nt_l_4x4_vs_lib4
-_kernel_dsyrk_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11  // A
-	movq	ARG4, %r12  // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend 
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call	inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq	_inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_nt_l_4x4_vs_lib4, .-kernel_dsyrk_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  rdi    rsi            rdx        rcx        r8            r9         rsp+8
-// void kernel_dtrmm_nt_ru_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.type kernel_dtrmm_nt_ru_4x4_lib4, @function
-kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_lib4
-_kernel_dtrmm_nt_ru_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_lib4
-	.def kernel_dtrmm_nt_ru_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG3, %r10
-	movq	ARG4, %r11
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_lib4, .-kernel_dtrmm_nt_ru_4x4_lib4
-#endif
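
// A minimal C reference for the kernel above: D = alpha * A * B^T + beta * C with
// B upper triangular in its leading 4x4 block, so column j of the result only
// involves B(j,l) with l >= j (which is what the "initial triangle" edge routine
// exploits).
static void ref_kernel_dtrmm_nt_ru_4x4_lib4(int k, const double *alpha,
                                            const double *A, const double *B,
                                            const double *beta, const double *C,
                                            double *D)
{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++) {
			double acc = 0.0;
			for (int l = j; l < k; l++)        // B(j,l) == 0 for l < j
				acc += A[i + 4 * l] * B[j + 4 * l];
			D[i + 4 * j] = alpha[0] * acc + beta[0] * C[i + 4 * j];
		}
}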
-
-
-
-
-
-//                                     rdi    rsi            rdx        rcx        r8            r9         rsp+8     rsp+16   rsp+24
-// void kernel_dtrmm_nt_ru_4x4_vs_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.type kernel_dtrmm_nt_ru_4x4_vs_lib4, @function
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nt_ru_4x4_vs_lib4
-_kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nt_ru_4x4_vs_lib4
-	.def kernel_dtrmm_nt_ru_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nt_ru_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt after initial triangle
-
-	movq	ARG1, %r10 // k
-	subl	$4, %r10d // k-4
-	movq	ARG3, %r11 // A
-	addq	$128, %r11 // A+4*bs
-	movq	ARG4, %r12 // B
-	addq	$128, %r12 // B+4*bs
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender nn
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_4x4_lib4
-#endif
-#endif
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NT_RU_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-#endif
-
-
-	// call inner loader nn
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12   // C
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nt_ru_4x4_vs_lib4, .-kernel_dtrmm_nt_ru_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  edi    rsi        rdx        ecx        r8         r9
-// void kernel_dpotrf_nt_l_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.type kernel_dpotrf_nt_l_4x4_lib4, @function
-kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_lib4
-_kernel_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_lib4
-	.def kernel_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movl	$4, %r11d // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_lib4, .-kernel_dpotrf_nt_l_4x4_lib4
-#endif
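As the signature above indicates, this kernel performs a rank-k downdate followed by a Cholesky factorization of the 4x4 block: it accumulates C - A*B^T with the `sub` gemm loop, factors the result while it is still in registers, stores the lower factor to D, and writes the reciprocal of each pivot to inv_diag_D so the trsm kernels further down can multiply instead of divide. A scalar reference sketch of the factorization step (plain 4x4 arrays, positive definite input assumed; `potrf_l_4x4_ref` is an illustrative name):

#include <math.h>

// Reference unblocked lower Cholesky of a 4x4 block c[][], writing the factor
// to d[][] and the reciprocal of each pivot to inv_diag_d[].
static void potrf_l_4x4_ref(double c[4][4], double d[4][4], double inv_diag_d[4])
	{
	for (int j = 0; j < 4; j++)
		{
		double ajj = c[j][j];
		for (int l = 0; l < j; l++)
			ajj -= d[j][l] * d[j][l];
		d[j][j] = sqrt(ajj);
		inv_diag_d[j] = 1.0 / d[j][j];
		for (int i = j + 1; i < 4; i++)
			{
			double aij = c[i][j];
			for (int l = 0; l < j; l++)
				aij -= d[i][l] * d[j][l];
			d[i][j] = aij * inv_diag_d[j];
			}
		}
	}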
-
-
-
-
-
-//                                     edi    rsi        rdx        ecx        r8         r9                  rsp+8   rsp+16
-// void kernel_dpotrf_nt_l_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG6, %r10  // inv_diag_D 
-	movq	ARG8, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG7, %r11 // km 
-	movq	ARG8, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                        edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24
-// void kernel_dsyrk_dpotrf_nt_l_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movl	$4, %r11d
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_lib4
-#endif
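The fused dsyrk_dpotrf kernel keeps the 4x4 accumulator in registers across all three phases: kp columns of Ap/Bp are added, km columns of Am/Bm are subtracted, C is folded in with unit alpha/beta, and the result is factored exactly as in the plain dpotrf kernel above, saving one store/load round trip of the block between the update and the factorization. A plain-C sketch of the update it performs before factoring, assuming the 4-wide panel layout for the operands (column l of a panel starts at offset 4*l); `syrk_update_4x4_ref` is an illustrative name:

// Sketch only: acc = C + Ap*Bp^T - Am*Bm^T on a 4x4 block.
static void syrk_update_4x4_ref(int kp, const double *Ap, const double *Bp,
		int km, const double *Am, const double *Bm,
		const double *C, double acc[4][4])
	{
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++)
			acc[i][j] = C[4*j + i];
	for (int l = 0; l < kp; l++)
		for (int i = 0; i < 4; i++)
			for (int j = 0; j < 4; j++)
				acc[i][j] += Ap[4*l + i] * Bp[4*l + j];
	for (int l = 0; l < km; l++)
		for (int i = 0; i < 4; i++)
			for (int j = 0; j < 4; j++)
				acc[i][j] -= Am[4*l + i] * Bm[4*l + j];
	}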
-
-
-
-
-
-//                                           edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24              rsp+32  rsp+40
-// void kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *inv_diag_D, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.type kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, @function
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-_kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-	.def kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// factorization
-
-	movq	ARG9, %r10  // inv_diag_D 
-	movq	ARG11, %r11 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DPOTRF_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dpotrf_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dpotrf_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10  // D 
-	movq	ARG10, %r11 // km 
-	movq	ARG11, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_L_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_l_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_l_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4, .-kernel_dsyrk_dpotrf_nt_l_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                      edi    rsi        rdx        ecx        r8         r9         rsp+8  
-// void kernel_dtrsm_nt_rl_inv_4x4_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG4, %r10
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_lib4
-#endif
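In the naming above, `nt` refers to the A*B^T update, `rl` to a right-hand-side triangular matrix E that is lower triangular, and `inv` to the use of the precomputed reciprocals in inv_diag_E, so the kernel evaluates D = (C - A*B^T) * E^-T without any division. The fused kernel_dgemm_dtrsm variants that follow run the same solve after an add/sub pair of gemm updates. A scalar sketch of the solve, assuming the 4-row panel layout for E (element (r, c) at E[4*c + r]); `trsm_rlt_inv_4x4_ref` is an illustrative name:

// Reference right-solve: given acc = C - A*B^T, find d with d * E^T = acc,
// where E is lower triangular and inv_diag_E[j] == 1.0 / E[j][j].
static void trsm_rlt_inv_4x4_ref(double acc[4][4], const double *E,
		const double *inv_diag_E, double d[4][4])
	{
	for (int j = 0; j < 4; j++)
		for (int i = 0; i < 4; i++)
			{
			double t = acc[i][j];
			for (int l = 0; l < j; l++)
				t -= d[i][l] * E[4*l + j]; // E(j, l) in the panel layout
			d[i][j] = t * inv_diag_E[j];
			}
	}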
-
-
-
-
-
-//                                            edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10   // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10   // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_lib4
-#endif
-
-
-
-
-
-//                                         edi    rsi        rdx        ecx        r8       r9           rsp+8               rsp+16  rsp+24
-// void kernel_dtrsm_nt_rl_inv_4x4_vs_lib4(int k, double *A, double *B, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt 
-
-	movq	ARG1, %r10
-	movq	ARG2, %r11
-	movq	ARG3, %r12
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn // TODO scale gen
-
-	movq	ARG4, %r10 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG6, %r10  // E 
-	movq	ARG7, %r11  // inv_diag_E 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG5, %r10 // D
-	movq	ARG8, %r11 // km 
-	movq	ARG9, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                               edi     rsi         rdx         ecx     r8          r9          rsp+8      rsp+16     rsp+24     rsp+32              rsp+40  rsp+48
-// void kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4(int kp, double *Ap, double *Bp, int km, double *Am, double *Bm, double *C, double *D, double *E, double *inv_diag_E, int km, int kn);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.type kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, @function
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-_kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-	.def kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4; .scl 2; .type 32; .endef
-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-	// call inner dgemm kernel nt add
-
-	movq	ARG1, %r10 // kp
-	movq	ARG2, %r11  // Ap
-	movq	ARG3, %r12  // Bp
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner dgemm kernel nt sub
-
-	movq	ARG4, %r10 // km
-	movq	ARG5, %r11   // Am
-	movq	ARG6, %r12   // Bm
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_SUB_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_sub_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_sub_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blender_loader nn
-
-	movq	ARG7, %r10  // C 
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_11_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_11_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_11_4x4_lib4
-#endif
-#endif
-
-
-	// solve
-
-	movq	ARG9, %r10  // E 
-	movq	ARG10, %r11  // inv_diag_E 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRSM_RLT_INV_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrsm_rlt_inv_4x4_vs_lib4
-#endif
-#endif
-
-
-	// store
-
-	movq	ARG8, %r10 // D 
-	movq	ARG11, %r11 // km 
-	movq	ARG12, %r12 // kn 
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_VS_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_vs_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_vs_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4, .-kernel_dgemm_dtrsm_nt_rl_inv_4x4_vs_lib4
-#endif
-
-
-
-
-
-//                                  1      2              3          4            5          6        7
-// void kernel_dtrmm_nn_rl_4x4_lib4(int k, double *alpha, double *A, int offsetB, double *B, int sdb, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.type kernel_dtrmm_nn_rl_4x4_lib4, @function
-kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_MAC)
-	.globl _kernel_dtrmm_nn_rl_4x4_lib4
-_kernel_dtrmm_nn_rl_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.globl kernel_dtrmm_nn_rl_4x4_lib4
-	.def kernel_dtrmm_nn_rl_4x4_lib4; .scl 2; .type 32; .endef
-kernel_dtrmm_nn_rl_4x4_lib4:
-#endif
-	
-	PROLOGUE
-
-	// zero accumulation registers
-
-	xorpd	%xmm0, %xmm0
-	movapd	%xmm0, %xmm1
-	movapd	%xmm0, %xmm2
-	movapd	%xmm0, %xmm3
-	movapd	%xmm0, %xmm4
-	movapd	%xmm0, %xmm5
-	movapd	%xmm0, %xmm6
-	movapd	%xmm0, %xmm7
-
-
-
-	// initial triangle
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG5, %r12 // B
-	movq	ARG6, %r13 // sdb
-	sall	$5, %r13d // 4*sdb*sizeof(double)
-	movq	ARG4, %r14 // offsetB
-
-#if MACRO_LEVEL>=1
-	INNER_EDGE_DTRMM_NN_RL_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_edge_dtrmm_nn_rl_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_edge_dtrmm_nn_rl_4x4_lib4
-#endif
-#endif
-
-	// call inner dgemm kernel nt after initial triangle
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NN_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nn_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nn_4x4_lib4
-#endif
-#endif
-
-
-	// call inner scale
-
-	movq	ARG2, %r10 // alpha
-
-#if MACRO_LEVEL>=1
-	INNER_SCALE_A0_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_scale_a0_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_scale_a0_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-
-	EPILOGUE
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dtrmm_nn_rl_4x4_lib4, .-kernel_dtrmm_nn_rl_4x4_lib4
-#endif
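This nn kernel reads B by rows rather than by transposed panels, which is why it needs two extra pieces of information: offsetB, the row offset of the first B element inside its 4-row panel, and sdb, the panel stride counted in doubles (the `sall $5, %r13d` above scales it by 32 = 4 rows x 8 bytes to get the byte distance between consecutive panels). A sketch of the addressing this panel-major layout implies; `panel_elem_ref` is an illustrative helper, not a BLASFEO routine:

// Element (i, j) of a matrix stored in 4-row panels with panel stride sda
// doubles: skip i/4 whole panels, then j columns of 4 doubles, then the row
// inside the panel.
static inline double *panel_elem_ref(double *pA, int sda, int i, int j)
	{
	return pA + (i / 4) * 4 * sda + 4 * j + (i % 4);
	}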
-
-
-
-
-
-	// read-only data
-#if defined(OS_LINUX)
-	.section	.rodata.cst32,"aM",@progbits,32
-#elif defined(OS_MAC)
-	.section	__TEXT,__const
-#elif defined(OS_WINDOWS)
-	.section .rdata,"dr"
-#endif
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC00: // { -1 -1 -1 1 }
-#elif defined(OS_MAC)
-LC00: // { -1 -1 -1 1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC01: // { -1 -1 -1 -1 }
-#elif defined(OS_MAC)
-LC01: // { -1 -1 -1 -1 }
-	.align 5
-#endif
-	.quad	-1
-	.quad	-1
-	.quad	-1
-	.quad	-1
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC02: // { 3.5 2.5 1.5 0.5 }
-#elif defined(OS_MAC)
-LC02: // { 3.5 2.5 1.5 0.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1071644672
-	.long	0
-	.long	1073217536
-	.long	0
-	.long	1074003968
-	.long	0
-	.long	1074528256
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC03: // { 7.5 6.5 5.5 4.5 }
-#elif defined(OS_MAC)
-LC03: // { 7.5 6.5 5.5 4.5 }
-	.align 5
-#endif
-	.long	0
-	.long	1074921472
-	.long	0
-	.long	1075183616
-	.long	0
-	.long	1075445760
-	.long	0
-	.long	1075707904
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.align 32
-.LC04: // { 1.0 1.0 1.0 1.0 }
-#elif defined(OS_MAC)
-LC04: // { 1.0 1.0 1.0 1.0 }
-	.align 5
-#endif
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
-	.long	0
-	.long	1072693248
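Each table above stores double-precision values as pairs of .long words, low half first: .LC02 and .LC03 decode to 0.5 ... 3.5 and 4.5 ... 7.5 (the comments list the four lanes from high to low), .LC04 is four copies of 1.0, and .LC00/.LC01 look like per-lane masks, since only the sign bit of each quadword matters to masked moves and blends; values of this kind are typically compared against km/kn to build the lane masks for the variable-size stores. A small self-contained check of the encoding (`from_longs` is illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

// Rebuild a double from the two .long halves emitted above (low word first).
static double from_longs(uint32_t lo, uint32_t hi)
	{
	uint64_t bits = ((uint64_t)hi << 32) | lo;
	double d;
	memcpy(&d, &bits, sizeof d);
	return d;
	}

int main(void)
	{
	// First entry of .LC02 and of .LC04: prints "0.5 1.0".
	printf("%.1f %.1f\n", from_longs(0, 1071644672), from_longs(0, 1072693248));
	return 0;
	}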
-
-
-
-#if defined(OS_LINUX)
-	.section	.note.GNU-stack,"",@progbits
-#elif defined(OS_MAC)
-	.subsections_via_symbols
-#endif
-
diff --git a/third_party/blasfeo/lib/dummy.txt b/third_party/blasfeo/lib/dummy.txt
deleted file mode 100644
index e69de29..0000000
--- a/third_party/blasfeo/lib/dummy.txt
+++ /dev/null
diff --git a/third_party/blasfeo/test_problems/CMakeLists.txt b/third_party/blasfeo/test_problems/CMakeLists.txt
deleted file mode 100644
index 77becb1..0000000
--- a/third_party/blasfeo/test_problems/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of HPIPM.                                                                     #
-#                                                                                                 #
-# HPIPM -- High Performance Interior Point Method.                                                #
-# Copyright (C) 2017 by Gianluca Frison.                                                          #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-add_executable(d_blas test_blas_d.c)
-target_link_libraries(d_blas blasfeo m)
-
-add_executable(s_blas test_blas_s.c)
-target_link_libraries(s_blas blasfeo m)
diff --git a/third_party/blasfeo/test_problems/Makefile b/third_party/blasfeo/test_problems/Makefile
deleted file mode 100644
index f2e4741..0000000
--- a/third_party/blasfeo/test_problems/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-###################################################################################################
-#                                                                                                 #
-# This file is part of BLASFEO.                                                                   #
-#                                                                                                 #
-# BLASFEO -- BLAS For Embedded Optimization.                                                      #
-# Copyright (C) 2016-2017 by Gianluca Frison.                                                     #
-# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              #
-# All rights reserved.                                                                            #
-#                                                                                                 #
-# HPMPC is free software; you can redistribute it and/or                                          #
-# modify it under the terms of the GNU Lesser General Public                                      #
-# License as published by the Free Software Foundation; either                                    #
-# version 2.1 of the License, or (at your option) any later version.                              #
-#                                                                                                 #
-# HPMPC is distributed in the hope that it will be useful,                                        #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of                                  #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            #
-# See the GNU Lesser General Public License for more details.                                     #
-#                                                                                                 #
-# You should have received a copy of the GNU Lesser General Public                                #
-# License along with HPMPC; if not, write to the Free Software                                    #
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  #
-#                                                                                                 #
-# Author: Gianluca Frison, giaf (at) dtu.dk                                                       #
-#                          gianluca.frison (at) imtek.uni-freiburg.de                             #
-#                                                                                                 #
-###################################################################################################
-
-include ../Makefile.rule
-
-ifeq ($(REF_BLAS), 0)
-LIBS = -lm 
-endif
-ifeq ($(REF_BLAS), OPENBLAS)
-LIBS = /opt/openblas/lib/libopenblas.a -pthread -lgfortran -lm
-endif
-ifeq ($(REF_BLAS), BLIS)
-LIBS = /opt/netlib/liblapack.a /opt/blis/lib/libblis.a -lgfortran -lm -fopenmp
-endif
-ifeq ($(REF_BLAS), NETLIB)
-LIBS = /opt/netlib/liblapack.a /opt/netlib/libblas.a -lgfortran -lm
-endif
-ifeq ($(REF_BLAS), MKL)
-LIBS = -Wl,--start-group /opt/intel/mkl/lib/intel64/libmkl_gf_lp64.a /opt/intel/mkl/lib/intel64/libmkl_core.a /opt/intel/mkl/lib/intel64/libmkl_sequential.a -Wl,--end-group -ldl -lpthread -lm
-endif
-ifeq ($(REF_BLAS), ATLAS)
-LIBS = /opt/atlas/lib/liblapack.a /opt/atlas/lib/libcblas.a /opt/atlas/lib/libf77blas.a /opt/atlas/lib/libatlas.a -lgfortran -lm
-endif
-
-#ifneq ($(NUM_THREAD), 1)
-#LIBS += -pthread 
-#endif
-
-OBJS_TEST = test_blas_d.o
-#OBJS_TEST = test_blas_s.o
-#OBJS_TEST = test_d_strmat.o
-#OBJS_TEST = test_s_strmat.o
-#OBJS_TEST = kernel_assembly.o test_assembly.o
-
-obj: $(OBJS_TEST)
-	$(CC) -o test.out $(OBJS_TEST) -L. libblasfeo.a $(LIBS) #-pg
-
-clean:
-	rm -f *.o
-	rm -f test.out
-	rm -f libblasfeo.a
-
diff --git a/third_party/blasfeo/test_problems/cpu_freq.h b/third_party/blasfeo/test_problems/cpu_freq.h
deleted file mode 100644
index 30320fc..0000000
--- a/third_party/blasfeo/test_problems/cpu_freq.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#ifndef GHZ_MAX
-#define GHZ_MAX 3.6
-#endif
diff --git a/third_party/blasfeo/test_problems/kernel_assembly.S b/third_party/blasfeo/test_problems/kernel_assembly.S
deleted file mode 100644
index b393e0d..0000000
--- a/third_party/blasfeo/test_problems/kernel_assembly.S
+++ /dev/null
@@ -1,633 +0,0 @@
-#if defined(OS_LINUX) | defined(OS_MAC)
-
-//#define STACKSIZE 96
-#define STACKSIZE 64
-#define ARG1  %rdi
-#define ARG2  %rsi
-#define ARG3  %rdx
-#define ARG4  %rcx
-#define ARG5  %r8
-#define ARG6  %r9
-#define ARG7  STACKSIZE +  8(%rsp)
-#define ARG8  STACKSIZE + 16(%rsp)
-#define ARG9  STACKSIZE + 24(%rsp)
-#define ARG10 STACKSIZE + 32(%rsp)
-#define ARG11 STACKSIZE + 40(%rsp)
-#define ARG12 STACKSIZE + 48(%rsp)
-#define ARG13 STACKSIZE + 56(%rsp)
-#define ARG14 STACKSIZE + 64(%rsp)
-#define ARG15 STACKSIZE + 72(%rsp)
-#define ARG16 STACKSIZE + 80(%rsp)
-#define ARG17 STACKSIZE + 88(%rsp)
-#define ARG18 STACKSIZE + 96(%rsp)
-
-#elif defined(OS_WINDOWS)
-
-#define STACKSIZE 256
-#define ARG1  %rcx
-#define ARG2  %rdx
-#define ARG3  %r8
-#define ARG4  %r9
-#define ARG5  STACKSIZE + 40(%rsp)
-#define ARG6  STACKSIZE + 48(%rsp)
-#define ARG7  STACKSIZE + 56(%rsp)
-#define ARG8  STACKSIZE + 64(%rsp)
-#define ARG9  STACKSIZE + 72(%rsp)
-#define ARG10 STACKSIZE + 80(%rsp)
-#define ARG11 STACKSIZE + 88(%rsp)
-#define ARG12 STACKSIZE + 96(%rsp)
-#define ARG13 STACKSIZE + 104(%rsp)
-#define ARG14 STACKSIZE + 112(%rsp)
-#define ARG15 STACKSIZE + 120(%rsp)
-#define ARG16 STACKSIZE + 128(%rsp)
-#define ARG17 STACKSIZE + 136(%rsp)
-#define ARG18 STACKSIZE + 144(%rsp)
-
-#else
-
-#error wrong OS
-
-#endif
-
-
-
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.text
-#elif defined(OS_MAC)
-	.section	__TEXT,__text,regular,pure_instructions
-#endif
-
-
-
-// common inner routine with file scope
-//
-// input arguments:
-// r10d   <- k
-// r11   <- A
-// r12   <- B
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-//
-// output arguments:
-// r10d  <- 0
-// r11   <- A+4*k*sizeof(double)
-// r12   <- B+4*k*sizeof(double)
-// ymm0  <- [d00 d11 d22 d33]
-// ymm1  <- [d01 d10 d23 d32]
-// ymm2  <- [d03 d12 d21 d30]
-// ymm3  <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm12 <- dirty
-// ymm13 <- dirty
-// ymm14 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=2
-	.macro INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_kernel_dgemm_add_nt_4x4_lib4, @function
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_kernel_dgemm_add_nt_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_kernel_dgemm_add_nt_4x4_lib4; .scl 2; .type 32; .endef
-inner_kernel_dgemm_add_nt_4x4_lib4:
-#endif
-#endif
-	
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// prefetch
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmovapd 0(%r12), %ymm12 // B[0]
-
-	cmpl	$4, %r10d
-	jle		0f // consider clean-up loop
-
-	// main loop
-	.p2align 3
-1: // main loop
-	
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 0(%r11), %ymm8 // A0[0]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	cmpl	$4, %r10d
-	jg		1b // main loop 
-
-
-0: // consider clean4-up
-	
-	cmpl	$3, %r10d
-	jle		4f // clean1
-
-	// unroll 0
-	vmovapd 32(%r12), %ymm13 // B[4]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 32(%r11), %ymm10 // A0[4]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	subl	$4, %r10d
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 1
-	vmovapd 64(%r12), %ymm12 // B[8]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 64(%r11), %ymm8 // A0[8]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	// unroll 2
-	vmovapd 96(%r12), %ymm13 // B[12]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm12
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	vmovapd 96(%r11), %ymm10 // A0[12]
-
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	addq	$128, %r12
-
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-	addq	$128, %r11
-
-
-	// unroll 3
-//	vmovapd 0(%r12), %ymm12 // B[0]
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm13
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-//	vmovapd 0(%r11), %ymm8 // A0[0]
-
-	vmulpd	%ymm10, %ymm13, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	vshufpd $0x5, %ymm13, %ymm13, %ymm14
-
-	vmulpd	%ymm10, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-
-//	cmpl	$3, %r10d
-	jmp		2f // return
-
-
-4: // consider clean1-up loop
-
-	cmpl	$0, %r10d
-	jle		2f // return
-
-	// clean-up loop
-3: // clean up loop
-	
-	vmovapd 0(%r12), %ymm12 // B[0]
-	vmovapd 0(%r11), %ymm8 // A0[0]
-	vmulpd	%ymm8, %ymm12, %ymm15
-	vaddpd	%ymm0, %ymm15, %ymm0
-	addq	$32, %r11
-
-	vshufpd $0x5, %ymm12, %ymm12, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm1, %ymm15, %ymm1
-	addq	$32, %r12
-
-	vperm2f128 $0x1, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm3, %ymm15, %ymm3
-	subl	$1, %r10d
-
-	vshufpd $0x5, %ymm14, %ymm14, %ymm14
-	vmulpd	%ymm8, %ymm14, %ymm15
-	vaddpd	%ymm2, %ymm15, %ymm2
-
-	cmpl	$0, %r10d
-	jg		3b // clean up loop 
-
-
-2: // return
-
-#if MACRO_LEVEL>=2
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_kernel_dgemm_add_nt_4x4_lib4, .-inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-// common inner routine with file scope
-//
-// blend for generic alpha and beta
-//
-// input arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-//
-// output arguments:
-// r10   <- alpha
-// r11   <- beta
-// r12   <- C
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-// ymm8  <- dirty
-// ymm9  <- dirty
-// ymm10 <- dirty
-// ymm11 <- dirty
-// ymm15 <- dirty
-
-#if MACRO_LEVEL>=1
-	.macro INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_blend_scale_ab_4x4_lib4, @function
-inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_blend_scale_ab_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_blend_scale_ab_4x4_lib4; .scl 2; .type 32; .endef
-inner_blend_scale_ab_4x4_lib4:
-#endif
-#endif
-	
-	// alpha
-	vbroadcastsd	0(%r10), %ymm15
-
-	vblendpd	$0xa, %ymm1, %ymm0, %ymm8
-	vblendpd	$0x5, %ymm1, %ymm0, %ymm9
-	vblendpd	$0xa, %ymm3, %ymm2, %ymm10
-	vblendpd	$0x5, %ymm3, %ymm2, %ymm11
-
-	vblendpd	$0xc, %ymm10, %ymm8, %ymm0
-	vblendpd	$0x3, %ymm10, %ymm8, %ymm2
-	vblendpd	$0xc, %ymm11, %ymm9, %ymm1
-	vblendpd	$0x3, %ymm11, %ymm9, %ymm3
-
-	vmulpd		%ymm0, %ymm15, %ymm0
-	vmulpd		%ymm1, %ymm15, %ymm1
-	vmulpd		%ymm2, %ymm15, %ymm2
-	vmulpd		%ymm3, %ymm15, %ymm3
-
-	// beta
-	vbroadcastsd	0(%r11), %ymm14
-
-	vxorpd		%ymm15, %ymm15, %ymm15 // 0.0
-
-	vucomisd	%xmm15, %xmm14 // beta==0.0 ?
-	je			0f // end
-
-	vmovupd		0(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm0, %ymm15, %ymm0
-	vmovupd		32(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm1, %ymm15, %ymm1
-	vmovupd		64(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm2, %ymm15, %ymm2
-	vmovupd		96(%r12), %ymm15
-	vmulpd		%ymm15, %ymm14, %ymm15
-	vaddpd		%ymm3, %ymm15, %ymm3
-
-0:
-
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_blend_scale_ab_4x4_lib4, .-inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
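Taken together with the inner kernel above, this blend/scale stage implements the usual D = alpha*A*B^T + beta*C update for the 4x4 block: the blends first restore the accumulator columns from the rotated order the multiply loop produces, alpha is applied, and C is only read (and scaled by beta) when beta is nonzero. The net effect as a plain-C sketch over a 4x4 block in the panel layout; `blend_scale_ab_4x4_ref` is an illustrative name:

// Net effect of the blend+scale stage: acc = alpha*acc + beta*C, where C is a
// 4x4 panel (element (i, j) at C[4*j + i]) and is skipped entirely if beta == 0.
static void blend_scale_ab_4x4_ref(const double *alpha, const double *beta,
		const double *C, double acc[4][4])
	{
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++)
			acc[i][j] *= alpha[0];
	if (beta[0] != 0.0)
		for (int i = 0; i < 4; i++)
			for (int j = 0; j < 4; j++)
				acc[i][j] += beta[0] * C[4*j + i];
	}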
-
-
-
-
-
-// common inner routine with file scope
-//
-// store n
-//
-// input arguments:
-// r10  <- D
-// ymm0 <- [d00 d11 d22 d33]
-// ymm1 <- [d01 d10 d23 d32]
-// ymm2 <- [d03 d12 d21 d30]
-// ymm3 <- [d02 d13 d20 d31]
-//
-// output arguments:
-// r10  <- D
-// ymm0 <- [d00 d10 d20 d30]
-// ymm1 <- [d01 d11 d21 d31]
-// ymm2 <- [d02 d12 d22 d32]
-// ymm3 <- [d03 d13 d23 d33]
-
-#if MACRO_LEVEL>=1
-	.macro INNER_STORE_4X4_LIB4
-#else
-	.p2align 4,,15
-#if defined(OS_LINUX)
-	.type inner_store_4x4_lib4, @function
-inner_store_4x4_lib4:
-#elif defined(OS_MAC)
-_inner_store_4x4_lib4:
-#elif defined(OS_WINDOWS)
-	.def inner_store_4x4_lib4; .scl 2; .type 32; .endef
-inner_store_4x4_lib4:
-#endif
-#endif
-	
-	vmovupd %ymm0,  0(%r10)
-	vmovupd %ymm1, 32(%r10)
-	vmovupd %ymm2, 64(%r10)
-	vmovupd %ymm3, 96(%r10)
-	
-#if MACRO_LEVEL>=1
-	.endm
-#else
-	ret
-
-#if defined(OS_LINUX)
-	.size	inner_store_4x4_lib4, .-inner_store_4x4_lib4
-#endif
-#endif
-
-
-
-
-
-//                               1      2              3          4          5             6          7
-// void kernel_dgemm_nt_4x4_lib4(int k, double *alpha, double *A, double *B, double *beta, double *C, double *D);
-
-	.p2align 4,,15
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	.globl kernel_dgemm_nt_4x4_lib4_test
-#if defined(OS_LINUX)
-	.type kernel_dgemm_nt_4x4_lib4_test, @function
-#else // OS_WINDOWS
-	.def kernel_dgemm_nt_4x4_lib4_test; .scl 2; .type 32; .endef
-#endif
-kernel_dgemm_nt_4x4_lib4_test:
-#elif defined(OS_MAC)
-	.globl _kernel_dgemm_nt_4x4_lib4_test
-_kernel_dgemm_nt_4x4_lib4_test:
-#endif
-
-	// prologue
-
-	subq	$STACKSIZE, %rsp
-	movq	%rbx,   (%rsp)
-	movq	%rbp,  8(%rsp)
-	movq	%r12, 16(%rsp)
-	movq	%r13, 24(%rsp)
-	movq	%r14, 32(%rsp)
-	movq	%r15, 40(%rsp)
-#if defined(OS_WINDOWS)
-	movq	%rdi, 48(%rsp)
-	movq	%rsi, 56(%rsp)
-	vmovups	%xmm6, 64(%rsp)
-	vmovups	%xmm7, 80(%rsp)
-	vmovups	%xmm8, 96(%rsp)
-	vmovups	%xmm9, 112(%rsp)
-	vmovups	%xmm10, 128(%rsp)
-	vmovups	%xmm11, 144(%rsp)
-	vmovups	%xmm12, 160(%rsp)
-	vmovups	%xmm13, 176(%rsp)
-	vmovups	%xmm14, 192(%rsp)
-	vmovups	%xmm15, 208(%rsp)
-#endif
-
-	vzeroupper
-
-
-	// zero accumulation registers
-
-	vxorpd	%ymm0, %ymm0, %ymm0
-	vmovapd	%ymm0, %ymm1
-	vmovapd	%ymm0, %ymm2
-	vmovapd	%ymm0, %ymm3
-
-
-	// call inner dgemm kernel nt
-
-	movq	ARG1, %r10 // k
-	movq	ARG3, %r11 // A
-	movq	ARG4, %r12 // B
-
-#if MACRO_LEVEL>=2
-	INNER_KERNEL_DGEMM_ADD_NT_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_kernel_dgemm_add_nt_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_kernel_dgemm_add_nt_4x4_lib4
-#endif
-#endif
-
-
-	// call inner blend scale
-
-	movq	ARG2, %r10 // alpha
-	movq	ARG5, %r11 // beta
-	movq	ARG6, %r12 // C
-
-#if MACRO_LEVEL>=1
-	INNER_BLEND_SCALE_AB_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_blend_scale_ab_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_blend_scale_ab_4x4_lib4
-#endif
-#endif
-
-
-	// store n
-
-	movq	ARG7, %r10 // D
-
-#if MACRO_LEVEL>=1
-	INNER_STORE_4X4_LIB4
-#else
-#if defined(OS_LINUX) | defined(OS_WINDOWS)
-	call inner_store_4x4_lib4
-#elif defined(OS_MAC)
-	callq _inner_store_4x4_lib4
-#endif
-#endif
-
-//	movq	ARG6, %rax
-//	movq	STACKSIZE + 48(%rsp), %rax
-
-
-	// epilogue
-
-	vzeroupper
-
-	movq	  (%rsp), %rbx 
-	movq	 8(%rsp), %rbp
-	movq	16(%rsp), %r12 
-	movq	24(%rsp), %r13 
-	movq	32(%rsp), %r14 
-	movq	40(%rsp), %r15 
-#if defined(OS_WINDOWS)
-	movq	48(%rsp), %rdi
-	movq	56(%rsp), %rsi
-	vmovups	64(%rsp), %xmm6
-	vmovups	80(%rsp), %xmm7
-	vmovups	96(%rsp), %xmm8
-	vmovups	112(%rsp), %xmm9
-	vmovups	128(%rsp), %xmm10
-	vmovups	144(%rsp), %xmm11
-	vmovups	160(%rsp), %xmm12
-	vmovups	176(%rsp), %xmm13
-	vmovups	192(%rsp), %xmm14
-	vmovups	208(%rsp), %xmm15
-#endif
-	addq	$STACKSIZE, %rsp
-
-
-	ret
-
-#if defined(OS_LINUX)
-	.size	kernel_dgemm_nt_4x4_lib4_test, .-kernel_dgemm_nt_4x4_lib4_test
-#endif
-
-
diff --git a/third_party/blasfeo/test_problems/results/dummy.txt b/third_party/blasfeo/test_problems/results/dummy.txt
deleted file mode 100644
index e69de29..0000000
--- a/third_party/blasfeo/test_problems/results/dummy.txt
+++ /dev/null
diff --git a/third_party/blasfeo/test_problems/test_assembly.c b/third_party/blasfeo/test_problems/test_assembly.c
deleted file mode 100644
index 3a07a13..0000000
--- a/third_party/blasfeo/test_problems/test_assembly.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux_ext_dep.h"
-#include "../include/blasfeo_v_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-int main()
-	{
-
-	printf("\ntest assembly\n");
-
-	int ii;
-
-	int n = 12;
-
-	double *A; d_zeros(&A, n, n);
-	for(ii=0; ii<n*n; ii++) A[ii] = ii;
-	d_print_mat(n, n, A, n);
-
-	double *B; d_zeros(&B, n, n);
-	for(ii=0; ii<n; ii++) B[ii*(n+1)] = 1.0;
-	d_print_mat(n, n, B, n);
-
-	struct d_strmat sA;
-	d_allocate_strmat(n, n, &sA);
-	d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-	d_print_strmat(n, n, &sA, 0, 0);
-
-	struct d_strmat sB;
-	d_allocate_strmat(n, n, &sB);
-	d_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	struct d_strmat sD;
-	d_allocate_strmat(n, n, &sD);
-
-	struct d_strmat sC;
-	d_allocate_strmat(n, n, &sC);
-
-	double alpha = 1.0;
-	double beta = 0.0;
-	int ret = kernel_dgemm_nt_4x4_lib4_test(n, &alpha, sB.pA, sA.pA, &beta, sB.pA, sD.pA);
-	d_print_strmat(n, n, &sD, 0, 0);
-//	printf("\n%ld %ld\n", (long long) n, ret);
-//	printf("\n%ld %ld\n", (long long) &alpha, ret);
-//	printf("\n%ld %ld\n", (long long) sA.pA, ret);
-//	printf("\n%ld %ld\n", (long long) sB.pA, ret);
-//	printf("\n%ld %ld\n", (long long) &beta, ret);
-//	printf("\n%ld %ld\n", (long long) sC.pA, ret);
-//	printf("\n%ld %ld\n", (long long) sD.pA, ret);
-
-	return 0;
-
-	}
diff --git a/third_party/blasfeo/test_problems/test_blas_d.c b/third_party/blasfeo/test_problems/test_blas_d.c
deleted file mode 100644
index 1e71494..0000000
--- a/third_party/blasfeo/test_problems/test_blas_d.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-//#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-//#include <xmmintrin.h> // needed to flush to zero sub-normals with _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON); in the main()
-//#endif
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_d_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_v_aux_ext_dep.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-#ifndef D_PS
-#define D_PS 1
-#endif
-#ifndef D_NC
-#define D_NC 1
-#endif
-
-
-
-#if defined(REF_BLAS_OPENBLAS)
-void openblas_set_num_threads(int num_threads);
-#endif
-#if defined(REF_BLAS_BLIS)
-void omp_set_num_threads(int num_threads);
-#endif
-#if defined(REF_BLAS_MKL)
-#include "mkl.h"
-#endif
-
-
-
-#include "cpu_freq.h"
-
-
-
-int main()
-	{
-		
-#if defined(REF_BLAS_OPENBLAS)
-	openblas_set_num_threads(1);
-#endif
-#if defined(REF_BLAS_BLIS)
-	omp_set_num_threads(1);
-#endif
-#if defined(REF_BLAS_MKL)
-	mkl_set_num_threads(1);
-#endif
-
-//#if defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-//	_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); // flush to zero subnormals !!! works only with one thread !!!
-//#endif
-
-	printf("\n");
-	printf("\n");
-	printf("\n");
-
-	printf("BLAS performance test - double precision\n");
-	printf("\n");
-
-	// maximum frequency of the processor
-	const float GHz_max = GHZ_MAX;
-	printf("Frequency used to compute theoretical peak: %5.1f GHz (edit test_param.h to modify this value).\n", GHz_max);
-	printf("\n");
-
-	// maximum flops per cycle, double precision
-#if defined(TARGET_X64_INTEL_HASWELL)
-	const float flops_max = 16;
-	printf("Testing BLAS version for AVX2 and FMA instruction sets, 64 bit (optimized for Intel Haswell): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	const float flops_max = 8;
-	printf("Testing BLAS version for AVX instruction set, 64 bit (optimized for Intel Sandy Bridge): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_INTEL_CORE)
-	const float flops_max = 4;
-	printf("Testing BLAS version for SSE3 instruction set, 64 bit (optimized for Intel Core): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_AMD_BULLDOZER)
-	const float flops_max = 8;
-	printf("Testing BLAS version for SSE3 and FMA instruction set, 64 bit (optimized for AMD Bulldozer): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	const float flops_max = 4;
-	printf("Testing BLAS version for NEONv2 instruction set, 64 bit (optimized for ARM Cortex A57): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-	const float flops_max = 2;
-	printf("Testing BLAS version for VFPv4 instruction set, 32 bit (optimized for ARM Cortex A15): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_GENERIC)
-	const float flops_max = 2;
-	printf("Testing BLAS version for generic scalar instruction set: theoretical peak %5.1f Gflops ???\n", flops_max*GHz_max);
-#endif
-	
-//	FILE *f;
-//	f = fopen("./test_problems/results/test_blas.m", "w"); // a
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-//	fprintf(f, "C = 'd_x64_intel_haswell';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-//	fprintf(f, "C = 'd_x64_intel_sandybridge';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_INTEL_CORE)
-//	fprintf(f, "C = 'd_x64_intel_core';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_AMD_BULLDOZER)
-//	fprintf(f, "C = 'd_x64_amd_bulldozer';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-//	fprintf(f, "C = 'd_armv8a_arm_cortex_a57';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-//	fprintf(f, "C = 'd_armv7a_arm_cortex_a15';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_GENERIC)
-//	fprintf(f, "C = 'd_generic';\n");
-//	fprintf(f, "\n");
-#endif
-
-//	fprintf(f, "A = [%f %f];\n", GHz_max, flops_max);
-//	fprintf(f, "\n");
-
-//	fprintf(f, "B = [\n");
-	
-
-
-	int i, j, rep, ll;
-	
-	const int bsd = D_PS;
-	const int ncd = D_NC;
-
-/*	int info = 0;*/
-	
-	printf("\nn\t  dgemm_blasfeo\t  dgemm_blas\n");
-	printf("\nn\t Gflops\t    %%\t Gflops\n\n");
-	
-#if 1
-	int nn[] = {4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316, 320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380, 384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444, 448, 452, 456, 460, 500, 550, 600, 650, 700};
-	int nnrep[] = {10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 400, 400, 400, 400, 400, 200, 200, 200, 200, 200, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 20, 20, 20, 20, 20, 20, 20, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 4, 4, 4, 4, 4};
-	
-//	for(ll=0; ll<24; ll++)
-	for(ll=0; ll<75; ll++)
-//	for(ll=0; ll<115; ll++)
-//	for(ll=0; ll<120; ll++)
-
-		{
-
-		int n = nn[ll];
-		int nrep = nnrep[ll];
-//		int n = ll+1;
-//		int nrep = nnrep[0];
-//		n = n<12 ? 12 : n;
-//		n = n<8 ? 8 : n;
-
-#else
-	int nn[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
-	
-	for(ll=0; ll<24; ll++)
-
-		{
-
-		int n = nn[ll];
-		int nrep = 40000; //nnrep[ll];
-#endif
-
-		double *A; d_zeros(&A, n, n);
-		double *B; d_zeros(&B, n, n);
-		double *C; d_zeros(&C, n, n);
-		double *M; d_zeros(&M, n, n);
-
-		char c_n = 'n';
-		char c_l = 'l';
-		char c_r = 'r';
-		char c_t = 't';
-		char c_u = 'u';
-		int i_1 = 1;
-		int i_t;
-		double d_1 = 1;
-		double d_0 = 0;
-	
-		for(i=0; i<n*n; i++)
-			A[i] = i;
-	
-		for(i=0; i<n; i++)
-			B[i*(n+1)] = 1;
-	
-		for(i=0; i<n*n; i++)
-			M[i] = 1;
-	
-		int n2 = n*n;
-		double *B2; d_zeros(&B2, n, n);
-		for(i=0; i<n*n; i++)
-			B2[i] = 1e-15;
-		for(i=0; i<n; i++)
-			B2[i*(n+1)] = 1;
-
-		int pnd = ((n+bsd-1)/bsd)*bsd;	
-		int cnd = ((n+ncd-1)/ncd)*ncd;	
-		int cnd2 = 2*((n+ncd-1)/ncd)*ncd;	
-
-		double *x; d_zeros_align(&x, pnd, 1);
-		double *y; d_zeros_align(&y, pnd, 1);
-		double *x2; d_zeros_align(&x2, pnd, 1);
-		double *y2; d_zeros_align(&y2, pnd, 1);
-		double *diag; d_zeros_align(&diag, pnd, 1);
-		int *ipiv; int_zeros(&ipiv, n, 1);
-
-		for(i=0; i<pnd; i++) x[i] = 1;
-		for(i=0; i<pnd; i++) x2[i] = 1;
-
-		// matrix struct
-#if 0
-		struct d_strmat sA; d_allocate_strmat(n+4, n+4, &sA);
-		struct d_strmat sB; d_allocate_strmat(n+4, n+4, &sB);
-		struct d_strmat sC; d_allocate_strmat(n+4, n+4, &sC);
-		struct d_strmat sD; d_allocate_strmat(n+4, n+4, &sD);
-		struct d_strmat sE; d_allocate_strmat(n+4, n+4, &sE);
-#else
-		struct d_strmat sA; d_allocate_strmat(n, n, &sA);
-		struct d_strmat sB; d_allocate_strmat(n, n, &sB);
-		struct d_strmat sB2; d_allocate_strmat(n, n, &sB2);
-		struct d_strmat sB3; d_allocate_strmat(n, n, &sB3);
-		struct d_strmat sC; d_allocate_strmat(n, n, &sC);
-		struct d_strmat sD; d_allocate_strmat(n, n, &sD);
-		struct d_strmat sE; d_allocate_strmat(n, n, &sE);
-#endif
-		struct d_strvec sx; d_allocate_strvec(n, &sx);
-		struct d_strvec sy; d_allocate_strvec(n, &sy);
-		struct d_strvec sz; d_allocate_strvec(n, &sz);
-
-		d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-		d_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-		d_cvt_mat2strmat(n, n, B2, n, &sB2, 0, 0);
-		d_cvt_vec2strvec(n, x, &sx, 0);
-		int ii;
-		for(ii=0; ii<n; ii++)
-			{
-			DMATEL_LIBSTR(&sB3, ii, ii) = 1.0;
-//			DMATEL_LIBSTR(&sB3, n-1, ii) = 1.0;
-			DMATEL_LIBSTR(&sB3, ii, n-1) = 1.0;
-			DVECEL_LIBSTR(&sx, ii) = 1.0;
-			}
-//		d_print_strmat(n, n, &sB3, 0, 0);
-//		if(n==20) return;
-
-		int qr_work_size = 0;//dgeqrf_work_size_libstr(n, n);
-		void *qr_work;
-		v_zeros_align(&qr_work, qr_work_size);
-
-		int lq_work_size = 0;//dgelqf_work_size_libstr(n, n);
-		void *lq_work;
-		v_zeros_align(&lq_work, lq_work_size);
-
-		// create matrix to pivot all the time
-//		dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-
-		double *dummy;
-
-		int info;
-
-		/* timing */
-		struct timeval tvm1, tv0, tv1, tv2, tv3, tv4, tv5, tv6, tv7, tv8, tv9, tv10, tv11, tv12, tv13, tv14, tv15, tv16;
-
-		/* warm up */
-		for(rep=0; rep<nrep; rep++)
-			{
-			dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sC, 0, 0);
-			}
-
-		double alpha = 1.0;
-		double beta = 0.0;
-
-		gettimeofday(&tv0, NULL); // start
-
-		for(rep=0; rep<nrep; rep++)
-			{
-
-//			dgemm_nt_lib(n, n, n, 1.0, pA, cnd, pB, cnd, 0.0, pC, cnd, pC, cnd);
-//			dgemm_nn_lib(n, n, n, 1.0, pA, cnd, pB, cnd, 0.0, pC, cnd, pC, cnd);
-//			dsyrk_nt_l_lib(n, n, n, 1.0, pA, cnd, pB, cnd, 1.0, pC, cnd, pD, cnd);
-//			dtrmm_nt_ru_lib(n, n, pA, cnd, pB, cnd, 0, pC, cnd, pD, cnd);
-//			dpotrf_nt_l_lib(n, n, pB, cnd, pD, cnd, diag);
-//			dsyrk_dpotrf_nt_l_lib(n, n, n, pA, cnd, pA, cnd, 1, pB, cnd, pD, cnd, diag);
-//			dsyrk_nt_l_lib(n, n, n, pA, cnd, pA, cnd, 1, pB, cnd, pD, cnd);
-//			dpotrf_nt_l_lib(n, n, pD, cnd, pD, cnd, diag);
-//			dgetrf_nn_nopivot_lib(n, n, pB, cnd, pB, cnd, diag);
-//			dgetrf_nn_lib(n, n, pB, cnd, pB, cnd, diag, ipiv);
-//			dtrsm_nn_ll_one_lib(n, n, pD, cnd, pB, cnd, pB, cnd);
-//			dtrsm_nn_lu_inv_lib(n, n, pD, cnd, diag, pB, cnd, pB, cnd);
-			}
-	
-		gettimeofday(&tv1, NULL); // stop
-
-		for(rep=0; rep<nrep; rep++)
-			{
-//			kernel_dgemm_nt_12x4_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_dgemm_nt_8x8_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_dsyrk_nt_l_8x8_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_dgemm_nt_8x4_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_dgemm_nt_4x8_lib4(n, &alpha, sA.pA, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-//			kernel_dgemm_nt_4x4_lib4(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//			kernel_dger4_12_sub_lib4(n, sA.pA, sA.cn, sB.pA, sD.pA, sD.cn);
-//			kernel_dger4_sub_12r_lib4(n, sA.pA, sA.cn, sB.pA, sD.pA, sD.cn);
-//			kernel_dger4_sub_8r_lib4(n, sA.pA, sA.cn, sB.pA, sD.pA, sD.cn);
-//			kernel_dger12_add_4r_lib4(n, sA.pA, sB.pA, sB.cn, sD.pA);
-//			kernel_dger8_add_4r_lib4(n, sA.pA, sB.pA, sB.cn, sD.pA);
-//			kernel_dger4_sub_4r_lib4(n, sA.pA, sB.pA, sD.pA);
-//			kernel_dger2_sub_4r_lib4(n, sA.pA, sB.pA, sD.pA);
-//			kernel_dger4_sub_8c_lib4(n, sA.pA, sA.cn, sB.pA, sD.pA, sD.cn);
-//			kernel_dger4_sub_4c_lib4(n, sA.pA, sA.cn, sB.pA, sD.pA, sD.cn);
-//			kernel_dgemm_nn_4x12_lib4(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-//			kernel_dgemm_nn_4x8_lib4(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-//			kernel_dgemm_nn_4x4_lib4(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-
-			dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			dgemm_nn_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			dsyrk_ln_libstr(n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			dsyrk_ln_mn_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			dpotrf_l_mn_libstr(n, n, &sB, 0, 0, &sB, 0, 0);
-//			dpotrf_l_libstr(n, &sB, 0, 0, &sB, 0, 0);
-//			dgetrf_nopivot_libstr(n, n, &sB, 0, 0, &sB, 0, 0);
-//			dgetrf_libstr(n, n, &sB, 0, 0, &sB, 0, 0, ipiv);
-//			dgeqrf_libstr(n, n, &sC, 0, 0, &sD, 0, 0, qr_work);
-//			dcolin_libstr(n, &sx, 0, &sB3, 0, n-1);
-//			dgelqf_libstr(n, n, &sB3, 0, 0, &sB3, 0, 0, lq_work);
-//			dtrmm_rlnn_libstr(n, n, 1.0, &sA, 0, 0, &sD, 0, 0, &sD, 0, 0); //
-//			dtrmm_rutn_libstr(n, n, 1.0, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-//			dtrsm_llnu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			dtrsm_lunn_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			dtrsm_rltn_libstr(n, n, 1.0, &sB2, 0, 0, &sD, 0, 0, &sD, 0, 0); //
-//			dtrsm_rltu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			dtrsm_rutn_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			dgemv_n_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			dgemv_t_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			dsymv_l_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			dgemv_nt_libstr(n, n, 1.0, 1.0, &sA, 0, 0, &sx, 0, &sx, 0, 0.0, 0.0, &sy, 0, &sy, 0, &sz, 0, &sz, 0);
-			}
-
-//		d_print_strmat(n, n, &sD, 0, 0);
-
-		gettimeofday(&tv2, NULL); // stop
-
-		for(rep=0; rep<nrep; rep++)
-			{
-#if defined(REF_BLAS_OPENBLAS) || defined(REF_BLAS_NETLIB) || defined(REF_BLAS_MKL)
-//			dgemm_(&c_n, &c_t, &n, &n, &n, &d_1, A, &n, M, &n, &d_0, C, &n);
-//			dpotrf_(&c_l, &n, B2, &n, &info);
-//			dgemm_(&c_n, &c_n, &n, &n, &n, &d_1, A, &n, M, &n, &d_0, C, &n);
-//			dsyrk_(&c_l, &c_n, &n, &n, &d_1, A, &n, &d_0, C, &n);
-//			dtrmm_(&c_r, &c_u, &c_t, &c_n, &n, &n, &d_1, A, &n, C, &n);
-//			dgetrf_(&n, &n, B2, &n, ipiv, &info);
-//			dtrsm_(&c_l, &c_l, &c_n, &c_u, &n, &n, &d_1, B2, &n, B, &n);
-//			dtrsm_(&c_l, &c_u, &c_n, &c_n, &n, &n, &d_1, B2, &n, B, &n);
-//			dtrtri_(&c_l, &c_n, &n, B2, &n, &info);
-//			dlauum_(&c_l, &n, B, &n, &info);
-//			dgemv_(&c_n, &n, &n, &d_1, A, &n, x, &i_1, &d_0, y, &i_1);
-//			dgemv_(&c_t, &n, &n, &d_1, A, &n, x2, &i_1, &d_0, y2, &i_1);
-//			dtrmv_(&c_l, &c_n, &c_n, &n, B, &n, x, &i_1);
-//			dtrsv_(&c_l, &c_n, &c_n, &n, B, &n, x, &i_1);
-//			dsymv_(&c_l, &n, &d_1, A, &n, x, &i_1, &d_0, y, &i_1);
-
-//			for(i=0; i<n; i++)
-//				{
-//				i_t = n-i;
-//				dcopy_(&i_t, &B[i*(n+1)], &i_1, &C[i*(n+1)], &i_1);
-//				}
-//			dsyrk_(&c_l, &c_n, &n, &n, &d_1, A, &n, &d_1, C, &n);
-//			dpotrf_(&c_l, &n, C, &n, &info);
-
-#endif
-
-#if defined(REF_BLAS_BLIS)
-//			dgemm_(&c_n, &c_t, &n77, &n77, &n77, &d_1, A, &n77, B, &n77, &d_0, C, &n77);
-//			dgemm_(&c_n, &c_n, &n77, &n77, &n77, &d_1, A, &n77, B, &n77, &d_0, C, &n77);
-//			dsyrk_(&c_l, &c_n, &n77, &n77, &d_1, A, &n77, &d_0, C, &n77);
-//			dtrmm_(&c_r, &c_u, &c_t, &c_n, &n77, &n77, &d_1, A, &n77, C, &n77);
-//			dpotrf_(&c_l, &n77, B, &n77, &info);
-//			dtrtri_(&c_l, &c_n, &n77, B, &n77, &info);
-//			dlauum_(&c_l, &n77, B, &n77, &info);
-#endif
-			}
-
-		gettimeofday(&tv3, NULL); // stop
-
-		float Gflops_max = flops_max * GHz_max;
-
-//		float flop_operation = 4*16.0*2*n; // kernel 12x4
-//		float flop_operation = 3*16.0*2*n; // kernel 12x4
-//		float flop_operation = 2*16.0*2*n; // kernel 8x4
-//		float flop_operation = 1*16.0*2*n; // kernel 4x4
-//		float flop_operation = 0.5*16.0*2*n; // kernel 2x4
-
-		float flop_operation = 2.0*n*n*n; // dgemm
-//		float flop_operation = 1.0*n*n*n; // dsyrk dtrmm dtrsm
-//		float flop_operation = 1.0/3.0*n*n*n; // dpotrf dtrtri
-//		float flop_operation = 2.0/3.0*n*n*n; // dgetrf
-//		float flop_operation = 4.0/3.0*n*n*n; // dgeqrf
-//		float flop_operation = 2.0*n*n; // dgemv dsymv
-//		float flop_operation = 1.0*n*n; // dtrmv dtrsv
-//		float flop_operation = 4.0*n*n; // dgemv_nt
-
-//		float flop_operation = 4.0/3.0*n*n*n; // dsyrk+dpotrf
-
-		float time_hpmpc    = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-		float time_blasfeo  = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-		float time_blas     = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-		float Gflops_hpmpc    = 1e-9*flop_operation/time_hpmpc;
-		float Gflops_blasfeo  = 1e-9*flop_operation/time_blasfeo;
-		float Gflops_blas     = 1e-9*flop_operation/time_blas;
-
-
-//		printf("%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_hpmpc, 100.0*Gflops_hpmpc/Gflops_max, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-//		fprintf(f, "%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_hpmpc, 100.0*Gflops_hpmpc/Gflops_max, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-		printf("%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-//		fprintf(f, "%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-
-
-		d_free(A);
-		d_free(B);
-		d_free(B2);
-		d_free(M);
-		d_free_align(x);
-		d_free_align(y);
-		d_free_align(x2);
-		d_free_align(y2);
-		int_free(ipiv);
-		free(qr_work);
-		free(lq_work);
-		
-		d_free_strmat(&sA);
-		d_free_strmat(&sB);
-		d_free_strmat(&sB2);
-		d_free_strmat(&sB3);
-		d_free_strmat(&sC);
-		d_free_strmat(&sD);
-		d_free_strmat(&sE);
-		d_free_strvec(&sx);
-		d_free_strvec(&sy);
-		d_free_strvec(&sz);
-
-		}
-
-	printf("\n");
-
-//	fprintf(f, "];\n");
-//	fclose(f);
-
-	return 0;
-	
-	}
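
For reference, the deleted benchmark above samples gettimeofday() around each repetition loop, averages over nrep calls, and divides the dgemm flop count 2*n^3 by the average per-call time to get Gflop/s. A minimal standalone sketch of that arithmetic is below; the naive triple-loop gemm is only an illustrative stand-in for the timed BLASFEO routine, and the problem size and repetition count are arbitrary assumptions, not values from the deleted file.

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

// naive column-major C = A * B^T, a stand-in for the timed BLASFEO call
static void naive_dgemm_nt(int n, const double *A, const double *B, double *C)
	{
	int i, j, k;
	for(j=0; j<n; j++)
		for(i=0; i<n; i++)
			{
			double acc = 0.0;
			for(k=0; k<n; k++)
				acc += A[i+k*n]*B[j+k*n];
			C[i+j*n] = acc;
			}
	}

int main()
	{
	int rep;
	int n = 256;   // problem size (assumed for illustration)
	int nrep = 10; // repetitions to average over (assumed for illustration)

	double *A = calloc(n*n, sizeof(double));
	double *B = calloc(n*n, sizeof(double));
	double *C = calloc(n*n, sizeof(double));

	struct timeval tv0, tv1;

	gettimeofday(&tv0, NULL); // start
	for(rep=0; rep<nrep; rep++)
		naive_dgemm_nt(n, A, B, C);
	gettimeofday(&tv1, NULL); // stop

	// average seconds per call, then Gflops = 1e-9 * flops / time
	double time_per_call = (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0) + (tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
	double flop_operation = 2.0*n*n*n; // dgemm flop count
	printf("%d\t%7.2f Gflops\n", n, 1e-9*flop_operation/time_per_call);

	free(A);
	free(B);
	free(C);
	return 0;
	}
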
diff --git a/third_party/blasfeo/test_problems/test_blas_s.c b/third_party/blasfeo/test_problems/test_blas_s.c
deleted file mode 100644
index 3ea9f11..0000000
--- a/third_party/blasfeo/test_problems/test_blas_s.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_s_aux_ext_dep.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_blas.h"
-
-#ifndef S_PS
-#define S_PS 1
-#endif
-#ifndef S_NC
-#define S_NC 1
-#endif
-
-
-
-#if defined(REF_BLAS_OPENBLAS)
-void openblas_set_num_threads(int num_threads);
-#endif
-#if defined(REF_BLAS_BLIS)
-void omp_set_num_threads(int num_threads);
-#endif
-#if defined(REF_BLAS_MKL)
-#include "mkl.h"
-#endif
-
-
-
-#include "cpu_freq.h"
-
-
-
-int main()
-	{
-		
-#if defined(REF_BLAS_OPENBLAS)
-	openblas_set_num_threads(1);
-#endif
-#if defined(REF_BLAS_BLIS)
-	omp_set_num_threads(1);
-#endif
-#if defined(REF_BLAS_MKL)
-	mkl_set_num_threads(1);
-#endif
-
-	printf("\n");
-	printf("\n");
-	printf("\n");
-
-	printf("BLAS performance test - float precision\n");
-	printf("\n");
-
-	// maximum frequency of the processor
-	const float GHz_max = GHZ_MAX;
-	printf("Frequency used to compute theoretical peak: %5.1f GHz (edit test_param.h to modify this value).\n", GHz_max);
-	printf("\n");
-
-	// maximum flops per cycle, single precision
-	// maximum memops (sustained load->store of floats) per cycle, single precision
-#if defined(TARGET_X64_INTEL_HASWELL)
-	const float flops_max = 32; // 2x256 bit fma
-	const float memops_max = 8; // 2x256 bit load + 1x256 bit store
-	printf("Testing BLAS version for AVX2 and FMA instruction sets, 64 bit (optimized for Intel Haswell): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-	const float flops_max = 16; // 1x256 bit mul + 1x256 bit add
-	const float memops_max = 4; // 1x256 bit load + 1x128 bit store
-	printf("Testing BLAS version for AVX instruction set, 64 bit (optimized for Intel Sandy Bridge): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_INTEL_CORE)
-	const float flops_max = 8; // 1x128 bit mul + 1x128 bit add
-	const float memops_max = 4; // 1x128 bit load + 1x128 bit store;
-	printf("Testing BLAS version for SSE3 instruction set, 64 bit (optimized for Intel Core): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_X64_AMD_BULLDOZER)
-	const float flops_max = 16; // 2x128 bit fma
-	const float memops_max = 4; // 1x256 bit load + 1x128 bit store
-	printf("Testing BLAS version for SSE3 and FMA instruction set, 64 bit (optimized for AMD Bulldozer): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-	const float flops_max = 8; // 1x128 bit fma
-	const float memops_max = 4; // ???
-	printf("Testing BLAS version for NEONv2 instruction set, 64 bit (optimized for ARM Cortex A57): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-	const float flops_max = 8; // 1x128 bit fma
-	const float memops_max = 4; // ???
-	printf("Testing BLAS version for VFPv4 instruction set, 32 bit (optimized for ARM Cortex A15): theoretical peak %5.1f Gflops\n", flops_max*GHz_max);
-#elif defined(TARGET_GENERIC)
-	const float flops_max = 2; // 1x32 bit mul + 1x32 bit add ???
-	const float memops_max = 1; // ???
-	printf("Testing BLAS version for generic scalar instruction set: theoretical peak %5.1f Gflops ???\n", flops_max*GHz_max);
-#endif
-	
-//	FILE *f;
-//	f = fopen("./test_problems/results/test_blas.m", "w"); // a
-
-#if defined(TARGET_X64_INTEL_HASWELL)
-//	fprintf(f, "C = 's_x64_intel_haswell';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_INTEL_SANDY_BRIDGE)
-//	fprintf(f, "C = 's_x64_intel_sandybridge';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_INTEL_CORE)
-//	fprintf(f, "C = 's_x64_intel_core';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_X64_AMD_BULLDOZER)
-//	fprintf(f, "C = 's_x64_amd_bulldozer';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_ARMV8A_ARM_CORTEX_A57)
-//	fprintf(f, "C = 's_armv8a_arm_cortex_a57';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_ARMV7A_ARM_CORTEX_A15)
-//	fprintf(f, "C = 's_armv7a_arm_cortex_a15';\n");
-//	fprintf(f, "\n");
-#elif defined(TARGET_GENERIC)
-//	fprintf(f, "C = 's_generic';\n");
-//	fprintf(f, "\n");
-#endif
-
-//	fprintf(f, "A = [%f %f];\n", GHz_max, flops_max);
-//	fprintf(f, "\n");
-
-//	fprintf(f, "B = [\n");
-	
-
-
-	int i, j, rep, ll;
-	
-	const int bss = S_PS;
-	const int ncs = S_NC;
-
-/*	int info = 0;*/
-	
-	printf("\nn\t  sgemm_blasfeo\t  sgemm_blas\n");
-	printf("\nn\t Gflops\t    %%\t Gflops\t    %%\n\n");
-	
-#if 1
-	int nn[] = {4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316, 320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380, 384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444, 448, 452, 456, 460, 500, 550, 600, 650, 700};
-	int nnrep[] = {10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 400, 400, 400, 400, 400, 200, 200, 200, 200, 200, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 20, 20, 20, 20, 20, 20, 20, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 4, 4, 4, 4, 4};
-	
-//	for(ll=0; ll<24; ll++)
-	for(ll=0; ll<75; ll++)
-//	for(ll=0; ll<115; ll++)
-//	for(ll=0; ll<120; ll++)
-
-		{
-
-		int n = nn[ll];
-		int nrep = nnrep[ll];
-//		int n = ll+1;
-//		int nrep = nnrep[0];
-//		n = n<16 ? 16 : n;
-
-		int n2 = n*n;
-
-#else
-	int nn[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
-	
-	for(ll=0; ll<24; ll++)
-
-		{
-
-		int n = nn[ll];
-		int nrep = 40000; //nnrep[ll];
-#endif
-
-		float *A; s_zeros(&A, n, n);
-		float *B; s_zeros(&B, n, n);
-		float *C; s_zeros(&C, n, n);
-		float *M; s_zeros(&M, n, n);
-
-		char c_n = 'n';
-		char c_l = 'l';
-		char c_r = 'r';
-		char c_t = 't';
-		char c_u = 'u';
-		int i_1 = 1;
-		int i_t;
-		float d_1 = 1;
-		float d_0 = 0;
-	
-		for(i=0; i<n*n; i++)
-			A[i] = i;
-	
-		for(i=0; i<n; i++)
-			B[i*(n+1)] = 1;
-	
-		for(i=0; i<n*n; i++)
-			M[i] = 1;
-	
-		float *B2; s_zeros(&B2, n, n);
-		for(i=0; i<n*n; i++)
-			B2[i] = 1e-15;
-		for(i=0; i<n; i++)
-			B2[i*(n+1)] = 1;
-
-		float *x; s_zeros(&x, n, 1);
-		float *y; s_zeros(&y, n, 1);
-		float *x2; s_zeros(&x2, n, 1);
-		float *y2; s_zeros(&y2, n, 1);
-		float *diag; s_zeros(&diag, n, 1);
-		int *ipiv; int_zeros(&ipiv, n, 1);
-
-//		for(i=0; i<n; i++) x[i] = 1;
-//		for(i=0; i<n; i++) x2[i] = 1;
-
-		// matrix struct
-#if 0
-		struct s_strmat sA; s_allocate_strmat(n+4, n+4, &sA);
-		struct s_strmat sB; s_allocate_strmat(n+4, n+4, &sB);
-		struct s_strmat sC; s_allocate_strmat(n+4, n+4, &sC);
-		struct s_strmat sD; s_allocate_strmat(n+4, n+4, &sD);
-		struct s_strmat sE; s_allocate_strmat(n+4, n+4, &sE);
-#else
-		struct s_strmat sA; s_allocate_strmat(n, n, &sA);
-		struct s_strmat sB; s_allocate_strmat(n, n, &sB);
-		struct s_strmat sC; s_allocate_strmat(n, n, &sC);
-		struct s_strmat sD; s_allocate_strmat(n, n, &sD);
-		struct s_strmat sE; s_allocate_strmat(n, n, &sE);
-#endif
-		struct s_strvec sx; s_allocate_strvec(n, &sx);
-		struct s_strvec sy; s_allocate_strvec(n, &sy);
-		struct s_strvec sz; s_allocate_strvec(n, &sz);
-
-		s_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-		s_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-		s_cvt_vec2strvec(n, x, &sx, 0);
-
-
-		// create matrix to pivot all the time
-//		sgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-
-		float *dummy;
-
-		int info;
-
-		/* timing */
-		struct timeval tvm1, tv0, tv1, tv2, tv3, tv4, tv5, tv6, tv7, tv8, tv9, tv10, tv11, tv12, tv13, tv14, tv15, tv16;
-
-		/* warm up */
-		for(rep=0; rep<nrep; rep++)
-			{
-			sgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-			}
-
-		float alpha = 1.0;
-		float beta = 0.0;
-
-		gettimeofday(&tv0, NULL); // start
-
-		gettimeofday(&tv1, NULL); // start
-
-		for(rep=0; rep<nrep; rep++)
-			{
-//			kernel_sgemm_nt_24x4_lib8(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_sgemm_nt_16x4_lib8(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_sgemm_nt_8x8_lib8(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//			kernel_sgemm_nt_8x4_lib8(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//			kernel_sgemm_nt_4x8_gen_lib8(n, &alpha, sA.pA, sB.pA, &beta, 0, sD.pA, sD.cn, 0, sD.pA, sD.cn, 0, 4, 0, 8);
-//			kernel_sgemm_nt_4x8_vs_lib8(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA, 4, 8);
-//			kernel_sgemm_nt_4x8_lib8(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//			kernel_sgemm_nt_12x4_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_sgemm_nt_8x4_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_sgemm_nt_4x4_lib4(n, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//			kernel_sgemm_nn_16x4_lib8(n, &alpha, sA.pA, sA.cn, 0, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//			kernel_sgemm_nn_8x8_lib8(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-//			kernel_sgemm_nn_8x4_lib8(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sD.pA, sD.pA);
-
-//			sgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			sgemm_nn_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			ssyrk_ln_libstr(n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 0.0, &sC, 0, 0, &sD, 0, 0);
-//			spotrf_l_mn_libstr(n, n, &sB, 0, 0, &sB, 0, 0);
-			spotrf_l_libstr(n, &sB, 0, 0, &sB, 0, 0);
-//			sgetr_libstr(n, n, &sA, 0, 0, &sB, 0, 0);
-//			sgetrf_nopivot_libstr(n, n, &sB, 0, 0, &sB, 0, 0);
-//			sgetrf_libstr(n, n, &sB, 0, 0, &sB, 0, 0, ipiv);
-//			strmm_rlnn_libstr(n, n, 1.0, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-//			strmm_rutn_libstr(n, n, 1.0, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-//			strsm_llnu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			strsm_lunn_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			strsm_rltn_libstr(n, n, 1.0, &sB, 0, 0, &sD, 0, 0, &sD, 0, 0);
-//			strsm_rltu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			strsm_rutn_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sB, 0, 0);
-//			sgemv_n_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			sgemv_t_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			ssymv_l_libstr(n, n, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sy, 0, &sz, 0);
-//			sgemv_nt_libstr(n, n, 1.0, 1.0, &sA, 0, 0, &sx, 0, &sx, 0, 0.0, 0.0, &sy, 0, &sy, 0, &sz, 0, &sz, 0);
-			}
-
-//		d_print_strmat(n, n, &sD, 0, 0);
-
-		gettimeofday(&tv2, NULL); // stop
-
-		for(rep=0; rep<nrep; rep++)
-			{
-#if defined(REF_BLAS_OPENBLAS) || defined(REF_BLAS_NETLIB) || defined(REF_BLAS_MKL)
-//			sgemm_(&c_n, &c_t, &n, &n, &n, &d_1, A, &n, M, &n, &d_0, C, &n);
-//			sgemm_(&c_n, &c_n, &n, &n, &n, &d_1, A, &n, M, &n, &d_0, C, &n);
-//			scopy_(&n2, A, &i_1, B, &i_1);
-//			ssyrk_(&c_l, &c_n, &n, &n, &d_1, A, &n, &d_0, C, &n);
-//			strmm_(&c_r, &c_u, &c_t, &c_n, &n, &n, &d_1, A, &n, C, &n);
-//			spotrf_(&c_l, &n, B2, &n, &info);
-//			sgetrf_(&n, &n, B2, &n, ipiv, &info);
-//			strsm_(&c_l, &c_l, &c_n, &c_u, &n, &n, &d_1, B2, &n, B, &n);
-//			strsm_(&c_l, &c_u, &c_n, &c_n, &n, &n, &d_1, B2, &n, B, &n);
-//			strtri_(&c_l, &c_n, &n, B2, &n, &info);
-//			slauum_(&c_l, &n, B, &n, &info);
-//			sgemv_(&c_n, &n, &n, &d_1, A, &n, x, &i_1, &d_0, y, &i_1);
-//			sgemv_(&c_t, &n, &n, &d_1, A, &n, x2, &i_1, &d_0, y2, &i_1);
-//			strmv_(&c_l, &c_n, &c_n, &n, B, &n, x, &i_1);
-//			strsv_(&c_l, &c_n, &c_n, &n, B, &n, x, &i_1);
-//			ssymv_(&c_l, &n, &d_1, A, &n, x, &i_1, &d_0, y, &i_1);
-
-//			for(i=0; i<n; i++)
-//				{
-//				i_t = n-i;
-//				scopy_(&i_t, &B[i*(n+1)], &i_1, &C[i*(n+1)], &i_1);
-//				}
-//			ssyrk_(&c_l, &c_n, &n, &n, &d_1, A, &n, &d_1, C, &n);
-//			spotrf_(&c_l, &n, C, &n, &info);
-
-#endif
-
-#if defined(REF_BLAS_BLIS)
-//			sgemm_(&c_n, &c_t, &n77, &n77, &n77, &d_1, A, &n77, B, &n77, &d_0, C, &n77);
-//			sgemm_(&c_n, &c_n, &n77, &n77, &n77, &d_1, A, &n77, B, &n77, &d_0, C, &n77);
-//			ssyrk_(&c_l, &c_n, &n77, &n77, &d_1, A, &n77, &d_0, C, &n77);
-//			strmm_(&c_r, &c_u, &c_t, &c_n, &n77, &n77, &d_1, A, &n77, C, &n77);
-//			spotrf_(&c_l, &n77, B, &n77, &info);
-//			strtri_(&c_l, &c_n, &n77, B, &n77, &info);
-//			slauum_(&c_l, &n77, B, &n77, &info);
-#endif
-			}
-
-		gettimeofday(&tv3, NULL); // stop
-
-		// flops
-		if(1)
-			{
-
-			float Gflops_max = flops_max * GHz_max;
-
-//			float flop_operation = 6*16.0*2*n; // kernel 24x4
-//			float flop_operation = 4*16.0*2*n; // kernel 16x4
-//			float flop_operation = 3*16.0*2*n; // kernel 12x4
-//			float flop_operation = 2*16.0*2*n; // kernel 8x4
-//			float flop_operation = 1*16.0*2*n; // kernel 4x4
-
-//			float flop_operation = 2.0*n*n*n; // dgemm
-//			float flop_operation = 1.0*n*n*n; // dsyrk dtrmm dtrsm
-			float flop_operation = 1.0/3.0*n*n*n; // dpotrf dtrtri
-//			float flop_operation = 2.0/3.0*n*n*n; // dgetrf
-//			float flop_operation = 2.0*n*n; // dgemv dsymv
-//			float flop_operation = 1.0*n*n; // dtrmv dtrsv
-//			float flop_operation = 4.0*n*n; // dgemv_nt
-//			float flop_operation = 3*16.0*2*n; // kernel 12x4
-
-//			float flop_operation = 4.0/3.0*n*n*n; // dsyrk+dpotrf
-
-			float time_hpmpc    = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-			float time_blasfeo  = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-			float time_blas     = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-			float Gflops_hpmpc    = 1e-9*flop_operation/time_hpmpc;
-			float Gflops_blasfeo  = 1e-9*flop_operation/time_blasfeo;
-			float Gflops_blas     = 1e-9*flop_operation/time_blas;
-
-
-			printf("%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-//			fprintf(f, "%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gflops_blasfeo, 100.0*Gflops_blasfeo/Gflops_max, Gflops_blas, 100.0*Gflops_blas/Gflops_max);
-
-			}
-		// memops
-		else
-			{
-
-			float Gmemops_max = memops_max * GHz_max;
-
-			float memop_operation = 1.0*n*n; // dgecp
-
-			float time_hpmpc    = (float) (tv1.tv_sec-tv0.tv_sec)/(nrep+0.0)+(tv1.tv_usec-tv0.tv_usec)/(nrep*1e6);
-			float time_blasfeo  = (float) (tv2.tv_sec-tv1.tv_sec)/(nrep+0.0)+(tv2.tv_usec-tv1.tv_usec)/(nrep*1e6);
-			float time_blas     = (float) (tv3.tv_sec-tv2.tv_sec)/(nrep+0.0)+(tv3.tv_usec-tv2.tv_usec)/(nrep*1e6);
-
-			float Gmemops_hpmpc    = 1e-9*memop_operation/time_hpmpc;
-			float Gmemops_blasfeo  = 1e-9*memop_operation/time_blasfeo;
-			float Gmemops_blas     = 1e-9*memop_operation/time_blas;
-
-
-			printf("%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gmemops_blasfeo, 100.0*Gmemops_blasfeo/Gmemops_max, Gmemops_blas, 100.0*Gmemops_blas/Gmemops_max);
-//			fprintf(f, "%d\t%7.2f\t%7.2f\t%7.2f\t%7.2f\n", n, Gmemops_blasfeo, 100.0*Gmemops_blasfeo/Gmemops_max, Gmemops_blas, 100.0*Gmemops_blas/Gmemops_max);
-
-			}
-
-
-		free(A);
-		free(B);
-		free(B2);
-		free(M);
-		free(x);
-		free(y);
-		free(x2);
-		free(y2);
-		free(ipiv);
-		
-		s_free_strmat(&sA);
-		s_free_strmat(&sB);
-		s_free_strmat(&sC);
-		s_free_strmat(&sD);
-		s_free_strmat(&sE);
-		s_free_strvec(&sx);
-		s_free_strvec(&sy);
-		s_free_strvec(&sz);
-
-		}
-
-	printf("\n");
-
-//	fprintf(f, "];\n");
-//	fclose(f);
-
-	return 0;
-	
-	}
-
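
The commented-out flop_operation lines in both deleted benchmarks encode the usual operation counts per routine, and the printed percentage is 100*Gflops/(flops_max*GHz_max). Collected in one place as a hypothetical helper (the function name and interface are assumptions for illustration, not part of BLASFEO):

#include <string.h>

// hypothetical helper (not BLASFEO code): the operation counts used by the
// commented-out flop_operation lines in test_blas.c / test_blas_s.c
static double flop_count(const char *routine, double n)
	{
	if(strcmp(routine, "gemm")==0) return 2.0*n*n*n;
	if(strcmp(routine, "syrk")==0 || strcmp(routine, "trmm")==0 || strcmp(routine, "trsm")==0) return 1.0*n*n*n;
	if(strcmp(routine, "potrf")==0 || strcmp(routine, "trtri")==0) return 1.0/3.0*n*n*n;
	if(strcmp(routine, "getrf")==0) return 2.0/3.0*n*n*n;
	if(strcmp(routine, "geqrf")==0) return 4.0/3.0*n*n*n;
	if(strcmp(routine, "gemv")==0 || strcmp(routine, "symv")==0) return 2.0*n*n;
	if(strcmp(routine, "trmv")==0 || strcmp(routine, "trsv")==0) return 1.0*n*n;
	if(strcmp(routine, "gemv_nt")==0) return 4.0*n*n;
	return 0.0; // unknown routine
	}
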
diff --git a/third_party/blasfeo/test_problems/test_d_strmat.c b/third_party/blasfeo/test_problems/test_d_strmat.c
deleted file mode 100644
index e06cf84..0000000
--- a/third_party/blasfeo/test_problems/test_d_strmat.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux_ext_dep.h"
-#include "../include/blasfeo_v_aux_ext_dep.h"
-#include "../include/blasfeo_d_aux.h"
-#include "../include/blasfeo_d_kernel.h"
-#include "../include/blasfeo_d_blas.h"
-
-
-int main()
-	{
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by HIGH_PERFORMANCE\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	int ii;
-
-	int n = 16;
-
-	//
-	// matrices in column-major format
-	//
-	double *A; d_zeros(&A, n, n);
-	for(ii=0; ii<n*n; ii++) A[ii] = ii;
-//	d_print_mat(n, n, A, n);
-
-	double *B; d_zeros(&B, n, n);
-	for(ii=0; ii<n; ii++) B[ii*(n+1)] = 1.0;
-//	d_print_mat(n, n, B, n);
-
-	double *C; d_zeros(&C, n, n);
-
-	double *D; d_zeros(&D, n, n);
-	for(ii=0; ii<n*n; ii++) D[ii] = -1;
-
-	double *x_n; d_zeros(&x_n, n, 1);
-//	for(ii=0; ii<n; ii++) x_n[ii] = 1.0;
-	x_n[1] = 1.0;
-//	x_n[1] = 1.0;
-//	x_n[2] = 2.0;
-//	x_n[3] = 3.0;
-	double *x_t; d_zeros(&x_t, n, 1);
-//	for(ii=0; ii<n; ii++) x_n[ii] = 1.0;
-	x_t[0] = 1.0;
-	double *y_n; d_zeros(&y_n, n, 1);
-	double *y_t; d_zeros(&y_t, n, 1);
-	double *z_n; d_zeros(&z_n, n, 1);
-	double *z_t; d_zeros(&z_t, n, 1);
-
-	double *x0; d_zeros(&x0, n, 1); x0[0] = 1.0;
-	double *x1; d_zeros(&x1, n, 1); x1[1] = 1.0;
-	double *x2; d_zeros(&x2, n, 1); x2[2] = 1.0;
-	double *x3; d_zeros(&x3, n, 1); x3[3] = 1.0;
-	double *x4; d_zeros(&x4, n, 1); x4[4] = 1.0;
-	double *x5; d_zeros(&x5, n, 1); x5[5] = 1.0;
-	double *x6; d_zeros(&x6, n, 1); x6[6] = 1.0;
-	double *x7; d_zeros(&x7, n, 1); x7[7] = 1.0;
-	double *x8; d_zeros(&x8, n, 1); x8[8] = 1.0;
-	double *x9; d_zeros(&x9, n, 1); x9[9] = 1.0;
-
-	int *ipiv; int_zeros(&ipiv, n, 1);
-
-	//
-	// matrices in matrix struct format
-	//
-	int size_strmat = 5*d_size_strmat(n, n);
-	void *memory_strmat; v_zeros_align(&memory_strmat, size_strmat);
-	char *ptr_memory_strmat = (char *) memory_strmat;
-
-	struct d_strmat sA;
-//	d_allocate_strmat(n, n, &sA);
-	d_create_strmat(n, n, &sA, ptr_memory_strmat);
-	ptr_memory_strmat += sA.memory_size;
-	d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-//	d_cast_mat2strmat(A, &sA);
-	d_print_strmat(n, n, &sA, 0, 0);
-
-	struct d_strmat sB;
-//	d_allocate_strmat(n, n, &sB);
-	d_create_strmat(n, n, &sB, ptr_memory_strmat);
-	ptr_memory_strmat += sB.memory_size;
-	d_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	struct d_strmat sC;
-//	d_allocate_strmat(n, n, &sC);
-	d_create_strmat(n, n, &sC, ptr_memory_strmat);
-	ptr_memory_strmat += sC.memory_size;
-
-	struct d_strmat sD;
-//	d_allocate_strmat(n, n, &sD);
-	d_create_strmat(n, n, &sD, ptr_memory_strmat);
-	ptr_memory_strmat += sD.memory_size;
-	d_cvt_mat2strmat(n, n, D, n, &sD, 0, 0);
-
-	struct d_strmat sE;
-//	d_allocate_strmat(n, n, &sE);
-	d_create_strmat(n, n, &sE, ptr_memory_strmat);
-	ptr_memory_strmat += sE.memory_size;
-
-	struct d_strvec sx_n;
-	d_allocate_strvec(n, &sx_n);
-	d_cvt_vec2strvec(n, x_n, &sx_n, 0);
-
-	struct d_strvec sx_t;
-	d_allocate_strvec(n, &sx_t);
-	d_cvt_vec2strvec(n, x_t, &sx_t, 0);
-
-	struct d_strvec sy_n;
-	d_allocate_strvec(n, &sy_n);
-	d_cvt_vec2strvec(n, y_n, &sy_n, 0);
-
-	struct d_strvec sy_t;
-	d_allocate_strvec(n, &sy_t);
-	d_cvt_vec2strvec(n, y_t, &sy_t, 0);
-
-	struct d_strvec sz_n;
-	d_allocate_strvec(n, &sz_n);
-	d_cvt_vec2strvec(n, z_n, &sz_n, 0);
-
-	struct d_strvec sz_t;
-	d_allocate_strvec(n, &sz_t);
-	d_cvt_vec2strvec(n, z_t, &sz_t, 0);
-
-	struct d_strvec sx0; d_create_strvec(n, &sx0, x0);
-	struct d_strvec sx1; d_create_strvec(n, &sx1, x1);
-	struct d_strvec sx2; d_create_strvec(n, &sx2, x2);
-	struct d_strvec sx3; d_create_strvec(n, &sx3, x3);
-	struct d_strvec sx4; d_create_strvec(n, &sx4, x4);
-	struct d_strvec sx5; d_create_strvec(n, &sx5, x5);
-	struct d_strvec sx6; d_create_strvec(n, &sx6, x6);
-	struct d_strvec sx7; d_create_strvec(n, &sx7, x7);
-	struct d_strvec sx8; d_create_strvec(n, &sx8, x8);
-	struct d_strvec sx9; d_create_strvec(n, &sx9, x9);
-
-	struct d_strvec sz0; d_allocate_strvec(n, &sz0);
-	struct d_strvec sz1; d_allocate_strvec(n, &sz1);
-	struct d_strvec sz2; d_allocate_strvec(n, &sz2);
-	struct d_strvec sz3; d_allocate_strvec(n, &sz3);
-	struct d_strvec sz4; d_allocate_strvec(n, &sz4);
-	struct d_strvec sz5; d_allocate_strvec(n, &sz5);
-	struct d_strvec sz6; d_allocate_strvec(n, &sz6);
-	struct d_strvec sz7; d_allocate_strvec(n, &sz7);
-	struct d_strvec sz8; d_allocate_strvec(n, &sz8);
-	struct d_strvec sz9; d_allocate_strvec(n, &sz9);
-
-	// tests
-	double *v; d_zeros(&v, n, 1);
-	double *vp; d_zeros(&vp, n, 1);
-	double *vm; d_zeros(&vm, n, 1);
-	double *m; d_zeros(&m, n, 1);
-	double *r; d_zeros(&r, n, 1);
-
-	for(ii=0; ii<n; ii++) v[ii] = ii; // x
-	for(ii=0; ii<n; ii++) vp[ii] = 8.0; // upper
-	for(ii=0; ii<n; ii++) vm[ii] = 3.0; // lower
-	for(ii=0; ii<n; ii++) r[ii] = 2*ii+1; // x
-
-	d_print_mat(1, n, v, 1);
-	d_print_mat(1, n, vp, 1);
-	d_print_mat(1, n, vm, 1);
-	d_print_mat(1, n, r, 1);
-
-	struct d_strvec sv; d_create_strvec(n, &sv, v);
-	struct d_strvec svp; d_create_strvec(n, &svp, vp);
-	struct d_strvec svm; d_create_strvec(n, &svm, vm);
-	struct d_strvec sm; d_create_strvec(n, &sm, m);
-	struct d_strvec sr; d_create_strvec(n, &sr, r);
-
-//	d_print_tran_strvec(n, &sv, 0);
-//	d_print_tran_strvec(n, &svp, 0);
-//	d_print_tran_strvec(n, &svm, 0);
-//	d_print_tran_strvec(n, &sm, 0);
-//	d_print_tran_strvec(n, &sr, 0);
-
-//	d_print_tran_strvec(n, &sm, 0);
-//	DVECEL_LIBSTR(&sm, 0) = 0.0;
-//	DVECEL_LIBSTR(&sm, 1) = 1.0;
-//	DVECEL_LIBSTR(&sm, 2) = 2.0;
-//	d_print_tran_strvec(n, &sm, 0);
-//	return 0;
-
-	double alpha = 1.0;
-	double beta = 0.0;
-	kernel_dgemm_nt_4x4_gen_lib4(4, &alpha, sA.pA, sB.pA, &beta, 0, sD.pA, sA.cn, 0, sD.pA, sE.cn, 1, 3, 1, 3);
-	d_print_strmat(n, n, &sD, 0, 0);
-	return 0;
-	dtrmm_rlnn_libstr(8, 8, alpha, &sA, 3, 0, &sB, 0, 0, &sD, 0, 0);
-//	dgemm_nn_libstr(8, 8, 8, alpha, &sB, 0, 0, &sA, 1, 0, beta, &sA, 0, 0, &sD, 0, 0);
-	d_print_strmat(n, n, &sD, 0, 0);
-	return 0;
-//	dsyrk_ln_libstr(n, 15, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-//	dpotrf_l_mn_libstr(n, 15, &sD, 0, 0, &sD, 0, 0);
-//	dsyrk_dpotrf_ln_libstr(n, 15, n, &sA, 0, 0, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-//	dtrmm_rlnn_libstr(n, n, alpha, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-//	dgese_libstr(n, n, 0.0/0.0, &sD, 0, 0);
-//	kernel_dgemm_nt_4x8_lib4(n, &alpha, sA.pA, sB.pA, sB.cn, &beta, sC.pA, sD.pA);
-//	kernel_dgemm_nn_4x8_lib4(n, &alpha, sA.pA, 0, sB.pA, sB.cn, &beta, sC.pA, sD.pA);
-//	kernel_dsyrk_nt_l_4x4_gen_lib4(n, &alpha, sA.pA, sB.pA, &beta, 0, sC.pA, sC.cn, 3, sD.pA, sD.cn, 0, 4, 0, 4);
-//	kernel_dsyrk_nt_l_8x4_gen_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, &beta, 0, sC.pA, sC.cn, 3, sD.pA, sD.cn, 0, 8, 0, 8);
-//	dsyrk_ln_libstr(10, 10, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sC, 0, 0, &sD, 1, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx0, 0, beta, &sz0, 0, &sz0, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx1, 0, beta, &sz1, 0, &sz1, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx2, 0, beta, &sz2, 0, &sz2, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx3, 0, beta, &sz3, 0, &sz3, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx4, 0, beta, &sz4, 0, &sz4, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx5, 0, beta, &sz5, 0, &sz5, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx6, 0, beta, &sz6, 0, &sz6, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx7, 0, beta, &sz7, 0, &sz7, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx8, 0, beta, &sz8, 0, &sz8, 0);
-	dsymv_l_libstr(10, 10, alpha, &sA, 0, 0, &sx9, 0, beta, &sz9, 0, &sz9, 0);
-	d_print_tran_strvec(n, &sz0, 0);
-	d_print_tran_strvec(n, &sz1, 0);
-	d_print_tran_strvec(n, &sz2, 0);
-	d_print_tran_strvec(n, &sz3, 0);
-	d_print_tran_strvec(n, &sz4, 0);
-	d_print_tran_strvec(n, &sz5, 0);
-	d_print_tran_strvec(n, &sz6, 0);
-	d_print_tran_strvec(n, &sz7, 0);
-	d_print_tran_strvec(n, &sz8, 0);
-	d_print_tran_strvec(n, &sz9, 0);
-	return 0;
-
-//	d_print_strmat(n, n, &sC, 0, 0);
-//	dgese_libstr(n, n, 1.0, &sB, 0, 0);
-//	kernel_dger4_sub_4_lib4(6, sB.pA, sA.pA, sC.pA);
-//	kernel_dger4_sub_4_vs_lib4(6, sB.pA, sA.pA, sC.pA, 1);
-	return 0;
-
-//	d_print_strmat(n, n, &sC, 0, 0);
-//	dgese_libstr(n, n, 1.0, &sB, 0, 0);
-//	kernel_dger4_sub_4_lib4(6, sB.pA, sA.pA, sC.pA);
-//	kernel_dger4_sub_4_vs_lib4(6, sB.pA, sA.pA, sC.pA, 1);
-//	kernel_dger4_sub_8_lib4(5, sB.pA, sB.cn, sA.pA, sC.pA, sC.cn);
-//	kernel_dger4_sub_8_vs_lib4(5, sB.pA, sB.cn, sA.pA, sC.pA, sC.cn, 5);
-//	kernel_dger4_sub_12_lib4(5, sB.pA, sB.cn, sA.pA, sC.pA, sC.cn);
-//	kernel_dger4_sub_12_vs_lib4(5, sB.pA, sB.cn, sA.pA, sC.pA, sC.cn, 9);
-//	kernel_dger4_sub_8c_lib4(9, sB.pA, sA.cn, sA.pA, sC.pA, sC.cn);
-//	kernel_dger4_sub_4c_lib4(9, sB.pA, sA.cn, sA.pA, sC.pA, sC.cn);
-//	d_print_strmat(n, n, &sC, 0, 0);
-//	return 0;
-
-#if 1
-	dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sC, 0, 0);
-#else
-	dgese_libstr(n, n, 0.1, &sC, 0, 0);
-	DMATEL_LIBSTR(&sC, 0, 0) = 1.0;
-//	DMATEL_LIBSTR(&sC, 0, 1) = 1.0;
-	for(ii=1; ii<n-1; ii++)
-		{
-//		DMATEL_LIBSTR(&sC, ii, ii-1) = 1.0;
-		DMATEL_LIBSTR(&sC, ii, ii) = 1.0;
-//		DMATEL_LIBSTR(&sC, ii, ii+1) = 1.0;
-		}
-//	DMATEL_LIBSTR(&sC, n-1, n-2) = 1.0;
-	DMATEL_LIBSTR(&sC, n-1, n-1) = 1.0;
-#endif
-	d_print_strmat(n, n, &sC, 0, 0);
-	dgese_libstr(n, n, 0.0/0.0, &sD, 0, 0);
-//	d_print_strmat(n, n, &sA, 0, 0);
-//	dgein1_libstr(12.0, &sA, 0, 0);
-//	DMATEL_LIBSTR(&sA, 0, 0) =   12.0;
-//	DMATEL_LIBSTR(&sA, 1, 0) =    6.0;
-//	DMATEL_LIBSTR(&sA, 2, 0) = -  4.0;
-//	DMATEL_LIBSTR(&sA, 0, 1) = - 51.0;
-//	DMATEL_LIBSTR(&sA, 1, 1) =  167.0;
-//	DMATEL_LIBSTR(&sA, 2, 1) =   24.0;
-//	DMATEL_LIBSTR(&sA, 0, 2) =    4.0;
-//	DMATEL_LIBSTR(&sA, 1, 2) = - 68.0;
-//	DMATEL_LIBSTR(&sA, 2, 2) = - 41.0;
-//	d_print_strmat(n, n, &sA, 0, 0);
-	d_print_strmat(n, n, &sC, 0, 0);
-//	printf("\n%f\n", DGEEL_LIBSTR(&sA, 0, 0));
-//	int qr_work_size = dgeqrf_work_size_libstr(n, n);
-	int qr_work_size = dgelqf_work_size_libstr(n, n);
-	void *qr_work;
-	v_zeros_align(&qr_work, qr_work_size);
-//	dgeqrf_libstr(10, 10, &sC, 0, 0, &sD, 0, 0, qr_work);
-	dgelqf_libstr(17, 17, &sC, 0, 0, &sD, 0, 0, qr_work);
-//	dgecp_libstr(10, 10, &sC, 0, 0, &sD, 0, 0);
-//	kernel_dgeqrf_4_lib4(16, 12, sD.pA, sD.cn, sD.dA, qr_work);
-//	d_print_strmat(n, n, &sA, 0, 0);
-//	kernel_dgeqrf_vs_lib4(10, 16, 0, sD.pA+0, sD.cn, sD.dA);
-//	kernel_dgelqf_vs_lib4(10, 10, 10, 0, sD.pA+0, sD.cn, sD.dA);
-	d_print_strmat(n, n, &sD, 0, 0);
-	free(qr_work);
-	return 0;
-
-//	dveccl_mask_libstr(n, &svm, 0, &sv, 0, &svp, 0, &sv, 0, &sm, 0);
-//	veccl_libstr(n, &svm, 0, &sv, 0, &svp, 0, &sv, 0);
-//	d_print_tran_strvec(12, &sv, 0);
-//	d_print_tran_strvec(12, &sm, 0);
-//	dvecze_libstr(n, &sm, 0, &sr, 0, &sr, 0);
-//	d_print_tran_strvec(12, &sr, 0);
-//	return 0;
-
-//	d_print_strmat(n, n, &sA, 0, 0);
-//	dtrsv_unn_libstr(n, &sA, 1, 0, &sx0, 0, &sz0, 0);
-//	d_print_tran_strvec(n, &sz0, 0);
-//	dtrsv_unn_libstr(n, &sA, 1, 0, &sx1, 0, &sz1, 0);
-//	d_print_tran_strvec(n, &sz1, 0);
-//	dtrsv_unn_libstr(n, &sA, 1, 0, &sx2, 0, &sz2, 0);
-//	d_print_tran_strvec(n, &sz2, 0);
-//	dtrsv_unn_libstr(n, &sA, 1, 0, &sx3, 0, &sz3, 0);
-//	d_print_tran_strvec(n, &sz3, 0);
-//	return 0;
-
-//	double alpha = 1.0;
-//	double beta = 1.0;
-//	kernel_dgemm_nt_4x12_vs_lib4(n, &alpha, sA.pA, sB.pA, sB.cn, &beta, sD.pA, sD.pA, 3, 10);
-//	kernel_dgemm_nt_8x8u_vs_lib4(n, &alpha, sA.pA, sA.cn, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn, 7, 6);
-	dgemm_nn_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 1.0, &sB, 0, 0, &sD, 0, 0);
-	d_print_strmat(n, n, &sD, 0, 0);
-	dpotrf_l_libstr(16, &sD, 0, 0, &sD, 0, 0);
-	d_print_strmat(n, n, &sD, 0, 0);
-	return 0;
-
-//	dmatse_libstr(n, n, 100.0, &sD, 0, 0);
-
-//	for(ii=0; ii<n; ii++)
-//		dvecin1_libstr(ii+1, &sx_n, ii);
-//	d_print_tran_strvec(n, &sx_n, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	// ddiain_libstr(4, -1.0, &sx_n, 1, &sD, 3, 2);
-//	ddiaad_libstr(4, -1.0, &sx_n, 1, &sD, 3, 2);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	return 0;
-
-//	d_print_tran_strvec(n, &sx_n, 0);
-//	dgemm_l_diag_libstr(n, n, 1.0, &sx_n, 0, &sA, 0, 0, 0.0, &sD, 0, 0, &sD, 0, 0);
-//	dgemm_r_diag_libstr(n, n, 1.0, &sA, 0, 0, &sx_n, 0, 0.0, &sD, 0, 0, &sD, 0, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	exit(1);
-
-//	dsetmat_libstr(n, n, 0.0, &sD, 0, 0);
-//	dmatin1_libstr(2.0, &sD, 0, 0);
-//	dmatin1_libstr(2.0, &sD, 1, 1);
-//	dmatin1_libstr(2.0, &sD, 2, 2);
-//	dmatin1_libstr(1.0, &sD, 1, 0);
-//	dmatin1_libstr(1.0, &sD, 2, 1);
-//	dmatin1_libstr(0.5, &sD, 2, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	d_print_tran_strvec(n, &sx_n, 0);
-//	dtrsv_lnn_libstr(n, n, &sD, 0, 0, &sx_n, 0, &sz_n, 0);
-//	d_print_tran_strvec(n, &sz_n, 0);
-//	exit(1);
-
-//	dgemm_nt_libstr(8, 8, 8, 1.0, &sB, 0, 0, &sA, 1, 0, 0.0, &sD, 0, 0, &sD, 0, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	return 0;
-
-//	double alpha = 1.0;
-//	kernel_dtrmm_nn_rl_4x4_gen_lib4(7, &alpha, sB.pA, 2, sA.pA, sA.cn, 1, sD.pA, sD.cn, 0, 4, 1, 4);
-//	kernel_dtrmm_nn_rl_4x4_gen_lib4(7, &alpha, sB.pA+sB.cn*4, 2, sA.pA, sA.cn, 1, sD.pA+sD.cn*4, sD.cn, 0, 4, 1, 4);
-//	kernel_dtrmm_nn_rl_4x4_lib4(4, &alpha, sB.pA, sA.pA, sA.cn+4*4, sD.pA+4*4);
-//	kernel_dtrmm_nn_rl_4x4_gen_lib4(3, &alpha, sB.pA+sB.cn*4+4*4, 2, sA.pA+sB.cn*4+4*4, sA.cn, 1, sD.pA+sD.cn*4+4*4, sD.cn, 0, 4, 0, 4);
-	dtrmm_rlnn_libstr(8, 8, 1.0, &sB, 0, 0, &sA, 3, 0, &sD, 2, 1);
-	d_print_strmat(n, n, &sD, 0, 0);
-	return 0;
-
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx0, 0, &sx0, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx1, 0, &sx1, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx2, 0, &sx2, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx3, 0, &sx3, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx4, 0, &sx4, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx5, 0, &sx5, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx6, 0, &sx6, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx7, 0, &sx7, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx8, 0, &sx8, 0);
-	dtrmv_lnn_libstr(8, 8, &sA, 0, 0, &sx9, 0, &sx9, 0);
-	d_print_tran_strvec(n, &sx0, 0);
-	d_print_tran_strvec(n, &sx1, 0);
-	d_print_tran_strvec(n, &sx2, 0);
-	d_print_tran_strvec(n, &sx3, 0);
-	d_print_tran_strvec(n, &sx4, 0);
-	d_print_tran_strvec(n, &sx5, 0);
-	d_print_tran_strvec(n, &sx6, 0);
-	d_print_tran_strvec(n, &sx7, 0);
-	d_print_tran_strvec(n, &sx8, 0);
-	d_print_tran_strvec(n, &sx9, 0);
-	return 0;
-
-	dgemv_t_libstr(2, 8, 1.0, &sA, 2, 0, &sx_n, 0, 0.0, &sy_n, 0, &sz_n, 0);
-	d_print_tran_strvec(n, &sz_n, 0);
-	return 0;
-
-	dgemm_nt_libstr(4, 8, 8, 1.0, &sB, 0, 0, &sA, 0, 0, 0.0, &sB, 0, 0, &sD, 3, 0);
-//	d_print_strmat(n, n, &sB, 0, 0);
-	d_print_strmat(n, n, &sD, 0, 0);
-	exit(1);
-
-	dpotrf_l_libstr(n, &sD, 0, 0, &sD, 0, 0);
-//	dgetrf_nopivot_libstr(n, n, &sD, 0, 0, &sD, 0, 0);
-//	dgetrf_libstr(n, n, &sD, 0, 0, &sD, 0, 0, ipiv);
-	d_print_strmat(n, n, &sD, 0, 0);
-#if defined(LA_HIGH_PERFORMANCE) | defined(LA_REFERENCE)
-	d_print_mat(1, n, sD.dA, 1);
-#endif
-	int_print_mat(1, n, ipiv, 1);
-	dtrsm_rltn_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sE, 0, 0);
-	d_print_strmat(n, n, &sE, 0, 0);
-	exit(1);
-
-#if 1 // solve P L U X = P B
-	d_print_strmat(n, n, &sB, 0, 0);
-	drowpe_libstr(n, ipiv, &sB);
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	dtrsm_llnu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sE, 0, 0);
-	d_print_strmat(n, n, &sE, 0, 0);
-	dtrsm_lunn_libstr(n, n, 1.0, &sD, 0, 0, &sE, 0, 0, &sE, 0, 0);
-	d_print_strmat(n, n, &sE, 0, 0);
-#else // solve X^T (P L U)^T = B^T P^T
-	d_print_strmat(n, n, &sB, 0, 0);
-	dcolpe_libstr(n, ipiv, &sB);
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	dtrsm_rltu_libstr(n, n, 1.0, &sD, 0, 0, &sB, 0, 0, &sE, 0, 0);
-	d_print_strmat(n, n, &sE, 0, 0);
-	dtrsm_rutn_libstr(n, n, 1.0, &sD, 0, 0, &sE, 0, 0, &sE, 0, 0);
-	d_print_strmat(n, n, &sE, 0, 0);
-#endif
-
-//	d_print_strmat(n, n, &sA, 0, 0);
-//	d_print_strmat(n, n, &sB, 0, 0);
-//	d_print_strmat(n, n, &sD, 0, 0);
-//	d_print_strmat(n, n, &sE, 0, 0);
-
-//	d_cvt_strmat2mat(n, n, &sE, 0, 0, C, n);
-//	d_print_mat(n, n, C, n);
-
-	dtrtr_u_libstr(6, &sE, 2, 0, &sB, 1, 0);
-	d_print_strmat(n, n, &sB, 0, 0);
-
-	d_print_strmat(n, n, &sA, 0, 0);
-	dgemv_nt_libstr(6, n, 1.0, 1.0, &sA, 0, 0, &sx_n, 0, &sx_t, 0, 0.0, 0.0, &sy_n, 0, &sy_t, 0, &sz_n, 0, &sz_t, 0);
-//	dsymv_l_libstr(5, 5, 1.0, &sA, 0, 0, x_n, 0.0, y_n, z_n);
-	d_print_mat(1, n, z_n, 1);
-	d_print_mat(1, n, z_t, 1);
-
-
-
-
-//	for(ii=0; ii<sE.pm*sE.cn; ii++) sE.pA[ii] = 0.0;
-//	double alpha = 0.0;
-//	double beta = 1.0;
-//	kernel_dgemm_nt_4x4_gen_lib4(4, &alpha, sA.pA, sB.pA, &beta, 3, sA.pA, sA.cn, 0, sE.pA, sE.cn, 0, 4, 2, 2);
-//	d_print_strmat(n, n, &sE, 0, 0);
-
-	// free memory
-	free(A);
-	free(B);
-	free(C);
-	free(D);
-	free(ipiv);
-//	d_free_strmat(&sA);
-//	d_free_strmat(&sB);
-//	d_free_strmat(&sD);
-	v_free_align(memory_strmat);
-
-	return 0;
-
-	}
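
The deleted strmat tests all follow the same basic pattern: allocate column-major data, convert it into the panel-major strmat structs, call a *_libstr routine, print, and free. A condensed double-precision sketch is below, using only calls that appear in the deleted file; the include paths, sizes, and the choice of dgemm_nt_libstr as the exercised routine are assumptions for illustration.

#include <stdlib.h>

#include "../include/blasfeo_common.h"
#include "../include/blasfeo_d_aux_ext_dep.h"
#include "../include/blasfeo_d_aux.h"
#include "../include/blasfeo_d_blas.h"

int main()
	{
	int ii;
	int n = 16;

	// column-major input
	double *A; d_zeros(&A, n, n);
	for(ii=0; ii<n*n; ii++) A[ii] = ii;

	// panel-major matrix structs
	struct d_strmat sA; d_allocate_strmat(n, n, &sA);
	struct d_strmat sC; d_allocate_strmat(n, n, &sC);
	d_cvt_mat2strmat(n, n, A, n, &sA, 0, 0); // column-major -> strmat

	// sC = 1.0 * sA * sA^T + 0.0 * sC (beta=0, so sC serves as both the C and D arguments)
	dgemm_nt_libstr(n, n, n, 1.0, &sA, 0, 0, &sA, 0, 0, 0.0, &sC, 0, 0, &sC, 0, 0);
	d_print_strmat(n, n, &sC, 0, 0);

	d_free_strmat(&sA);
	d_free_strmat(&sC);
	free(A);

	return 0;
	}
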
diff --git a/third_party/blasfeo/test_problems/test_s_strmat.c b/third_party/blasfeo/test_problems/test_s_strmat.c
deleted file mode 100644
index 456db87..0000000
--- a/third_party/blasfeo/test_problems/test_s_strmat.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/**************************************************************************************************
-*                                                                                                 *
-* This file is part of BLASFEO.                                                                   *
-*                                                                                                 *
-* BLASFEO -- BLAS For Embedded Optimization.                                                      *
-* Copyright (C) 2016-2017 by Gianluca Frison.                                                     *
-* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.              *
-* All rights reserved.                                                                            *
-*                                                                                                 *
-* HPMPC is free software; you can redistribute it and/or                                          *
-* modify it under the terms of the GNU Lesser General Public                                      *
-* License as published by the Free Software Foundation; either                                    *
-* version 2.1 of the License, or (at your option) any later version.                              *
-*                                                                                                 *
-* HPMPC is distributed in the hope that it will be useful,                                        *
-* but WITHOUT ANY WARRANTY; without even the implied warranty of                                  *
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.                                            *
-* See the GNU Lesser General Public License for more details.                                     *
-*                                                                                                 *
-* You should have received a copy of the GNU Lesser General Public                                *
-* License along with HPMPC; if not, write to the Free Software                                    *
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA                  *
-*                                                                                                 *
-* Author: Gianluca Frison, giaf (at) dtu.dk                                                       *
-*                          gianluca.frison (at) imtek.uni-freiburg.de                             *
-*                                                                                                 *
-**************************************************************************************************/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/time.h>
-
-#include "../include/blasfeo_common.h"
-#include "../include/blasfeo_i_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux_ext_dep.h"
-#include "../include/blasfeo_s_aux.h"
-#include "../include/blasfeo_s_kernel.h"
-#include "../include/blasfeo_s_blas.h"
-
-
-int main()
-	{
-
-#if defined(LA_HIGH_PERFORMANCE)
-
-	printf("\nLA provided by HIGH_PERFORMANCE\n\n");
-
-#elif defined(LA_REFERENCE)
-
-	printf("\nLA provided by REFERENCE\n\n");
-
-#elif defined(LA_BLAS)
-
-	printf("\nLA provided by BLAS\n\n");
-
-#else
-
-	printf("\nLA provided by ???\n\n");
-	exit(2);
-
-#endif
-
-	int ii, jj;
-
-	int n = 16;
-
-	//
-	// matrices in column-major format
-	//
-	float *A; s_zeros(&A, n, n);
-	for(ii=0; ii<n*n; ii++) A[ii] = ii;
-//	for(jj=0; jj<n; jj++)
-//		for(ii=0; ii<jj; ii++)
-//			A[ii+n*jj] = 0.0/0.0;
-//	s_print_mat(n, n, A, n);
-
-	float *B; s_zeros(&B, n, n);
-	for(ii=0; ii<n; ii++) B[ii*(n+1)] = 1.0;
-//	s_print_mat(n, n, B, n);
-
-	float *D; s_zeros(&D, n, n);
-	for(ii=0; ii<n*n; ii++) D[ii] = -1.0;
-//	s_print_mat(n, n, B, n);
-
-
-	//
-	// matrices in matrix struct format
-	//
-
-	struct s_strmat sA;
-	s_allocate_strmat(n, n, &sA);
-	s_cvt_mat2strmat(n, n, A, n, &sA, 0, 0);
-	s_print_strmat(n, n, &sA, 0, 0);
-
-	struct s_strmat sB;
-	s_allocate_strmat(n, n, &sB);
-	s_cvt_mat2strmat(n, n, B, n, &sB, 0, 0);
-	s_print_strmat(n, n, &sB, 0, 0);
-
-	struct s_strmat sD;
-	s_allocate_strmat(n, n, &sD);
-	s_cvt_mat2strmat(n, n, D, n, &sD, 0, 0);
-
-	struct s_strvec sx;
-	s_allocate_strvec(n, &sx);
-	sx.pa[7] = 1.0;
-	s_print_tran_strvec(n, &sx, 0);
-
-	struct s_strvec sz0;
-	s_allocate_strvec(n, &sz0);
-
-	struct s_strvec sz1;
-	s_allocate_strvec(n, &sz1);
-
-	//
-	// tests
-	//
-
-	float alpha = 1.0;
-	float beta = 0.0;
-//	kernel_sgemm_nt_24x4_lib8(4, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	kernel_sgemm_nt_16x4_lib8(4, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	kernel_sgemm_nt_8x8_lib8(5, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//	kernel_sgemm_nt_8x4_lib8(5, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA);
-//	kernel_sgemm_nt_4x8_gen_lib8(8, &alpha, sA.pA, sB.pA, &beta, 0, sD.pA, sD.cn, 0, sD.pA, sD.cn, 0, 4, 0, 8);
-//	kernel_sgemm_nt_8x4_vs_lib8(8, &alpha, sA.pA, sB.pA, &beta, sD.pA, sD.pA, 7, 4);
-//	kernel_sgemm_nt_8x4_lib8(8, &alpha, sB.pA, sA.pA+4, &beta, sA.pA+4*8, sD.pA+4*8);
-//	kernel_sgemm_nn_16x4_lib8(4, &alpha, sA.pA, sA.cn, 0, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	kernel_sgemm_nt_12x4_lib4(4, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	kernel_sgemm_nt_8x8_lib4(8, &alpha, sA.pA, sA.cn, sB.pA, sB.cn, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	kernel_sgemm_nt_8x4_lib4(2, &alpha, sA.pA, sA.cn, sB.pA, &beta, sD.pA, sD.cn, sD.pA, sD.cn);
-//	s_print_strmat(n, n, &sD, 0, 0);
-//	return 0;
-//	sgemm_nt_libstr(n, n, 5, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sB, 0, 0, &sD, 0, 0);
-//	ssyrk_ln_libstr(n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sB, 0, 0, &sD, 0, 0);
-//	ssyrk_ln_mn_libstr(n, n, n, 1.0, &sA, 0, 0, &sB, 0, 0, 0.0, &sB, 0, 0, &sD, 0, 0);
-//	kernel_ssyrk_nt_l_8x8_lib8(n, &alpha, sA.pA, sA.pA, &beta, sB.pA, sD.pA);
-//	sgecp_libstr(16, 16, &sA, 2, 0, &sD, 1, 0);
-//	sgetr_libstr(16, 16, &sA, 2, 0, &sD, 2, 0);
-//	s_print_strmat(n, n, &sD, 0, 0);
-//	sgemv_n_libstr(6, 6, 1.0, &sA, 1, 0, &sx, 0, 0.0, &sz0, 0, &sz0, 0);
-//	sgemv_t_libstr(11, 8, 1.0, &sA, 0, 0, &sx, 0, 0.0, &sz0, 0, &sz0, 0);
-//	strmv_lnn_libstr(6, 6, &sA, 1, 0, &sx, 0, &sz0, 0);
-//	strmv_ltn_libstr(10, 10, &sA, 1, 0, &sx, 0, &sz0, 0);
-//	sA.pA[0] = 1.0;
-//	strsv_lnn_libstr(10, &sA, 0, 0, &sx, 0, &sz0, 0);
-//	for(ii=0; ii<8; ii++) sA.dA[ii] = 1.0/sgeex1_libstr(&sA, ii, ii);
-//	kernel_strsv_lt_inv_8_lib8(0, sA.pA, sA.cn, sA.dA, sx.pa, sx.pa, sz0.pa);
-//	kernel_strsv_lt_inv_8_vs_lib8(0, sA.pA, sA.cn, sA.dA, sx.pa, sx.pa, sz0.pa, 3);
-//	s_print_strmat(n, n, &sA, 0, 0);
-//	strsv_ltn_libstr(12, &sA, 0, 0, &sx, 0, &sz0, 0);
-//	strsv_ltn_mn_libstr(11, 3, &sA, 0, 0, &sx, 0, &sz0, 0);
-//	s_print_strmat(n, n, &sA, 0, 0);
-//	kernel_sgemv_nt_4_lib8(n, &alpha, &alpha, sA.pA, sA.cn, sx.pa, sx.pa, &beta, sz1.pa, sz0.pa, sz1.pa);
-//	kernel_sgemv_nt_4_vs_lib8(n, &alpha, &alpha, sA.pA, sA.cn, sx.pa, sx.pa, &beta, sz1.pa, sz0.pa, sz1.pa, 3);
-//	sgemv_nt_libstr(5, 2, alpha, alpha, &sA, 0, 0, &sx, 0, &sx, 0, beta, beta, &sz0, 0, &sz1, 0, &sz0, 0, &sz1, 0);
-//	ssymv_l_libstr(10, 10, alpha, &sA, 1, 0, &sx, 0, beta, &sz0, 0, &sz1, 0);
-//	s_print_tran_strvec(n, &sz0, 0);
-//	s_print_tran_strvec(n, &sz1, 0);
-//	return 0;
-//	sgesc_libstr(16, 9, 2.0, &sD, 0, 0);
-//	s_print_strmat(n, n, &sD, 0, 0);
-//	kernel_spotrf_nt_l_8x8_lib8(0, sD.pA, sD.pA, sD.pA, sD.pA, sx.pa);
-//	s_print_strmat(n, n, &sD, 0, 0);
-//	s_print_tran_strvec(n, &sx, 0);
-//	kernel_strsm_nt_rl_inv_8x8_lib8(0, sD.pA, sD.pA, sD.pA+8*sD.cn, sD.pA+8*sD.cn, sD.pA, sx.pa);
-//	s_print_strmat(n, n, &sD, 0, 0);
-//	kernel_spotrf_nt_l_8x8_lib8(8, sD.pA+8*sD.cn, sD.pA+8*sD.cn, sD.pA+8*sD.cn+8*8, sD.pA+8*sD.cn+8*8, sx.pa+8);
-//	spotrf_l_mn_libstr(23, 17, &sD, 0, 0, &sD, 0, 0);
-//	spotrf_l_libstr(n, &sD, 0, 0, &sD, 0, 0);
-//	kernel_strmm_nn_rl_8x4_lib8(3, &alpha, sB.pA, 7, sA.pA, sA.cn, sD.pA);
-	strmm_rlnn_libstr(12, 8, 1.0, &sA, 0, 0, &sB, 0, 0, &sD, 0, 0);
-	s_print_strmat(n, n, &sD, 0, 0);
-	return 0;
-
-
-
-	//
-	// free memory
-	//
-
-	free(A);
-	free(B);
-	free(D);
-	s_free_strmat(&sA);
-	s_free_strmat(&sB);
-	s_free_strmat(&sD);
-
-	return 0;
-	
-	}
diff --git a/third_party/boostorg/algorithm/.gitattributes b/third_party/boostorg/algorithm/.gitattributes
deleted file mode 100644
index 3e84d7c..0000000
--- a/third_party/boostorg/algorithm/.gitattributes
+++ /dev/null
@@ -1,96 +0,0 @@
-* text=auto !eol svneol=native#text/plain
-*.gitattributes text svneol=native#text/plain
-
-# Scriptish formats
-*.bat        text svneol=native#text/plain
-*.bsh        text svneol=native#text/x-beanshell
-*.cgi        text svneol=native#text/plain
-*.cmd        text svneol=native#text/plain
-*.js         text svneol=native#text/javascript
-*.php        text svneol=native#text/x-php
-*.pl         text svneol=native#text/x-perl
-*.pm         text svneol=native#text/x-perl
-*.py         text svneol=native#text/x-python
-*.sh         eol=lf svneol=LF#text/x-sh
-configure    eol=lf svneol=LF#text/x-sh
-
-# Image formats
-*.bmp        binary svneol=unset#image/bmp
-*.gif        binary svneol=unset#image/gif
-*.ico        binary svneol=unset#image/ico
-*.jpeg       binary svneol=unset#image/jpeg
-*.jpg        binary svneol=unset#image/jpeg
-*.png        binary svneol=unset#image/png
-*.tif        binary svneol=unset#image/tiff
-*.tiff       binary svneol=unset#image/tiff
-*.svg        text svneol=native#image/svg%2Bxml
-
-# Data formats
-*.pdf        binary svneol=unset#application/pdf
-*.avi        binary svneol=unset#video/avi
-*.doc        binary svneol=unset#application/msword
-*.dsp        text svneol=crlf#text/plain
-*.dsw        text svneol=crlf#text/plain
-*.eps        binary svneol=unset#application/postscript
-*.gz         binary svneol=unset#application/gzip
-*.mov        binary svneol=unset#video/quicktime
-*.mp3        binary svneol=unset#audio/mpeg
-*.ppt        binary svneol=unset#application/vnd.ms-powerpoint
-*.ps         binary svneol=unset#application/postscript
-*.psd        binary svneol=unset#application/photoshop
-*.rdf        binary svneol=unset#text/rdf
-*.rss        text svneol=unset#text/xml
-*.rtf        binary svneol=unset#text/rtf
-*.sln        text svneol=native#text/plain
-*.swf        binary svneol=unset#application/x-shockwave-flash
-*.tgz        binary svneol=unset#application/gzip
-*.vcproj     text svneol=native#text/xml
-*.vcxproj    text svneol=native#text/xml
-*.vsprops    text svneol=native#text/xml
-*.wav        binary svneol=unset#audio/wav
-*.xls        binary svneol=unset#application/vnd.ms-excel
-*.zip        binary svneol=unset#application/zip
-
-# Text formats
-.htaccess    text svneol=native#text/plain
-*.bbk        text svneol=native#text/xml
-*.cmake      text svneol=native#text/plain
-*.css        text svneol=native#text/css
-*.dtd        text svneol=native#text/xml
-*.htm        text svneol=native#text/html
-*.html       text svneol=native#text/html
-*.ini        text svneol=native#text/plain
-*.log        text svneol=native#text/plain
-*.mak        text svneol=native#text/plain
-*.qbk        text svneol=native#text/plain
-*.rst        text svneol=native#text/plain
-*.sql        text svneol=native#text/x-sql
-*.txt        text svneol=native#text/plain
-*.xhtml      text svneol=native#text/xhtml%2Bxml
-*.xml        text svneol=native#text/xml
-*.xsd        text svneol=native#text/xml
-*.xsl        text svneol=native#text/xml
-*.xslt       text svneol=native#text/xml
-*.xul        text svneol=native#text/xul
-*.yml        text svneol=native#text/plain
-boost-no-inspect text svneol=native#text/plain
-CHANGES      text svneol=native#text/plain
-COPYING      text svneol=native#text/plain
-INSTALL      text svneol=native#text/plain
-Jamfile      text svneol=native#text/plain
-Jamroot      text svneol=native#text/plain
-Jamfile.v2   text svneol=native#text/plain
-Jamrules     text svneol=native#text/plain
-Makefile*    text svneol=native#text/plain
-README       text svneol=native#text/plain
-TODO         text svneol=native#text/plain
-
-# Code formats
-*.c          text svneol=native#text/plain
-*.cpp        text svneol=native#text/plain
-*.h          text svneol=native#text/plain
-*.hpp        text svneol=native#text/plain
-*.ipp        text svneol=native#text/plain
-*.tpp        text svneol=native#text/plain
-*.jam        text svneol=native#text/plain
-*.java       text svneol=native#text/plain
diff --git a/third_party/boostorg/algorithm/.travis.yml b/third_party/boostorg/algorithm/.travis.yml
deleted file mode 100644
index 11961b1..0000000
--- a/third_party/boostorg/algorithm/.travis.yml
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2016, 2017, 2018 Peter Dimov
-# Copyright 2018 T. Zachary Laine
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-language: cpp
-
-sudo: false
-
-branches:
-  only:
-    - master
-    - develop
-    - /feature\/.*/
-
-env:
-  matrix:
-    - BOGUS_JOB=true
-
-matrix:
-
-  exclude:
-    - env: BOGUS_JOB=true
-
-  include:
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++ CXXSTD=11
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=11
-      addons:
-        apt:
-          packages:
-            - g++-4.7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=11
-      addons:
-        apt:
-          packages:
-            - g++-4.8
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=11
-      addons:
-        apt:
-          packages:
-            - g++-4.9
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=11,14,1z
-      addons:
-        apt:
-          packages:
-            - g++-5
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=11,14,1z
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=11,14,1z CXXSTD_DIALECT=cxxstd-dialect=gnu
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=11,14,17
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11
-
-    - os: linux
-      compiler: clang++-libc++
-      env: TOOLSET=clang COMPILER=clang++-libc++ CXXSTD=11,14,1z
-      addons:
-        apt:
-          packages:
-            - libc++-dev
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode9.1
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode9
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode8.3
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode8
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode7.3
-
-    - os: osx
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=11,14,1z
-      osx_image: xcode6.4
-
-install:
-  - BOOST_BRANCH=develop && [ "$TRAVIS_BRANCH" == "master" ] && BOOST_BRANCH=master || true
-  - cd ..
-  - git clone -b $BOOST_BRANCH --depth 1 https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - git submodule update --init tools/build
-  - git submodule update --init libs/config
-  - git submodule update --init libs/predef
-  - git submodule update --init libs/core
-  - git submodule update --init libs/detail
-  - git submodule update --init libs/range
-  - git submodule update --init libs/assert
-  - git submodule update --init libs/array
-  - git submodule update --init libs/type_traits
-  - git submodule update --init libs/static_assert
-  - git submodule update --init libs/iterator
-  - git submodule update --init libs/preprocessor
-  - git submodule update --init libs/mpl
-  - git submodule update --init libs/smart_ptr
-  - git submodule update --init libs/callable_traits
-  - git submodule update --init libs/type_index
-  - git submodule update --init libs/exception
-  - git submodule update --init libs/throw_exception
-  - git submodule update --init libs/utility
-  - git submodule update --init libs/bind
-  - git submodule update --init libs/ratio
-  - git submodule update --init libs/function
-  - git submodule update --init libs/integer
-  - git submodule update --init libs/numeric
-  - git submodule update --init libs/move
-  - git submodule update --init libs/container_hash
-  - git submodule update --init libs/io
-  - git submodule update --init libs/concept_check
-  - git submodule update --init libs/test
-  - git submodule update --init libs/timer
-  - git submodule update --init libs/chrono
-  - git submodule update --init libs/system
-  - cp -r $TRAVIS_BUILD_DIR/* libs/algorithm
-  - ./bootstrap.sh
-  - ./b2 headers
- 
-script:
-  - |-
-    echo "using $TOOLSET : : $COMPILER ;" > ~/user-config.jam
-  - ./b2 -j3 libs/algorithm/test toolset=$TOOLSET cxxstd=$CXXSTD $CXXSTD_DIALECT
diff --git a/third_party/boostorg/algorithm/BUILD b/third_party/boostorg/algorithm/BUILD
deleted file mode 100644
index 8efc7ca..0000000
--- a/third_party/boostorg/algorithm/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-licenses(["notice"])  # boost
-
-cc_library(
-    name = "algorithm",
-    hdrs = glob(["include/**"]),
-    includes = ["include"],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = ["//visibility:public"],
-    deps = [
-        "//third_party/boostorg/assert",
-        "//third_party/boostorg/static_assert",
-        "//third_party/boostorg/type_traits",
-    ],
-)
diff --git a/third_party/boostorg/algorithm/appveyor.yml b/third_party/boostorg/algorithm/appveyor.yml
deleted file mode 100644
index 6cdae88..0000000
--- a/third_party/boostorg/algorithm/appveyor.yml
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2016 Peter Dimov
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-version: 1.0.{build}-{branch}
-
-shallow_clone: true
-
-branches:
-  only:
-    - master
-    - develop
-
-platform:
-  - x64
-
-environment:
-  matrix:
-    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-      ARGS: --toolset=msvc-14.1 address-model=64
-    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-      ARGS: --toolset=msvc-14.1 address-model=32
-    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-      ARGS: --toolset=msvc-14.1 address-model=64 cxxflags=-std:c++latest cxxflags=-permissive-
-    - ARGS: --toolset=msvc-9.0  address-model=32
-    - ARGS: --toolset=msvc-10.0 address-model=32
-    - ARGS: --toolset=msvc-11.0 address-model=32
-    - ARGS: --toolset=msvc-12.0 address-model=32
-    - ARGS: --toolset=msvc-14.0 address-model=32
-    - ARGS: --toolset=msvc-12.0 address-model=64
-    - ARGS: --toolset=msvc-14.0 address-model=64
-    - ARGS: --toolset=msvc-14.0 address-model=64 cxxflags=-std:c++latest
-    - ARGS: --toolset=gcc address-model=64 
-      CXXSTD: 03,11,14,1z
-      PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
-    - ARGS: --toolset=gcc address-model=64
-      CXXSTD: 03,11,14,1z
-      PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
-    - ARGS: --toolset=gcc address-model=32 linkflags=-Wl,-allow-multiple-definition
-      CXXSTD: 03,11,14,1z
-      PATH: C:\MinGW\bin;%PATH%
-    - ARGS: --toolset=gcc address-model=64
-      CXXSTD: 03,11,14,1z
-      PATH: C:\cygwin64\bin;%PATH%
-    - ARGS: --toolset=gcc address-model=32
-      CXXSTD: 03,11,14,1z
-      PATH: C:\cygwin\bin;%PATH%
-
-install:
-  - cd ..
-  - git clone -b %APPVEYOR_REPO_BRANCH% --depth 1 https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\algorithm
-  - git submodule update --init tools/build
-  - git submodule update --init libs/config
-  - git submodule update --init libs/predef
-  - git submodule update --init libs/core
-  - git submodule update --init libs/detail
-  - git submodule update --init libs/range
-  - git submodule update --init libs/assert
-  - git submodule update --init libs/array
-  - git submodule update --init libs/type_traits
-  - git submodule update --init libs/static_assert
-  - git submodule update --init libs/iterator
-  - git submodule update --init libs/preprocessor
-  - git submodule update --init libs/mpl
-  - git submodule update --init libs/smart_ptr
-  - git submodule update --init libs/callable_traits
-  - git submodule update --init libs/type_index
-  - git submodule update --init libs/exception
-  - git submodule update --init libs/throw_exception
-  - git submodule update --init libs/utility
-  - git submodule update --init libs/bind
-  - git submodule update --init libs/ratio
-  - git submodule update --init libs/function
-  - git submodule update --init libs/integer
-  - git submodule update --init libs/numeric
-  - git submodule update --init libs/move
-  - git submodule update --init libs/container_hash
-  - git submodule update --init libs/io
-  - git submodule update --init libs/concept_check
-  - git submodule update --init libs/test
-  - git submodule update --init libs/timer
-  - git submodule update --init libs/chrono
-  - git submodule update --init libs/system
-  - bootstrap
-  - b2 headers
-  
-build: off
-
-test_script:
-  - cd libs\config\test
-  - ..\..\..\b2 -j3 %ARGS% cxxstd=%CXXSTD%
diff --git a/third_party/boostorg/algorithm/doc/Jamfile.v2 b/third_party/boostorg/algorithm/doc/Jamfile.v2
deleted file mode 100644
index e84cfc0..0000000
--- a/third_party/boostorg/algorithm/doc/Jamfile.v2
+++ /dev/null
@@ -1,56 +0,0 @@
-# Boost.Algorithm
-#
-# Copyright (c) 2010-2012 Marshall Clow
-#
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-
-# Quickbook
-# -----------------------------------------------------------------------------
-
-import os ;
-
-using quickbook ;
-using doxygen ;
-using boostbook ;
-
-doxygen autodoc
-    :
-        [ glob ../../../boost/algorithm/*.hpp
-               ../../../boost/algorithm/searching/*.hpp
-               ../../../boost/algorithm/cxx11/*.hpp
-               ../../../boost/algorithm/cxx14/*.hpp
-        ]
-    :
-       <doxygen:param>"PREDEFINED=\"BOOST_ALGORITHM_DOXYGEN=1\""
-       <doxygen:param>WARNINGS=YES # Default NO, but useful to see warnings, especially in a logfile.
-    ;
-
-
-xml algorithm : algorithm.qbk ;
-
-boostbook standalone
-  :
-    algorithm
-  :
-    <dependency>autodoc
-    <xsl:param>boost.root=../../../..
-    <xsl:param>"boost.doxygen.reftitle=Boost.Algorithms C++ Reference"
-    <xsl:param>chapter.autolabel=0
-    <xsl:param>chunk.section.depth=8
-    <xsl:param>toc.section.depth=2
-    <xsl:param>toc.max.depth=2
-    <xsl:param>generate.section.toc.level=1
-  ;
-
-###############################################################################
-alias boostdoc
-    : ../string/doc/string_algo.xml
-    :
-    : <dependency>../string/doc//autodoc
-    : ;
-explicit boostdoc ;
-alias boostrelease : standalone ;
-explicit boostrelease ;
diff --git a/third_party/boostorg/algorithm/doc/algorithm.qbk b/third_party/boostorg/algorithm/doc/algorithm.qbk
deleted file mode 100644
index 3d1230f..0000000
--- a/third_party/boostorg/algorithm/doc/algorithm.qbk
+++ /dev/null
@@ -1,80 +0,0 @@
-[library The Boost Algorithm Library
-    [quickbook 1.5]
-    [id algorithm]
-    [dirname algorithm]
-    [purpose Library of useful algorithms]
-    [category algorithms]
-    [authors [Clow, Marshall]]
-    [copyright 2010-2012 Marshall Clow]
-    [source-mode c++]
-    [license
-		Distributed under the Boost Software License, Version 1.0.
-		(See accompanying file LICENSE_1_0.txt or copy at
-		[@http://www.boost.org/LICENSE_1_0.txt])
-    ]
-]
-
-[section Description and Rationale]
-
-Boost.Algorithm is a collection of general purpose algorithms. While Boost contains many libraries of data structures, there is no single library for general purpose algorithms. Even though the algorithms are generally useful, many tend to be thought of as "too small" for Boost.
-
-An implementation of Boyer-Moore searching, for example, might take a developer a week or so to implement, including test cases and documentation. However, scheduling a review to include that code into Boost might take several months, and run into resistance because "it is too small". Nevertheless, a library of tested, reviewed, documented algorithms can make the developer's life much easier, and that is the purpose of this library.
-
-[heading Future plans]
-
-I will be soliciting submissions from other developers, as well as looking through the literature for existing algorithms to include. The Adobe Source Library, for example, contains many useful algorithms that already have documentation and test cases. Knuth's _The Art of Computer Programming_ is chock-full of algorithm descriptions, too. 
-
-My goal is to run regular algorithm reviews, similar to the Boost library review process, but with smaller chunks of code. 
-
-[heading Dependencies]
-
-Boost.Algorithm uses Boost.Range, Boost.Assert, Boost.Array, Boost.TypeTraits, and Boost.StaticAssert.
-
-
-[heading Acknowledgements]
-
-Thanks to all the people who have reviewed this library and made suggestions for improvements. Steven Watanabe and Sean Parent, in particular, have provided a great deal of help.
-
-[endsect]
-
-[/ include toc.qbk]
-
-
-[section:Searching Searching Algorithms]
-[include boyer_moore.qbk]
-[include boyer_moore_horspool.qbk]
-[include knuth_morris_pratt.qbk]
-[endsect]
-
-[section:CXX11 C++11 Algorithms]
-[include all_of.qbk]
-[include any_of.qbk]
-[include none_of.qbk]
-[include one_of.qbk]
-[include ordered-hpp.qbk]
-[include is_partitioned.qbk]
-[include is_permutation.qbk]
-[include partition_point.qbk]
-[endsect]
-
-[section:CXX14 C++14 Algorithms]
-[include equal.qbk]
-[include mismatch.qbk]
-[endsect]
-
-[section:Misc Other Algorithms]
-[include clamp-hpp.qbk]
-[include find_not.qbk]
-[include find_backward.qbk]
-[include gather.qbk]
-[include hex.qbk]
-[include is_palindrome.qbk]
-[include is_partitioned_until.qbk]
-[include apply_permutation.qbk]
-[endsect]
-
-
-
-[xinclude autodoc.xml]
-
-
diff --git a/third_party/boostorg/algorithm/doc/all_of.qbk b/third_party/boostorg/algorithm/doc/all_of.qbk
deleted file mode 100644
index 5b0b8af..0000000
--- a/third_party/boostorg/algorithm/doc/all_of.qbk
+++ /dev/null
@@ -1,89 +0,0 @@
-[/ File all_of.qbk]
-
-[section:all_of all_of]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'boost/algorithm/cxx11/all_of.hpp' contains four variants of a single algorithm, `all_of`. The algorithm tests all the elements of a sequence and returns true if they all share a  property.
-
-The routine `all_of` takes a sequence and a predicate. It will return true if the predicate returns true when applied to every element in the sequence. 
-
-The routine `all_of_equal` takes a sequence and a value. It will return true if every element in the sequence compares equal to the passed in value.
-
-Both routines come in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `all_of` returns true if the predicate returns true for every item in the sequence.  There are two versions; one takes two iterators, and the other takes a range.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename Predicate>
-	bool all_of ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	bool all_of ( const Range &r, Predicate p );
-}}
-``
-
-The function `all_of_equal` is similar to `all_of`, but instead of taking a predicate to test the elements of the sequence, it takes a value to compare against.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename V>
-	bool all_of_equal ( InputIterator first, InputIterator last, V const &val );
-template<typename Range, typename V> 
-	bool all_of_equal ( const Range &r, V const &val );
-}}
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-using namespace boost::algorithm;
-all_of ( c, isOdd ) --> false
-all_of ( c.begin (), c.end (), lessThan10 ) --> false
-all_of ( c.begin (), c.begin () + 3, lessThan10 ) --> true
-all_of ( c.end (), c.end (), isOdd ) --> true  // empty range
-all_of_equal ( c, 3 ) --> false
-all_of_equal ( c.begin () + 3, c.begin () + 4, 3 ) --> true
-all_of_equal ( c.begin (), c.begin (), 99 ) --> true  // empty range
-``
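-
-As a rough, compilable sketch of the same calls (assuming a C++11 compiler; the lambdas stand in for the `isOdd` and `lessThan10` functions above):
-``
-#include <boost/algorithm/cxx11/all_of.hpp>
-#include <vector>
-#include <cassert>
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-    assert ( !boost::algorithm::all_of ( c, [] ( int i ) { return i < 10; } ));
-    assert (  boost::algorithm::all_of ( c.begin (), c.begin () + 3,
-                                         [] ( int i ) { return i < 10; } ));
-    assert (  boost::algorithm::all_of_equal ( c.begin () + 3, c.begin () + 4, 3 ));
-    return 0;
-}
-``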
-
-[heading Iterator Requirements]
-
-`all_of` and `all_of_equal` work on all iterators except output iterators.
-
-[heading Complexity]
-
-All of the variants of `all_of` and `all_of_equal` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If any of the comparisons fail, the algorithm will terminate immediately, without examining the remaining members of the sequence.
-
-[heading Exception Safety]
-
-All of the variants of `all_of` and `all_of_equal` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The routine `all_of` is also available as part of the C++11 standard.
-
-* `all_of` and `all_of_equal` both return true for empty ranges, no matter what is passed to test against. When there are no items in the sequence to test, they all satisfy the condition to be tested against.
-
-* The second parameter to `all_of_equal` is a template parameter, rather than deduced from the first parameter (`std::iterator_traits<InputIterator>::value_type`), because that allows more flexibility for callers, and takes advantage of built-in comparisons for the type that is pointed to by the iterator.  The function is defined to return true if, for all elements in the sequence, the expression `*iter == val` evaluates to true (where `iter` is an iterator to each element in the sequence).
-
-[endsect]
-
-[/ File all_of.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/any_of.qbk b/third_party/boostorg/algorithm/doc/any_of.qbk
deleted file mode 100644
index 4a50861..0000000
--- a/third_party/boostorg/algorithm/doc/any_of.qbk
+++ /dev/null
@@ -1,89 +0,0 @@
-[/ File any_of.qbk]
-
-[section:any_of any_of]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'boost/algorithm/cxx11/any_of.hpp' contains four variants of a single algorithm, `any_of`. The algorithm tests the elements of a sequence and returns true if any of the elements has a  particular property.
-
-The routine `any_of` takes a sequence and a predicate. It will return true if the predicate returns true for any element in the sequence. 
-
-The routine `any_of_equal` takes a sequence and a value. It will return true if any element in the sequence compares equal to the passed in value.
-
-Both routines come in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `any_of` returns true if the predicate returns true for any item in the sequence.  There are two versions; one takes two iterators, and the other takes a range.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename Predicate>
-	bool any_of ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	bool any_of ( const Range &r, Predicate p );
-}}
-``
-
-The function `any_of_equal` is similar to `any_of`, but instead of taking a predicate to test the elements of the sequence, it takes a value to compare against.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename V>
-	bool any_of_equal ( InputIterator first, InputIterator last, V const &val );
-template<typename Range, typename V> 
-	bool any_of_equal ( const Range &r, V const &val );
-}}
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-using namespace boost::algorithm;
-any_of ( c, isOdd ) --> true
-any_of ( c.begin (), c.end (), lessThan10 ) --> true
-any_of ( c.begin () + 4, c.end (), lessThan10 ) --> false
-any_of ( c.end (), c.end (), isOdd ) --> false  // empty range
-any_of_equal ( c, 3 ) --> true
-any_of_equal ( c.begin (), c.begin () + 3, 3 ) --> false
-any_of_equal ( c.begin (), c.begin (), 99 ) --> false  // empty range
-``
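-
-Used this way, `any_of_equal` amounts to a membership test. A minimal compilable sketch (assuming a C++11 compiler):
-``
-#include <boost/algorithm/cxx11/any_of.hpp>
-#include <vector>
-#include <cassert>
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-    assert (  boost::algorithm::any_of_equal ( c, 3 ));    // 3 is present
-    assert ( !boost::algorithm::any_of_equal ( c, 99 ));   // 99 is not
-    return 0;
-}
-``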
-
-[heading Iterator Requirements]
-
-`any_of` and `any_of_equal` work on all iterators except output iterators.
-
-[heading Complexity]
-
-All of the variants of `any_of` and `any_of_equal` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If any of the comparisons succeed, the algorithm will terminate immediately, without examining the remaining members of the sequence.
-
-[heading Exception Safety]
-
-All of the variants of `any_of` and `any_of_equal` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The routine `any_of` is also available as part of the C++11 standard.
-
-* `any_of` and `any_of_equal` both return false for empty ranges, no matter what is passed to test against. 
-
-* The second parameter to `any_of_equal` is a template parameter, rather than deduced from the first parameter (`std::iterator_traits<InputIterator>::value_type`), because that allows more flexibility for callers, and takes advantage of built-in comparisons for the type that is pointed to by the iterator.  The function is defined to return true if, for any element in the sequence, the expression `*iter == val` evaluates to true (where `iter` is an iterator to each element in the sequence).
-
-[endsect]
-
-[/ File any_of.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/apply_permutation.qbk b/third_party/boostorg/algorithm/doc/apply_permutation.qbk
deleted file mode 100644
index 7f11457..0000000
--- a/third_party/boostorg/algorithm/doc/apply_permutation.qbk
+++ /dev/null
@@ -1,96 +0,0 @@
-[/ File apply_permutation.qbk]
-
-[section:apply_permutation apply_permutation]
-
-[/license
-Copyright (c) 2017 Alexander Zaitsev
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'apply_permutation.hpp' contains two algorithms, apply_permutation and apply_reverse_permutation, together with range-based versions of each.
-The algorithms reorder an item sequence according to an index sequence.
-
-The routine `apply_permutation` takes an item sequence and an order sequence. It reshuffles the item sequence according to the order sequence. Each value in the order sequence indicates where the corresponding item comes from. The order sequence must be exactly a permutation of the sequence [0, 1, ... , N], where N is the largest index in the item sequence (zero-indexed).
-The routine `apply_reverse_permutation` takes an item sequence and an order sequence. It reshuffles the item sequence according to the order sequence. Each value in the order sequence indicates where the corresponding item goes to. The order sequence must be exactly a permutation of the sequence [0, 1, ... , N], where N is the largest index in the item sequence (zero-indexed).
-
-Implementations are based on these articles:
-https://blogs.msdn.microsoft.com/oldnewthing/20170102-00/?p=95095
-https://blogs.msdn.microsoft.com/oldnewthing/20170103-00/?p=95105
-https://blogs.msdn.microsoft.com/oldnewthing/20170104-00/?p=95115
-https://blogs.msdn.microsoft.com/oldnewthing/20170109-00/?p=95145
-https://blogs.msdn.microsoft.com/oldnewthing/20170110-00/?p=95155
-https://blogs.msdn.microsoft.com/oldnewthing/20170111-00/?p=95165
-
-The routines come in two forms; the first one takes two iterators to define the item range and two iterators to define the index range. The second form takes a range to define the item sequence and a range to define the index sequence.
-
-
-[heading interface]
-
-There are two versions of each algorithm:
-1) the first takes four iterators;
-2) the second takes two ranges.
-``
-template<typename RandomAccessIterator1, typename RandomAccessIterator2>
-void apply_permutation(RandomAccessIterator1 item_begin, RandomAccessIterator1 item_end,
-                  RandomAccessIterator2 ind_begin, RandomAccessIterator2 ind_end);
-template<typename Range1, typename Range2>
-void apply_permutation(Range1& item_range, Range2& ind_range);
-template<typename RandomAccessIterator1, typename RandomAccessIterator2>
-void apply_reverse_permutation(RandomAccessIterator1 item_begin, RandomAccessIterator1 item_end,
-                  RandomAccessIterator2 ind_begin, RandomAccessIterator2 ind_end);
-template<typename Range1, typename Range2>
-void apply_reverse_permutation(Range1& item_range, Range2& ind_range);
-``
-
-
-[heading Examples]
-
-Given the containers:
-``
-std::vector<int> emp_vec, emp_order;
-std::vector<int> one{1}, one_order{0};
-std::vector<int> two{1,2}, two_order{1,0};
-std::vector<int> vec{1, 2, 3, 4, 5};
-std::vector<int> order{4, 2, 3, 1, 0};
-``
-then
-``
-apply_permutation(emp_vec, emp_order)  --> no changes
-apply_reverse_permutation(emp_vec, emp_order)  --> no changes
-apply_permutation(one, one_order)  --> no changes
-apply_reverse_permutation(one, one_order)  --> no changes
-apply_permutation(two, two_order)  --> two:{2,1}
-apply_reverse_permutation(two, two_order)  --> two:{2,1}
-apply_permutation(vec, order)  --> vec:{5, 3, 4, 2, 1}
-apply_reverse_permutation(vec, order)  --> vec:{5, 4, 2, 3, 1}
-``
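-
-A compilable sketch of the `vec`/`order` case above (assuming a C++11 compiler and that the header is included as `boost/algorithm/apply_permutation.hpp`):
-``
-#include <boost/algorithm/apply_permutation.hpp>
-#include <vector>
-#include <cassert>
-
-int main ()
-{
-    std::vector<int> vec      = { 1, 2, 3, 4, 5 };
-    std::vector<int> order    = { 4, 2, 3, 1, 0 };
-    std::vector<int> expected = { 5, 3, 4, 2, 1 };
-
-    boost::algorithm::apply_permutation ( vec, order );   // note: order gets permuted too
-    assert ( vec == expected );
-    return 0;
-}
-``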
-
-[heading Iterator Requirements]
-
-`apply_permutation` and `apply_reverse_permutation` work only on random-access iterators. Random-access iterators are required for both the item and the index sequences.
-
-[heading Complexity]
-
-All of the variants of `apply_permutation` and `apply_reverse_permutation` run in ['O(N)] (linear) time.
-
-[heading Exception Safety]
-
-All of the variants of `apply_permutation` and `apply_reverse_permutation` take their parameters by iterators or reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-* If the item sequence and the index sequence are not of equal length, the behavior is undefined.
-
-* `apply_permutation` and `apply_reverse_permutation` work also on empty sequences.
-
-* Order sequence must be zero-indexed.
-
-* Order sequence gets permuted.
-
-[endsect]
-
-[/ File apply_permutation.qbk
-Copyright 2017 Alexander Zaitsev
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
diff --git a/third_party/boostorg/algorithm/doc/boyer_moore.qbk b/third_party/boostorg/algorithm/doc/boyer_moore.qbk
deleted file mode 100644
index 1651133..0000000
--- a/third_party/boostorg/algorithm/doc/boyer_moore.qbk
+++ /dev/null
@@ -1,109 +0,0 @@
-[/ QuickBook Document version 1.5 ]
-
-[section:BoyerMoore Boyer-Moore Search]
-
-[/license
-
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt)
-]
-
-
-[heading Overview]
-
-The header file 'boyer_moore.hpp' contains an implementation of the Boyer-Moore algorithm for searching sequences of values. 
-
-The Boyer–Moore string search algorithm is a particularly efficient string searching algorithm, and it has been the standard benchmark for the practical string search literature. The Boyer-Moore algorithm was invented by Bob Boyer and J. Strother Moore, and published in the October 1977 issue of the Communications of the ACM; a copy of that article is available at [@http://www.cs.utexas.edu/~moore/publications/fstrpos.pdf].
-
-The Boyer-Moore algorithm uses two precomputed tables to give better performance than a naive search. These tables depend on the pattern being searched for, and give the Boyer-Moore algorithm a larger memory footprint and higher startup costs than a simpler algorithm, but these costs are recovered quickly during the searching process, especially if the pattern is longer than a few elements.
-
-However, unlike `std::search`, the Boyer-Moore algorithm cannot be used with comparison predicates.
-
-Nomenclature: I refer to the sequence being searched for as the "pattern", and the sequence being searched in as the "corpus".
-
-[heading Interface]
-
-For flexibility, the Boyer-Moore algorithm has two interfaces; an object-based interface and a procedural one. The object-based interface builds the tables in the constructor, and uses operator () to perform the search. The procedural interface builds the table and does the search all in one step. If you are going to be searching for the same pattern in multiple corpora, then you should use the object interface, and only build the tables once.
-
-Here is the object interface:
-``
-template <typename patIter>
-class boyer_moore {
-public:
-    boyer_moore ( patIter first, patIter last );
-    ~boyer_moore ();
-    
-    template <typename corpusIter>
-    pair<corpusIter, corpusIter> operator () ( corpusIter corpus_first, corpusIter corpus_last );
-    };
-``
-
-and here is the corresponding procedural interface:
-
-``
-template <typename patIter, typename corpusIter>
-pair<corpusIter, corpusIter> boyer_moore_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, 
-        patIter pat_first, patIter pat_last );
-``
-
-Each of the functions is passed two pairs of iterators. The first two define the corpus and the second two define the pattern. Note that the two pairs need not be of the same type, but they do need to "point" at the same type. In other words, `patIter::value_type` and `corpusIter::value_type` need to be the same type.
-
-The return value of the function is a pair of iterators pointing to the position of the pattern in the corpus. If the pattern is empty, it returns an empty range at the start of the corpus (`corpus_first`, `corpus_first`). If the pattern is not found, it returns an empty range at the end of the corpus (`corpus_last`, `corpus_last`).
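-
-As a rough sketch of the object interface, reusing the precomputed tables to search two corpora for the same pattern (this assumes a C++11 compiler, the pair-returning interface described above, and that the header is included as `boost/algorithm/searching/boyer_moore.hpp`):
-``
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <string>
-#include <cassert>
-
-int main ()
-{
-    const std::string pattern = "abc";
-    const std::string corpus1 = "xxabcxx";
-    const std::string corpus2 = "no match here";
-
-    boost::algorithm::boyer_moore<std::string::const_iterator>
-        searcher ( pattern.begin (), pattern.end ());
-
-    auto r1 = searcher ( corpus1.begin (), corpus1.end ());
-    auto r2 = searcher ( corpus2.begin (), corpus2.end ());
-
-    assert ( r1.first != corpus1.end ());   // found; [r1.first, r1.second) is the match
-    assert ( r2.first == corpus2.end ());   // not found; empty range at the end
-    return 0;
-}
-``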
-
-[heading Compatibility Note]
-
-Earlier versions of this searcher returned only a single iterator.  As explained in [@https://cplusplusmusings.wordpress.com/2016/02/01/sometimes-you-get-things-wrong/], this was a suboptimal interface choice, and has been changed, starting in the 1.62.0 release.  Old code that is expecting a single iterator return value can be updated by replacing the return value of the searcher's `operator ()` with the `.first` field of the pair.
-
-Instead of:
-``
-iterator foo = searcher(a, b);
-``
-
-you now write:
-``
-iterator foo = searcher(a, b).first;
-``
-
-[heading Performance]
-
-The execution time of the Boyer-Moore algorithm, while still linear in the size of the string being searched, can have a significantly lower constant factor than many other search algorithms: it doesn't need to check every character of the string to be searched, but rather skips over some of them. Generally the algorithm gets faster as the pattern being searched for becomes longer. Its efficiency derives from the fact that with each unsuccessful attempt to find a match between the search string and the text it is searching, it uses the information gained from that attempt to rule out as many positions of the text as possible where the string cannot match.
-
-[heading Memory Use]
-
-The algorithm allocates two internal tables. The first one is proportional to the length of the pattern; the second one has one entry for each member of the "alphabet" in the pattern. For (8-bit) character types, this table contains 256 entries.
-
-[heading Complexity]
-
-The worst-case performance to find a pattern in the corpus is ['O(N)] (linear) time; that is, proportional to the length of the corpus being searched. In general, the search is sub-linear; not every entry in the corpus need be checked.
-
-[heading Exception Safety]
-
-Both the object-oriented and procedural versions of the Boyer-Moore algorithm take their parameters by value and do not use any information other than what is passed in. Therefore, both interfaces provide the strong exception guarantee.
-
-[heading Notes]
-
-* When using the object-based interface, the pattern must remain unchanged during the searches; i.e., from the time the object is constructed until the final call to operator () returns.
-
-* The Boyer-Moore algorithm requires random-access iterators for both the pattern and the corpus.
-
-[heading Customization points] 
-
-The Boyer-Moore object takes a traits template parameter which enables the caller to customize how one of the precomputed tables is stored. This table, called the skip table, contains (logically) one entry for every possible value that the pattern can contain. When searching 8-bit character data, this table contains 256 elements. The traits class defines the table to be used. 
-
-The default traits class uses a `boost::array` for small 'alphabets' and a `tr1::unordered_map` for larger ones.  The array-based skip table gives excellent performance, but could be prohibitively large when the 'alphabet' of elements to be searched grows. The unordered_map based version only grows as the number of unique elements in the pattern, but makes many more heap allocations, and gives slower lookup performance. 
-
-To use a different skip table, you should define your own skip table object and your own traits class, and use them to instantiate the Boyer-Moore object. The interface to these objects is described TBD.
-
-
-[endsect]
-
-[/ File boyer_moore.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/boyer_moore_horspool.qbk b/third_party/boostorg/algorithm/doc/boyer_moore_horspool.qbk
deleted file mode 100644
index 5b8491a..0000000
--- a/third_party/boostorg/algorithm/doc/boyer_moore_horspool.qbk
+++ /dev/null
@@ -1,107 +0,0 @@
-[/ QuickBook Document version 1.5 ]
-
-[section:BoyerMooreHorspool Boyer-Moore-Horspool Search]
-
-[/license
-
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt)
-]
-
-
-[heading Overview]
-
-The header file 'boyer_moore_horspool.hpp' contains an implementation of the Boyer-Moore-Horspool algorithm for searching sequences of values. 
-
-The Boyer-Moore-Horspool search algorithm was published by Nigel Horspool in 1980. It is a refinement of the Boyer-Moore algorithm that trades space for time. It uses less space for internal tables than Boyer-Moore, and has poorer worst-case performance.
-
-Unlike `std::search`, the Boyer-Moore-Horspool algorithm cannot be used with comparison predicates.
-
-[heading Interface]
-
-Nomenclature: I refer to the sequence being searched for as the "pattern", and the sequence being searched in as the "corpus".
-
-For flexibility, the Boyer-Moore-Horspool algorithm has two interfaces; an object-based interface and a procedural one. The object-based interface builds the tables in the constructor, and uses operator () to perform the search. The procedural interface builds the table and does the search all in one step. If you are going to be searching for the same pattern in multiple corpora, then you should use the object interface, and only build the tables once.
-
-Here is the object interface:
-``
-template <typename patIter>
-class boyer_moore_horspool {
-public:
-    boyer_moore_horspool ( patIter first, patIter last );
-    ~boyer_moore_horspool ();
-    
-    template <typename corpusIter>
-    pair<corpusIter, corpusIter> operator () ( corpusIter corpus_first, corpusIter corpus_last );
-    };
-``
-
-and here is the corresponding procedural interface:
-
-``
-template <typename patIter, typename corpusIter>
-pair<corpusIter, corpusIter> boyer_moore_horspool_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, 
-        patIter pat_first, patIter pat_last );
-``
-
-Each of the functions is passed two pairs of iterators. The first two define the corpus and the second two define the pattern. Note that the two pairs need not be of the same type, but they do need to "point" at the same type. In other words, `patIter::value_type` and `corpusIter::value_type` need to be the same type.
-
-The return value of the function is a pair of iterators pointing to the position of the pattern in the corpus. If the pattern is empty, it returns an empty range at the start of the corpus (`corpus_first`, `corpus_first`). If the pattern is not found, it returns an empty range at the end of the corpus (`corpus_last`, `corpus_last`).
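-
-For a one-off search, the procedural interface might be used as follows (a sketch, assuming a C++11 compiler, the pair-returning interface described above, and the header path `boost/algorithm/searching/boyer_moore_horspool.hpp`):
-``
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <string>
-#include <cassert>
-
-int main ()
-{
-    const std::string corpus  = "the quick brown fox";
-    const std::string pattern = "brown";
-
-    auto result = boost::algorithm::boyer_moore_horspool_search (
-        corpus.begin (), corpus.end (), pattern.begin (), pattern.end ());
-
-    assert ( result.first  == corpus.begin () + 10 );   // start of "brown"
-    assert ( result.second == corpus.begin () + 15 );   // one past the end of the match
-    return 0;
-}
-``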
-
-[heading Compatibility Note]
-
-Earlier versions of this searcher returned only a single iterator.  As explained in [@https://cplusplusmusings.wordpress.com/2016/02/01/sometimes-you-get-things-wrong/], this was a suboptimal interface choice, and has been changed, starting in the 1.62.0 release.  Old code that is expecting a single iterator return value can be updated by replacing the return value of the searcher's `operator ()` with the `.first` field of the pair.
-
-Instead of:
-``
-iterator foo = searcher(a, b);
-``
-
-you now write:
-``
-iterator foo = searcher(a, b).first;
-``
-
-[heading Performance]
-
-The execution time of the Boyer-Moore-Horspool algorithm is linear in the size of the string being searched; it can have a significantly lower constant factor than many other search algorithms: it doesn't need to check every character of the string to be searched, but rather skips over some of them. Generally the algorithm gets faster as the pattern being searched for becomes longer. Its efficiency derives from the fact that with each unsuccessful attempt to find a match between the search string and the text it is searching, it uses the information gained from that attempt to rule out as many positions of the text as possible where the string cannot match.
-
-[heading Memory Use]
-
-The algorithm uses an internal table that has one entry for each member of the "alphabet" in the pattern. For (8-bit) character types, this table contains 256 entries.
-
-[heading Complexity]
-
-The worst-case performance is ['O(m x n)], where ['m] is the length of the pattern and ['n] is the length of the corpus. The average time is ['O(n)]. The best case performance is sub-linear, and is, in fact, identical to Boyer-Moore, but the initialization is quicker and the internal loop is simpler than Boyer-Moore.
-
-[heading Exception Safety]
-
-Both the object-oriented and procedural versions of the Boyer-Moore-Horspool algorithm take their parameters by value and do not use any information other than what is passed in. Therefore, both interfaces provide the strong exception guarantee.
-
-[heading Notes]
-
-* When using the object-based interface, the pattern must remain unchanged during the searches; i.e., from the time the object is constructed until the final call to operator () returns.
-
-* The Boyer-Moore-Horspool algorithm requires random-access iterators for both the pattern and the corpus.
-
-[heading Customization points] 
-
-The Boyer-Moore-Horspool object takes a traits template parameter which enables the caller to customize how the precomputed table is stored. This table, called the skip table, contains (logically) one entry for every possible value that the pattern can contain. When searching 8-bit character data, this table contains 256 elements. The traits class defines the table to be used. 
-
-The default traits class uses a `boost::array` for small 'alphabets' and a `tr1::unordered_map` for larger ones.  The array-based skip table gives excellent performance, but could be prohibitively large when the 'alphabet' of elements to be searched grows. The unordered_map based version only grows as the number of unique elements in the pattern, but makes many more heap allocations, and gives slower lookup performance. 
-
-To use a different skip table, you should define your own skip table object and your own traits class, and use them to instantiate the Boyer-Moore-Horspool object. The interface to these objects is described TBD.
-
-
-[endsect]
-
-[/ File boyer_moore_horspool.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/clamp-hpp.qbk b/third_party/boostorg/algorithm/doc/clamp-hpp.qbk
deleted file mode 100644
index 7fb9264..0000000
--- a/third_party/boostorg/algorithm/doc/clamp-hpp.qbk
+++ /dev/null
@@ -1,73 +0,0 @@
-[/ QuickBook Document version 1.5 ]
-[section:clamp clamp]
-
-[/license
-
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt)
-
-]
-
-
-The header file clamp.hpp contains two functions for "clamping" a value between a pair of boundary values.
-
-[heading clamp]
-
-The function `clamp (v, lo, hi)` returns:
-
-* lo if v < lo
-* hi if hi < v
-* otherwise, v
-
-Note: using `clamp` with floating point numbers may give unexpected results if one of the values is `NaN`.
-
-There is also a version that allows the caller to specify a comparison predicate to use instead of `operator <`.
-
-``
-template<typename T> 
-const T& clamp ( const T& val, const T& lo, const T& hi );
-
-template<typename T, typename Pred> 
-const T& clamp ( const T& val, const T& lo, const T& hi, Pred p );
-``
-
-The following code: ``
-   int foo = 23;
-   foo = clamp ( foo, 1, 10 );
-``
-will leave `foo` with a value of 10
-
-Complexity:
-	`clamp` will make either one or two calls to the comparison predicate before returning one of the three parameters.
-
-[heading clamp_range]
-There are also four range-based versions of clamp that apply clamping to a series of values. You could write them yourself with std::transform and bind, like this: `std::transform ( first, last, out, bind ( clamp ( _1, lo, hi )))`, but they are provided here for your convenience.
-
-``
-template<typename InputIterator, typename OutputIterator> 
-OutputIterator clamp_range ( InputIterator first, InputIterator last, OutputIterator out,
-    typename std::iterator_traits<InputIterator>::value_type lo, 
-    typename std::iterator_traits<InputIterator>::value_type hi );
-
-template<typename Range, typename OutputIterator> 
-OutputIterator clamp_range ( const Range &r, OutputIterator out,
-	typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type lo, 
-	typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type hi );
-
-template<typename InputIterator, typename OutputIterator, typename Pred> 
-OutputIterator clamp_range ( InputIterator first, InputIterator last, OutputIterator out,
-    typename std::iterator_traits<InputIterator>::value_type lo, 
-    typename std::iterator_traits<InputIterator>::value_type hi, Pred p );
-
-template<typename Range, typename OutputIterator, typename Pred> 
-OutputIterator clamp_range ( const Range &r, OutputIterator out,
-	typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type lo, 
-	typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type hi,
-	Pred p );
-``
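-
-For example, clamping a sequence into [0, 10] through a `std::back_inserter` (a minimal sketch, assuming a C++11 compiler and the header path `boost/algorithm/clamp.hpp`):
-``
-#include <boost/algorithm/clamp.hpp>
-#include <vector>
-#include <iterator>
-#include <cassert>
-
-int main ()
-{
-    const std::vector<int> in       = { -5, 3, 42, 7, 100 };
-    const std::vector<int> expected = {  0, 3, 10, 7,  10 };
-    std::vector<int> out;
-
-    boost::algorithm::clamp_range ( in.begin (), in.end (),
-                                    std::back_inserter ( out ), 0, 10 );
-    assert ( out == expected );
-    return 0;
-}
-``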
-
-
-[endsect]
diff --git a/third_party/boostorg/algorithm/doc/equal.qbk b/third_party/boostorg/algorithm/doc/equal.qbk
deleted file mode 100644
index 0ba221c..0000000
--- a/third_party/boostorg/algorithm/doc/equal.qbk
+++ /dev/null
@@ -1,80 +0,0 @@
-[/ File equal.qbk]
-
-[section:equal equal ]
-
-[/license
-Copyright (c) 2013 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'equal.hpp' contains two variants of the STL algorithm `equal`. The algorithm tests to see if two sequences contain equal values.
-
-Before (the proposed) C++14 the algorithm `std::equal` took three iterators and an optional comparison predicate. The first two iterators `[first1, last1)` defined a sequence, and the second one `first2` defined the start of the second sequence. The second sequence was assumed to be the same length as the first.
-
-In C++14, two new variants were introduced, taking four iterators and an optional comparison predicate. The four iterators define two sequences `[first1, last1)` and `[first2, last2)` explicitly, rather than defining the second one implicitly. This leads to correct answers in more cases (and avoids undefined behavior in others).
-
-Consider the two sequences:
-```
-	auto seq1 = { 0, 1, 2 };
-	auto seq2 = { 0, 1, 2, 3, 4 };
-	
-	std::equal ( seq1.begin (), seq1.end (), seq2.begin ()); // true
-	std::equal ( seq2.begin (), seq2.end (), seq1.begin ()); // Undefined behavior
-	std::equal ( seq1.begin (), seq1.end (), seq2.begin (), seq2.end ()); // false
-```
-
-You can argue that `true` is the correct answer in the first case, even though the sequences are not the same. The first N entries in `seq2` are the same as the entries in `seq1` - but that's not all that's in `seq2`. But in the second case, the algorithm will read past the end of `seq1`, resulting in undefined behavior (large earthquake, incorrect results, pregnant cat, etc).
-
-However, if the two sequences are specified completely, it's clear that they are not equal.
-
-[heading interface]
-
-The function `equal` returns true if the two sequences compare equal; i.e., if each element in the sequence compares equal to the corresponding element in the other sequence. One version uses `std::equal_to` to do the comparison; the other lets the caller pass a predicate to do the comparisons.
-
-``
-template <class InputIterator1, class InputIterator2>
-bool equal ( InputIterator1 first1, InputIterator1 last1, 
-             InputIterator2 first2, InputIterator2 last2 );
-             
-template <class InputIterator1, class InputIterator2, class BinaryPredicate>
-bool equal ( InputIterator1 first1, InputIterator1 last1, 
-             InputIterator2 first2, InputIterator2 last2, BinaryPredicate pred );
-``
-
-[heading Examples]
-
-Given the container `c1` containing `{ 0, 1, 2, 3, 14, 15 }`, and `c2` containing `{ 1, 2, 3 }`,  then
-``
-equal ( c1.begin (),     c1.end (),       c2.begin (), c2.end ()) --> false
-equal ( c1.begin () + 1, c1.begin () + 3, c2.begin (), c2.end ()) --> true
-equal ( c1.end (),       c1.end (),       c2.end (),   c2.end ()) --> true  // empty sequences are always equal to each other
-``
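-
-The predicate form can be used, for example, for a case-insensitive comparison (a sketch; assumes a C++11 compiler, plain ASCII input for `std::tolower`, and that the C++14 variants live in `boost/algorithm/cxx14/equal.hpp`):
-``
-#include <boost/algorithm/cxx14/equal.hpp>
-#include <string>
-#include <cctype>
-#include <cassert>
-
-int main ()
-{
-    const std::string a = "Boost";
-    const std::string b = "BOOST";
-
-    const bool same = boost::algorithm::equal (
-        a.begin (), a.end (), b.begin (), b.end (),
-        [] ( char x, char y ) { return std::tolower ( x ) == std::tolower ( y ); } );
-    assert ( same );
-    return 0;
-}
-``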
-
-[heading Iterator Requirements]
-
-`equal` works on all iterators except output iterators.
-
-[heading Complexity]
-
-Both of the variants of `equal` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If the sequence is found to be not equal at any point, the routine will terminate immediately, without examining the rest of the elements.
-
-[heading Exception Safety]
-
-Both of the variants of `equal` take their parameters by value and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The four iterator version of the routine `equal` is part of the C++14 standard. When C++14 standard library implementations become available, the implementation from the standard library should be used.
-
-* `equal` returns true for two empty ranges, no matter what predicate is passed to test against. 
-
-[endsect]
-
-[/ File equal.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/find_backward.qbk b/third_party/boostorg/algorithm/doc/find_backward.qbk
deleted file mode 100644
index 838dbc9..0000000
--- a/third_party/boostorg/algorithm/doc/find_backward.qbk
+++ /dev/null
@@ -1,116 +0,0 @@
-[/ File find_backward.qbk]
-
-[section:find_backward find_backward ]
-
-[/license
-Copyright (c) 2018 T. Zachary Laine
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'find_backward.hpp' contains variants of the stl algorithm
-`find`. These variants are like `find`, except that they evaluate the elements
-of the given sequence in reverse order.
-
-Consider how finding the last element that is equal to `x` in a range is
-typically done:
-
-    // Assume a valid range of elements delimited by [first, last).
-    while (last-- != first) {
-        if (*last == x) {
-            // Use last here...
-        }
-    }
-
-Raw loops are icky though.  Perhaps we should do a bit of extra work to allow
-the use of `std::find()`:
-
-    auto rfirst = std::make_reverse_iterator(last);
-    auto rlast = std::make_reverse_iterator(first);
-    auto it = std::find(rfirst, rlast, x);
-    // Use it here...
-
-That seems nicer in that there is no raw loop, but it has two major drawbacks.
-First, it requires an unpleasant amount of typing.  Second, it is less
-efficient than forward-iterator `find`, since `std::reverse_iterator` calls
-its base-iterator's `operator--()` in most of its member functions before
-doing the work that the member function requires.
-
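-With `find_backward` the whole search collapses to a single call. A minimal compilable sketch (the include path `<boost/algorithm/find_backward.hpp>` is assumed here from the header name above):
-
-    #include <boost/algorithm/find_backward.hpp>
-    #include <iterator>
-    #include <vector>
-
-    int main()
-    {
-        const std::vector<int> v = { 2, 1, 2 };
-        const auto it = boost::algorithm::find_backward(v.begin(), v.end(), 2);
-        // it points at the last 2, i.e. it == std::prev(v.end())
-        return it == std::prev(v.end()) ? 0 : 1;
-    }
-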
-[heading interface]
-
-    template<typename BidiIter, typename T>
-    BidiIter find_backward(BidiIter first, BidiIter last, const T & x);
-
-    template<typename Range, typename T>
-    boost::range_iterator<Range> find_backward(Range & range, const T & x);
-
-These overloads of `find_backward` return an iterator to the last element that
-is equal to `x` in `[first, last)` or `r`, respectively.
-
-    template<typename BidiIter, typename T>
-    BidiIter find_not_backward(BidiIter first, BidiIter last, const T & x);
-
-    template<typename Range, typename T>
-    boost::range_iterator<Range> find_not_backward(Range & range, const T & x);
-
-These overloads of `find_not_backward` return an iterator to the last element
-that is not equal to `x` in `[first, last)` or `r`, respectively.
-
-    template<typename BidiIter, typename Pred>
-    BidiIter find_if_backward(BidiIter first, BidiIter last, Pred p);
-
-    template<typename Range, typename Pred>
-    boost::range_iterator<Range> find_if_backward(Range & range, Pred p);
-
-These overloads of `find_if_backward` return an iterator to the last element
-for which `pred` returns `true` in `[first, last)` or `r`, respectively.
-
-    template<typename BidiIter, typename Pred>
-    BidiIter find_if_not_backward(BidiIter first, BidiIter last, Pred p);
-
-    template<typename Range, typename Pred>
-    boost::range_iterator<Range> find_if_not_backward(Range & range, Pred p);
-
-These overloads of `find_if_not_backward` return an iterator to the last
-element for which `pred` returns `false` in `[first, last)` or `r`,
-respectively.
-
-[heading Examples]
-
-Given the container `c1` containing `{ 2, 1, 2 }`, then
-
-    find_backward        ( c1.begin(), c1.end(), 2                          ) --> --c1.end()
-    find_backward        ( c1.begin(), c1.end(), 3                          ) --> c1.end()
-    find_if_backward     ( c1.begin(), c1.end(), [](int i) {return i == 2;} ) --> --c1.end()
-    find_if_backward     ( c1.begin(), c1.end(), [](int i) {return i == 3;} ) --> c1.end()
-    find_not_backward    ( c1.begin(), c1.end(), 2                          ) --> std::prev(c1.end(), 2)
-    find_not_backward    ( c1.begin(), c1.end(), 1                          ) --> c1.end()
-    find_if_not_backward ( c1.begin(), c1.end(), [](int i) {return i == 2;} ) --> std::prev(c1.end(), 2)
-    find_if_not_backward ( c1.begin(), c1.end(), [](int i) {return i == 1;} ) --> c1.end()
-
-[heading Iterator Requirements]
-
-All variants work on bidirectional iterators.
-
-[heading Complexity]
-
-Linear.
-
-[heading Exception Safety]
-
-All of the variants take their parameters by value and do not depend upon any
-global state. Therefore, all the routines in this file provide the strong
-exception guarantee.
-
-[heading Notes]
-
-All variants are `constexpr` in C++14 or later.
-
-[endsect]
-
-[/ File find_backward.qbk
-Copyright 2018 T. Zachary Laine
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
diff --git a/third_party/boostorg/algorithm/doc/find_not.qbk b/third_party/boostorg/algorithm/doc/find_not.qbk
deleted file mode 100644
index 6df0482..0000000
--- a/third_party/boostorg/algorithm/doc/find_not.qbk
+++ /dev/null
@@ -1,83 +0,0 @@
-[/ File find_not.qbk]
-
-[section:find_not find_not ]
-
-[/license
-Copyright (c) 2018 T. Zachary Laine
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'find_not.hpp' contains a variant of the stl algorithm
-`find`. The algorithm finds the first value in the given sequence that is not
-equal to the given value.
-
-Consider this use of `find()`:
-
-    std::vector<int> vec = { 1, 1, 2 };
-    auto it = std::find(vec.begin(), vec.end(), 1);
-
-This gives us the first occurrence of `1` in `vec`.  What if we want to find
-the first occurrence of any number besides `1` in `vec`?  We have to write an
-unfortunate amount of code:
-
-    std::vector<int> vec = { 1, 1, 2 };
-    auto it = std::find_if(vec.begin(), vec.end(), [](int i) { return i != 1; });
-
-With `find_not()` the code gets much more terse:
-
-    std::vector<int> vec = { 1, 1, 2 };
-    auto it = find_not(vec.begin(), vec.end(), 1);
-
-The existing `find` variants are: `find()`, `find_if()`, and `find_if_not()`.
-It seems natural to also have `find_not()`, for the very reason that we have
-`find_if_not()` -- to avoid having to write a lambda to wrap the negation of
-the find condition.
-
-[heading interface]
-
-    template<typename InputIter, typename Sentinel, typename T>        
-    InputIter find_not(InputIter first, Sentinel last, const T & x);
-
-    template<typename Range, typename T>
-    boost::range_iterator<Range> find_not(Range & r, const T & x);
-
-These overloads of `find_not` return an iterator to the first element that is not equal to `x`
-in the sequence `[first, last)` or `r`, respectively.
-
-[heading Examples]
-
-Given the container `c1` containing `{ 0, 1, 2 }`,  then
-
-    find_not ( c1.begin(),     c1.end(),    1 ) --> c1.begin()
-    find_not ( c1.begin(),     c1.end(),    0 ) --> std::next(c1.begin())
-
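-As a compilable illustration of the same calls (the include path `<boost/algorithm/find_not.hpp>` is assumed here from the header name above):
-
-    #include <boost/algorithm/find_not.hpp>
-    #include <iostream>
-    #include <vector>
-
-    int main()
-    {
-        const std::vector<int> vec = { 1, 1, 2 };
-        const auto it = boost::algorithm::find_not(vec.begin(), vec.end(), 1);
-        std::cout << *it << '\n';   // prints "2", the first element that is not 1
-    }
-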
-[heading Iterator Requirements]
-
-`find_not` works on all iterators except output iterators.
-
-The template parameter `Sentinel` is allowed to be different from `InputIter`,
-or they may be the same.  For an `InputIter` `it` and a `Sentinel` `end`, `it
-== end` and `it != end` must be well-formed expressions.
-
-[heading Complexity]
-
-Linear.
-
-[heading Exception Safety]
-
-`find_not` takes its parameters by value and does not depend upon any global
-state. Therefore, it provides the strong exception guarantee.
-
-[heading Notes]
-
-`constexpr` in C++14 or later.
-
-[endsect]
-
-[/ File find_not.qbk
-Copyright 2018 T. Zachary Laine
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
diff --git a/third_party/boostorg/algorithm/doc/gather.qbk b/third_party/boostorg/algorithm/doc/gather.qbk
deleted file mode 100644
index b50e85a..0000000
--- a/third_party/boostorg/algorithm/doc/gather.qbk
+++ /dev/null
@@ -1,79 +0,0 @@
-[/ File gather.qbk]
-
-[section:gather gather]
-
-[/license
-Copyright (c) 2013 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'boost/algorithm/gather.hpp' contains two variants of a single algorithm, `gather`. 
-
-`gather()` takes a collection of elements defined by a pair of iterators and moves the ones satisfying a predicate to a position (called the pivot) within the sequence. The algorithm is stable. The result is a pair of iterators that delimits the items that satisfy the predicate.
-
-[heading Interface]
-
-The function `gather` returns a `std::pair` of iterators that denote the elements that satisfy the predicate.  
-
-There are two versions; one takes two iterators, and the other takes a range.
-
-``
-namespace boost { namespace algorithm {
-
-template <typename BidirectionalIterator, typename Pred>
-std::pair<BidirectionalIterator,BidirectionalIterator> 
-gather ( BidirectionalIterator first, BidirectionalIterator last, BidirectionalIterator pivot, Pred pred );
-
-template <typename BidirectionalRange, typename Pred>
-std::pair<typename boost::range_iterator<const BidirectionalRange>::type, typename boost::range_iterator<const BidirectionalRange>::type>
-gather ( const BidirectionalRange &range, typename boost::range_iterator<const BidirectionalRange>::type pivot, Pred pred );
-
-}}
-``
-
-[heading Examples]
-
-Given a sequence containing:
-``
-0 1 2 3 4 5 6 7 8 9
-``
-
-a call to gather ( arr, arr + 10, arr + 4, IsEven ) will result in:
-
-``
-1 3 0 2 4 6 8 5 7 9
-    |---|-----|
-  first |  second
-      pivot
-``
-where `first` and `second` are the fields of the pair that is returned by the call.
-
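-The following sketch shows the same call as a complete program (the header path `boost/algorithm/gather.hpp` is the one named above; the container and predicate are chosen here purely for illustration):
-
-``
-#include <boost/algorithm/gather.hpp>
-#include <iostream>
-#include <vector>
-
-bool IsEven ( int i ) { return i % 2 == 0; }
-
-int main ()
-{
-    std::vector<int> v = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
-
-    // Gather the even elements around the pivot position v.begin () + 4.
-    const auto p = boost::algorithm::gather ( v.begin (), v.end (), v.begin () + 4, IsEven );
-
-    // p.first .. p.second delimits the gathered even elements: 0 2 4 6 8
-    for ( auto it = p.first; it != p.second; ++it )
-        std::cout << *it << ' ';
-    std::cout << '\n';
-    return 0;
-}
-``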
-
-[heading Iterator Requirements]
-
-`gather` works on bidirectional iterators or better. This requirement comes from the usage of `stable_partition`, which requires bidirectional iterators. Some standard libraries (libstdc++ and libc++, for example) have implementations of `stable_partition` that work with forward iterators. If that is the case, then `gather` will work with forward iterators as well.
-
-[heading Storage Requirements]
-
-`gather` uses `stable_partition`, which will attempt to allocate temporary memory, but will work in-situ if there is none available.
-
-[heading Complexity]
-
-If there is sufficient memory available, the run time is linear: `O(N)`
-
-If there is not any memory available, then the run time is `O(N log N)`.
-
-[heading Exception Safety]
-
-[heading Notes]
-
-[endsect]
-
-[/ File gather.qbk
-Copyright 2013 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/hex.qbk b/third_party/boostorg/algorithm/doc/hex.qbk
deleted file mode 100644
index d64bd01..0000000
--- a/third_party/boostorg/algorithm/doc/hex.qbk
+++ /dev/null
@@ -1,109 +0,0 @@
-[/ File hex.qbk]
-
-[section:hex hex]
-
-[/license
-Copyright (c) 2011-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file `'boost/algorithm/hex.hpp'` contains three variants each of two algorithms, `hex` and `unhex`. They are inverse algorithms; that is, one undoes the effect of the other. `hex` takes a sequence of values, and turns them into hexadecimal characters. `unhex` takes a sequence of hexadecimal characters, and outputs a sequence of values.
-
-`hex` and `unhex` come from MySQL, where they are used in database queries and stored procedures.
-
-[heading interface]
-
-The function `hex` takes a sequence of values and writes hexadecimal characters. There are three different interfaces, differing only in how the input sequence is specified.
-
-The first one takes an iterator pair. The second one takes a pointer to the start of a zero-terminated sequence, such as a c string, and the third takes a range as defined by the Boost.Range library.
-
-``
-template <typename InputIterator, typename OutputIterator>
-OutputIterator hex ( InputIterator first, InputIterator last, OutputIterator out );
-
-template <typename T, typename OutputIterator>
-OutputIterator hex ( const T *ptr, OutputIterator out );
-
-template <typename Range, typename OutputIterator>
-OutputIterator hex ( const Range &r, OutputIterator out );
-``
-
-`hex` writes only values in the range '0'..'9' and 'A'..'F', but is not limited to character output. The output iterator could refer to a wstring, or a vector of integers, or any other integral type.
-
-The function `unhex` takes the output of `hex` and turns it back into a sequence of values.
-
-The input parameters for the different variations of `unhex` are the same as `hex`.
-
-``
-template <typename InputIterator, typename OutputIterator>
-OutputIterator unhex ( InputIterator first, InputIterator last, OutputIterator out );
-
-template <typename T, typename OutputIterator>
-OutputIterator unhex ( const T *ptr, OutputIterator out );
-
-template <typename Range, typename OutputIterator>
-OutputIterator unhex ( const Range &r, OutputIterator out );
-``
-
-[heading Error Handling]
-The header 'hex.hpp' defines three exception classes:
-``
-struct hex_decode_error: virtual boost::exception, virtual std::exception {};
-struct not_enough_input : public hex_decode_error;
-struct non_hex_input : public hex_decode_error;
-``
-
-If the input to `unhex` does not contain an "even number" of hex digits, then an exception of type `boost::algorithm::not_enough_input` is thrown.
-
-If the input to `unhex` contains any non-hexadecimal characters, then an exception of type `boost::algorithm::non_hex_input` is thrown.
-
-If you want to catch all the decoding errors, you can catch exceptions of type `boost::algorithm::hex_decode_error`.
-
-[heading Examples]
-
-Assuming that `out` is an iterator that accepts `char` values, and `wout` accepts `wchar_t` values (and that sizeof ( wchar_t ) == 2)
-
-``
-hex ( "abcdef", out )  --> "616263646566"
-hex ( "32", out )     --> "3332"
-hex ( "abcdef", wout ) --> "006100620063006400650066"
-hex ( "32", wout )    --> "00330032"
-
-unhex ( "616263646566", out )  --> "abcdef"
-unhex ( "3332", out )          --> "32"
-unhex ( "616263646566", wout ) --> "\6162\6364\6566"	( i.e, a 3 character string )
-unhex ( "3332", wout )         --> "\3233"				( U+3332, SQUARE HUARADDO )
-
-unhex ( "3", out )            --> Error - not enough input
-unhex ( "32", wout )          --> Error - not enough input
-
-unhex ( "ACEG", out )         --> Error - non-hex input
-
-``
-
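-As a compilable sketch of a round trip, including the error handling described above (the header path `boost/algorithm/hex.hpp` is the one named at the top of this section):
-
-``
-#include <boost/algorithm/hex.hpp>
-#include <iostream>
-#include <iterator>
-#include <string>
-
-int main ()
-{
-    const std::string input = "abcdef";
-
-    std::string encoded;
-    boost::algorithm::hex ( input, std::back_inserter ( encoded ));
-    std::cout << encoded << '\n';      // 616263646566
-
-    std::string decoded;
-    try {
-        boost::algorithm::unhex ( encoded, std::back_inserter ( decoded ));
-        std::cout << decoded << '\n';  // abcdef
-    }
-    catch ( const boost::algorithm::hex_decode_error & ) {
-        std::cout << "bad hex input\n";
-    }
-    return 0;
-}
-``
-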
-[heading Iterator Requirements]
-
-`hex` and `unhex` work on all iterator types.
-
-[heading Complexity]
-
-All of the variants of `hex` and `unhex` run in ['O(N)] (linear) time; that is, they process each element in the input sequence once.
-
-[heading Exception Safety]
-
-All of the variants of `hex` and `unhex` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee. However, when working on input iterators, if an exception is thrown, the input iterators will not be reset to their original values (i.e., the characters read from the iterator cannot be un-read).
-
-[heading Notes]
-
-* `hex` and `unhex` both do nothing when passed empty ranges.
-
-[endsect]
-
-[/ File hex.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/is_palindrome.qbk b/third_party/boostorg/algorithm/doc/is_palindrome.qbk
deleted file mode 100644
index 783305f..0000000
--- a/third_party/boostorg/algorithm/doc/is_palindrome.qbk
+++ /dev/null
@@ -1,98 +0,0 @@
-[/ File is_palindrome.qbk]
-
-[section:is_palindrome is_palindrome]
-
-[/license
-Copyright (c) 2016 Alexander Zaitsev
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'is_palindrome.hpp' contains six variants of a single algorithm, is_palindrome. 
-The algorithm tests the sequence and returns true if the sequence is a palindrome; i.e., it is identical when traversed either backwards or forwards.
-
-The routine `is_palindrome` takes a sequence and, optionally, a predicate. It will return true if the predicate returns true for each pair of elements that the algorithm compares (the i-th element from the front against the i-th element from the back).
-
-The routine comes in six forms; the first one takes two iterators to define the range. The second form takes two iterators to define the range and a predicate.
-The third form takes a single range parameter, and uses Boost.Range to traverse it. The fourth form takes a single range parameter (again traversed with Boost.Range) and a predicate.
-The fifth form takes a single C-string and a predicate. The sixth form takes a single C-string.
-
-
-[heading interface]
-
-The function `is_palindrome` returns true if the predicate returns true for every pair of elements that the algorithm compares.
-There are six versions: 
-1) takes two iterators.
-2) takes two iterators and a predicate.
-3) takes a range.
-4) takes a range and a predicate.
-5) takes a C-string and a predicate.
-6) takes a C-string.
-
-``
-template<typename BidirectionalIterator>
-	bool is_palindrome ( BidirectionalIterator begin, BidirectionalIterator end );
-template<typename BidirectionalIterator, typename Predicate>
-	bool is_palindrome ( BidirectionalIterator begin, BidirectionalIterator end, Predicate p );
-template<typename Range> 
-	bool is_palindrome ( const Range &r );
-template<typename Range, typename Predicate> 
-	bool is_palindrome ( const Range &r, Predicate p );
-template<typename Predicate>
-	bool is_palindrome ( const char* str, Predicate p );
-bool is_palindrome(const char* str);
-``
-
-
-[heading Examples]
-
-Given the containers
-`const std::list<int> empty`,
-`const std::vector<char> singleElement{'z'}`,
-`int oddNonPalindrome[] = {3,2,2}`,
-`const int oddPalindrome[] = {1,2,3,2,1}`,
-`const int evenPalindrome[] = {1,2,2,1}`, and
-`int evenNonPalindrome[] = {1,4,8,8}`, then
-``
-
-is_palindrome(empty) --> true  // empty range
-is_palindrome(singleElement) --> true
-is_palindrome(std::begin(oddNonPalindrome), std::end(oddNonPalindrome)) --> false
-is_palindrome(std::begin(evenPalindrome), std::end(evenPalindrome)) --> true
-is_palindrome(empty.begin(), empty.end(), functorComparator()) --> true  // empty range
-is_palindrome(std::begin(oddNonPalindrome), std::end(oddNonPalindrome), funcComparator<int>) --> false
-is_palindrome(std::begin(oddPalindrome), std::end(oddPalindrome)) --> true
-is_palindrome(evenPalindrome, std::equal_to<int>()) --> true
-is_palindrome(std::begin(evenNonPalindrome), std::end(evenNonPalindrome)) --> false
-is_palindrome("a") --> true
-is_palindrome("aba", std::equal_to<char>()) --> true
-``
-
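-A short compilable sketch (the include path `<boost/algorithm/is_palindrome.hpp>` is assumed here from the header name above):
-
-``
-#include <boost/algorithm/is_palindrome.hpp>
-#include <cctype>
-#include <iostream>
-#include <string>
-
-int main ()
-{
-    std::cout << std::boolalpha;
-    std::cout << boost::algorithm::is_palindrome ( std::string ( "level" )) << '\n';    // true
-    std::cout << boost::algorithm::is_palindrome ( std::string ( "levels" )) << '\n';   // false
-
-    // With a predicate: compare characters case-insensitively.
-    std::cout << boost::algorithm::is_palindrome ( std::string ( "Anna" ),
-        [] ( char a, char b ) {
-            return std::tolower ( static_cast<unsigned char> ( a )) ==
-                   std::tolower ( static_cast<unsigned char> ( b ));
-        }) << '\n';                                                                      // true
-    return 0;
-}
-``
-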
-[heading Iterator Requirements]
-
-`is_palindrome` works on bidirectional and random-access iterators.
-
-[heading Complexity]
-
-All of the variants of `is_palindrome` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If any of the comparisons fails, the algorithm will terminate immediately, without examining the remaining members of the sequence.
-
-[heading Exception Safety]
-
-All of the variants of `is_palindrome` take their parameters by value, const pointer or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* `is_palindrome` returns true for empty ranges, const char* null pointers and for single element ranges. 
-
-* If you use a version of `is_palindrome` without a custom predicate, `is_palindrome` uses `operator==()` to compare elements.
-
-* Be careful when passing a non-null `const char*` that is not null-terminated - using it with `is_palindrome` is undefined behaviour.
-
-[endsect]
-
-[/ File is_palindrome.qbk
-Copyright 2016 Alexander Zaitsev
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
diff --git a/third_party/boostorg/algorithm/doc/is_partitioned.qbk b/third_party/boostorg/algorithm/doc/is_partitioned.qbk
deleted file mode 100644
index 0ed1f80..0000000
--- a/third_party/boostorg/algorithm/doc/is_partitioned.qbk
+++ /dev/null
@@ -1,69 +0,0 @@
-[/ File is_partitioned.qbk]
-
-[section:is_partitioned is_partitioned ]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'is_partitioned.hpp' contains two variants of a single algorithm, `is_partitioned`. The algorithm tests to see if a sequence is partitioned according to a predicate; in other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-
-The routine `is_partitioned` takes a sequence and a predicate. It returns true if the sequence is partitioned according to the predicate.
-
-`is_partitioned` comes in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `is_partitioned` returns true if the items in the sequence are separated according to their ability to satisfy the predicate.  There are two versions; one takes two iterators, and the other takes a range.
-
-``
-template<typename InputIterator, typename Predicate>
-	bool is_partitioned ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	bool is_partitioned ( const Range &r, Predicate p );
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-is_partitioned ( c, isOdd ) --> false
-is_partitioned ( c, lessThan10 ) --> true
-is_partitioned ( c.begin (), c.end (), lessThan10 ) --> true
-is_partitioned ( c.begin (), c.begin () + 3, lessThan10 ) --> true
-is_partitioned ( c.end (), c.end (), isOdd ) --> true  // empty range
-``
-
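-The same check can be written as a complete program (written here with the equivalent C++11 `std::is_partitioned` noted below, and a lambda in place of the named predicate):
-
-``
-#include <algorithm>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-
-    // All elements < 10 come before all elements >= 10, so the sequence is partitioned.
-    const bool ok = std::is_partitioned ( c.begin (), c.end (),
-                                          [] ( int i ) { return i < 10; });
-    std::cout << std::boolalpha << ok << '\n';   // true
-    return 0;
-}
-``
-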
-[heading Iterator Requirements]
-
-`is_partitioned` works on all iterators except output iterators.
-
-[heading Complexity]
-
-Both of the variants of `is_partitioned` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If the sequence is found to be not partitioned at any point, the routine will terminate immediately, without examining the rest of the elements.
-
-[heading Exception Safety]
-
-Both of the variants of `is_partitioned` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The iterator-based version of the routine `is_partitioned` is also available as part of the C++11 standard.
-
-* `is_partitioned` returns true for empty and single-element ranges, no matter what predicate is passed to test against. 
-
-[endsect]
-
-[/ File is_partitioned.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/is_partitioned_until.qbk b/third_party/boostorg/algorithm/doc/is_partitioned_until.qbk
deleted file mode 100644
index 9e0879b..0000000
--- a/third_party/boostorg/algorithm/doc/is_partitioned_until.qbk
+++ /dev/null
@@ -1,67 +0,0 @@
-[/ File is_partitioned_until.qbk]
-
-[section:is_partitioned_until is_partitioned_until ]
-
-[/license
-Copyright (c) 2017 Alexander Zaitsev
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'is_partitioned_until.hpp' contains two variants of a single algorithm, `is_partitioned_until`. The algorithm tests to see if a sequence is partitioned according to a predicate; in other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-
-The routine `is_partitioned_until` takes a sequence and a predicate. It returns the last iterator `it` in the sequence `[begin, end)` for which `is_partitioned(begin, it)` is true.
-
-`is_partitioned_until` comes in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `is_partitioned_until` returns the last iterator `it` in the sequence `[begin, end)` for which `is_partitioned(begin, it)` is true. There are two versions; one takes two iterators, and the other takes a range.
-
-``
-template<typename InputIterator, typename Predicate>
-	InputIterator is_partitioned_until ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	typename boost::range_iterator<const Range>::type is_partitioned_until ( const Range &r, Predicate p );
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-is_partitioned_until ( c, isOdd ) --> iterator to '1'
-is_partitioned_until ( c, lessThan10 ) --> end
-is_partitioned_until ( c.begin (), c.end (), lessThan10 ) --> end
-is_partitioned_until ( c.begin (), c.begin () + 3, lessThan10 ) --> end
-is_partitioned_until ( c.end (), c.end (), isOdd ) --> end  // empty range
-``
-
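-A compilable sketch of the iterator form (the include path `<boost/algorithm/is_partitioned_until.hpp>` is assumed here from the header name above):
-
-``
-#include <boost/algorithm/is_partitioned_until.hpp>
-#include <iostream>
-#include <vector>
-
-bool isOdd ( int i ) { return i % 2 == 1; }
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-
-    const auto it = boost::algorithm::is_partitioned_until ( c.begin (), c.end (), isOdd );
-    // 0 is even, so the "satisfying" prefix is empty and the first odd
-    // element (1) is where the partitioning breaks down.
-    std::cout << ( it - c.begin ()) << '\n';   // 1
-    return 0;
-}
-``
-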
-[heading Iterator Requirements]
-
-`is_partitioned_until` works on all iterators except output iterators.
-
-[heading Complexity]
-
-Both of the variants of `is_partitioned_until` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If the sequence is found to be not partitioned at any point, the routine will terminate immediately, without examining the rest of the elements.
-
-[heading Exception Safety]
-
-Both of the variants of `is_partitioned_until` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* `is_partitioned_until` returns an iterator to the end for empty and single-element ranges, no matter what predicate is passed to test against. 
-
-[endsect]
-
-[/ File is_partitioned_until.qbk
-Copyright 2017 Alexander Zaitsev
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/is_permutation.qbk b/third_party/boostorg/algorithm/doc/is_permutation.qbk
deleted file mode 100644
index e8753ba..0000000
--- a/third_party/boostorg/algorithm/doc/is_permutation.qbk
+++ /dev/null
@@ -1,87 +0,0 @@
-[/ File is_permutation.qbk]
-
-[section:is_permutation is_permutation ]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'is_permutation.hpp' contains six variants of a single algorithm, `is_permutation`. The algorithm tests to see if one sequence is a permutation of a second one; in other words, it contains all the same members, possibly in a different order.
-
-The routine `is_permutation` takes two sequences and an (optional) predicate. It returns true if the two sequences contain the same members. If it is passed a predicate, it uses the predicate to compare the elements of the sequence to see if they are the same.
-
-`is_permutation` comes in three forms. The first one takes two iterators to define the first range, and the starting iterator of the second range. The second form takes two iterators to define the first range and two more to define the second range. The third form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading Interface]
-
-The function `is_permutation` returns true if the two input sequences contain the same elements.  There are six versions; two take three iterators, two take four iterators, and the other two take two ranges.
-
-In general, you should prefer the four iterator versions over the three iterator ones. The three iterator version has to "create" the fourth iterator internally by calling `std::advance(first2, std::distance(first1,last1))`, and if the second sequence is shorter than the first, that's undefined behavior.
-
-``
-template< class ForwardIterator1, class ForwardIterator2 >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2 );
-
-template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1,
-                      ForwardIterator2 first2, BinaryPredicate p );
-
-
-template< class ForwardIterator1, class ForwardIterator2 >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2 );
-
-template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1,
-                      ForwardIterator2 first2, ForwardIterator2 last2,
-                      BinaryPredicate p );
-
-template <typename Range, typename ForwardIterator>
-bool is_permutation ( const Range &r, ForwardIterator first2 );
-
-template <typename Range, typename ForwardIterator, typename BinaryPredicate>
-bool is_permutation ( const Range &r, ForwardIterator first2, BinaryPredicate pred );
-
-``
-
-[heading Examples]
-
-Given the container `c1` containing `{ 0, 1, 2, 3, 14, 15 }`, and `c2` containing `{ 15, 14, 3, 1, 2 }`, then
-``
-is_permutation ( c1.begin(),     c1.end (), c2.begin(), c2.end ()) --> false
-is_permutation ( c1.begin() + 1, c1.end (), c2.begin(), c2.end ()) --> true
-
-is_permutation ( c1.end (), c1.end (), c2.end(), c2.end ()) --> true  // all empty ranges are permutations of each other
-``
-
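-The same comparisons as a complete program (written here with the four-iterator C++14 `std::is_permutation` mentioned in the notes below, which has the same interface):
-
-``
-#include <algorithm>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> c1 = { 0, 1, 2, 3, 14, 15 };
-    const std::vector<int> c2 = { 15, 14, 3, 1, 2 };
-
-    std::cout << std::boolalpha;
-    // Different lengths, so not a permutation of each other.
-    std::cout << std::is_permutation ( c1.begin (), c1.end (), c2.begin (), c2.end ()) << '\n';      // false
-    // Skipping the leading 0 leaves the same multiset of values.
-    std::cout << std::is_permutation ( c1.begin () + 1, c1.end (), c2.begin (), c2.end ()) << '\n';  // true
-    return 0;
-}
-``
-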
-[heading Iterator Requirements]
-
-`is_permutation` works on forward iterators or better. 
-
-[heading Complexity]
-
-All of the variants of `is_permutation` run in ['O(N^2)] (quadratic) time; that is, they compare against each element in the list (potentially) N times.  If passed random-access iterators, `is_permutation` can return quickly if the sequences are different sizes.
-
-[heading Exception Safety]
-
-All of the variants of `is_permutation` take their parameters by value, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The three iterator versions of the routine `is_permutation` are also available as part of the C++11 standard.
-
-* The four iterator versions of the routine `is_permutation` are part of the proposed C++14 standard. When C++14 standard libraries become available, the implementation should be changed to use the implementation from the standard library (if available).
-
-* `is_permutation` returns true when passed a pair of empty ranges, no matter what predicate is passed to test with. 
-
-[endsect]
-
-[/ File is_permutation.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/knuth_morris_pratt.qbk b/third_party/boostorg/algorithm/doc/knuth_morris_pratt.qbk
deleted file mode 100644
index b2620f1..0000000
--- a/third_party/boostorg/algorithm/doc/knuth_morris_pratt.qbk
+++ /dev/null
@@ -1,101 +0,0 @@
-[/ QuickBook Document version 1.5 ]
-
-[section:KnuthMorrisPratt Knuth-Morris-Pratt Search]
-
-[/license
-
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt)
-]
-
-
-[heading Overview]
-
-The header file 'knuth_morris_pratt.hpp' contains an implementation of the Knuth-Morris-Pratt algorithm for searching sequences of values. 
-
-The basic premise of the Knuth-Morris-Pratt algorithm is that when a mismatch occurs, there is information in the pattern being searched for that can be used to determine where the next match could begin, enabling the skipping of some elements of the corpus that have already been examined.
-
-It does this by building a table from the pattern being searched for, with one entry for each element in the pattern.
-
-The algorithm was conceived in 1974 by Donald Knuth and Vaughan Pratt, and independently by James H. Morris. The three published it jointly in 1977 in the SIAM Journal on Computing [@http://citeseer.ist.psu.edu/context/23820/0]
-
-However, unlike `std::search`, the Knuth-Morris-Pratt algorithm cannot be used with comparison predicates.
-
-[heading Interface]
-
-Nomenclature: I refer to the sequence being searched for as the "pattern", and the sequence being searched in as the "corpus".
-
-For flexibility, the Knuth-Morris-Pratt algorithm has two interfaces; an object-based interface and a procedural one. The object-based interface builds the table in the constructor, and uses operator () to perform the search. The procedural interface builds the table and does the search all in one step. If you are going to be searching for the same pattern in multiple corpora, then you should use the object interface, and only build the tables once.
-
-Here is the object interface:
-``
-template <typename patIter>
-class knuth_morris_pratt {
-public:
-    knuth_morris_pratt ( patIter first, patIter last );
-    ~knuth_morris_pratt ();
-    
-    template <typename corpusIter>
-    pair<corpusIter, corpusIter> operator () ( corpusIter corpus_first, corpusIter corpus_last );
-    };
-``
-
-and here is the corresponding procedural interface:
-
-``
-template <typename patIter, typename corpusIter>
-pair<corpusIter, corpusIter> knuth_morris_pratt_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, 
-        patIter pat_first, patIter pat_last );
-``
-
-Each of the functions is passed two pairs of iterators. The first two define the corpus and the second two define the pattern. Note that the two pairs need not be of the same type, but they do need to "point" at the same type. In other words, `patIter::value_type` and `corpusIter::value_type` need to be the same type.
-
-The return value of the function is a pair of iterators pointing to the position of the pattern in the corpus. If the pattern is empty, it returns an empty range at the start of the corpus (`corpus_first`, `corpus_first`). If the pattern is not found, it returns an empty range at the end of the corpus (`corpus_last`, `corpus_last`).
-
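-As a sketch of reusing the object interface across several corpora (the include path `<boost/algorithm/searching/knuth_morris_pratt.hpp>` is assumed here; only the class and function names above come from this documentation):
-
-``
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-#include <iostream>
-#include <string>
-
-int main ()
-{
-    const std::string pattern = "abc";
-    const std::string corpus1 = "xxabcxx";
-    const std::string corpus2 = "no match here";
-
-    // Build the table once, in the constructor...
-    boost::algorithm::knuth_morris_pratt<std::string::const_iterator>
-        searcher ( pattern.begin (), pattern.end ());
-
-    // ...then reuse it for several corpora.
-    const auto r1 = searcher ( corpus1.begin (), corpus1.end ());
-    const auto r2 = searcher ( corpus2.begin (), corpus2.end ());
-
-    std::cout << ( r1.first - corpus1.begin ()) << '\n';                    // 2 : start of the match
-    std::cout << std::boolalpha << ( r2.first == corpus2.end ()) << '\n';   // true : not found
-    return 0;
-}
-``
-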
-[heading Compatibility Note]
-
-Earlier versions of this searcher returned only a single iterator.  As explained in [@https://cplusplusmusings.wordpress.com/2016/02/01/sometimes-you-get-things-wrong/], this was a suboptimal interface choice, and has been changed, starting in the 1.62.0 release.  Old code that is expecting a single iterator return value can be updated by replacing the return value of the searcher's `operator ()` with the `.first` field of the pair.
-
-Instead of:
-``
-iterator foo = searcher(a, b);
-``
-
-you now write:
-``
-iterator foo = searcher(a, b).first;
-``
-[heading Performance]
-
-The execution time of the Knuth-Morris-Pratt algorithm is linear in the size of the string being searched. Generally the algorithm gets faster as the pattern being searched for becomes longer. Its efficiency derives from the fact that with each unsuccessful attempt to find a match between the search string and the text it is searching, it uses the information gained from that attempt to rule out as many positions of the text as possible where the string cannot match.
-
-[heading Memory Use]
-
-The algorithm builds a table that contains one entry for each element in the pattern, plus one extra.  So, when searching for a 1026 byte string, the table will have 1027 entries.
-
-[heading Complexity]
-
-The worst-case performance is ['O(2n)], where ['n] is the length of the corpus. The average time is ['O(n)]. The best case performance is sub-linear.
-
-[heading Exception Safety]
-
-Both the object-oriented and procedural versions of the Knuth-Morris-Pratt algorithm take their parameters by value and do not use any information other than what is passed in. Therefore, both interfaces provide the strong exception guarantee.
-
-[heading Notes]
-
-* When using the object-based interface, the pattern must remain unchanged during the searches; i.e., from the time the object is constructed until the final call to operator () returns.
-
-* The Knuth-Morris-Pratt algorithm requires random-access iterators for both the pattern and the corpus. It should be possible to write this to use bidirectional iterators (or possibly even forward ones), but this implementation does not do that.
-
-[endsect]
-
-[/ File knuth_morris_pratt.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/mismatch.qbk b/third_party/boostorg/algorithm/doc/mismatch.qbk
deleted file mode 100644
index ff56398..0000000
--- a/third_party/boostorg/algorithm/doc/mismatch.qbk
+++ /dev/null
@@ -1,82 +0,0 @@
-[/ File mismatch.qbk]
-
-[section:mismatch mismatch ]
-
-[/license
-Copyright (c) 2013 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'mismatch.hpp' contains two variants of the stl algorithm `mismatch`. The algorithm finds the first point in two sequences where they do not match.
-
-Before (the proposed) C++14 the algorithm `std::mismatch` took three iterators and an optional comparison predicate. The first two iterators `[first1, last1)` defined a sequence, and the second one `first2` defined the start of the second sequence. The second sequence was assumed to be the same length as the first. 
-
-In C++14, two new variants were introduced, taking four iterators and an optional comparison predicate. The four iterators define two sequences `[first1, last1)` and `[first2, last2)` explicitly, rather than defining the second one implicitly. This leads to correct answers in more cases (and avoids undefined behavior in others).
-
-Consider the two sequences:
-```
-	auto seq1 = { 0, 1, 2 };
-	auto seq2 = { 0, 1, 2, 3, 4 };
-	
-	std::mismatch ( seq1.begin (), seq1.end (), seq2.begin ()); // <3, 3>
-	std::mismatch ( seq2.begin (), seq2.end (), seq1.begin ()); // Undefined behavior
-	std::mismatch ( seq1.begin (), seq1.end (), seq2.begin (), seq2.end ()); // <3, 3>
-```
-
-The first N entries in `seq2` are the same as the entries in `seq1` - but that's not all that's in `seq2`. In the second case, the algorithm will read past the end of `seq1`, resulting in undefined behavior (large earthquake, incorrect results, pregnant cat, etc).
-
-However, if the two sequences are specified completely, it's clear where the mismatch occurs.
-
-[heading interface]
-
-The function `mismatch` returns a pair of iterators which denote the first mismatching elements in each sequence. If the sequences match completely, `mismatch` returns their end iterators. One version uses `std::equal_to` to do the comparison; the other lets the caller pass a predicate to do the comparisons.
-
-``
-template <class InputIterator1, class InputIterator2>
-std::pair<InputIterator1, InputIterator2>
-mismatch ( InputIterator1 first1, InputIterator1 last1, 
-           InputIterator2 first2, InputIterator2 last2 );
-             
-template <class InputIterator1, class InputIterator2, class BinaryPredicate>
-std::pair<InputIterator1, InputIterator2>
-mismatch ( InputIterator1 first1, InputIterator1 last1, 
-           InputIterator2 first2, InputIterator2 last2, BinaryPredicate pred );
-``
-
-[heading Examples]
-
-Given the container `c1` containing `{ 0, 1, 2, 3, 14, 15 }`, and `c2` containing `{ 1, 2, 3 }`,  then
-``
-mismatch ( c1.begin(),     c1.end(),       c2.begin(), c2.end()) --> <c1.begin(), c2.begin()> // first elements do not match
-mismatch ( c1.begin() + 1, c1.begin() + 4, c2.begin(), c2.end()) --> <c1.begin() + 4, c2.end ()> // all elements of `c2` match
-mismatch ( c1.end(),       c1.end(),       c2.end(),   c2.end()) --> <c1.end(), c2.end()> // empty sequences; the end iterators are returned
-``
-
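-As a compilable sketch (written here with the four-iterator C++14 `std::mismatch` mentioned in the notes below, which has the same interface):
-
-``
-#include <algorithm>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> c1 = { 0, 1, 2, 3, 14, 15 };
-    const std::vector<int> c2 = { 1, 2, 3 };
-
-    const auto p = std::mismatch ( c1.begin () + 1, c1.begin () + 4,
-                                   c2.begin (),     c2.end ());
-    // All of c2 matches c1[1..3], so both iterators end up at the end of their ranges.
-    std::cout << std::boolalpha
-              << ( p.first  == c1.begin () + 4 ) << ' '
-              << ( p.second == c2.end ()) << '\n';   // true true
-    return 0;
-}
-``
-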
-[heading Iterator Requirements]
-
-`mismatch` works on all iterators except output iterators.
-
-[heading Complexity]
-
-Both of the variants of `mismatch` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If the sequence is found to be not equal at any point, the routine will terminate immediately, without examining the rest of the elements.
-
-[heading Exception Safety]
-
-Both of the variants of `mismatch` take their parameters by value and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* If the sequences are equal (or both are empty), then mismatch returns the end iterators of both sequences. 
-
-* The four iterator version of the routine `mismatch` is part of the C++14 standard. When C++14 standard library implementations become available, the implementation from the standard library should be used.
-
-[endsect]
-
-[/ File mismatch.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/none_of.qbk b/third_party/boostorg/algorithm/doc/none_of.qbk
deleted file mode 100644
index f0b93b7..0000000
--- a/third_party/boostorg/algorithm/doc/none_of.qbk
+++ /dev/null
@@ -1,90 +0,0 @@
-[/ File none_of.qbk]
-
-[section:none_of none_of]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'boost/algorithm/cxx11/none_of.hpp' contains four variants of a single algorithm, `none_of`. The algorithm tests all the elements of a sequence and returns true if none of them share a property.
-
-The routine `none_of` takes a sequence and a predicate. It will return true if the predicate returns false when applied to every element in the sequence. 
-
-The routine `none_of_equal` takes a sequence and a value. It will return true if none of the elements in the sequence compare equal to the passed in value.
-
-Both routines come in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `none_of` returns true if the predicate returns false for every item in the sequence.  There are two versions; one takes two iterators, and the other takes a range.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename Predicate>
-	bool none_of ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	bool none_of ( const Range &r, Predicate p );
-}}
-``
-
-The function `none_of_equal` is similar to `none_of`, but instead of taking a predicate to test the elements of the sequence, it takes a value to compare against.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename V>
-	bool none_of_equal ( InputIterator first, InputIterator last, V const &val );
-template<typename Range, typename V> 
-	bool none_of_equal ( const Range &r, V const &val );
-}}
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-using namespace boost::algorithm;
-
-none_of ( c, isOdd ) --> false
-none_of ( c.begin (), c.end (), lessThan10 ) --> false
-none_of ( c.begin () + 4, c.end (), lessThan10 ) --> true
-none_of ( c.end (), c.end (), isOdd ) --> true  // empty range
-none_of_equal ( c, 3 ) --> false
-none_of_equal ( c.begin (), c.begin () + 3, 3 ) --> true
-none_of_equal ( c.begin (), c.begin (), 99 ) --> true  // empty range
-``
-
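-The same calls as a complete program (the header path `boost/algorithm/cxx11/none_of.hpp` is the one named at the top of this section; a lambda stands in for the named predicates):
-
-``
-#include <boost/algorithm/cxx11/none_of.hpp>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-
-    std::cout << std::boolalpha;
-    std::cout << boost::algorithm::none_of ( c, [] ( int i ) { return i > 100; }) << '\n';   // true
-    std::cout << boost::algorithm::none_of_equal ( c, 3 ) << '\n';                           // false
-    return 0;
-}
-``
-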
-[heading Iterator Requirements]
-
-`none_of` and `none_of_equal` work on all iterators except output iterators.
-
-[heading Complexity]
-
-All of the variants of `none_of` and `none_of_equal` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If any of the comparisons succeed, the algorithm will terminate immediately, without examining the remaining members of the sequence.
-
-[heading Exception Safety]
-
-All of the variants of `none_of` and `none_of_equal` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The routine `none_of` is also available as part of the C++11 standard.
-
-* `none_of` and `none_of_equal` both return true for empty ranges, no matter what is passed to test against. 
-
-* The second parameter to `none_of_equal` is a template parameter, rather than deduced from the first parameter (`std::iterator_traits<InputIterator>::value_type`) because that allows more flexibility for callers, and takes advantage of built-in comparisons for the type that is pointed to by the iterator.  The function is defined to return true if, for all elements in the sequence, the expression `*iter == val` evaluates to false (where `iter` is an iterator to each element in the sequence).
-
-[endsect]
-
-[/ File none_of.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/one_of.qbk b/third_party/boostorg/algorithm/doc/one_of.qbk
deleted file mode 100644
index e5d873b..0000000
--- a/third_party/boostorg/algorithm/doc/one_of.qbk
+++ /dev/null
@@ -1,87 +0,0 @@
-[/ File one_of.qbk]
-
-[section:one_of one_of]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'boost/algorithm/cxx11/one_of.hpp' contains four variants of a single algorithm, `one_of`. The algorithm tests the elements of a sequence and returns true if exactly one of the elements in the sequence has a particular property.
-
-The routine `one_of` takes a sequence and a predicate. It will return true if the predicate returns true for one element in the sequence. 
-
-The routine `one_of_equal` takes a sequence and a value. It will return true if one element in the sequence compares equal to the passed in value.
-
-Both routines come in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading interface]
-
-The function `one_of` returns true if the predicate returns true for one item in the sequence.  There are two versions; one takes two iterators, and the other takes a range.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename Predicate>
-	bool one_of ( InputIterator first, InputIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	bool one_of ( const Range &r, Predicate p );
-}}
-``
-
-The function `one_of_equal` is similar to `one_of`, but instead of taking a predicate to test the elements of the sequence, it takes a value to compare against.
-
-``
-namespace boost { namespace algorithm {
-template<typename InputIterator, typename V>
-	bool one_of_equal ( InputIterator first, InputIterator last, V const &val );
-template<typename Range, typename V> 
-	bool one_of_equal ( const Range &r, V const &val );
-}}
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool isOdd ( int i ) { return i % 2 == 1; }
-bool lessThan10 ( int i ) { return i < 10; }
-
-using namespace boost::algorithm;
-one_of ( c, isOdd ) --> false
-one_of ( c.begin (), c.end (), lessThan10 ) --> false
-one_of ( c.begin () + 3, c.end (), lessThan10 ) --> true
-one_of ( c.end (), c.end (), isOdd ) --> false  // empty range
-one_of_equal ( c, 3 ) --> true
-one_of_equal ( c.begin (), c.begin () + 3, 3 ) --> false
-one_of_equal ( c.begin (), c.begin (), 99 ) --> false  // empty range
-``
-
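-The same calls as a complete program (the header path `boost/algorithm/cxx11/one_of.hpp` is the one named at the top of this section):
-
-``
-#include <boost/algorithm/cxx11/one_of.hpp>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> c = { 0, 1, 2, 3, 14, 15 };
-
-    std::cout << std::boolalpha;
-    // Exactly one element (3) is equal to 3.
-    std::cout << boost::algorithm::one_of_equal ( c, 3 ) << '\n';                              // true
-    // More than one element is odd, so this is false.
-    std::cout << boost::algorithm::one_of ( c, [] ( int i ) { return i % 2 == 1; }) << '\n';   // false
-    return 0;
-}
-``
-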
-[heading Iterator Requirements]
-
-`one_of` and `one_of_equal` work on all iterators except output iterators.
-
-[heading Complexity]
-
-All of the variants of `one_of` and `one_of_equal` run in ['O(N)] (linear) time; that is, they compare against each element in the list once. If more than one of the elements in the sequence satisfies the condition, the algorithm will return false immediately, without examining the remaining members of the sequence.
-
-[heading Exception Safety]
-
-All of the variants of `one_of` and `one_of_equal` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* `one_of` and `one_of_equal` both return false for empty ranges, no matter what is passed to test against. 
-
-* The second parameter to `one_of_equal` is a template parameter, rather than deduced from the first parameter (`std::iterator_traits<InputIterator>::value_type`) because that allows more flexibility for callers, and takes advantage of built-in comparisons for the type that is pointed to by the iterator.  The function is defined to return true if, for one element in the sequence, the expression `*iter == val` evaluates to true (where `iter` is an iterator to each element in the sequence)
-
-[endsect]
-
-[/ File one_of.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/doc/ordered-hpp.qbk b/third_party/boostorg/algorithm/doc/ordered-hpp.qbk
deleted file mode 100644
index 9e860d4..0000000
--- a/third_party/boostorg/algorithm/doc/ordered-hpp.qbk
+++ /dev/null
@@ -1,130 +0,0 @@
-[/ QuickBook Document version 1.5 ]
-[section:is_sorted is_sorted ]
-
-[/license
-
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt)
-
-]
-
-
-The header file `<boost/algorithm/cxx11/is_sorted.hpp>` contains functions for determining if a sequence is ordered.
-
-[heading is_sorted]
-The function `is_sorted(sequence)` determines whether or not a sequence is completely sorted according to some criterion.  If no comparison predicate is specified, then std::less_equal is used (i.e., the test is to see if the sequence is non-decreasing).
-
-``
-namespace boost { namespace algorithm {
-	template <typename ForwardIterator, typename Pred>
-	bool is_sorted ( ForwardIterator first, ForwardIterator last, Pred p );
-	
-	template <typename ForwardIterator>
-	bool is_sorted ( ForwardIterator first, ForwardIterator last );
-	
-	
-	template <typename Range, typename Pred>
-	bool is_sorted ( const Range &r, Pred p );
-	
-	template <typename Range>
-	bool is_sorted ( const Range &r );
-}}
-``
-
-Iterator requirements: The `is_sorted` functions will work on forward iterators or better.
-
-[heading is_sorted_until]
-
-If `distance(first, last) < 2`, then `is_sorted_until ( first, last )` returns `last`. Otherwise, it returns the last iterator `i` in `[first, last]` for which the range `[first, i)` is sorted.
-
-In short, it returns an iterator to the first element in the sequence that is "out of order". If the entire sequence is sorted (according to the predicate), then it will return `last`.
-
-``
-namespace boost { namespace algorithm {
-	template <typename ForwardIterator, typename Pred>
-	ForwardIterator is_sorted_until ( ForwardIterator first, ForwardIterator last, Pred p );
-	
-	template <typename ForwardIterator>
-	ForwardIterator is_sorted_until ( ForwardIterator first, ForwardIterator last );
-	
-	
-	template <typename Range, typename Pred>
-	typename boost::range_iterator<const Range>::type is_sorted_until ( const Range &r, Pred p );
-	
-	template <typename Range>
-	typename boost::range_iterator<const Range>::type is_sorted_until ( const Range &r );
-}}
-``
-
-Iterator requirements: The `is_sorted_until` functions will work on forward iterators or better. Since they have to return a place in the input sequence, input iterators will not suffice.
-
-Complexity:
-	`is_sorted_until` will make at most ['N-1] calls to the predicate (given a sequence of length ['N]).
-
-Examples:
-
-Given the sequence `{ 1, 2, 3, 4, 5, 3 }`,   `is_sorted_until ( beg, end, std::less<int>())` would return an iterator pointing at the second `3`.
-
-Given the sequence `{ 1, 2, 3, 4, 5, 9 }`,   `is_sorted_until ( beg, end, std::less<int>())` would return `end`.
-
-
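-A compilable sketch of the first example, using the header named at the top of this section:
-
-``
-#include <boost/algorithm/cxx11/is_sorted.hpp>
-#include <functional>
-#include <iostream>
-#include <vector>
-
-int main ()
-{
-    const std::vector<int> v = { 1, 2, 3, 4, 5, 3 };
-
-    const auto it = boost::algorithm::is_sorted_until ( v.begin (), v.end (), std::less<int> ());
-    std::cout << ( it - v.begin ()) << '\n';                                    // 5 : index of the second 3
-    std::cout << std::boolalpha << boost::algorithm::is_sorted ( v ) << '\n';   // false
-    return 0;
-}
-``
-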
-There is also a set of "wrapper functions" for `is_sorted` which make it easy to see if an entire sequence is ordered. These functions return a boolean indicating success or failure rather than an iterator to where the out of order items were found.
-
-To test if a sequence is increasing (each element at least as large as the preceding one):
-``
-namespace boost { namespace algorithm {
-	template <typename Iterator>
-	bool is_increasing ( Iterator first, Iterator last );
-	
-	template <typename R>
-	bool is_increasing ( const R &range );
-}}
-``
-
-To test if a sequence is decreasing (each element no larger than the preceding one):
-
-``
-namespace boost { namespace algorithm {
-	template <typename ForwardIterator>
-	bool is_decreasing ( ForwardIterator first, ForwardIterator last );
-	
-	template <typename R>
-	bool is_decreasing ( const R &range );
-}}
-``
-
-To test if a sequence is strictly increasing (each element larger than the preceding one):
-``
-namespace boost { namespace algorithm {
-	template <typename ForwardIterator>
-	bool is_strictly_increasing ( ForwardIterator first, ForwardIterator last );
-	
-	template <typename R>
-	bool is_strictly_increasing ( const R &range );
-}}
-``
-
-To test if a sequence is strictly decreasing (each element smaller than the preceding one):
-``
-namespace boost { namespace algorithm {
-	template <typename ForwardIterator>
-	bool is_strictly_decreasing ( ForwardIterator first, ForwardIterator last );
-	
-	template <typename R>
-	bool is_strictly_decreasing ( const R &range );
-}}
-``
-
-Complexity:
-	Each of these calls is just a thin wrapper over `is_sorted`, so they have the same complexity as `is_sorted`.
-
-[heading Notes]
-
-* The routines `is_sorted` and `is_sorted_until` are part of the C++11 standard. When compiled using a C++11 implementation, the implementation from the standard library will be used.
-
-* `is_sorted` and `is_sorted_until` both return true for empty ranges and ranges of length one.
-
-[endsect]
diff --git a/third_party/boostorg/algorithm/doc/partition_point.qbk b/third_party/boostorg/algorithm/doc/partition_point.qbk
deleted file mode 100644
index 813a27b..0000000
--- a/third_party/boostorg/algorithm/doc/partition_point.qbk
+++ /dev/null
@@ -1,68 +0,0 @@
-[/ File partition_point.qbk]
-
-[section:partition_point partition_point ]
-
-[/license
-Copyright (c) 2010-2012 Marshall Clow
-
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-]
-
-The header file 'partition_point.hpp' contains two variants of a single algorithm, `partition_point`. Given a partitioned sequence and a predicate, the algorithm finds the partition point; i.e., the first element in the sequence that does not satisfy the predicate.
-
-The routine `partition_point` takes a partitioned sequence and a predicate. It returns an iterator which 'points to' the first element in the sequence that does not satisfy the predicate. If all the items in the sequence satisfy the predicate, then it returns one past the final element in the sequence.
-
-`partition_point` comes in two forms; the first one takes two iterators to define the range. The second form takes a single range parameter, and uses Boost.Range to traverse it.
-
-
-[heading Interface]
-
-There are two versions; one takes two iterators, and the other takes a range.
-
-``
-template<typename ForwardIterator, typename Predicate>
-	ForwardIterator partition_point ( ForwardIterator first, ForwardIterator last, Predicate p );
-template<typename Range, typename Predicate> 
-	boost::range_iterator<Range> partition_point ( const Range &r, Predicate p );
-``
-
-[heading Examples]
-
-Given the container `c` containing `{ 0, 1, 2, 3, 14, 15 }`, then
-``
-bool lessThan10 ( int i ) { return i < 10; }
-bool isOdd ( int i ) { return i % 2 == 1; }
-
-partition_point ( c, lessThan10 ) --> c.begin () + 4  (pointing at 14)
-partition_point ( c.begin (), c.end (), lessThan10 ) --> c.begin () + 4  (pointing at 14)
-partition_point ( c.begin (), c.begin () + 3, lessThan10 ) --> c.begin () + 3 (end)
-partition_point ( c.end (), c.end (), isOdd ) --> c.end ()  // empty range
-``
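The listing above is schematic; a compilable version of the same example (editor's sketch, assuming the header `boost/algorithm/cxx11/partition_point.hpp`) might read:

```cpp
#include <cassert>
#include <vector>
#include <boost/algorithm/cxx11/partition_point.hpp>

namespace ba = boost::algorithm;

bool lessThan10(int i) { return i < 10; }

int main()
{
    std::vector<int> c = {0, 1, 2, 3, 14, 15};

    // First element that does not satisfy the predicate.
    std::vector<int>::iterator it = ba::partition_point(c.begin(), c.end(), lessThan10);
    assert(it == c.begin() + 4 && *it == 14);

    // If every element satisfies the predicate, the end of the range is returned.
    assert(ba::partition_point(c.begin(), c.begin() + 3, lessThan10) == c.begin() + 3);
    return 0;
}
```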
-
-[heading Iterator Requirements]
-
-`partition_point` requires forward iterators or better; it will not work on input iterators or output iterators.
-
-[heading Complexity]
-
-Both of the variants of `partition_point` run in ['O( log (N))] (logarithmic) time; that is, the predicate will be applied approximately ['log(N)] times. To do this, however, the algorithm needs to know the size of the sequence. For forward and bidirectional iterators, calculating the size of the sequence is an ['O(N)] operation.
-
-[heading Exception Safety]
-
-Both of the variants of `partition_point` take their parameters by value or const reference, and do not depend upon any global state. Therefore, all the routines in this file provide the strong exception guarantee.
-
-[heading Notes]
-
-* The iterator-based version of the routine `partition_point` is also available as part of the C++11 standard.
-
-* For empty ranges, the partition point is the end of the range.
-
-[endsect]
-
-[/ File partition_point.qbk
-Copyright 2011 Marshall Clow
-Distributed under the Boost Software License, Version 1.0.
-(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-]
-
diff --git a/third_party/boostorg/algorithm/example/Jamfile.v2 b/third_party/boostorg/algorithm/example/Jamfile.v2
deleted file mode 100644
index 100878c..0000000
--- a/third_party/boostorg/algorithm/example/Jamfile.v2
+++ /dev/null
@@ -1,25 +0,0 @@
-#  Boost algorithm library example programs Jamfile
-#
-#  Copyright Marshall Clow 2010-2012. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-#  See http://www.boost.org for updates, documentation, and revision history.
-
-
-project /boost/algorithm/example
-    : requirements
-      <include>../../../
-      <optimization>speed
-      <toolset>msvc:<define>_SCL_SECURE_NO_WARNINGS
-      <toolset>msvc:<define>NOMINMAX
-      <link>static
-    :
-    ;
-
-exe clamp_example   : clamp_example.cpp ;
-exe search_example  : search_example.cpp ;
-exe is_palindrome_example  : is_palindrome_example.cpp ;
-exe is_partitioned_until_example  : is_partitioned_until_example.cpp ;
-exe apply_permutation_example  : apply_permutation_example.cpp ;
diff --git a/third_party/boostorg/algorithm/example/apply_permutation_example.cpp b/third_party/boostorg/algorithm/example/apply_permutation_example.cpp
deleted file mode 100644
index 7ed91ae..0000000
--- a/third_party/boostorg/algorithm/example/apply_permutation_example.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-  Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.com>, 2017
-
-  Distributed under the Boost Software License, Version 1.0. (See
-  accompanying file LICENSE_1_0.txt or copy at
-  http://www.boost.org/LICENSE_1_0.txt)
-
-  See http://www.boost.org/ for latest version.
-*/
-
-#include <vector>
-#include <iostream>
-
-#include <boost/algorithm/apply_permutation.hpp>
-
-
-namespace ba = boost::algorithm;
-
-int main ( int /*argc*/, char * /*argv*/ [] )
-{
-    // WARNING: This example requires a C++11 or newer compiler
-    {
-        std::cout << "apply_permutation with iterators:\n";
-        std::vector<int> vec{1, 2, 3, 4, 5}, order{4, 2, 3, 1, 0};
-
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        for (const auto& x : vec)
-        {
-            std::cout << x << ", ";
-        }
-        std::cout << std::endl;
-    }
-    {
-        std::cout << "apply_reverse_permutation with iterators:\n";
-        std::vector<int> vec{1, 2, 3, 4, 5}, order{4, 2, 3, 1, 0};
-
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        for (const auto& x : vec)
-        {
-            std::cout << x << ", ";
-        }
-        std::cout << std::endl;
-    }
-    {
-        std::cout << "apply_reverse_permutation with ranges:\n";
-        std::vector<int> vec{1, 2, 3, 4, 5}, order{4, 2, 3, 1, 0};
-
-        ba::apply_reverse_permutation(vec, order);
-        for (const auto& x : vec)
-        {
-            std::cout << x << ", ";
-        }
-        std::cout << std::endl;
-    }
-    {
-        std::cout << "apply_permutation with ranges:\n";
-        std::vector<int> vec{1, 2, 3, 4, 5}, order{4, 2, 3, 1, 0};
-
-        ba::apply_permutation(vec, order);
-        for (const auto& x : vec)
-        {
-            std::cout << x << ", ";
-        }
-        std::cout << std::endl;
-    }
-
-    return 0;
-}
-
diff --git a/third_party/boostorg/algorithm/example/clamp_example.cpp b/third_party/boostorg/algorithm/example/clamp_example.cpp
deleted file mode 100644
index be61c84..0000000
--- a/third_party/boostorg/algorithm/example/clamp_example.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <string>
-#include <iostream>     // for cout, etc
-
-#include <boost/algorithm/clamp.hpp>
-
-namespace ba = boost::algorithm;
-
-bool compare_string_lengths ( const std::string &one, const std::string &two )
-{
-    return one.length () < two.length ();
-}
-
-int main ( int /*argc*/, char * /*argv*/ [] ) {
-//  Clamp takes a value and two "fenceposts", and brings the value "between" the fenceposts.
-
-//  If the input value is "between" the fenceposts, then it is returned unchanged.
-    std::cout << "Clamping   5 to between [1, 10] -> " << ba::clamp ( 5, 1, 10 ) << std::endl;
-
-//  If the input value is outside the range of the fenceposts, it is "brought into" range.
-    std::cout << "Clamping  15 to between [1, 10] -> " << ba::clamp (  15, 1, 10 ) << std::endl;
-    std::cout << "Clamping -15 to between [1, 10] -> " << ba::clamp ( -15, 1, 10 ) << std::endl;
-
-//  It doesn't just work for ints
-    std::cout << "Clamping 5.1 to between [1, 10] -> " << ba::clamp ( 5.1, 1.0, 10.0 ) << std::endl;
-
-    {
-        std::string one ( "Lower Bound" ), two ( "upper bound!" ), test1 ( "test#" ), test2 ( "#test" );
-        std::cout << "Clamping '" << test1 << "' between ['" << one << "' and '" << two << "'] -> '" << 
-            ba::clamp ( test1, one, two ) << "'" << std::endl;
-        std::cout << "Clamping '" << test2 << "' between ['" << one << "' and '" << two << "'] -> '" << 
-            ba::clamp ( test2, one, two ) << "'" << std::endl;
-    //  There is also a predicate based version, if you want to compare objects in your own way
-        std::cout << "Clamping '" << test1 << "' between ['" << one << "' and '" << two << "'] (comparing lengths) -> '" << 
-            ba::clamp ( test1, one, two, compare_string_lengths ) << "'" << std::endl;
-        std::cout << "Clamping '" << test2 << "' between ['" << one << "' and '" << two << "'] (comparing lengths) -> '" << 
-            ba::clamp ( test2, one, two, compare_string_lengths ) << "'" << std::endl;
-    
-    }
-
-//  Sometimes, though, you don't get quite what you expect
-//  This is because the two double arguments get converted to int
-    std::cout << "Somewhat unexpected: clamp ( 12, 14.7, 15.9 ) --> " << ba::clamp ( 12, 14.7, 15.9 ) << std::endl;
-    std::cout << "Expected:     clamp ((double)12, 14.7, 15.9 ) --> " << ba::clamp ((double) 12, 14.7, 15.9 ) << std::endl;
-    return 0;
-    }
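The example above exercises the predicate overload only with a named comparison function; a small sketch (editor's addition, not part of the original example) of the same overload with a lambda, clamping by magnitude:

```cpp
#include <cmath>
#include <iostream>
#include <boost/algorithm/clamp.hpp>

namespace ba = boost::algorithm;

int main()
{
    // Compare values by absolute magnitude instead of signed value.
    auto absLess = [](double a, double b) { return std::fabs(a) < std::fabs(b); };

    // |-20| is above |10|, so the upper fencepost is returned.
    std::cout << ba::clamp(-20.0, 1.0, 10.0, absLess) << std::endl;   // prints 10
    // |-5| already lies between |1| and |10|, so the value is unchanged.
    std::cout << ba::clamp(-5.0, 1.0, 10.0, absLess) << std::endl;    // prints -5
    return 0;
}
```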
diff --git a/third_party/boostorg/algorithm/example/is_palindrome_example.cpp b/third_party/boostorg/algorithm/example/is_palindrome_example.cpp
deleted file mode 100644
index cefc782..0000000
--- a/third_party/boostorg/algorithm/example/is_palindrome_example.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-   Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.by>, 2016
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <list>
-#include <iterator>
-#include <functional>
-#include <iostream>
-
-#include <boost/algorithm/is_palindrome.hpp>
-
-
-namespace ba = boost::algorithm;
-
-template <typename T>
-bool funcComparator(const T& v1, const T& v2)
-{
-    return v1 == v2;
-}
-
-struct functorComparator
-{
-    template <typename T>
-    bool operator()(const T& v1, const T& v2) const
-    {
-        return v1 == v2;
-    }
-};
-
-
-int main ( int /*argc*/, char * /*argv*/ [] )
-{
-    //You can use this algorithm with iterators (minimum Bidirectional)
-    std::vector<int> vec{1,2,1};
-    if(ba::is_palindrome(vec.begin(), vec.end()))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-
-    //Of course, you can use const iterators
-    if(ba::is_palindrome(vec.cbegin(), vec.cend()))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-
-    //Example with bidirectional iterators
-    std::list<int> list{1,2,1};
-    if(ba::is_palindrome(list.begin(), list.end()))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-
-    //You can use custom comparators like functions, functors, lambdas
-    auto lambdaComparator = [](int v1, int v2){ return v1 == v2; };
-    auto objFunc = std::function<bool(int, int)>(lambdaComparator);
-
-    if(ba::is_palindrome(vec.begin(), vec.end(), lambdaComparator))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-    if(ba::is_palindrome(vec.begin(), vec.end(), funcComparator<int>))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-    if(ba::is_palindrome(vec.begin(), vec.end(), functorComparator()))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-    if(ba::is_palindrome(vec.begin(), vec.end(), objFunc))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-
-
-    //You can use ranges
-    if(ba::is_palindrome(vec))
-        std::cout << "This container is palindrome" << std::endl;
-    else
-        std::cout << "This container is not palindrome" << std::endl;
-    
-    //You can use C-strings
-    if(ba::is_palindrome("aba"))
-        std::cout << "This C-string is palindrome" << std::endl;
-    else
-        std::cout << "This C-string is not palindrome" << std::endl;
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/example/is_partitioned_until_example.cpp b/third_party/boostorg/algorithm/example/is_partitioned_until_example.cpp
deleted file mode 100644
index 759176b..0000000
--- a/third_party/boostorg/algorithm/example/is_partitioned_until_example.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-   Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.by>, 2017
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-#include <iostream>
-
-#include <boost/algorithm/is_partitioned_until.hpp>
-
-
-namespace ba = boost::algorithm;
-
-bool isOdd(const int v1)
-{
-    return v1 % 2 != 0;
-}
-
-struct isOddComp
-{
-    bool operator()(const int v1) const
-    {
-        return v1 % 2 != 0;
-    }
-};
-
-
-int main ( int /*argc*/, char * /*argv*/ [] )
-{
-    std::vector<int> good({1, 2, 4});
-    std::vector<int> bad({1, 2, 3});
-
-    //Use custom function
-    auto it1 = ba::is_partitioned_until(good.begin(), good.end(), isOdd);
-    if(it1 == good.end())
-    {
-        std::cout << "The sequence is partitioned\n";
-    }
-    else
-    {
-        std::cout << "is_partitioned_until check failed here: " << *it1 << std::endl;
-    }
-
-    //Use custom comparator
-    auto it2 = ba::is_partitioned_until(good.begin(), good.end(), isOddComp());
-    if(it2 == good.end())
-    {
-        std::cout << "The sequence is partitioned\n";
-    }
-    else
-    {
-        std::cout << "is_partitioned_until check failed here: " << *it2 << std::endl;
-    }
-
-    auto it3 = ba::is_partitioned_until(bad, isOdd);
-    if(it3 == bad.end())
-    {
-        std::cout << "The sequence is partitioned\n";
-    }
-    else
-    {
-        std::cout << "is_partitioned_until check failed here: " << *it3 << std::endl;
-    }
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/example/search_example.cpp b/third_party/boostorg/algorithm/example/search_example.cpp
deleted file mode 100644
index 438bee9..0000000
--- a/third_party/boostorg/algorithm/example/search_example.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <string>
-#include <iostream>     //  for cout, etc.
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-namespace ba = boost::algorithm;
-
-const std::string haystack ( "ABACAB is it everywhere!" );
-const std::string needle1  ( "ACAB" );
-const std::string needle2  ( "not ABA" );
-
-
-
-int main ( int /*argc*/, char * /*argv*/ [] ) {
-//  In search.hpp, there are generic implementations of three classic sequence search
-//  algorithms. They all have the same (dual) interface.
-
-//  There is a procedural interface, based on std::search:
-    if ( ba::boyer_moore_search ( haystack.begin (), haystack.end (), needle1.begin (), needle1.end ()) != haystack.end ())
-        std::cout << "Found '" << needle1 << "'  in '" << haystack << "' (boyer-moore 1)" << std::endl;
-    else
-        std::cout << "Did NOT find '" << needle1 << "'  in '" << haystack << "' (boyer-moore 1)" << std::endl;
-
-//  If you plan on searching for the same pattern in several different data sets,
-//  you can create a search object and use that over and over again - amortizing the setup
-//  costs across several searches
-    ba::boyer_moore<std::string::const_iterator> search1 ( needle1.begin (), needle1.end ());
-    if ( search1 ( haystack.begin (), haystack.end ()) != haystack.end ())
-        std::cout << "Found '" << needle1 << "'  in '" << haystack << "' (boyer-moore 2)" << std::endl;
-    else
-        std::cout << "Did NOT find '" << needle1 << "'  in '" << haystack << "' (boyer-moore 2)" << std::endl;
-
-//  There is also an implementation of boyer-moore-horspool searching
-    if ( ba::boyer_moore_horspool_search ( haystack.begin (), haystack.end (), needle1.begin (), needle1.end ()) != haystack.end ())
-        std::cout << "Found '" << needle1 << "'  in '" << haystack << "' (boyer-moore-horspool)" << std::endl;
-    else
-        std::cout << "Did NOT find '" << needle1 << "'  in '" << haystack << "' (boyer-moore-horspool)" << std::endl;
-
-//  And also the knuth-morris-pratt search algorithm
-    if ( ba::knuth_morris_pratt_search ( haystack.begin (), haystack.end (), needle1.begin (), needle1.end ()) != haystack.end ())
-        std::cout << "Found '" << needle1 << "'  in '" << haystack << "' (knuth_morris_pratt)" << std::endl;
-    else
-        std::cout << "Did NOT find '" << needle1 << "'  in '" << haystack << "' (knuth_morris_pratt)" << std::endl;
-
-    return 0;
-    }
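The example builds a `boyer_moore` search object but only searches a single corpus; a sketch of the amortized pattern it describes (editor's addition, mirroring the interface used above, where `operator()` returns a corpus iterator; newer Boost releases return an iterator pair instead):

```cpp
#include <iostream>
#include <string>
#include <vector>

#include <boost/algorithm/searching/boyer_moore.hpp>

namespace ba = boost::algorithm;

int main()
{
    const std::string needle("ACAB");
    // Pay the table-building cost once...
    ba::boyer_moore<std::string::const_iterator> search(needle.begin(), needle.end());

    // ...then reuse the searcher across several corpora.
    std::vector<std::string> corpora = {"ABACAB is it everywhere!", "nothing here", "ACAB up front"};
    for (const std::string& corpus : corpora) {
        bool found = search(corpus.begin(), corpus.end()) != corpus.end();
        std::cout << (found ? "Found" : "Did NOT find") << " '" << needle
                  << "' in '" << corpus << "'" << std::endl;
    }
    return 0;
}
```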
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/algorithm.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/algorithm.hpp
deleted file mode 100644
index 2bbee1d..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/algorithm.hpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2014.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
- Revision history:
-    2 Dec 2014 mtc First version; power
-   
-*/
-
-/// \file algorithm.hpp
-/// \brief Misc Algorithms
-/// \author Marshall Clow
-///
-
-#ifndef BOOST_ALGORITHM_HPP
-#define BOOST_ALGORITHM_HPP
-
-#include <functional> // for plus and multiplies
-
-#include <boost/utility/enable_if.hpp> // for boost::disable_if
-#include <boost/type_traits/is_integral.hpp>
-
-namespace boost { namespace algorithm {
-
-template <typename T>
-BOOST_CXX14_CONSTEXPR T identity_operation ( std::multiplies<T> ) { return T(1); }
-
-template <typename T>
-BOOST_CXX14_CONSTEXPR T identity_operation ( std::plus<T> ) { return T(0); }
-
-
-/// \fn power ( T x, Integer n )
-/// \return the value "x" raised to the power "n"
-/// 
-/// \param x     The value to be exponentiated
-/// \param n     The exponent (must be >= 0)
-///
-//  \remark Taken from Knuth, The Art of Computer Programming, Volume 2:
-//  Seminumerical Algorithms, Section 4.6.3
-template <typename T, typename Integer>
-BOOST_CXX14_CONSTEXPR typename boost::enable_if<boost::is_integral<Integer>, T>::type
-power (T x, Integer n) {
-    T y = 1; // Should be "T y{1};" 
-    if (n == 0) return y;
-    while (true) {
-        if (n % 2 == 1) {
-            y = x * y;
-            if (n == 1)
-                return y;
-            }
-        n = n / 2;
-        x = x * x;
-        }
-    return y;
-    }
-
-/// \fn power ( T x, Integer n, Operation op )
-/// \return the value "x" raised to the power "n"
-/// using the operation "op".
-/// 
-/// \param x     The value to be exponentiated
-/// \param n     The exponent (must be >= 0)
-/// \param op    The operation used
-///
-//  \remark Taken from Knuth, The Art of Computer Programming, Volume 2:
-//  Seminumerical Algorithms, Section 4.6.3
-template <typename T, typename Integer, typename Operation>
-BOOST_CXX14_CONSTEXPR typename boost::enable_if<boost::is_integral<Integer>, T>::type
-power (T x, Integer n, Operation op) {
-    T y = identity_operation(op);
-    if (n == 0) return y;
-    while (true) {
-        if (n % 2 == 1) {
-            y = op(x, y);
-            if (n == 1)
-                return y;
-            }
-        n = n / 2;
-        x = op(x, x);
-        }
-    return y;
-    }
-
-}}
-
-#endif // BOOST_ALGORITHM_HPP
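A short worked example of `power` (editor's addition): with the default operation it is integer exponentiation by repeated squaring, and with `std::plus` the identity element becomes 0, so the "power" degenerates into repeated addition.

```cpp
#include <cassert>
#include <functional>

#include <boost/algorithm/algorithm.hpp>

int main()
{
    // Default operation is multiplication: 2^10 == 1024.
    assert(boost::algorithm::power(2, 10) == 1024);

    // With std::plus the identity is 0, so power(3, 5, plus) == 3+3+3+3+3 == 15.
    assert(boost::algorithm::power(3, 5, std::plus<int>()) == 15);
    return 0;
}
```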
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/apply_permutation.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/apply_permutation.hpp
deleted file mode 100644
index c844cfc..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/apply_permutation.hpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-  Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.com>, 2017
-
-  Distributed under the Boost Software License, Version 1.0. (See
-  accompanying file LICENSE_1_0.txt or copy at
-  http://www.boost.org/LICENSE_1_0.txt)
-
-  See http://www.boost.org/ for latest version.
-
-
-  Based on https://blogs.msdn.microsoft.com/oldnewthing/20170104-00/?p=95115
-*/
-
-/// \file  apply_permutation.hpp
-/// \brief Apply permutation to a sequence.
-/// \author Alexander Zaitsev
-
-#ifndef BOOST_ALGORITHM_APPLY_PERMUTATION_HPP
-#define BOOST_ALGORITHM_APPLY_PERMUTATION_HPP
-
-#include <algorithm>
-#include <type_traits>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm
-{
-
-/// \fn apply_permutation ( RandomAccessIterator1 item_begin, RandomAccessIterator1 item_end, RandomAccessIterator2 ind_begin, RandomAccessIterator2 ind_end )
-/// \brief Reorder item sequence with index sequence order
-///
-/// \param item_begin    The start of the item sequence
-/// \param item_end		 One past the end of the item sequence
-/// \param ind_begin     The start of the index sequence.
-/// \param ind_end       One past the end of the index sequence.
-///
-/// \note Item sequence size should be equal to index size. Otherwise behavior is undefined.
-///       Complexity: O(N).
-template<typename RandomAccessIterator1, typename RandomAccessIterator2>
-void
-apply_permutation(RandomAccessIterator1 item_begin, RandomAccessIterator1 item_end,
-                  RandomAccessIterator2 ind_begin, RandomAccessIterator2 ind_end)
-{
-    using Diff = typename std::iterator_traits<RandomAccessIterator1>::difference_type;
-    using std::swap;
-    Diff size = std::distance(item_begin, item_end);
-    for (Diff i = 0; i < size; i++)
-    {
-        auto current = i;
-        while (i != ind_begin[current])
-        {
-            auto next = ind_begin[current];
-            swap(item_begin[current], item_begin[next]);
-            ind_begin[current] = current;
-            current = next;
-        }
-        ind_begin[current] = current;
-    }
-}
-
-/// \fn apply_reverse_permutation ( RandomAccessIterator1 item_begin, RandomAccessIterator1 item_end, RandomAccessIterator2 ind_begin, RandomAccessIterator2 ind_end )
-/// \brief Reorder item sequence with index sequence order
-///
-/// \param item_begin    The start of the item sequence
-/// \param item_end		 One past the end of the item sequence
-/// \param ind_begin     The start of the index sequence.
-/// \param ind_end       One past the end of the index sequence.
-///
-/// \note Item sequence size should be equal to index size. Otherwise behavior is undefined.
-///       Complexity: O(N).
-template<typename RandomAccessIterator1, typename RandomAccessIterator2>
-void
-apply_reverse_permutation(
-        RandomAccessIterator1 item_begin,
-        RandomAccessIterator1 item_end,
-        RandomAccessIterator2 ind_begin,
-        RandomAccessIterator2 ind_end)
-{
-    using Diff = typename std::iterator_traits<RandomAccessIterator2>::difference_type;
-    using std::swap;
-    Diff length = std::distance(item_begin, item_end);
-    for (Diff i = 0; i < length; i++)
-    {
-        while (i != ind_begin[i])
-        {
-            Diff next = ind_begin[i];
-            swap(item_begin[i], item_begin[next]);
-            swap(ind_begin[i], ind_begin[next]);
-        }
-    }
-}
-
-/// \fn apply_permutation ( Range1 item_range, Range2 ind_range )
-/// \brief Reorder item sequence with index sequence order
-///
-/// \param item_range    The item sequence
-/// \param ind_range     The index sequence
-///
-/// \note Item sequence size should be equal to index size. Otherwise behavior is undefined.
-///       Complexity: O(N).
-template<typename Range1, typename Range2>
-void
-apply_permutation(Range1& item_range, Range2& ind_range)
-{
-    apply_permutation(boost::begin(item_range), boost::end(item_range),
-                      boost::begin(ind_range), boost::end(ind_range));
-}
-
-/// \fn apply_reverse_permutation ( Range1 item_range, Range2 ind_range )
-/// \brief Reorder item sequence with index sequence order
-///
-/// \param item_range    The item sequence
-/// \param ind_range     The index sequence
-///
-/// \note Item sequence size should be equal to index size. Otherwise behavior is undefined.
-///       Complexity: O(N).
-template<typename Range1, typename Range2>
-void
-apply_reverse_permutation(Range1& item_range, Range2& ind_range)
-{
-    apply_reverse_permutation(boost::begin(item_range), boost::end(item_range),
-                              boost::begin(ind_range), boost::end(ind_range));
-}
-
-}}
-#endif //BOOST_ALGORITHM_APPLY_PERMUTATION_HPP
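Both routines above treat an index sequence that is not a permutation of `0..N-1` as undefined behaviour, and, as the implementations show, they rewrite the index sequence to the identity as they run, so the order vector cannot be reused without re-initializing it. A hypothetical precondition check (editor's sketch; `is_valid_permutation` is not part of Boost.Algorithm):

```cpp
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

// Hypothetical helper, not part of Boost.Algorithm:
// true iff `ind` contains each of 0 .. ind.size()-1 exactly once.
bool is_valid_permutation(std::vector<std::size_t> ind)   // taken by value: we sort a copy
{
    std::vector<std::size_t> expected(ind.size());
    std::iota(expected.begin(), expected.end(), std::size_t(0));
    std::sort(ind.begin(), ind.end());
    return ind == expected;
}
```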
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/clamp.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/clamp.hpp
deleted file mode 100644
index d027acd..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/clamp.hpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
- Revision history:
-   27 June 2009 mtc First version
-   23 Oct  2010 mtc Added predicate version
-   
-*/
-
-/// \file clamp.hpp
-/// \brief Clamp algorithm
-/// \author Marshall Clow
-///
-/// Suggested by olafvdspek in https://svn.boost.org/trac/boost/ticket/3215
-
-#ifndef BOOST_ALGORITHM_CLAMP_HPP
-#define BOOST_ALGORITHM_CLAMP_HPP
-
-#include <functional>       //  For std::less
-#include <iterator>         //  For std::iterator_traits
-#include <cassert>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/mpl/identity.hpp>      // for identity
-#include <boost/utility/enable_if.hpp> // for boost::disable_if
-
-namespace boost { namespace algorithm {
-
-/// \fn clamp ( T const& val, 
-///               typename boost::mpl::identity<T>::type const & lo, 
-///               typename boost::mpl::identity<T>::type const & hi, Pred p )
-/// \return the value "val" brought into the range [ lo, hi ]
-///     using the comparison predicate p.
-///     If p ( val, lo ) return lo.
-///     If p ( hi, val ) return hi.
-///     Otherwise, return the original value.
-/// 
-/// \param val   The value to be clamped
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-/// \param p     A predicate to use to compare the values.
-///                 p ( a, b ) returns a boolean.
-///
-  template<typename T, typename Pred> 
-  BOOST_CXX14_CONSTEXPR T const & clamp ( T const& val, 
-    typename boost::mpl::identity<T>::type const & lo, 
-    typename boost::mpl::identity<T>::type const & hi, Pred p )
-  {
-//    assert ( !p ( hi, lo ));    // Can't assert p ( lo, hi ) b/c they might be equal
-    return p ( val, lo ) ? lo : p ( hi, val ) ? hi : val;
-  } 
-
-
-/// \fn clamp ( T const& val, 
-///               typename boost::mpl::identity<T>::type const & lo, 
-///               typename boost::mpl::identity<T>::type const & hi )
-/// \return the value "val" brought into the range [ lo, hi ].
-///     If the value is less than lo, return lo.
-///     If the value is greater than "hi", return hi.
-///     Otherwise, return the original value.
-///
-/// \param val   The value to be clamped
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-///
-  template<typename T> 
-  BOOST_CXX14_CONSTEXPR T const& clamp ( const T& val, 
-    typename boost::mpl::identity<T>::type const & lo, 
-    typename boost::mpl::identity<T>::type const & hi )
-  {
-    return boost::algorithm::clamp ( val, lo, hi, std::less<T>());
-  } 
-
-/// \fn clamp_range ( InputIterator first, InputIterator last, OutputIterator out, 
-///       std::iterator_traits<InputIterator>::value_type const & lo, 
-///       std::iterator_traits<InputIterator>::value_type const & hi )
-/// \return clamp the sequence of values [first, last) into [ lo, hi ]
-/// 
-/// \param first The start of the range of values
-/// \param last  One past the end of the range of input values
-/// \param out   An output iterator to write the clamped values into
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-///
-  template<typename InputIterator, typename OutputIterator> 
-  BOOST_CXX14_CONSTEXPR OutputIterator clamp_range ( InputIterator first, InputIterator last, OutputIterator out,
-    typename std::iterator_traits<InputIterator>::value_type const & lo, 
-    typename std::iterator_traits<InputIterator>::value_type const & hi )
-  {
-  // this could also be written with bind and std::transform
-    while ( first != last )
-        *out++ = boost::algorithm::clamp ( *first++, lo, hi );
-    return out;
-  } 
-
-/// \fn clamp_range ( const Range &r, OutputIterator out, 
-///       typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & lo,
-///       typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & hi )
-/// \return clamp the sequence of values [first, last) into [ lo, hi ]
-/// 
-/// \param r     The range of values to be clamped
-/// \param out   An output iterator to write the clamped values into
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-///
-  template<typename Range, typename OutputIterator> 
-  BOOST_CXX14_CONSTEXPR typename boost::disable_if_c<boost::is_same<Range, OutputIterator>::value, OutputIterator>::type
-  clamp_range ( const Range &r, OutputIterator out,
-    typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & lo, 
-    typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & hi )
-  {
-    return boost::algorithm::clamp_range ( boost::begin ( r ), boost::end ( r ), out, lo, hi );
-  } 
-
-
-/// \fn clamp_range ( InputIterator first, InputIterator last, OutputIterator out, 
-///       std::iterator_traits<InputIterator>::value_type const & lo, 
-///       std::iterator_traits<InputIterator>::value_type const & hi, Pred p )
-/// \return clamp the sequence of values [first, last) into [ lo, hi ]
-///     using the comparison predicate p.
-/// 
-/// \param first The start of the range of values
-/// \param last  One past the end of the range of input values
-/// \param out   An output iterator to write the clamped values into
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-/// \param p     A predicate to use to compare the values.
-///                 p ( a, b ) returns a boolean.
-
-///
-  template<typename InputIterator, typename OutputIterator, typename Pred> 
-  BOOST_CXX14_CONSTEXPR OutputIterator clamp_range ( InputIterator first, InputIterator last, OutputIterator out,
-    typename std::iterator_traits<InputIterator>::value_type const & lo, 
-    typename std::iterator_traits<InputIterator>::value_type const & hi, Pred p )
-  {
-  // this could also be written with bind and std::transform
-    while ( first != last )
-        *out++ = boost::algorithm::clamp ( *first++, lo, hi, p );
-    return out;
-  } 
-
-/// \fn clamp_range ( const Range &r, OutputIterator out, 
-///       typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & lo,
-///       typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & hi,
-///       Pred p )
-/// \return clamp the sequence of values [first, last) into [ lo, hi ]
-///     using the comparison predicate p.
-/// 
-/// \param r     The range of values to be clamped
-/// \param out   An output iterator to write the clamped values into
-/// \param lo    The lower bound of the range to be clamped to
-/// \param hi    The upper bound of the range to be clamped to
-/// \param p     A predicate to use to compare the values.
-///                 p ( a, b ) returns a boolean.
-//
-//  Disable this template if the first two parameters are the same type;
-//  In that case, the user will get the two iterator version.
-  template<typename Range, typename OutputIterator, typename Pred> 
-  BOOST_CXX14_CONSTEXPR typename boost::disable_if_c<boost::is_same<Range, OutputIterator>::value, OutputIterator>::type
-  clamp_range ( const Range &r, OutputIterator out,
-    typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & lo, 
-    typename std::iterator_traits<typename boost::range_iterator<const Range>::type>::value_type const & hi,
-    Pred p )
-  {
-    return boost::algorithm::clamp_range ( boost::begin ( r ), boost::end ( r ), out, lo, hi, p );
-  } 
-
-
-}}
-
-#endif // BOOST_ALGORITHM_CLAMP_HPP
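A brief sketch of the range form of `clamp_range` (editor's addition), writing the clamped copy through a `std::back_inserter`:

```cpp
#include <iterator>
#include <vector>

#include <boost/algorithm/clamp.hpp>

namespace ba = boost::algorithm;

int main()
{
    std::vector<int> in = {-3, 4, 12, 7, 100};
    std::vector<int> out;

    // Clamp every element of `in` into [0, 10] and append the results to `out`.
    ba::clamp_range(in, std::back_inserter(out), 0, 10);
    // out now holds {0, 4, 10, 7, 10}.
    return 0;
}
```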
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/all_of.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/all_of.hpp
deleted file mode 100644
index 527bbd5..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/all_of.hpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  all_of.hpp
-/// \brief Test ranges to see if all elements match a value or predicate.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_ALL_OF_HPP
-#define BOOST_ALGORITHM_ALL_OF_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn all_of ( InputIterator first, InputIterator last, Predicate p )
-/// \return true if all elements in [first, last) satisfy the predicate 'p'
-/// \note returns true on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param p     A predicate for testing the elements of the sequence
-///
-/// \note           This function is part of the C++2011 standard library.
-template<typename InputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool all_of ( InputIterator first, InputIterator last, Predicate p )
-{
-    for ( ; first != last; ++first )
-        if ( !p(*first)) 
-            return false;
-    return true; 
-} 
-
-/// \fn all_of ( const Range &r, Predicate p )
-/// \return true if all elements in the range satisfy the predicate 'p'
-/// \note returns true on an empty range
-/// 
-/// \param r    The input range
-/// \param p    A predicate for testing the elements of the range
-///
-template<typename Range, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool all_of ( const Range &r, Predicate p )
-{
-    return boost::algorithm::all_of ( boost::begin (r), boost::end (r), p );
-} 
-
-/// \fn all_of_equal ( InputIterator first, InputIterator last, const T &val )
-/// \return true if all elements in [first, last) are equal to 'val'
-/// \note returns true on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param val   A value to compare against
-///
-template<typename InputIterator, typename T> 
-BOOST_CXX14_CONSTEXPR bool all_of_equal ( InputIterator first, InputIterator last, const T &val )
-{
-    for ( ; first != last; ++first )
-        if ( val != *first )
-        return false;
-    return true; 
-} 
-
-/// \fn all_of_equal ( const Range &r, const T &val )
-/// \return true if all elements in the range are equal to 'val'
-/// \note returns true on an empty range
-/// 
-/// \param r    The input range
-/// \param val  A value to compare against
-///
-template<typename Range, typename T> 
-BOOST_CXX14_CONSTEXPR bool all_of_equal ( const Range &r, const T &val ) 
-{
-    return boost::algorithm::all_of_equal ( boost::begin (r), boost::end (r), val );
-} 
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_ALL_OF_HPP
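Usage sketch for the two flavours (editor's addition); note the vacuous-truth behaviour on empty ranges called out in the documentation above:

```cpp
#include <cassert>
#include <vector>

#include <boost/algorithm/cxx11/all_of.hpp>

namespace ba = boost::algorithm;

bool isPositive(int i) { return i > 0; }

int main()
{
    std::vector<int> v = {1, 2, 3};
    std::vector<int> ones = {1, 1, 1};
    std::vector<int> empty;

    assert(ba::all_of(v, isPositive));          // predicate form
    assert(ba::all_of_equal(ones, 1));          // value form
    assert(ba::all_of(empty, isPositive));      // vacuously true on an empty range
    return 0;
}
```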
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/any_of.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/any_of.hpp
deleted file mode 100644
index d9e2414..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/any_of.hpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-/// \file
-/// \brief Test ranges to see if any elements match a value or predicate.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_ANY_OF_HPP
-#define BOOST_ALGORITHM_ANY_OF_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn any_of ( InputIterator first, InputIterator last, Predicate p )
-/// \return true if any of the elements in [first, last) satisfy the predicate
-/// \note returns false on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param p     A predicate for testing the elements of the sequence
-///
-template<typename InputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool any_of ( InputIterator first, InputIterator last, Predicate p ) 
-{
-    for ( ; first != last; ++first )
-        if ( p(*first)) 
-            return true;
-    return false; 
-} 
-
-/// \fn any_of ( const Range &r, Predicate p )
-/// \return true if any elements in the range satisfy the predicate 'p'
-/// \note returns false on an empty range
-/// 
-/// \param r    The input range
-/// \param p    A predicate for testing the elements of the range
-///
-template<typename Range, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool any_of ( const Range &r, Predicate p )
-{
-    return boost::algorithm::any_of (boost::begin (r), boost::end (r), p);
-} 
-
-/// \fn any_of_equal ( InputIterator first, InputIterator last, const V &val )
-/// \return true if any of the elements in [first, last) are equal to 'val'
-/// \note returns false on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param val   A value to compare against
-///
-template<typename InputIterator, typename V> 
-BOOST_CXX14_CONSTEXPR bool any_of_equal ( InputIterator first, InputIterator last, const V &val ) 
-{
-    for ( ; first != last; ++first )
-        if ( val == *first )
-            return true;
-    return false; 
-} 
-
-/// \fn any_of_equal ( const Range &r, const V &val )
-/// \return true if any of the elements in the range are equal to 'val'
-/// \note returns false on an empty range
-/// 
-/// \param r     The input range
-/// \param val   A value to compare against
-///
-template<typename Range, typename V> 
-BOOST_CXX14_CONSTEXPR bool any_of_equal ( const Range &r, const V &val ) 
-{
-    return boost::algorithm::any_of_equal (boost::begin (r), boost::end (r), val);
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_ANY_OF_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_if.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_if.hpp
deleted file mode 100644
index dc1fdef..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_if.hpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  copy_if.hpp
-/// \brief Copy a subset of a sequence to a new sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_COPY_IF_HPP
-#define BOOST_ALGORITHM_COPY_IF_HPP
-
-#include <utility>    // for std::pair, std::make_pair
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn copy_if ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-/// \brief Copies all the elements from the input range that satisfy the
-/// predicate to the output range.
-/// \return The updated output iterator
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-/// \note           This function is part of the C++2011 standard library.
-template<typename InputIterator, typename OutputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR OutputIterator copy_if ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-{
-    for ( ; first != last; ++first )
-        if (p(*first))
-            *result++ = *first;
-    return result;
-}
-
-/// \fn copy_if ( const Range &r, OutputIterator result, Predicate p )
-/// \brief Copies all the elements from the input range that satisfy the
-/// predicate to the output range.
-/// \return The updated output iterator
-/// 
-/// \param r        The input range
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename Range, typename OutputIterator, typename Predicate>
-BOOST_CXX14_CONSTEXPR OutputIterator copy_if ( const Range &r, OutputIterator result, Predicate p )
-{
-    return boost::algorithm::copy_if (boost::begin (r), boost::end(r), result, p);
-}
-
-
-/// \fn copy_while ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-/// \brief Copies all the elements at the start of the input range that
-///     satisfy the predicate to the output range.
-/// \return The updated input and output iterators
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename InputIterator, typename OutputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR std::pair<InputIterator, OutputIterator>
-copy_while ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-{
-    for ( ; first != last && p(*first); ++first )
-        *result++ = *first;
-    return std::make_pair(first, result);
-}
-
-/// \fn copy_while ( const Range &r, OutputIterator result, Predicate p )
-/// \brief Copies all the elements at the start of the input range that
-///     satisfy the predicate to the output range.
-/// \return The updated input and output iterators
-/// 
-/// \param r        The input range
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename Range, typename OutputIterator, typename Predicate>
-BOOST_CXX14_CONSTEXPR std::pair<typename boost::range_iterator<const Range>::type, OutputIterator> 
-copy_while ( const Range &r, OutputIterator result, Predicate p )
-{
-    return boost::algorithm::copy_while (boost::begin (r), boost::end(r), result, p);
-}
-
-
-/// \fn copy_until ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-/// \brief Copies all the elements at the start of the input range that do not
-///     satisfy the predicate to the output range.
-/// \return The updated output iterator
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename InputIterator, typename OutputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR std::pair<InputIterator, OutputIterator>
-copy_until ( InputIterator first, InputIterator last, OutputIterator result, Predicate p )
-{
-    for ( ; first != last && !p(*first); ++first )
-        *result++ = *first;
-    return std::make_pair(first, result);
-}
-
-/// \fn copy_until ( const Range &r, OutputIterator result, Predicate p )
-/// \brief Copies all the elements at the start of the input range that do not
-///     satisfy the predicate to the output range.
-/// \return The updated output iterator
-/// 
-/// \param r        The input range
-/// \param result   An output iterator to write the results into
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename Range, typename OutputIterator, typename Predicate>
-BOOST_CXX14_CONSTEXPR std::pair<typename boost::range_iterator<const Range>::type, OutputIterator> 
-copy_until ( const Range &r, OutputIterator result, Predicate p )
-{
-    return boost::algorithm::copy_until (boost::begin (r), boost::end(r), result, p);
-}
-
-}} // namespace boost and algorithm
-
-#endif  // BOOST_ALGORITHM_COPY_IF_HPP
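`copy_while` and `copy_until` return both the stopping point in the input and the updated output iterator; a short sketch (editor's addition):

```cpp
#include <cassert>
#include <iterator>
#include <vector>

#include <boost/algorithm/cxx11/copy_if.hpp>

namespace ba = boost::algorithm;

bool lessThan10(int i) { return i < 10; }

int main()
{
    std::vector<int> in = {1, 5, 9, 42, 3};
    std::vector<int> out;

    // Copy the leading run of elements that satisfy the predicate.
    auto res = ba::copy_while(in.begin(), in.end(), std::back_inserter(out), lessThan10);

    assert(out.size() == 3);     // 1, 5 and 9 were copied
    assert(*res.first == 42);    // copying stopped at the first failing element
    return 0;
}
```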
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_n.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_n.hpp
deleted file mode 100644
index e4bebd0..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/copy_n.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  copy_n.hpp
-/// \brief Copy n items from one sequence to another
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_COPY_N_HPP
-#define BOOST_ALGORITHM_COPY_N_HPP
-
-namespace boost { namespace algorithm {
-
-/// \fn copy_n ( InputIterator first, Size n, OutputIterator result )
-/// \brief Copies exactly n (n > 0) elements from the range starting at first to
-///     the range starting at result.
-/// \return         The updated output iterator
-/// 
-/// \param first    The start of the input sequence
-/// \param n        The number of elements to copy
-/// \param result   An output iterator to write the results into
-/// \note           This function is part of the C++2011 standard library.
-template <typename InputIterator, typename Size, typename OutputIterator>
-BOOST_CXX14_CONSTEXPR OutputIterator copy_n ( InputIterator first, Size n, OutputIterator result )
-{
-    for ( ; n > 0; --n, ++first, ++result )
-        *result = *first;
-    return result;
-}
-}} // namespace boost and algorithm
-
-#endif  // BOOST_ALGORITHM_COPY_N_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/find_if_not.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/find_if_not.hpp
deleted file mode 100644
index 6f5799a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/find_if_not.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  find_if_not.hpp
-/// \brief Find the first element in a sequence that does not satisfy a predicate.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_FIND_IF_NOT_HPP
-#define BOOST_ALGORITHM_FIND_IF_NOT_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn find_if_not(InputIterator first, InputIterator last, Predicate p)
-/// \brief Finds the first element in the sequence that does not satisfy the predicate.
-/// \return         The iterator pointing to the desired element.
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param p        A predicate for testing the elements of the range
-/// \note           This function is part of the C++2011 standard library.
-template<typename InputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR InputIterator find_if_not ( InputIterator first, InputIterator last, Predicate p )
-{
-    for ( ; first != last; ++first )
-        if ( !p(*first))
-            break;
-    return first;
-}
-
-/// \fn find_if_not ( const Range &r, Predicate p )
-/// \brief Finds the first element in the sequence that does not satisfy the predicate.
-/// \return         The iterator pointing to the desired element.
-/// 
-/// \param r        The input range
-/// \param p        A predicate for testing the elements of the range
-///
-template<typename Range, typename Predicate>
-BOOST_CXX14_CONSTEXPR typename boost::range_iterator<const Range>::type find_if_not ( const Range &r, Predicate p )
-{
-    return boost::algorithm::find_if_not (boost::begin (r), boost::end(r), p);
-}
-
-}}
-#endif  // BOOST_ALGORITHM_FIND_IF_NOT_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/iota.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/iota.hpp
deleted file mode 100644
index 6efc4d8..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/iota.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  iota.hpp
-/// \brief Generate an increasing series
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_IOTA_HPP
-#define BOOST_ALGORITHM_IOTA_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn iota ( ForwardIterator first, ForwardIterator last, T value )
-/// \brief Generates an increasing sequence of values, and stores them in [first, last)
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param value    The initial value of the sequence to be generated
-/// \note           This function is part of the C++2011 standard library.
-template <typename ForwardIterator, typename T>
-BOOST_CXX14_CONSTEXPR void iota ( ForwardIterator first, ForwardIterator last, T value )
-{
-    for ( ; first != last; ++first, ++value )
-        *first = value;
-}
-
-/// \fn iota ( Range &r, T value )
-/// \brief Generates an increasing sequence of values, and stores them in the input Range.
-/// 
-/// \param r        The input range
-/// \param value    The initial value of the sequence to be generated
-///
-template <typename Range, typename T>
-BOOST_CXX14_CONSTEXPR void iota ( Range &r, T value )
-{
-    boost::algorithm::iota (boost::begin(r), boost::end(r), value);
-}
-
-
-/// \fn iota_n ( OutputIterator out, T value, std::size_t n )
-/// \brief Generates an increasing sequence of values, and stores them in the input Range.
-/// 
-/// \param out      An output iterator to write the results into
-/// \param value    The initial value of the sequence to be generated
-/// \param n        The number of items to write
-///
-template <typename OutputIterator, typename T>
-BOOST_CXX14_CONSTEXPR OutputIterator iota_n ( OutputIterator out, T value, std::size_t n )
-{
-    for ( ; n > 0; --n, ++value )
-        *out++ = value;
-
-    return out;
-}
-
-}}
-
-#endif  // BOOST_ALGORITHM_IOTA_HPP
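A short sketch of the two entry points (editor's addition): `iota` fills an existing range in place, while `iota_n` writes exactly `n` values through an output iterator.

```cpp
#include <cassert>
#include <iterator>
#include <vector>

#include <boost/algorithm/cxx11/iota.hpp>

namespace ba = boost::algorithm;

int main()
{
    // Fill an existing range in place.
    std::vector<int> v(5);
    ba::iota(v, 10);                             // v == {10, 11, 12, 13, 14}
    assert(v.front() == 10 && v.back() == 14);

    // Generate exactly n values through an output iterator.
    std::vector<int> w;
    ba::iota_n(std::back_inserter(w), 100, 3);   // w == {100, 101, 102}
    assert(w.size() == 3 && w.back() == 102);
    return 0;
}
```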
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_partitioned.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_partitioned.hpp
deleted file mode 100644
index fb2c5a1..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_partitioned.hpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  is_partitioned.hpp
-/// \brief Tell if a sequence is partitioned
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_IS_PARTITIONED_HPP
-#define BOOST_ALGORITHM_IS_PARTITIONED_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn is_partitioned ( InputIterator first, InputIterator last, UnaryPredicate p )
-/// \brief Tests to see if a sequence is partitioned according to a predicate. 
-///	   In other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param p        The predicate to test the values with
-/// \note           This function is part of the C++2011 standard library.
-template <typename InputIterator, typename UnaryPredicate>
-BOOST_CXX14_CONSTEXPR bool is_partitioned ( InputIterator first, InputIterator last, UnaryPredicate p )
-{
-//  Run through the part that satisfies the predicate
-    for ( ; first != last; ++first )
-        if ( !p (*first))
-            break;
-//  Now the part that does not satisfy the predicate
-    for ( ; first != last; ++first )
-        if ( p (*first))
-            return false;
-    return true;
-}
-
-/// \fn is_partitioned ( const Range &r, UnaryPredicate p )
-/// \brief Tests to see if a sequence is partitioned according to a predicate. 
-///	   In other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-/// 
-/// \param r        The input range
-/// \param p        The predicate to test the values with
-///
-template <typename Range, typename UnaryPredicate>
-BOOST_CXX14_CONSTEXPR bool is_partitioned ( const Range &r, UnaryPredicate p )
-{
-    return boost::algorithm::is_partitioned (boost::begin(r), boost::end(r), p);
-}
-
-
-}}
-
-#endif  // BOOST_ALGORITHM_IS_PARTITIONED_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_permutation.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_permutation.hpp
deleted file mode 100644
index 0098cd5..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_permutation.hpp
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  is_permutation.hpp
-/// \brief Is a sequence a permutation of another sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_IS_PERMUTATION11_HPP
-#define BOOST_ALGORITHM_IS_PERMUTATION11_HPP
-
-#include <algorithm>    // for std::find_if, count_if, mismatch
-#include <utility>      // for std::pair
-#include <functional>   // for std::equal_to
-#include <iterator>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_same.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \cond DOXYGEN_HIDE
-namespace detail {
-    template <typename Predicate, typename Iterator>
-    struct value_predicate {
-        value_predicate ( Predicate p, Iterator it ) : p_ ( p ), it_ ( it ) {}
-
-        template <typename T1>
-        bool operator () ( const T1 &t1 ) const { return p_ ( *it_, t1 ); }
-    private:
-        Predicate p_;
-        Iterator it_;
-        };
-        
-//  Preconditions:
-//  1. The sequences are the same length
-//  2. Any common elements on the front have been removed (not necessary for correctness, just for performance)
-    template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate >
-    bool is_permutation_inner ( ForwardIterator1 first1, ForwardIterator1 last1,
-                                ForwardIterator2 first2, ForwardIterator2 last2,
-                                BinaryPredicate p ) {
-        //  for each unique value in the sequence [first1,last1), count how many times
-        //  it occurs, and make sure it occurs the same number of times in [first2, last2)
-            for ( ForwardIterator1 iter = first1; iter != last1; ++iter ) {
-                value_predicate<BinaryPredicate, ForwardIterator1> pred ( p, iter );
-
-            /*  For each value we haven't seen yet... */
-                if ( std::find_if ( first1, iter, pred ) == iter ) {
-                    std::size_t dest_count = std::count_if ( first2, last2, pred );
-                    if ( dest_count == 0 || dest_count != (std::size_t) std::count_if ( iter, last1, pred ))
-                        return false;
-                    }
-                }
-
-        return true;
-        }                      
-
-    template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate>
-    bool is_permutation_tag ( ForwardIterator1 first1, ForwardIterator1 last1, 
-                          ForwardIterator2 first2, ForwardIterator2 last2, 
-                          BinaryPredicate p,
-                          std::forward_iterator_tag, std::forward_iterator_tag ) {
-
-    //  Skip the common prefix (if any)
-        while ( first1 != last1 && first2 != last2 && p ( *first1, *first2 )) {
-            ++first1;
-            ++first2;
-            }
-        if ( first1 != last1 && first2 != last2 )
-            return boost::algorithm::detail::is_permutation_inner (
-                first1, last1, first2, last2, p );
-        return first1 == last1 && first2 == last2;
-        }
-
-    template <class RandomAccessIterator1, class RandomAccessIterator2, class BinaryPredicate>
-    bool is_permutation_tag ( RandomAccessIterator1 first1, RandomAccessIterator1 last1, 
-                          RandomAccessIterator2 first2, RandomAccessIterator2 last2, 
-                          BinaryPredicate p,
-                          std::random_access_iterator_tag, std::random_access_iterator_tag ) {
-    //  Cheap check
-        if ( std::distance ( first1, last1 ) != std::distance ( first2, last2 ))
-            return false;
-    //  Skip the common prefix (if any)
-        while ( first1 != last1 && first2 != last2 && p ( *first1, *first2 )) {
-            ++first1;
-            ++first2;
-            }
-
-        if ( first1 != last1 && first2 != last2 )
-            return is_permutation_inner (first1, last1, first2, last2, p);
-        return first1 == last1 && first2 == last2;
-        }
-
-}
-/// \endcond
-
-/// \fn is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, BinaryPredicate p )
-/// \brief Tests to see if the sequence [first1,last1) is a permutation of the sequence starting at first2
-///
-/// \param first1   The start of the input sequence
-/// \param last1    One past the end of the input sequence
-/// \param first2   The start of the second sequence
-/// \param p        The predicate to compare elements with
-///
-/// \note           This function is part of the C++2011 standard library.
-template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1,
-                      ForwardIterator2 first2, BinaryPredicate p )
-{
-//  Skip the common prefix (if any)
-    std::pair<ForwardIterator1, ForwardIterator2> eq = std::mismatch (first1, last1, first2, p);
-    first1 = eq.first;
-    first2 = eq.second;
-    if ( first1 != last1 ) {
-    //  Create last2
-        ForwardIterator2 last2 = first2;
-        std::advance ( last2, std::distance (first1, last1));
-        return boost::algorithm::detail::is_permutation_inner ( first1, last1, first2, last2, p );
-        }
-
-    return true;
-}
-
-/// \fn is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2 )
-/// \brief Tests to see if the sequence [first1,last1) is a permutation of the sequence starting at first2
-///
-/// \param first1   The start of the input sequence
-/// \param last1    One past the end of the input sequence
-/// \param first2   The start of the second sequence
-/// \note           This function is part of the C++2011 standard library.
-template< class ForwardIterator1, class ForwardIterator2 >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2 )
-{
-//  How should I deal with the idea that ForwardIterator1::value_type
-//  and ForwardIterator2::value_type could be different? Define my own comparison predicate?
-//  Skip the common prefix (if any)
-    std::pair<ForwardIterator1, ForwardIterator2> eq = std::mismatch (first1, last1, first2 );
-    first1 = eq.first;
-    first2 = eq.second;
-    if ( first1 != last1 ) {
-    //  Create last2
-        ForwardIterator2 last2 = first2;
-        std::advance ( last2, std::distance (first1, last1));
-        return boost::algorithm::detail::is_permutation_inner ( first1, last1, first2, last2,
-            std::equal_to<typename std::iterator_traits<ForwardIterator1>::value_type> ());
-        }
-    return true;
-}
-
-
-/// \fn is_permutation ( const Range &r, ForwardIterator first2 )
-/// \brief Tests to see if the range r is a permutation of the sequence starting at first2
-///
-/// \param r        The input range
-/// \param first2   The start of the second sequence
-template <typename Range, typename ForwardIterator>
-bool is_permutation ( const Range &r, ForwardIterator first2 )
-{
-    return boost::algorithm::is_permutation (boost::begin (r), boost::end (r), first2 );
-}
-
-/// \fn is_permutation ( const Range &r, ForwardIterator first2, BinaryPredicate pred )
-/// \brief Tests to see if the range r is a permutation of the sequence starting at first2
-///
-/// \param r        The input range
-/// \param first2   The start of the second sequence
-/// \param pred     The predicate to compare elements with
-///
-//  Disable this template when the first two parameters are the same type
-//  That way the non-range version will be chosen.
-template <typename Range, typename ForwardIterator, typename BinaryPredicate>
-typename boost::disable_if_c<boost::is_same<Range, ForwardIterator>::value, bool>::type
-is_permutation ( const Range &r, ForwardIterator first2, BinaryPredicate pred )
-{
-    return boost::algorithm::is_permutation (boost::begin (r), boost::end (r), first2, pred );
-}
-
-}}
-
-#endif  // BOOST_ALGORITHM_IS_PERMUTATION11_HPP
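A short sketch of how the iterator and range overloads above are typically called. The data is made up, and the include assumes a non-vendored Boost.Algorithm and C++11 or later.

#include <boost/algorithm/cxx11/is_permutation.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> a = {1, 2, 3, 4};
    std::vector<int> b = {4, 3, 2, 1};  // same elements, different order
    assert(boost::algorithm::is_permutation(a.begin(), a.end(), b.begin()));
    // Range + iterator overload from the same header.
    assert(boost::algorithm::is_permutation(a, b.begin()));
    return 0;
}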
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_sorted.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_sorted.hpp
deleted file mode 100644
index 2766211..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/is_sorted.hpp
+++ /dev/null
@@ -1,280 +0,0 @@
-//  Copyright (c) 2010 Nuovation System Designs, LLC
-//    Grant Erickson <gerickson@nuovations.com>
-//
-//  Reworked somewhat by Marshall Clow; August 2010
-//  
-//  Distributed under the Boost Software License, Version 1.0. (See
-//  accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-//
-//  See http://www.boost.org/ for latest version.
-//
-
-#ifndef BOOST_ALGORITHM_ORDERED_HPP
-#define BOOST_ALGORITHM_ORDERED_HPP
-
-#include <functional>
-#include <iterator>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_same.hpp>
-#include <boost/mpl/identity.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn is_sorted_until ( ForwardIterator first, ForwardIterator last, Pred p )
-/// \return the point in the sequence [first, last) where the elements are unordered
-///     (according to the comparison predicate 'p').
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-/// \param p     A binary predicate that returns true if two elements are ordered.
-///
-    template <typename ForwardIterator, typename Pred>
-    BOOST_CXX14_CONSTEXPR ForwardIterator is_sorted_until ( ForwardIterator first, ForwardIterator last, Pred p )
-    {
-        if ( first == last ) return last;  // the empty sequence is ordered
-        ForwardIterator next = first;
-        while ( ++next != last )
-        {
-            if ( p ( *next, *first ))
-                return next;
-            first = next;
-        }
-        return last;    
-    }
-
-/// \fn is_sorted_until ( ForwardIterator first, ForwardIterator last )
-/// \return the point in the sequence [first, last) where the elements are unordered
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR ForwardIterator is_sorted_until ( ForwardIterator first, ForwardIterator last )
-    {
-        typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-        return boost::algorithm::is_sorted_until ( first, last, std::less<value_type>());
-    }
-
-
-/// \fn is_sorted ( ForwardIterator first, ForwardIterator last, Pred p )
-/// \return whether or not the entire sequence is sorted
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-/// \param p     A binary predicate that returns true if two elements are ordered.
-///
-    template <typename ForwardIterator, typename Pred>
-    BOOST_CXX14_CONSTEXPR bool is_sorted ( ForwardIterator first, ForwardIterator last, Pred p )
-    {
-        return boost::algorithm::is_sorted_until (first, last, p) == last;
-    }
-
-/// \fn is_sorted ( ForwardIterator first, ForwardIterator last )
-/// \return whether or not the entire sequence is sorted
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR bool is_sorted ( ForwardIterator first, ForwardIterator last )
-    {
-        return boost::algorithm::is_sorted_until (first, last) == last;
-    }
-
-///
-/// -- Range based versions of the C++11 functions
-///
-
-/// \fn is_sorted_until ( const R &range, Pred p )
-/// \return the point in the range R where the elements are unordered
-///     (according to the comparison predicate 'p').
-/// 
-/// \param range The range to be tested.
-/// \param p     A binary predicate that returns true if two elements are ordered.
-///
-    template <typename R, typename Pred>
-    BOOST_CXX14_CONSTEXPR typename boost::lazy_disable_if_c<
-        boost::is_same<R, Pred>::value, 
-        typename boost::range_iterator<const R> 
-    >::type is_sorted_until ( const R &range, Pred p )
-    {
-        return boost::algorithm::is_sorted_until ( boost::begin ( range ), boost::end ( range ), p );
-    }
-
-
-/// \fn is_sorted_until ( const R &range )
-/// \return the point in the range R where the elements are unordered
-/// 
-/// \param range The range to be tested.
-///
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR typename boost::range_iterator<const R>::type is_sorted_until ( const R &range )
-    {
-        return boost::algorithm::is_sorted_until ( boost::begin ( range ), boost::end ( range ));
-    }
-
-/// \fn is_sorted ( const R &range, Pred p )
-/// \return whether or not the entire range R is sorted
-///     (according to the comparison predicate 'p').
-/// 
-/// \param range The range to be tested.
-/// \param p     A binary predicate that returns true if two elements are ordered.
-///
-    template <typename R, typename Pred>
-    BOOST_CXX14_CONSTEXPR typename boost::lazy_disable_if_c< boost::is_same<R, Pred>::value, boost::mpl::identity<bool> >::type
-    is_sorted ( const R &range, Pred p )
-    {
-        return boost::algorithm::is_sorted ( boost::begin ( range ), boost::end ( range ), p );
-    }
-
-
-/// \fn is_sorted ( const R &range )
-/// \return whether or not the entire range R is sorted
-/// 
-/// \param range The range to be tested.
-///
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR bool is_sorted ( const R &range )
-    {
-        return boost::algorithm::is_sorted ( boost::begin ( range ), boost::end ( range ));
-    }
-
-
-///
-/// -- Boost extensions: increasing / decreasing tests
-///
-
-/// \fn is_increasing ( ForwardIterator first, ForwardIterator last )
-/// \return true if the entire sequence is increasing; i.e., each item is greater than or
-///     equal to the previous one.
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-/// \note This function will return true for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_strictly_increasing instead.
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR bool is_increasing ( ForwardIterator first, ForwardIterator last )
-    {
-        typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-        return boost::algorithm::is_sorted (first, last, std::less<value_type>());
-    }
-
-
-/// \fn is_increasing ( const R &range )
-/// \return true if the entire sequence is increasing; i.e., each item is greater than or
-///     equal to the previous one.
-/// 
-/// \param range The range to be tested.
-///
-/// \note This function will return true for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_strictly_increasing instead.
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR bool is_increasing ( const R &range )
-    {
-        return is_increasing ( boost::begin ( range ), boost::end ( range ));
-    }
-
-
-
-/// \fn is_decreasing ( ForwardIterator first, ForwardIterator last )
-/// \return true if the entire sequence is decreasing; i.e., each item is less than
-///     or equal to the previous one.
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-/// \note This function will return true for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_strictly_decreasing instead.
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR bool is_decreasing ( ForwardIterator first, ForwardIterator last )
-    {
-        typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-        return boost::algorithm::is_sorted (first, last, std::greater<value_type>());
-    }
-
-/// \fn is_decreasing ( const R &range )
-/// \return true if the entire sequence is decreasing; i.e., each item is less than
-///     or equal to the previous one.
-/// 
-/// \param range The range to be tested.
-///
-/// \note This function will return true for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_strictly_decreasing instead.
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR bool is_decreasing ( const R &range )
-    {
-        return is_decreasing ( boost::begin ( range ), boost::end ( range ));
-    }
-
-
-
-/// \fn is_strictly_increasing ( ForwardIterator first, ForwardIterator last )
-/// \return true if the entire sequence is strictly increasing; i.e., each item is greater
-///     than the previous one
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-/// \note This function will return false for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_increasing instead.
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR bool is_strictly_increasing ( ForwardIterator first, ForwardIterator last )
-    {
-        typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-        return boost::algorithm::is_sorted (first, last, std::less_equal<value_type>());
-    }
-
-/// \fn is_strictly_increasing ( const R &range )
-/// \return true if the entire sequence is strictly increasing; i.e., each item is greater
-///     than the previous one
-/// 
-/// \param range The range to be tested.
-///
-/// \note This function will return false for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_increasing instead.
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR bool is_strictly_increasing ( const R &range )
-    {
-        return is_strictly_increasing ( boost::begin ( range ), boost::end ( range ));
-    }
-
-
-/// \fn is_strictly_decreasing ( ForwardIterator first, ForwardIterator last )
-/// \return true if the entire sequence is strictly decreasing; i.e., each item is less than
-///     the previous one
-/// 
-/// \param first The start of the sequence to be tested.
-/// \param last  One past the end of the sequence
-///
-/// \note This function will return false for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_decreasing instead.
-    template <typename ForwardIterator>
-    BOOST_CXX14_CONSTEXPR bool is_strictly_decreasing ( ForwardIterator first, ForwardIterator last )
-    {
-        typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-        return boost::algorithm::is_sorted (first, last, std::greater_equal<value_type>());
-    }
-
-/// \fn is_strictly_decreasing ( const R &range )
-/// \return true if the entire sequence is strictly decreasing; i.e., each item is less than
-///     the previous one
-/// 
-/// \param range The range to be tested.
-///
-/// \note This function will return false for sequences that contain items that compare
-///     equal. If that is not what you intended, you should use is_decreasing instead.
-    template <typename R>
-    BOOST_CXX14_CONSTEXPR bool is_strictly_decreasing ( const R &range )
-    {
-        return is_strictly_decreasing ( boost::begin ( range ), boost::end ( range ));
-    }
-
-}} // namespace boost
-
-#endif  // BOOST_ALGORITHM_ORDERED_HPP
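The header above bundles the C++11 is_sorted/is_sorted_until with Boost-only monotonicity tests; a minimal sketch of the difference, with made-up data and the include path assumed available:

#include <boost/algorithm/cxx11/is_sorted.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> v = {1, 2, 2, 3};
    assert(boost::algorithm::is_sorted(v));                // non-decreasing
    assert(boost::algorithm::is_increasing(v));            // allows equal neighbours
    assert(!boost::algorithm::is_strictly_increasing(v));  // the repeated 2 fails here

    std::vector<int> w = {1, 3, 2};
    // Points at the first out-of-order element (the 2).
    assert(boost::algorithm::is_sorted_until(w.begin(), w.end()) == w.begin() + 2);
    return 0;
}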
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/none_of.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/none_of.hpp
deleted file mode 100644
index e537c26..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/none_of.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  none_of.hpp
-/// \brief Test ranges to see if no elements match a value or predicate.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_NONE_OF_HPP
-#define BOOST_ALGORITHM_NONE_OF_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn none_of ( InputIterator first, InputIterator last, Predicate p )
-/// \return true if none of the elements in [first, last) satisfy the predicate 'p'
-/// \note returns true on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param p     A predicate for testing the elements of the sequence
-///
-template<typename InputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool none_of ( InputIterator first, InputIterator last, Predicate p )
-{
-    for ( ; first != last; ++first )
-        if ( p(*first)) 
-            return false;
-    return true;
-} 
-
-/// \fn none_of ( const Range &r, Predicate p )
-/// \return true if none of the elements in the range satisfy the predicate 'p'
-/// \note returns true on an empty range
-/// 
-/// \param r     The input range
-/// \param p     A predicate for testing the elements of the range
-///
-template<typename Range, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool none_of ( const Range &r, Predicate p )
-{
-    return boost::algorithm::none_of (boost::begin (r), boost::end (r), p );
-} 
-
-/// \fn none_of_equal ( InputIterator first, InputIterator last, const V &val )
-/// \return true if none of the elements in [first, last) are equal to 'val'
-/// \note returns true on an empty range
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param val   A value to compare against
-///
-template<typename InputIterator, typename V> 
-BOOST_CXX14_CONSTEXPR bool none_of_equal ( InputIterator first, InputIterator last, const V &val ) 
-{
-    for ( ; first != last; ++first )
-        if ( val == *first )
-            return false;
-    return true; 
-} 
-
-/// \fn none_of_equal ( const Range &r, const V &val )
-/// \return true if none of the elements in the range are equal to 'val'
-/// \note returns true on an empty range
-/// 
-/// \param r     The input range
-/// \param val   A value to compare against
-///
-template<typename Range, typename V> 
-BOOST_CXX14_CONSTEXPR bool none_of_equal ( const Range &r, const V & val ) 
-{
-    return boost::algorithm::none_of_equal (boost::begin (r), boost::end (r), val);
-} 
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_NONE_OF_HPP
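A minimal sketch of the two entry points above, with illustrative values (assumes the header remains available elsewhere):

#include <boost/algorithm/cxx11/none_of.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> v = {1, 3, 5, 7};
    assert(boost::algorithm::none_of(v, [](int i) { return i % 2 == 0; }));  // no even values
    assert(boost::algorithm::none_of_equal(v, 4));                           // 4 never occurs
    return 0;
}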
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/one_of.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/one_of.hpp
deleted file mode 100644
index 3b95180..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/one_of.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file one_of.hpp
-/// \brief Test ranges to see if only one element matches a value or predicate.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_ONE_OF_HPP
-#define BOOST_ALGORITHM_ONE_OF_HPP
-
-#include <boost/algorithm/cxx11/none_of.hpp>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn one_of ( InputIterator first, InputIterator last, Predicate p )
-/// \return true if the predicate 'p' is true for exactly one item in [first, last).
-/// 
-/// \param first The start of the input sequence
-/// \param last  One past the end of the input sequence
-/// \param p     A predicate for testing the elements of the sequence
-///
-template<typename InputIterator, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool one_of ( InputIterator first, InputIterator last, Predicate p )
-{
-//  find_if
-    for (; first != last; ++first)
-        if (p(*first))
-            break;
-
-    if (first == last)
-        return false;    // Didn't occur at all
-    return boost::algorithm::none_of (++first, last, p);
-}
-
-/// \fn one_of ( const Range &r, Predicate p )
-/// \return true if the predicate 'p' is true for exactly one item in the range.
-/// 
-/// \param r    The input range
-/// \param p    A predicate for testing the elements of the range
-///
-template<typename Range, typename Predicate> 
-BOOST_CXX14_CONSTEXPR bool one_of ( const Range &r, Predicate p )
-{
-    return boost::algorithm::one_of ( boost::begin (r), boost::end (r), p );
-}
-
-
-/// \fn one_of_equal ( InputIterator first, InputIterator last, const V &val )
-/// \return true if the value 'val' exists only once in [first, last).
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param val      A value to compare against
-///
-template<typename InputIterator, typename V> 
-BOOST_CXX14_CONSTEXPR bool one_of_equal ( InputIterator first, InputIterator last, const V &val )
-{
-//  find
-    for (; first != last; ++first)
-        if (*first == val)
-            break;
-
-    if (first == last)
-        return false;                    // Didn't occur at all
-    return boost::algorithm::none_of_equal (++first, last, val);
-}
-
-/// \fn one_of_equal ( const Range &r, const V &val )
-/// \return true if the value 'val' exists only once in the range.
-/// 
-/// \param r    The input range
-/// \param val  A value to compare against
-///
-template<typename Range, typename V> 
-BOOST_CXX14_CONSTEXPR bool one_of_equal ( const Range &r, const V &val )
-{
-    return boost::algorithm::one_of_equal ( boost::begin (r), boost::end (r), val );
-} 
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_ONE_OF_HPP
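one_of differs from any_of in requiring exactly one match; a small illustrative sketch (hypothetical data, non-vendored Boost assumed):

#include <boost/algorithm/cxx11/one_of.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> v = {1, 2, 3, 4};
    assert(boost::algorithm::one_of(v, [](int i) { return i > 3; }));  // only 4 matches
    assert(boost::algorithm::one_of_equal(v, 2));                      // 2 occurs exactly once
    assert(!boost::algorithm::one_of_equal(v, 5));                     // 5 never occurs
    return 0;
}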
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_copy.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_copy.hpp
deleted file mode 100644
index 635b1e7..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_copy.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  partition_copy.hpp
-/// \brief Copy a subset of a sequence to a new sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_PARTITION_COPY_HPP
-#define BOOST_ALGORITHM_PARTITION_COPY_HPP
-
-#include <utility>  // for std::pair
-
-#include <boost/config.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn partition_copy ( InputIterator first, InputIterator last,
-///     OutputIterator1 out_true, OutputIterator2 out_false, UnaryPredicate p )
-/// \brief Copies the elements that satisfy the predicate p from the range [first, last) 
-///     to the range beginning at out_true, and
-///     copies the elements that do not satisfy p to the range beginning at out_false.
-///
-/// 
-/// \param first     The start of the input sequence
-/// \param last      One past the end of the input sequence
-/// \param out_true  An output iterator to write the elements that satisfy the predicate into
-/// \param out_false An output iterator to write the elements that do not satisfy the predicate into
-/// \param p         A predicate for dividing the elements of the input sequence.
-///
-/// \note            This function is part of the C++2011 standard library.
-template <typename InputIterator, 
-        typename OutputIterator1, typename OutputIterator2, typename UnaryPredicate>
-BOOST_CXX14_CONSTEXPR std::pair<OutputIterator1, OutputIterator2>
-partition_copy ( InputIterator first, InputIterator last,
-        OutputIterator1 out_true, OutputIterator2 out_false, UnaryPredicate p )
-{
-    for ( ; first != last; ++first )
-        if ( p (*first))
-            *out_true++ = *first;
-        else
-            *out_false++ = *first;
-    return std::pair<OutputIterator1, OutputIterator2> ( out_true, out_false );
-}
-
-/// \fn partition_copy ( const Range &r, 
-///     OutputIterator1 out_true, OutputIterator2 out_false, UnaryPredicate p )
-/// 
-/// \param r         The input range
-/// \param out_true  An output iterator to write the elements that satisfy the predicate into
-/// \param out_false An output iterator to write the elements that do not satisfy the predicate into
-/// \param p         A predicate for dividing the elements of the input sequence.
-///
-template <typename Range, typename OutputIterator1, typename OutputIterator2, 
-            typename UnaryPredicate>
-BOOST_CXX14_CONSTEXPR std::pair<OutputIterator1, OutputIterator2>
-partition_copy ( const Range &r, OutputIterator1 out_true, OutputIterator2 out_false, 
-                                UnaryPredicate p )
-{
-    return boost::algorithm::partition_copy 
-                      (boost::begin(r), boost::end(r), out_true, out_false, p );
-}
-
-}} // namespace boost and algorithm
-
-#endif  // BOOST_ALGORITHM_PARTITION_COPY_HPP
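A sketch of the range overload above, splitting one sequence into two output sequences; the values and container choice are illustrative:

#include <boost/algorithm/cxx11/partition_copy.hpp>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> v = {1, 2, 3, 4, 5, 6};
    std::vector<int> evens, odds;
    boost::algorithm::partition_copy(v, std::back_inserter(evens), std::back_inserter(odds),
                                     [](int i) { return i % 2 == 0; });
    assert((evens == std::vector<int>{2, 4, 6}));
    assert((odds == std::vector<int>{1, 3, 5}));
    return 0;
}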
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_point.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_point.hpp
deleted file mode 100644
index 2c2767a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx11/partition_point.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  partition_point.hpp
-/// \brief Find the partition point in a sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_PARTITION_POINT_HPP
-#define BOOST_ALGORITHM_PARTITION_POINT_HPP
-
-#include <iterator>    // for std::distance, advance
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn partition_point ( ForwardIterator first, ForwardIterator last, Predicate p )
-/// \brief Given a partitioned range, returns the partition point, i.e., the first element
-///     that does not satisfy p
-/// 
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param p        The predicate to test the values with
-/// \note           This function is part of the C++2011 standard library.
-template <typename ForwardIterator, typename Predicate>
-ForwardIterator partition_point ( ForwardIterator first, ForwardIterator last, Predicate p )
-{
-    std::size_t dist = std::distance ( first, last );
-    while ( first != last ) {
-        std::size_t d2 = dist / 2;
-        ForwardIterator ret_val = first;
-        std::advance (ret_val, d2);
-        if (p (*ret_val)) {
-            first = ++ret_val;
-            dist -= d2 + 1;
-            }
-        else {
-            last = ret_val;
-            dist = d2;
-            }
-        }
-    return first;
-}
-
-/// \fn partition_point ( Range &r, Predicate p )
-/// \brief Given a partitioned range, returns the partition point
-/// 
-/// \param r        The input range
-/// \param p        The predicate to test the values with
-///
-template <typename Range, typename Predicate>
-typename boost::range_iterator<Range>::type partition_point ( Range &r, Predicate p )
-{
-    return boost::algorithm::partition_point (boost::begin(r), boost::end(r), p);
-}
-
-
-}}
-
-#endif  // BOOST_ALGORITHM_PARTITION_POINT_HPP
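A sketch of partition_point on an already-partitioned input (illustrative data; the input must satisfy the partitioning precondition for the binary search above to be meaningful):

#include <boost/algorithm/cxx11/partition_point.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> v = {2, 4, 6, 1, 3};  // already partitioned: evens first
    std::vector<int>::iterator it =
        boost::algorithm::partition_point(v, [](int i) { return i % 2 == 0; });
    assert(*it == 1);  // first element that does not satisfy the predicate
    return 0;
}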
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/equal.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/equal.hpp
deleted file mode 100644
index 526aae9..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/equal.hpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  equal.hpp
-/// \brief Test ranges to see if they are equal
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_EQUAL_HPP
-#define BOOST_ALGORITHM_EQUAL_HPP
-
-#include <iterator>
-
-namespace boost { namespace algorithm {
-
-namespace detail {
-
-    template <class T1, class T2>
-    struct eq {
-        BOOST_CONSTEXPR bool operator () ( const T1& v1, const T2& v2 ) const { return v1 == v2 ;}
-        };
-    
-    template <class RandomAccessIterator1, class RandomAccessIterator2, class BinaryPredicate>
-    BOOST_CXX14_CONSTEXPR
-    bool equal ( RandomAccessIterator1 first1, RandomAccessIterator1 last1, 
-                 RandomAccessIterator2 first2, RandomAccessIterator2 last2, BinaryPredicate pred,
-                 std::random_access_iterator_tag, std::random_access_iterator_tag )
-    {
-    //  Random-access iterators let us check the sizes in constant time
-        if ( std::distance ( first1, last1 ) != std::distance ( first2, last2 ))
-            return false;
-
-    //  std::equal
-        for (; first1 != last1; ++first1, ++first2)
-            if (!pred(*first1, *first2))
-                return false;
-        return true;
-    }
-
-    template <class InputIterator1, class InputIterator2, class BinaryPredicate>
-    BOOST_CXX14_CONSTEXPR
-    bool equal ( InputIterator1 first1, InputIterator1 last1, 
-                 InputIterator2 first2, InputIterator2 last2, BinaryPredicate pred,
-                 std::input_iterator_tag, std::input_iterator_tag )
-    {
-    for (; first1 != last1 && first2 != last2; ++first1, ++first2 )
-        if ( !pred(*first1, *first2 ))
-            return false;
-
-    return first1 == last1 && first2 == last2;
-    }
-}
-
-/// \fn equal ( InputIterator1 first1, InputIterator1 last1, 
-///             InputIterator2 first2, InputIterator2 last2,
-///             BinaryPredicate pred )
-/// \return true if all elements in the two ranges are equal
-/// 
-/// \param first1    The start of the first range.
-/// \param last1     One past the end of the first range.
-/// \param first2    The start of the second range.
-/// \param last2     One past the end of the second range.
-/// \param pred      A predicate for comparing the elements of the ranges
-template <class InputIterator1, class InputIterator2, class BinaryPredicate>
-BOOST_CXX14_CONSTEXPR
-bool equal ( InputIterator1 first1, InputIterator1 last1, 
-             InputIterator2 first2, InputIterator2 last2, BinaryPredicate pred )
-{
-    return boost::algorithm::detail::equal ( 
-        first1, last1, first2, last2, pred,
-        typename std::iterator_traits<InputIterator1>::iterator_category (),
-        typename std::iterator_traits<InputIterator2>::iterator_category ());
-}
-
-/// \fn equal ( InputIterator1 first1, InputIterator1 last1, 
-///             InputIterator2 first2, InputIterator2 last2 )
-/// \return true if all elements in the two ranges are equal
-/// 
-/// \param first1    The start of the first range.
-/// \param last1     One past the end of the first range.
-/// \param first2    The start of the second range.
-/// \param last2     One past the end of the second range.
-template <class InputIterator1, class InputIterator2>
-BOOST_CXX14_CONSTEXPR
-bool equal ( InputIterator1 first1, InputIterator1 last1, 
-             InputIterator2 first2, InputIterator2 last2 )
-{
-    return boost::algorithm::detail::equal (
-        first1, last1, first2, last2,
-        boost::algorithm::detail::eq<
-            typename std::iterator_traits<InputIterator1>::value_type,
-            typename std::iterator_traits<InputIterator2>::value_type> (),
-        typename std::iterator_traits<InputIterator1>::iterator_category (),
-        typename std::iterator_traits<InputIterator2>::iterator_category ());
-}
-
-//  There are already range-based versions of these.
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_EQUAL_HPP
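The four-iterator form above also rejects ranges of different length, unlike the classic three-iterator std::equal; a small sketch with made-up data:

#include <boost/algorithm/cxx14/equal.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> a = {1, 2, 3};
    std::vector<int> b = {1, 2, 3};
    std::vector<int> c = {1, 2};
    assert(boost::algorithm::equal(a.begin(), a.end(), b.begin(), b.end()));
    assert(!boost::algorithm::equal(a.begin(), a.end(), c.begin(), c.end()));  // length mismatch
    return 0;
}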
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/is_permutation.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/is_permutation.hpp
deleted file mode 100644
index 639446b..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/is_permutation.hpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2014.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  is_permutation.hpp
-/// \brief Is a sequence a permutation of another sequence (four iterator versions)
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_IS_PERMUTATION14_HPP
-#define BOOST_ALGORITHM_IS_PERMUTATION14_HPP
-
-#include <utility>      // for std::pair
-#include <functional>   // for std::equal_to
-#include <iterator>
-
-#include <boost/algorithm/cxx11/is_permutation.hpp>
-#include <boost/algorithm/cxx14/mismatch.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn is_permutation ( ForwardIterator1 first, ForwardIterator1 last, 
-///                      ForwardIterator2 first2, ForwardIterator2 last2 )
-/// \brief Tests to see if the sequence [first,last) is a permutation of the sequence starting at first2
-///
-/// \param first1   The start of the input sequence
-/// \param last1    One past the end of the input sequence
-/// \param first2   The start of the second sequence
-/// \param last2    One past the end of the second sequence
-/// \note           This function is part of the C++2014 standard library.
-template< class ForwardIterator1, class ForwardIterator2 >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1, 
-                      ForwardIterator2 first2, ForwardIterator2 last2 )
-{
-//  How should I deal with the idea that ForwardIterator1::value_type
-//  and ForwardIterator2::value_type could be different? Define my own comparison predicate?
-    std::pair<ForwardIterator1, ForwardIterator2> eq = boost::algorithm::mismatch
-        ( first1, last1, first2, last2 );
-    if ( eq.first == last1 && eq.second == last2)
-        return true;
-    return boost::algorithm::detail::is_permutation_tag (
-        eq.first, last1, eq.second, last2, 
-        std::equal_to<typename std::iterator_traits<ForwardIterator1>::value_type> (),
-        typename std::iterator_traits<ForwardIterator1>::iterator_category (),
-        typename std::iterator_traits<ForwardIterator2>::iterator_category ());
-}
-
-/// \fn is_permutation ( ForwardIterator1 first, ForwardIterator1 last, 
-///                      ForwardIterator2 first2, ForwardIterator2 last2, 
-///                      BinaryPredicate p )
-/// \brief Tests to see if the sequence [first,last) is a permutation of the sequence starting at first2
-///
-/// \param first1   The start of the input sequence
-/// \param last1    One past the end of the input sequence
-/// \param first2   The start of the second sequence
-/// \param last2    One past the end of the second sequence
-/// \param pred     The predicate to compare elements with
-///
-/// \note           This function is part of the C++2014 standard library.
-template< class ForwardIterator1, class ForwardIterator2, class BinaryPredicate >
-bool is_permutation ( ForwardIterator1 first1, ForwardIterator1 last1,
-                      ForwardIterator2 first2, ForwardIterator2 last2, 
-                      BinaryPredicate pred )
-{
-    std::pair<ForwardIterator1, ForwardIterator2> eq = boost::algorithm::mismatch
-        ( first1, last1, first2, last2, pred );
-    if ( eq.first == last1 && eq.second == last2)
-        return true;
-    return boost::algorithm::detail::is_permutation_tag (
-        first1, last1, first2, last2, pred, 
-        typename std::iterator_traits<ForwardIterator1>::iterator_category (),
-        typename std::iterator_traits<ForwardIterator2>::iterator_category ());
-}
-
-}}
-
-#endif  // BOOST_ALGORITHM_IS_PERMUTATION14_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/mismatch.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/mismatch.hpp
deleted file mode 100644
index 4601719..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx14/mismatch.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  mismatch.hpp
-/// \brief Find the first mismatched element in a sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_MISMATCH_HPP
-#define BOOST_ALGORITHM_MISMATCH_HPP
-
-#include <utility>      // for std::pair
-#include <boost/config.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn mismatch ( InputIterator1 first1, InputIterator1 last1, 
-///                InputIterator2 first2, InputIterator2 last2,
-///                BinaryPredicate pred )
-/// \return a pair of iterators pointing to the first elements in the sequence that do not match
-/// 
-/// \param first1    The start of the first range.
-/// \param last1     One past the end of the first range.
-/// \param first2    The start of the second range.
-/// \param last2     One past the end of the second range.
-/// \param pred      A predicate for comparing the elements of the ranges
-template <class InputIterator1, class InputIterator2, class BinaryPredicate>
-BOOST_CXX14_CONSTEXPR std::pair<InputIterator1, InputIterator2> mismatch (
-                    InputIterator1 first1, InputIterator1 last1,
-                    InputIterator2 first2, InputIterator2 last2,
-                    BinaryPredicate pred )
-{
-    for (; first1 != last1 && first2 != last2; ++first1, ++first2)
-        if ( !pred ( *first1, *first2 ))
-            break;
-    return std::pair<InputIterator1, InputIterator2>(first1, first2);
-}
-
-/// \fn mismatch ( InputIterator1 first1, InputIterator1 last1, 
-///                InputIterator2 first2, InputIterator2 last2 )
-/// \return a pair of iterators pointing to the first elements in the sequence that do not match
-/// 
-/// \param first1    The start of the first range.
-/// \param last1     One past the end of the first range.
-/// \param first2    The start of the second range.
-/// \param last2     One past the end of the second range.
-template <class InputIterator1, class InputIterator2>
-BOOST_CXX14_CONSTEXPR std::pair<InputIterator1, InputIterator2> mismatch (
-                    InputIterator1 first1, InputIterator1 last1,
-                    InputIterator2 first2, InputIterator2 last2 )
-{
-    for (; first1 != last1 && first2 != last2; ++first1, ++first2)
-        if ( *first1 != *first2 )
-            break;
-    return std::pair<InputIterator1, InputIterator2>(first1, first2);
-}
-
-//  There are already range-based versions of these.
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_MISMATCH_HPP
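A sketch of the four-iterator mismatch above, with illustrative values:

#include <boost/algorithm/cxx14/mismatch.hpp>
#include <cassert>
#include <utility>
#include <vector>

int main() {
    std::vector<int> a = {1, 2, 3, 4};
    std::vector<int> b = {1, 2, 9, 4};
    std::pair<std::vector<int>::iterator, std::vector<int>::iterator> p =
        boost::algorithm::mismatch(a.begin(), a.end(), b.begin(), b.end());
    assert(*p.first == 3 && *p.second == 9);  // first position where the ranges differ
    return 0;
}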
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/exclusive_scan.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/exclusive_scan.hpp
deleted file mode 100644
index e4ec112..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/exclusive_scan.hpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  exclusive_scan.hpp
-/// \brief Compute an exclusive prefix scan of a sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_EXCLUSIVE_SCAN_HPP
-#define BOOST_ALGORITHM_EXCLUSIVE_SCAN_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator, class OutputIterator, class T, class BinaryOperation>
-OutputIterator exclusive_scan(InputIterator first, InputIterator last,
-                              OutputIterator result, T init, BinaryOperation bOp)
-{
-    if (first != last)
-    {
-        T saved = init;
-        do
-        {
-            init = bOp(init, *first);
-            *result = saved;
-            saved = init;
-            ++result;
-        } while (++first != last);
-    }
-    return result;
-}
-
-template<class InputIterator, class OutputIterator, class T>
-OutputIterator exclusive_scan(InputIterator first, InputIterator last,
-                              OutputIterator result, T init)
-{
-	typedef typename std::iterator_traits<InputIterator>::value_type VT;
-    return boost::algorithm::exclusive_scan(first, last, result, init, std::plus<VT>());
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_EXCLUSIVE_SCAN_HPP
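exclusive_scan writes, for each position, the combination of everything before it, starting from init; a minimal sketch with illustrative numbers:

#include <boost/algorithm/cxx17/exclusive_scan.hpp>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> in = {1, 2, 3, 4};
    std::vector<int> out;
    // Each output element is the sum of all *preceding* inputs plus the init value.
    boost::algorithm::exclusive_scan(in.begin(), in.end(), std::back_inserter(out), 0);
    assert((out == std::vector<int>{0, 1, 3, 6}));
    return 0;
}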
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/for_each_n.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/for_each_n.hpp
deleted file mode 100644
index 71f6cde..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/for_each_n.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  for_each_n.hpp
-/// \brief Apply a functor to the elements of a sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_FOR_EACH_N_HPP
-#define BOOST_ALGORITHM_FOR_EACH_N_HPP
-
-#include <utility>      // for std::pair
-
-namespace boost { namespace algorithm {
-
-/// \fn for_each_n(InputIterator first, Size n, Function f);
-/// \return first + n
-///
-/// \param first    The start of the first range.
-/// \param n        The number of elements to apply the functor to.
-/// \param f        A functor to apply to the elements of the sequence
-/// \note           If f returns a result, the result is ignored.
-template<class InputIterator, class Size, class Function>
-InputIterator for_each_n(InputIterator first, Size n, Function f)
-{
-    for ( ; n > 0; --n, ++first )
-        f(*first);
-
-    return first;
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_FOR_EACH_N_HPP
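A sketch of for_each_n, which visits a count of elements rather than running to an end iterator (values illustrative):

#include <boost/algorithm/cxx17/for_each_n.hpp>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> v = {1, 2, 3, 4, 5};
    int sum = 0;
    // Apply the functor to the first three elements only.
    boost::algorithm::for_each_n(v.begin(), 3, [&sum](int i) { sum += i; });
    assert(sum == 6);
    return 0;
}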
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/inclusive_scan.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/inclusive_scan.hpp
deleted file mode 100644
index 5c60c39..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/inclusive_scan.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  inclusive_scan.hpp
-/// \brief Compute an inclusive prefix scan of a sequence.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_INCLUSIVE_SCAN_HPP
-#define BOOST_ALGORITHM_INCLUSIVE_SCAN_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator, class OutputIterator, class T, class BinaryOperation>
-OutputIterator inclusive_scan(InputIterator first, InputIterator last,
-                              OutputIterator result, BinaryOperation bOp, T init)
-{
-    for (; first != last; ++first, (void) ++result) {
-        init = bOp(init, *first);
-        *result = init;
-        }
-    return result;
-}
-
-
-template<class InputIterator, class OutputIterator, class BinaryOperation>
-OutputIterator inclusive_scan(InputIterator first, InputIterator last,
-                              OutputIterator result, BinaryOperation bOp)
-{
-    if (first != last) {
-        typename std::iterator_traits<InputIterator>::value_type init = *first;
-        *result++ = init;
-        if (++first != last)
-            return boost::algorithm::inclusive_scan(first, last, result, bOp, init);
-        }
-
-    return result;
-}
-
-template<class InputIterator, class OutputIterator>
-OutputIterator inclusive_scan(InputIterator first, InputIterator last,
-                   OutputIterator result)
-{
-    typedef typename std::iterator_traits<InputIterator>::value_type VT;
-    return boost::algorithm::inclusive_scan(first, last, result, std::plus<VT>());
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_INCLUSIVE_SCAN_HPP
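By contrast with exclusive_scan, inclusive_scan folds the current element into each output; an illustrative sketch:

#include <boost/algorithm/cxx17/inclusive_scan.hpp>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> in = {1, 2, 3, 4};
    std::vector<int> out;
    // Each output element is the running total *including* the current input.
    boost::algorithm::inclusive_scan(in.begin(), in.end(), std::back_inserter(out));
    assert((out == std::vector<int>{1, 3, 6, 10}));
    return 0;
}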
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/reduce.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/reduce.hpp
deleted file mode 100644
index 55424b6..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/reduce.hpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  reduce.hpp
-/// \brief Combine the elements of a sequence into a single value
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_REDUCE_HPP
-#define BOOST_ALGORITHM_REDUCE_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator, class T, class BinaryOperation>
-T reduce(InputIterator first, InputIterator last, T init, BinaryOperation bOp)
-{
-    for (; first != last; ++first)
-        init = bOp(init, *first);
-    return init;
-}
-
-template<class InputIterator, class T>
-T reduce(InputIterator first, InputIterator last, T init)
-{
-	typedef typename std::iterator_traits<InputIterator>::value_type VT;
-    return boost::algorithm::reduce(first, last, init, std::plus<VT>());
-}
-
-template<class InputIterator>
-typename std::iterator_traits<InputIterator>::value_type
-reduce(InputIterator first, InputIterator last)
-{
-    return boost::algorithm::reduce(first, last,
-       typename std::iterator_traits<InputIterator>::value_type());
-}
-
-template<class Range>
-typename boost::range_value<Range>::type
-reduce(const Range &r)
-{
-    return boost::algorithm::reduce(boost::begin(r), boost::end(r));
-}
-
-//	Not sure that this won't be ambiguous (1)
-template<class Range, class T>
-T reduce(const Range &r, T init)
-{
-    return boost::algorithm::reduce(boost::begin (r), boost::end (r), init);
-}
-
-
-//	Not sure that this won't be ambiguous (2)
-template<class Range, class T, class BinaryOperation>
-T reduce(const Range &r, T init, BinaryOperation bOp)
-{
-    return boost::algorithm::reduce(boost::begin(r), boost::end(r), init, bOp);
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_REDUCE_HPP
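A sketch of the reduce overloads above; the default form folds with operator+ from a value-initialized accumulator (data and operations are illustrative):

#include <boost/algorithm/cxx17/reduce.hpp>
#include <cassert>
#include <functional>
#include <vector>

int main() {
    std::vector<int> v = {1, 2, 3, 4};
    assert(boost::algorithm::reduce(v) == 10);  // 0 + 1 + 2 + 3 + 4
    assert(boost::algorithm::reduce(v.begin(), v.end(), 1, std::multiplies<int>()) == 24);
    return 0;
}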
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_exclusive_scan.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_exclusive_scan.hpp
deleted file mode 100644
index dd3c9c8..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_exclusive_scan.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  transform_exclusive_scan.hpp
-/// \brief Compute an exclusive prefix scan of the transformed elements of a sequence
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_TRANSFORM_EXCLUSIVE_SCAN_HPP
-#define BOOST_ALGORITHM_TRANSFORM_EXCLUSIVE_SCAN_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator, class OutputIterator, class T,
-         class BinaryOperation, class UnaryOperation>
-OutputIterator transform_exclusive_scan(InputIterator first, InputIterator last,
-                                        OutputIterator result, T init,
-                                        BinaryOperation bOp, UnaryOperation uOp)
-{
-    if (first != last)
-    {
-        T saved = init;
-        do
-        {
-            init = bOp(init, uOp(*first));
-            *result = saved;
-            saved = init;
-            ++result;
-        } while (++first != last);
-    }
-    return result;
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_TRANSFORM_EXCLUSIVE_SCAN_HPP
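A sketch of transform_exclusive_scan: apply the unary op, then form the exclusive running combination (illustrative values and lambda):

#include <boost/algorithm/cxx17/transform_exclusive_scan.hpp>
#include <cassert>
#include <functional>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> in = {1, 2, 3};
    std::vector<int> out;
    // Square each element, then take the exclusive running sum of the squares.
    boost::algorithm::transform_exclusive_scan(in.begin(), in.end(), std::back_inserter(out), 0,
                                               std::plus<int>(), [](int i) { return i * i; });
    assert((out == std::vector<int>{0, 1, 5}));
    return 0;
}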
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_inclusive_scan.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_inclusive_scan.hpp
deleted file mode 100644
index 1d11976..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_inclusive_scan.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  transform_inclusive_scan.hpp
-/// \brief Compute an inclusive prefix scan of the transformed elements of a sequence.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_TRANSFORM_INCLUSIVE_SCAN_HPP
-#define BOOST_ALGORITHM_TRANSFORM_INCLUSIVE_SCAN_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator, class OutputIterator,
-         class BinaryOperation, class UnaryOperation, class T>
-OutputIterator transform_inclusive_scan(InputIterator first, InputIterator last,
-                                        OutputIterator result,
-                                        BinaryOperation bOp, UnaryOperation uOp,
-                                        T init)
-{
-    for (; first != last; ++first, (void) ++result) {
-        init = bOp(init, uOp(*first));
-        *result = init;
-        }
-
-    return result;
-}
-
-template<class InputIterator, class OutputIterator,
-         class BinaryOperation, class UnaryOperation>
-OutputIterator transform_inclusive_scan(InputIterator first, InputIterator last,
-                                        OutputIterator result,
-                                        BinaryOperation bOp, UnaryOperation uOp)
-{
-    if (first != last) {
-        typename std::iterator_traits<InputIterator>::value_type init = uOp(*first);
-        *result++ = init;
-        if (++first != last)
-            return boost::algorithm::transform_inclusive_scan
-                                              (first, last, result, bOp, uOp, init);
-        }
-
-    return result;
-}
-
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_TRANSFORM_INCLUSIVE_SCAN_HPP
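The inclusive counterpart, sketched with the same illustrative squaring step:

#include <boost/algorithm/cxx17/transform_inclusive_scan.hpp>
#include <cassert>
#include <functional>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> in = {1, 2, 3};
    std::vector<int> out;
    // Square each element, then take the inclusive running sum of the squares.
    boost::algorithm::transform_inclusive_scan(in.begin(), in.end(), std::back_inserter(out),
                                               std::plus<int>(), [](int i) { return i * i; });
    assert((out == std::vector<int>{1, 5, 14}));
    return 0;
}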
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_reduce.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_reduce.hpp
deleted file mode 100644
index 8696384..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/cxx17/transform_reduce.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  transform_reduce.hpp
-/// \brief Combine the (transformed) elements of a sequence (or two) into a single value.
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_TRANSFORM_REDUCE_HPP
-#define BOOST_ALGORITHM_TRANSFORM_REDUCE_HPP
-
-#include <functional>     // for std::plus
-#include <iterator>       // for std::iterator_traits
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-namespace boost { namespace algorithm {
-
-template<class InputIterator1, class InputIterator2, class T,
-         class BinaryOperation1, class BinaryOperation2>
-T transform_reduce(InputIterator1 first1, InputIterator1 last1,
-                   InputIterator2 first2, T init,
-                 BinaryOperation1 bOp1, BinaryOperation2 bOp2)
-{
-    for (; first1 != last1; ++first1, (void) ++first2)
-        init = bOp1(init, bOp2(*first1, *first2));
-    return init;
-}
-
-template<class InputIterator, class T,
-         class BinaryOperation, class UnaryOperation>
-T transform_reduce(InputIterator first, InputIterator last,
-                   T init, BinaryOperation bOp, UnaryOperation uOp)
-{
-    for (; first != last; ++first)
-        init = bOp(init, uOp(*first));
-    return init;
-}
-
-template<class InputIterator1, class InputIterator2, class T>
-T transform_reduce(InputIterator1 first1, InputIterator1 last1,
-                   InputIterator2 first2, T init)
-{
-    return boost::algorithm::transform_reduce(first1, last1, first2, init,
-                            std::plus<T>(), std::multiplies<T>());
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_TRANSFORM_REDUCE_HPP
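A sketch of the two transform_reduce shapes above: the two-range default is an inner product, the one-range form maps then folds (values illustrative):

#include <boost/algorithm/cxx17/transform_reduce.hpp>
#include <cassert>
#include <functional>
#include <vector>

int main() {
    std::vector<int> a = {1, 2, 3};
    std::vector<int> b = {4, 5, 6};
    // Inner product: 1*4 + 2*5 + 3*6.
    assert(boost::algorithm::transform_reduce(a.begin(), a.end(), b.begin(), 0) == 32);
    // Map-then-fold: sum of squares of a single range.
    assert(boost::algorithm::transform_reduce(a.begin(), a.end(), 0, std::plus<int>(),
                                              [](int i) { return i * i; }) == 14);
    return 0;
}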
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/find_backward.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/find_backward.hpp
deleted file mode 100644
index 66901a1..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/find_backward.hpp
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-   Copyright (c) T. Zachary Laine 2018.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-#ifndef BOOST_ALGORITHM_FIND_BACKWARD_HPP
-#define BOOST_ALGORITHM_FIND_BACKWARD_HPP
-
-#include <boost/config.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <utility>
-
-
-namespace boost { namespace algorithm {
-
-template<typename BidiIter, typename T>
-BOOST_CXX14_CONSTEXPR
-BidiIter find_backward(BidiIter first, BidiIter last, const T & x)
-{
-    BidiIter it = last;
-    while (it != first) {
-        if (*--it == x)
-            return it;
-    }
-    return last;
-}
-
-template<typename Range, typename T>
-BOOST_CXX14_CONSTEXPR
-typename boost::range_iterator<Range>::type find_backward(Range & range, const T & x)
-{
-    return ::boost::algorithm::find_backward(boost::begin(range), boost::end(range), x);
-}
-
-template<typename BidiIter, typename T>
-BOOST_CXX14_CONSTEXPR
-BidiIter find_not_backward(BidiIter first, BidiIter last, const T & x)
-{
-    BidiIter it = last;
-    while (it != first) {
-        if (*--it != x)
-            return it;
-    }
-    return last;
-}
-
-template<typename Range, typename T>
-BOOST_CXX14_CONSTEXPR
-typename boost::range_iterator<Range>::type find_not_backward(Range & range, const T & x)
-{
-    return ::boost::algorithm::find_not_backward(boost::begin(range), boost::end(range), x);
-}
-
-template<typename BidiIter, typename Pred>
-BOOST_CXX14_CONSTEXPR
-BidiIter find_if_backward(BidiIter first, BidiIter last, Pred p)
-{
-    BidiIter it = last;
-    while (it != first) {
-        if (p(*--it))
-            return it;
-    }
-    return last;
-}
-
-template<typename Range, typename Pred>
-BOOST_CXX14_CONSTEXPR
-typename boost::range_iterator<Range>::type find_if_backward(Range & range, Pred p)
-{
-    return ::boost::algorithm::find_if_backward(boost::begin(range), boost::end(range), p);
-}
-
-template<typename BidiIter, typename Pred>
-BOOST_CXX14_CONSTEXPR
-BidiIter find_if_not_backward(BidiIter first, BidiIter last, Pred p)
-{
-    BidiIter it = last;
-    while (it != first) {
-        if (!p(*--it))
-            return it;
-    }
-    return last;
-}
-
-template<typename Range, typename Pred>
-BOOST_CXX14_CONSTEXPR
-typename boost::range_iterator<Range>::type find_if_not_backward(Range & range, Pred p)
-{
-    return ::boost::algorithm::find_if_not_backward(boost::begin(range), boost::end(range), p);
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_FIND_BACKWARD_HPP
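
Note for reviewers: a minimal usage sketch of the removed find_backward / find_if_backward functions, assuming an upstream Boost release that still ships <boost/algorithm/find_backward.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/find_backward.hpp>
    #include <vector>

    int main() {
        std::vector<int> v{1, 2, 3, 2, 1};
        // Last occurrence of 2 (index 3); returns the end iterator when absent.
        auto last_two = boost::algorithm::find_backward(v.begin(), v.end(), 2);
        // Last element greater than 1 (also the 2 at index 3).
        auto last_big = boost::algorithm::find_if_backward(
            v.begin(), v.end(), [](int x) { return x > 1; });
        return (last_two == v.begin() + 3 && last_big == v.begin() + 3) ? 0 : 1;
    }
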
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/find_not.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/find_not.hpp
deleted file mode 100644
index ef4df00..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/find_not.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-   Copyright (c) T. Zachary Laine 2018.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-#ifndef BOOST_ALGORITHM_FIND_NOT_HPP
-#define BOOST_ALGORITHM_FIND_NOT_HPP
-
-#include <boost/config.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <utility>
-
-
-namespace boost { namespace algorithm {
-
-template<typename InputIter, typename Sentinel, typename T>        
-BOOST_CXX14_CONSTEXPR
-InputIter find_not(InputIter first, Sentinel last, const T & x)
-{
-    for (; first != last; ++first) {
-        if (*first != x)
-            break;
-    }
-    return first;
-}
-
-template<typename Range, typename T>
-BOOST_CXX14_CONSTEXPR
-typename boost::range_iterator<Range>::type find_not(Range & r, const T & x)
-{
-    return ::boost::algorithm::find_not(boost::begin(r), boost::end(r), x);
-}
-
-}} // namespace boost and algorithm
-
-#endif // BOOST_ALGORITHM_FIND_NOT_HPP
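
Note for reviewers: a minimal usage sketch of the removed find_not, assuming an upstream Boost release that still ships <boost/algorithm/find_not.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/find_not.hpp>
    #include <vector>

    int main() {
        std::vector<int> v{7, 7, 7, 3, 7};
        // First element that differs from 7 (the 3 at index 3); end() if all match.
        auto it = boost::algorithm::find_not(v.begin(), v.end(), 7);
        return it == v.begin() + 3 ? 0 : 1;
    }
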
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/gather.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/gather.hpp
deleted file mode 100644
index 944bc94..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/gather.hpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/* 
-    Copyright 2008 Adobe Systems Incorporated
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
- Revision history:
-   January 2008 mtc Version for Adobe Source Library
-   January 2013 mtc Version for Boost.Algorithm
-
-*/
-
-/**************************************************************************************************/
-
-/*!
-\author Marshall Clow
-\date    January 2008
-*/
-
-#ifndef BOOST_ALGORITHM_GATHER_HPP
-#define BOOST_ALGORITHM_GATHER_HPP
-
-#include <algorithm>                // for std::stable_partition
-#include <functional>
-
-#include <boost/bind.hpp>           // for boost::bind
-#include <boost/range/begin.hpp>    // for boost::begin(range)
-#include <boost/range/end.hpp>      // for boost::end(range)
-
-
-/**************************************************************************************************/
-/*!
-    \defgroup gather gather
-    \ingroup mutating_algorithm
-
-    \c gather() takes a collection of elements defined by a pair of iterators and moves
-    the ones satisfying a predicate to a position (called the pivot) within
-    the sequence. The algorithm is stable. The result is a pair of iterators that
-    contains the items that satisfy the predicate.
-
-    Given a sequence containing:
-    <pre>
-    0 1 2 3 4 5 6 7 8 9
-    </pre>
-
-    a call to gather ( arr, arr + 10, arr + 4, IsEven ()) will result in:
-
-    <pre>
-    1 3 0 2 4 6 8 5 7 9
-        |---|-----|
-      first |  second
-          pivot
-    </pre>
-
-
-    The problem is broken down into two basic steps, namely, moving the items before the pivot
-    and then moving the items from the pivot to the end. These "moves" are done with calls to
-    stable_partition.
-
-    \par Storage Requirements:
-
-    The algorithm uses stable_partition, which will attempt to allocate temporary memory,
-    but will work in-situ if there is none available.
-
-    \par Time Complexity:
-
-    If there is sufficient memory available, the run time is linear in <code>N</code>.
-    If there is not any memory available, then the run time is <code>O(N log N)</code>.
-*/
-
-/**************************************************************************************************/
-
-namespace boost { namespace algorithm {
-
-/**************************************************************************************************/
-
-/*!
-    \ingroup gather
-    \brief iterator-based gather implementation
-*/
-
-template <
-    typename BidirectionalIterator,  // Iter models BidirectionalIterator
-    typename Pred>                   // Pred models UnaryPredicate
-std::pair<BidirectionalIterator, BidirectionalIterator> gather 
-        ( BidirectionalIterator first, BidirectionalIterator last, BidirectionalIterator pivot, Pred pred )
-{
-//  The first call partitions everything up to (but not including) the pivot element,
-//  while the second call partitions the rest of the sequence.
-    return std::make_pair (
-        std::stable_partition ( first, pivot, !boost::bind<bool> ( pred, _1 )),
-        std::stable_partition ( pivot, last,   boost::bind<bool> ( pred, _1 )));
-}
-
-/**************************************************************************************************/
-
-/*!
-    \ingroup gather
-    \brief range-based gather implementation
-*/
-
-template <
-    typename BidirectionalRange,    // Range models BidirectionalRange
-    typename Pred>                  // Pred models UnaryPredicate
-std::pair<
-    typename boost::range_iterator<const BidirectionalRange>::type,
-    typename boost::range_iterator<const BidirectionalRange>::type>
-gather (
-    const BidirectionalRange &range,
-    typename boost::range_iterator<const BidirectionalRange>::type pivot,
-    Pred pred )
-{
-    return boost::algorithm::gather ( boost::begin ( range ), boost::end ( range ), pivot, pred );
-}
-
-/**************************************************************************************************/
-
-}}  // namespace
-
-/**************************************************************************************************/
-
-#endif
-
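
Note for reviewers: a minimal usage sketch of the removed gather(), assuming an upstream Boost release that still ships <boost/algorithm/gather.hpp>. The IsEven functor is only for illustration, mirroring the worked example in the header's documentation; this is not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/gather.hpp>
    #include <utility>
    #include <vector>

    struct IsEven {
        bool operator()(int x) const { return x % 2 == 0; }
    };

    int main() {
        std::vector<int> v{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
        // Stably moves the even elements so they are contiguous around the pivot
        // v.begin() + 4; the returned pair brackets the gathered elements.
        std::pair<std::vector<int>::iterator, std::vector<int>::iterator> r =
            boost::algorithm::gather(v.begin(), v.end(), v.begin() + 4, IsEven());
        return static_cast<int>(r.second - r.first);  // 5 even elements gathered
    }
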
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/hex.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/hex.hpp
deleted file mode 100644
index b833584..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/hex.hpp
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-   Thanks to Nevin for his comments/help.
-*/
-
-/*
-    General problem - turn a sequence of integral types into a sequence of hexadecimal characters.
-    - and back.
-*/
-
-/// \file  hex.hpp
-/// \brief Convert sequence of integral types into a sequence of hexadecimal
-///     characters and back. Based on the MySQL functions HEX and UNHEX
-/// \author Marshall Clow
-
-#ifndef BOOST_ALGORITHM_HEXHPP
-#define BOOST_ALGORITHM_HEXHPP
-
-#include <iterator>     // for std::iterator_traits
-#include <stdexcept>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/exception/exception.hpp>
-#include <boost/exception/info.hpp>
-#include <boost/throw_exception.hpp>
-
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_integral.hpp>
-
-
-namespace boost { namespace algorithm {
-
-/*!
-    \struct hex_decode_error
-    \brief  Base exception class for all hex decoding errors
-*/ /*!
-    \struct non_hex_input
-    \brief  Thrown when a non-hex value (0-9, A-F) is encountered when decoding.
-                Contains the offending character
-*/ /*!
-    \struct not_enough_input
-    \brief  Thrown when the input sequence unexpectedly ends
-
-*/
-struct hex_decode_error : virtual boost::exception, virtual std::exception {};
-struct not_enough_input : virtual hex_decode_error {};
-struct non_hex_input    : virtual hex_decode_error {};
-typedef boost::error_info<struct bad_char_,char> bad_char;
-
-namespace detail {
-/// \cond DOXYGEN_HIDE
-
-    template <typename T, typename OutputIterator>
-    OutputIterator encode_one ( T val, OutputIterator out, const char * hexDigits ) {
-        const std::size_t num_hex_digits =  2 * sizeof ( T );
-        char res [ num_hex_digits ];
-        char  *p = res + num_hex_digits;
-        for ( std::size_t i = 0; i < num_hex_digits; ++i, val >>= 4 )
-            *--p = hexDigits [ val & 0x0F ];
-        return std::copy ( res, res + num_hex_digits, out );
-        }
-
-    template <typename T>
-    unsigned char hex_char_to_int ( T val ) {
-        char c = static_cast<char> ( val );
-        unsigned retval = 0;
-        if      ( c >= '0' && c <= '9' ) retval = c - '0';
-        else if ( c >= 'A' && c <= 'F' ) retval = c - 'A' + 10;
-        else if ( c >= 'a' && c <= 'f' ) retval = c - 'a' + 10;
-        else BOOST_THROW_EXCEPTION (non_hex_input() << bad_char (c));
-        return static_cast<unsigned char>(retval);
-        }
-
-//  My own iterator_traits class.
-//  It is here so that I can "reach inside" some kinds of output iterators
-//      and get the type to write.
-    template <typename Iterator>
-    struct hex_iterator_traits {
-        typedef typename std::iterator_traits<Iterator>::value_type value_type;
-    };
-
-    template<typename Container>
-    struct hex_iterator_traits< std::back_insert_iterator<Container> > {
-        typedef typename Container::value_type value_type;
-    };
-
-    template<typename Container>
-    struct hex_iterator_traits< std::front_insert_iterator<Container> > {
-        typedef typename Container::value_type value_type;
-    };
-
-    template<typename Container>
-    struct hex_iterator_traits< std::insert_iterator<Container> > {
-        typedef typename Container::value_type value_type;
-    };
-
-//  ostream_iterators have three template parameters.
-//  The first one is the output type, the second one is the character type of
-//  the underlying stream, the third is the character traits.
-//      We only care about the first one.
-    template<typename T, typename charType, typename traits>
-    struct hex_iterator_traits< std::ostream_iterator<T, charType, traits> > {
-        typedef T value_type;
-    };
-
-    template <typename Iterator>
-    bool iter_end ( Iterator current, Iterator last ) { return current == last; }
-
-    template <typename T>
-    bool ptr_end ( const T* ptr, const T* /*end*/ ) { return *ptr == '\0'; }
-
-//  What can we assume here about the inputs?
-//      is std::iterator_traits<InputIterator>::value_type always 'char' ?
-//  Could it be wchar_t, say? Does it matter?
-//      We are assuming ASCII for the values - but what about the storage?
-    template <typename InputIterator, typename OutputIterator, typename EndPred>
-    typename boost::enable_if<boost::is_integral<typename hex_iterator_traits<OutputIterator>::value_type>, OutputIterator>::type
-    decode_one ( InputIterator &first, InputIterator last, OutputIterator out, EndPred pred ) {
-        typedef typename hex_iterator_traits<OutputIterator>::value_type T;
-        T res (0);
-
-    //  Need to make sure that we can read that many chars here.
-        for ( std::size_t i = 0; i < 2 * sizeof ( T ); ++i, ++first ) {
-            if ( pred ( first, last ))
-                BOOST_THROW_EXCEPTION (not_enough_input ());
-            res = ( 16 * res ) + hex_char_to_int (*first);
-            }
-
-        *out = res;
-        return ++out;
-        }
-/// \endcond
-    }
-
-
-/// \fn hex ( InputIterator first, InputIterator last, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a hexadecimal sequence of characters.
-///
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename InputIterator, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<typename detail::hex_iterator_traits<InputIterator>::value_type>, OutputIterator>::type
-hex ( InputIterator first, InputIterator last, OutputIterator out ) {
-    for ( ; first != last; ++first )
-        out = detail::encode_one ( *first, out, "0123456789ABCDEF" );
-    return out;
-    }
-
-
-/// \fn hex_lower ( InputIterator first, InputIterator last, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a lower case hexadecimal sequence of characters.
-///
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename InputIterator, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<typename detail::hex_iterator_traits<InputIterator>::value_type>, OutputIterator>::type
-hex_lower ( InputIterator first, InputIterator last, OutputIterator out ) {
-    for ( ; first != last; ++first )
-        out = detail::encode_one ( *first, out, "0123456789abcdef" );
-    return out;
-    }
-
-
-/// \fn hex ( const T *ptr, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a hexadecimal sequence of characters.
-///
-/// \param ptr      A pointer to a 0-terminated sequence of data.
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename T, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<T>, OutputIterator>::type
-hex ( const T *ptr, OutputIterator out ) {
-    while ( *ptr )
-        out = detail::encode_one ( *ptr++, out, "0123456789ABCDEF" );
-    return out;
-    }
-
-
-/// \fn hex_lower ( const T *ptr, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a lower case hexadecimal sequence of characters.
-///
-/// \param ptr      A pointer to a 0-terminated sequence of data.
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename T, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<T>, OutputIterator>::type
-hex_lower ( const T *ptr, OutputIterator out ) {
-    while ( *ptr )
-        out = detail::encode_one ( *ptr++, out, "0123456789abcdef" );
-    return out;
-    }
-
-
-/// \fn hex ( const Range &r, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a hexadecimal sequence of characters.
-///
-/// \param r        The input range
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename Range, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<typename detail::hex_iterator_traits<typename Range::iterator>::value_type>, OutputIterator>::type
-hex ( const Range &r, OutputIterator out ) {
-    return hex (boost::begin(r), boost::end(r), out);
-}
-
-
-/// \fn hex_lower ( const Range &r, OutputIterator out )
-/// \brief   Converts a sequence of integral types into a lower case hexadecimal sequence of characters.
-///
-/// \param r        The input range
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename Range, typename OutputIterator>
-typename boost::enable_if<boost::is_integral<typename detail::hex_iterator_traits<typename Range::iterator>::value_type>, OutputIterator>::type
-hex_lower ( const Range &r, OutputIterator out ) {
-    return hex_lower (boost::begin(r), boost::end(r), out);
-}
-
-
-/// \fn unhex ( InputIterator first, InputIterator last, OutputIterator out )
-/// \brief   Converts a sequence of hexadecimal characters into a sequence of integers.
-///
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename InputIterator, typename OutputIterator>
-OutputIterator unhex ( InputIterator first, InputIterator last, OutputIterator out ) {
-    while ( first != last )
-        out = detail::decode_one ( first, last, out, detail::iter_end<InputIterator> );
-    return out;
-    }
-
-
-/// \fn unhex ( const T *ptr, OutputIterator out )
-/// \brief   Converts a sequence of hexadecimal characters into a sequence of integers.
-///
-/// \param ptr      A pointer to a null-terminated input sequence.
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename T, typename OutputIterator>
-OutputIterator unhex ( const T *ptr, OutputIterator out ) {
-//  If we run into the terminator while decoding, we will throw a
-//      malformed input exception. It would be nicer to throw a 'Not enough input'
-//      exception - but how much extra work would that require?
-    while ( *ptr )
-        out = detail::decode_one ( ptr, (const T *) NULL, out, detail::ptr_end<T> );
-    return out;
-    }
-
-
-/// \fn OutputIterator unhex ( const Range &r, OutputIterator out )
-/// \brief   Converts a sequence of hexadecimal characters into a sequence of integers.
-///
-/// \param r        The input range
-/// \param out      An output iterator to write the results into
-/// \return         The updated output iterator
-/// \note           Based on the MySQL function of the same name
-template <typename Range, typename OutputIterator>
-OutputIterator unhex ( const Range &r, OutputIterator out ) {
-    return unhex (boost::begin(r), boost::end(r), out);
-    }
-
-
-/// \fn String hex ( const String &input )
-/// \brief   Converts a sequence of integral types into a hexadecimal sequence of characters.
-///
-/// \param input    A container to be converted
-/// \return         A container with the encoded text
-template<typename String>
-String hex ( const String &input ) {
-    String output;
-    output.reserve (input.size () * (2 * sizeof (typename String::value_type)));
-    (void) hex (input, std::back_inserter (output));
-    return output;
-    }
-
-
-/// \fn String hex_lower ( const String &input )
-/// \brief   Converts a sequence of integral types into a lower case hexadecimal sequence of characters.
-///
-/// \param input    A container to be converted
-/// \return         A container with the encoded text
-template<typename String>
-String hex_lower ( const String &input ) {
-    String output;
-    output.reserve (input.size () * (2 * sizeof (typename String::value_type)));
-    (void) hex_lower (input, std::back_inserter (output));
-    return output;
-    }
-
-
-/// \fn String unhex ( const String &input )
-/// \brief   Converts a sequence of hexadecimal characters into a sequence of characters.
-///
-/// \param input    A container to be converted
-/// \return         A container with the decoded text
-template<typename String>
-String unhex ( const String &input ) {
-    String output;
-    output.reserve (input.size () / (2 * sizeof (typename String::value_type)));
-    (void) unhex (input, std::back_inserter (output));
-    return output;
-    }
-
-}}
-
-#endif // BOOST_ALGORITHM_HEXHPP
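
Note for reviewers: a minimal usage sketch of the removed hex / hex_lower / unhex functions, assuming an upstream Boost release that still ships <boost/algorithm/hex.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/hex.hpp>
    #include <iostream>
    #include <string>

    int main() {
        std::string s = "abc";
        std::string upper = boost::algorithm::hex(s);        // "616263"
        std::string lower = boost::algorithm::hex_lower(s);  // "616263"
        std::string round = boost::algorithm::unhex(upper);  // "abc"
        // unhex throws non_hex_input / not_enough_input on malformed data.
        std::cout << upper << ' ' << lower << ' ' << round << '\n';
        return round == s ? 0 : 1;
    }
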
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/is_palindrome.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/is_palindrome.hpp
deleted file mode 100644
index 0988110..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/is_palindrome.hpp
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
-  Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.com>, 2016
-
-  Distributed under the Boost Software License, Version 1.0. (See
-  accompanying file LICENSE_1_0.txt or copy at
-  http://www.boost.org/LICENSE_1_0.txt)
-
-  See http://www.boost.org/ for latest version.
-*/
-
-/// \file  is_palindrome.hpp
-/// \brief Checks whether the input sequence is a palindrome.
-/// \author Alexander Zaitsev
-
-#ifndef BOOST_ALGORITHM_IS_PALINDROME_HPP
-#define BOOST_ALGORITHM_IS_PALINDROME_HPP
-
-#include <iterator>
-#include <functional>
-#include <cstring>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost {  namespace algorithm {
-
-/// \fn is_palindrome ( BidirectionalIterator begin, BidirectionalIterator end, Predicate p )
-/// \return true if the entire sequence is palindrome
-///
-/// \param begin    The start of the input sequence
-/// \param end		One past the end of the input sequence
-/// \param p        A predicate used to compare the values.
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-template <typename BidirectionalIterator, typename Predicate>
-bool is_palindrome(BidirectionalIterator begin, BidirectionalIterator end, Predicate p)
-{
-    if(begin == end)
-    {
-        return true;
-    }
-
-    --end;
-    while(begin != end)
-    {
-        if(!p(*begin, *end))
-        {
-            return false;
-        }
-        ++begin;
-        if(begin == end)
-        {
-            break;
-        }
-        --end;
-    }
-    return true;
-}
-
-/// \fn is_palindrome ( BidirectionalIterator begin, BidirectionalIterator end )
-/// \return true if the entire sequence is palindrome
-///
-/// \param begin    The start of the input sequence
-/// \param end	    One past the end of the input sequence
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-template <typename BidirectionalIterator>
-bool is_palindrome(BidirectionalIterator begin, BidirectionalIterator end)
-{
-    return is_palindrome(begin, end,
-                         std::equal_to<typename std::iterator_traits<BidirectionalIterator>::value_type> ());
-}
-
-/// \fn is_palindrome ( const R& range )
-/// \return true if the entire sequence is palindrome
-///
-/// \param range The range to be tested.
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-template <typename R>
-bool is_palindrome(const R& range)
-{
-    return is_palindrome(boost::begin(range), boost::end(range));
-}
-
-/// \fn is_palindrome ( const R& range, Predicate p )
-/// \return true if the entire sequence is palindrome
-///
-/// \param range The range to be tested.
-/// \param p     A predicate used to compare the values.
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-template <typename R, typename Predicate>
-bool is_palindrome(const R& range, Predicate p)
-{
-    return is_palindrome(boost::begin(range), boost::end(range), p);
-}
-
-/// \fn is_palindrome ( const char* str )
-/// \return true if the entire sequence is palindrome
-///
-/// \param str C-string to be tested.
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-bool is_palindrome(const char* str)
-{
-    if(!str)
-	    return true;
-    return is_palindrome(str, str + strlen(str));
-}
-
-/// \fn is_palindrome ( const char* str, Predicate p )
-/// \return true if the entire sequence is palindrome
-///
-/// \param str C-string to be tested.
-/// \param p   A predicate used to compare the values.
-///
-/// \note This function will return true for empty sequences and for palindromes.
-///     For other sequences the function will return false.
-///     Complexity: O(N).
-template<typename Predicate>
-bool is_palindrome(const char* str, Predicate p)
-{
-    if(!str)
-	    return true;
-    return is_palindrome(str, str + strlen(str), p);
-}
-}}
-
-#endif // BOOST_ALGORITHM_IS_PALINDROME_HPP
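
Note for reviewers: a minimal usage sketch of the removed is_palindrome overloads, assuming an upstream Boost release that still ships <boost/algorithm/is_palindrome.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/is_palindrome.hpp>
    #include <cctype>
    #include <iostream>
    #include <string>

    int main() {
        std::string word = "racecar";
        const char* cword = "noon";
        std::cout << boost::algorithm::is_palindrome(word) << '\n';   // 1 (range overload)
        std::cout << boost::algorithm::is_palindrome(cword) << '\n';  // 1 (C-string overload)
        // Case-insensitive comparison via the predicate overload.
        std::string mixed = "Level";
        std::cout << boost::algorithm::is_palindrome(
                         mixed,
                         [](char a, char b) { return std::tolower(a) == std::tolower(b); })
                  << '\n';  // 1
        return 0;
    }
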
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/is_partitioned_until.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/is_partitioned_until.hpp
deleted file mode 100644
index 42683e1..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/is_partitioned_until.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-   Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.by>, 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-*/
-
-/// \file  is_partitioned_until.hpp
-/// \brief Tell if a sequence is partitioned
-/// \author Alexander Zaitsev
-
-#ifndef BOOST_ALGORITHM_IS_PARTITIONED_UNTIL_HPP
-#define BOOST_ALGORITHM_IS_PARTITIONED_UNTIL_HPP
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn is_partitioned_until ( InputIterator first, InputIterator last, UnaryPredicate p )
-/// \brief Tests to see if a sequence is partitioned according to a predicate. 
-///	   In other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-///
-/// \param first    The start of the input sequence
-/// \param last     One past the end of the input sequence
-/// \param p        The predicate to test the values with
-///
-/// \note Returns an iterator to the first element in [first, last) that breaks the partitioning, i.e. an element satisfying p that appears after one that does not.
-///     Returns last if the entire sequence is partitioned.
-///     Complexity: O(N).
-template <typename InputIterator, typename UnaryPredicate>
-InputIterator is_partitioned_until ( InputIterator first, InputIterator last, UnaryPredicate p )
-{
-//  Run through the part that satisfies the predicate
-    for ( ; first != last; ++first )
-        if ( !p (*first))
-            break;
-//  Now the part that does not satisfy the predicate
-    for ( ; first != last; ++first )
-        if ( p (*first))
-            return first;
-    return last;
-}
-
-/// \fn is_partitioned_until ( const Range &r, UnaryPredicate p )
-/// \brief Tests to see if a sequence is partitioned according to a predicate. 
-///	   In other words, all the items in the sequence that satisfy the predicate are at the beginning of the sequence.
-///
-/// \param r        The input range
-/// \param p        The predicate to test the values with
-///
-/// \note Returns an iterator to the first element that breaks the partitioning, i.e. an element satisfying p that appears after one that does not.
-///     Returns last if the entire sequence is partitioned.
-///     Complexity: O(N).
-template <typename Range, typename UnaryPredicate>
-typename boost::range_iterator<const Range>::type is_partitioned_until ( const Range &r, UnaryPredicate p )
-{
-    return boost::algorithm::is_partitioned_until (boost::begin(r), boost::end(r), p);
-}
-
-}}
-
-#endif  // BOOST_ALGORITHM_IS_PARTITIONED_UNTIL_HPP
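
Note for reviewers: a minimal usage sketch of the removed is_partitioned_until, assuming an upstream Boost release that still ships <boost/algorithm/is_partitioned_until.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/is_partitioned_until.hpp>
    #include <vector>

    int main() {
        std::vector<int> ok{2, 4, 6, 1, 3};  // evens first: partitioned
        std::vector<int> bad{2, 1, 4, 3};    // the 4 follows a 1: not partitioned
        auto is_even = [](int x) { return x % 2 == 0; };
        // Returns end() for a partitioned range, otherwise the offending element.
        bool partitioned = boost::algorithm::is_partitioned_until(ok, is_even) == ok.end();
        auto breaker = boost::algorithm::is_partitioned_until(bad, is_even);
        return (partitioned && *breaker == 4) ? 0 : 1;
    }
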
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/minmax.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/minmax.hpp
deleted file mode 100644
index 053a7d6..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/minmax.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-/*
- Revision history:
-   1 July 2004
-      Split the code into two headers to lessen dependence on
-      Boost.tuple. (Herve)
-   26 June 2004
-      Added the code for the boost minmax library. (Herve)
-*/
-
-#ifndef BOOST_ALGORITHM_MINMAX_HPP
-#define BOOST_ALGORITHM_MINMAX_HPP
-
-/* PROPOSED STANDARD EXTENSIONS:
- *
- * minmax(a, b)
- * Effect: (b<a) ? std::make_pair(b,a) : std::make_pair(a,b);
- *
- * minmax(a, b, comp)
- * Effect: comp(b,a) ? std::make_pair(b,a) : std::make_pair(a,b);
- *
- */
-
-#include <boost/tuple/tuple.hpp> // for using pairs with boost::cref
-#include <boost/ref.hpp>
-
-namespace boost {
-
-  template <typename T>
-  tuple< T const&, T const& >
-  minmax(T const& a, T const& b) {
-    return (b<a) ? make_tuple(cref(b),cref(a)) : make_tuple(cref(a),cref(b));
-  }
-
-  template <typename T, class BinaryPredicate>
-  tuple< T const&, T const& >
-  minmax(T const& a, T const& b, BinaryPredicate comp) {
-    return comp(b,a) ? make_tuple(cref(b),cref(a)) : make_tuple(cref(a),cref(b));
-  }
-
-} // namespace boost
-
-#endif // BOOST_ALGORITHM_MINMAX_HPP
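
Note for reviewers: a minimal usage sketch of the removed boost::minmax, assuming an upstream Boost release that still ships <boost/algorithm/minmax.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/minmax.hpp>
    #include <iostream>

    int main() {
        int a = 7, b = 3;
        // Returns a boost::tuple of const references ordered as (min, max).
        boost::tuple<const int&, const int&> mm = boost::minmax(a, b);
        std::cout << mm.get<0>() << ' ' << mm.get<1>() << '\n';  // "3 7"
        // A comparator variant is also provided: boost::minmax(a, b, comp).
        return 0;
    }
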
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/minmax_element.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/minmax_element.hpp
deleted file mode 100644
index 752f6cb..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/minmax_element.hpp
+++ /dev/null
@@ -1,553 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-/*
- Revision history:
-   1 July 2004
-      Split the code into two headers to lessen dependence on
-      Boost.tuple. (Herve)
-   26 June 2004
-      Added the code for the boost minmax library. (Herve)
-*/
-
-#ifndef BOOST_ALGORITHM_MINMAX_ELEMENT_HPP
-#define BOOST_ALGORITHM_MINMAX_ELEMENT_HPP
-
-/* PROPOSED STANDARD EXTENSIONS:
- *
- * minmax_element(first, last)
- * Effect: std::make_pair( std::min_element(first, last),
- *                         std::max_element(first, last) );
- *
- * minmax_element(first, last, comp)
- * Effect: std::make_pair( std::min_element(first, last, comp),
- *                         std::max_element(first, last, comp) );
- */
-
-#include <utility> // for std::pair and std::make_pair
-
-namespace boost {
-
-  namespace detail {  // for obtaining a uniform version of minmax_element
-    // that compiles with VC++ 6.0 -- avoid the iterator_traits by
-    // having comparison object over iterator, not over dereferenced value
-
-    template <typename Iterator>
-    struct less_over_iter {
-      bool operator()(Iterator const& it1,
-                      Iterator const& it2) const { return *it1 < *it2; }
-    };
-
-    template <typename Iterator, class BinaryPredicate>
-    struct binary_pred_over_iter {
-      explicit binary_pred_over_iter(BinaryPredicate const& p ) : m_p( p ) {}
-      bool operator()(Iterator const& it1,
-                      Iterator const& it2) const { return m_p(*it1, *it2); }
-    private:
-      BinaryPredicate m_p;
-    };
-
-    // common base for the two minmax_element overloads
-
-    template <typename ForwardIter, class Compare >
-    std::pair<ForwardIter,ForwardIter>
-    basic_minmax_element(ForwardIter first, ForwardIter last, Compare comp)
-    {
-      if (first == last)
-        return std::make_pair(last,last);
-
-      ForwardIter min_result = first;
-      ForwardIter max_result = first;
-
-      // if only one element
-      ForwardIter second = first; ++second;
-      if (second == last)
-        return std::make_pair(min_result, max_result);
-
-      // treat first pair separately (only one comparison for first two elements)
-      ForwardIter potential_min_result = last;
-      if (comp(first, second))
-        max_result = second;
-      else {
-        min_result = second;
-        potential_min_result = first;
-      }
-
-      // then each element by pairs, with at most 3 comparisons per pair
-      first = ++second; if (first != last) ++second;
-      while (second != last) {
-        if (comp(first, second)) {
-          if (comp(first, min_result)) {
-            min_result = first;
-            potential_min_result = last;
-          }
-          if (comp(max_result, second))
-            max_result = second;
-        } else {
-          if (comp(second, min_result)) {
-            min_result = second;
-            potential_min_result = first;
-          }
-          if (comp(max_result, first))
-            max_result = first;
-        }
-        first = ++second;
-        if (first != last) ++second;
-      }
-
-      // if odd number of elements, treat last element
-      if (first != last) { // odd number of elements
-        if (comp(first, min_result)) {
-          min_result = first;
-          potential_min_result = last;
-          }
-        else if (comp(max_result, first))
-          max_result = first;
-      }
-
-      // resolve min_result being incorrect with one extra comparison
-      // (in which case potential_min_result is necessarily the correct result)
-      if (potential_min_result != last
-        && !comp(min_result, potential_min_result))
-        min_result = potential_min_result;
-
-      return std::make_pair(min_result,max_result);
-    }
-
-  } // namespace detail
-
-  template <typename ForwardIter>
-  std::pair<ForwardIter,ForwardIter>
-  minmax_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_minmax_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  std::pair<ForwardIter,ForwardIter>
-  minmax_element(ForwardIter first, ForwardIter last, BinaryPredicate comp)
-  {
-    return detail::basic_minmax_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-}
-
-/* PROPOSED BOOST EXTENSIONS
- * In the description below, [rfirst,rlast) denotes the reversed range
- * of [first,last). Even though the iterator type of first and last may
- * be only a Forward Iterator, it is possible to explain the semantics
- * by assuming that it is a Bidirectional Iterator. In the sequel,
- * reverse(ForwardIterator&) returns the reverse_iterator adaptor.
- * This is not how the functions would be implemented!
- *
- * first_min_element(first, last)
- * Effect: std::min_element(first, last);
- *
- * first_min_element(first, last, comp)
- * Effect: std::min_element(first, last, comp);
- *
- * last_min_element(first, last)
- * Effect: reverse( std::min_element(reverse(last), reverse(first)) );
- *
- * last_min_element(first, last, comp)
- * Effect: reverse( std::min_element(reverse(last), reverse(first), comp) );
- *
- * first_max_element(first, last)
- * Effect: std::max_element(first, last);
- *
- * first_max_element(first, last, comp)
 * Effect: std::max_element(first, last, comp);
- *
- * last_max_element(first, last)
- * Effect: reverse( std::max_element(reverse(last), reverse(first)) );
- *
- * last_max_element(first, last, comp)
- * Effect: reverse( std::max_element(reverse(last), reverse(first), comp) );
- *
- * first_min_first_max_element(first, last)
- * Effect: std::make_pair( first_min_element(first, last),
- *                         first_max_element(first, last) );
- *
- * first_min_first_max_element(first, last, comp)
- * Effect: std::make_pair( first_min_element(first, last, comp),
- *                         first_max_element(first, last, comp) );
- *
- * first_min_last_max_element(first, last)
- * Effect: std::make_pair( first_min_element(first, last),
- *                         last_max_element(first, last) );
- *
- * first_min_last_max_element(first, last, comp)
- * Effect: std::make_pair( first_min_element(first, last, comp),
- *                         last_max_element(first, last, comp) );
- *
- * last_min_first_max_element(first, last)
- * Effect: std::make_pair( last_min_element(first, last),
- *                         first_max_element(first, last) );
- *
- * last_min_first_max_element(first, last, comp)
- * Effect: std::make_pair( last_min_element(first, last, comp),
- *                         first_max_element(first, last, comp) );
- *
- * last_min_last_max_element(first, last)
- * Effect: std::make_pair( last_min_element(first, last),
- *                         last_max_element(first, last) );
- *
- * last_min_last_max_element(first, last, comp)
- * Effect: std::make_pair( last_min_element(first, last, comp),
- *                         last_max_element(first, last, comp) );
- */
-
-namespace boost {
-
-  // Min_element and max_element variants
-
-  namespace detail {  // common base for the overloads
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  basic_first_min_element(ForwardIter first, ForwardIter last,
-                          BinaryPredicate comp)
-  {
-    if (first == last) return last;
-    ForwardIter min_result = first;
-    while (++first != last)
-      if (comp(first, min_result))
-        min_result = first;
-    return min_result;
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  basic_last_min_element(ForwardIter first, ForwardIter last,
-                         BinaryPredicate comp)
-  {
-    if (first == last) return last;
-    ForwardIter min_result = first;
-    while (++first != last)
-      if (!comp(min_result, first))
-        min_result = first;
-    return min_result;
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  basic_first_max_element(ForwardIter first, ForwardIter last,
-                          BinaryPredicate comp)
-  {
-    if (first == last) return last;
-    ForwardIter max_result = first;
-    while (++first != last)
-      if (comp(max_result, first))
-        max_result = first;
-    return max_result;
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  basic_last_max_element(ForwardIter first, ForwardIter last,
-                         BinaryPredicate comp)
-  {
-    if (first == last) return last;
-    ForwardIter max_result = first;
-    while (++first != last)
-      if (!comp(first, max_result))
-        max_result = first;
-    return max_result;
-  }
-
-  } // namespace detail
-
-  template <typename ForwardIter>
-  ForwardIter
-  first_min_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_first_min_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  first_min_element(ForwardIter first, ForwardIter last, BinaryPredicate comp)
-  {
-    return detail::basic_first_min_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-  template <typename ForwardIter>
-  ForwardIter
-  last_min_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_last_min_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  last_min_element(ForwardIter first, ForwardIter last, BinaryPredicate comp)
-  {
-    return detail::basic_last_min_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-  template <typename ForwardIter>
-  ForwardIter
-  first_max_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_first_max_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  first_max_element(ForwardIter first, ForwardIter last, BinaryPredicate comp)
-  {
-    return detail::basic_first_max_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-  template <typename ForwardIter>
-  ForwardIter
-  last_max_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_last_max_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  ForwardIter
-  last_max_element(ForwardIter first, ForwardIter last, BinaryPredicate comp)
-  {
-    return detail::basic_last_max_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-
-  // Minmax_element variants -- comments removed
-
-  namespace detail {
-
-  template <typename ForwardIter, class BinaryPredicate>
-  std::pair<ForwardIter,ForwardIter>
-  basic_first_min_last_max_element(ForwardIter first, ForwardIter last,
-                                   BinaryPredicate comp)
-  {
-    if (first == last)
-      return std::make_pair(last,last);
-
-    ForwardIter min_result = first;
-    ForwardIter max_result = first;
-
-    ForwardIter second = ++first;
-    if (second == last)
-      return std::make_pair(min_result, max_result);
-
-    if (comp(second, min_result))
-      min_result = second;
-    else
-      max_result = second;
-
-    first = ++second; if (first != last) ++second;
-    while (second != last) {
-      if (!comp(second, first)) {
-        if (comp(first, min_result))
-          min_result = first;
-        if (!comp(second, max_result))
-          max_result = second;
-      } else {
-        if (comp(second, min_result))
-          min_result = second;
-        if (!comp(first, max_result))
-          max_result = first;
-      }
-      first = ++second; if (first != last) ++second;
-    }
-
-    if (first != last) {
-      if (comp(first, min_result))
-        min_result = first;
-      else if (!comp(first, max_result))
-        max_result = first;
-    }
-
-    return std::make_pair(min_result, max_result);
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  std::pair<ForwardIter,ForwardIter>
-  basic_last_min_first_max_element(ForwardIter first, ForwardIter last,
-                                   BinaryPredicate comp)
-  {
-    if (first == last) return std::make_pair(last,last);
-
-    ForwardIter min_result = first;
-    ForwardIter max_result = first;
-
-    ForwardIter second = ++first;
-    if (second == last)
-      return std::make_pair(min_result, max_result);
-
-    if (comp(max_result, second))
-      max_result = second;
-    else
-      min_result = second;
-
-    first = ++second; if (first != last) ++second;
-    while (second != last)  {
-      if (comp(first, second)) {
-        if (!comp(min_result, first))
-          min_result = first;
-        if (comp(max_result, second))
-          max_result = second;
-      } else {
-        if (!comp(min_result, second))
-          min_result = second;
-        if (comp(max_result, first))
-          max_result = first;
-      }
-      first = ++second; if (first != last) ++second;
-    }
-
-    if (first != last) {
-      if (!comp(min_result, first))
-        min_result = first;
-      else if (comp(max_result, first))
-        max_result = first;
-    }
-
-    return std::make_pair(min_result, max_result);
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  std::pair<ForwardIter,ForwardIter>
-  basic_last_min_last_max_element(ForwardIter first, ForwardIter last,
-                                  BinaryPredicate comp)
-  {
-    if (first == last) return std::make_pair(last,last);
-
-    ForwardIter min_result = first;
-    ForwardIter max_result = first;
-
-    ForwardIter second = first; ++second;
-    if (second == last)
-      return std::make_pair(min_result,max_result);
-
-    ForwardIter potential_max_result = last;
-    if (comp(first, second))
-      max_result = second;
-    else {
-      min_result = second;
-      potential_max_result = second;
-    }
-
-    first = ++second; if (first != last) ++second;
-    while (second != last) {
-      if (comp(first, second)) {
-        if (!comp(min_result, first))
-          min_result = first;
-        if (!comp(second, max_result)) {
-          max_result = second;
-          potential_max_result = last;
-        }
-      } else {
-        if (!comp(min_result, second))
-          min_result = second;
-        if (!comp(first, max_result)) {
-          max_result = first;
-          potential_max_result = second;
-        }
-      }
-      first = ++second;
-      if (first != last) ++second;
-    }
-
-    if (first != last) {
-      if (!comp(min_result, first))
-        min_result = first;
-      if (!comp(first, max_result)) {
-        max_result = first;
-        potential_max_result = last;
-      }
-    }
-
-    if (potential_max_result != last
-        && !comp(potential_max_result, max_result))
-      max_result = potential_max_result;
-
-    return std::make_pair(min_result,max_result);
-  }
-
-  } // namespace detail
-
-  template <typename ForwardIter>
-  inline std::pair<ForwardIter,ForwardIter>
-  first_min_first_max_element(ForwardIter first, ForwardIter last)
-  {
-    return minmax_element(first, last);
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  inline std::pair<ForwardIter,ForwardIter>
-  first_min_first_max_element(ForwardIter first, ForwardIter last,
-                              BinaryPredicate comp)
-  {
-    return minmax_element(first, last, comp);
-  }
-
-  template <typename ForwardIter>
-  std::pair<ForwardIter,ForwardIter>
-  first_min_last_max_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_first_min_last_max_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  inline std::pair<ForwardIter,ForwardIter>
-  first_min_last_max_element(ForwardIter first, ForwardIter last,
-                              BinaryPredicate comp)
-  {
-    return detail::basic_first_min_last_max_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-  template <typename ForwardIter>
-  std::pair<ForwardIter,ForwardIter>
-  last_min_first_max_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_last_min_first_max_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  inline std::pair<ForwardIter,ForwardIter>
-  last_min_first_max_element(ForwardIter first, ForwardIter last,
-                              BinaryPredicate comp)
-  {
-    return detail::basic_last_min_first_max_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-  template <typename ForwardIter>
-  std::pair<ForwardIter,ForwardIter>
-  last_min_last_max_element(ForwardIter first, ForwardIter last)
-  {
-    return detail::basic_last_min_last_max_element(first, last,
-             detail::less_over_iter<ForwardIter>() );
-  }
-
-  template <typename ForwardIter, class BinaryPredicate>
-  inline std::pair<ForwardIter,ForwardIter>
-  last_min_last_max_element(ForwardIter first, ForwardIter last,
-                              BinaryPredicate comp)
-  {
-    return detail::basic_last_min_last_max_element(first, last,
-             detail::binary_pred_over_iter<ForwardIter,BinaryPredicate>(comp) );
-  }
-
-} // namespace boost
-
-#endif // BOOST_ALGORITHM_MINMAX_ELEMENT_HPP
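
Note for reviewers: a minimal usage sketch of the removed minmax_element family, assuming an upstream Boost release that still ships <boost/algorithm/minmax_element.hpp>. Illustrative only, not part of this change.

    // Illustrative only; assumes upstream Boost still provides this header.
    #include <boost/algorithm/minmax_element.hpp>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> v{3, 1, 4, 1, 5, 9, 2, 6};
        // Single pass with at most 3 comparisons per pair of elements.
        std::pair<std::vector<int>::iterator, std::vector<int>::iterator> mm =
            boost::minmax_element(v.begin(), v.end());
        std::cout << *mm.first << ' ' << *mm.second << '\n';  // "1 9"
        // The first_/last_ variants pick the first or last of equal extrema;
        // last_min_element returns the second 1 (index 3) here.
        std::cout << (boost::last_min_element(v.begin(), v.end()) - v.begin()) << '\n';  // 3
        return 0;
    }
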
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore.hpp
deleted file mode 100644
index 192d4de..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore.hpp
+++ /dev/null
@@ -1,272 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef BOOST_ALGORITHM_BOYER_MOORE_SEARCH_HPP
-#define BOOST_ALGORITHM_BOYER_MOORE_SEARCH_HPP
-
-#include <iterator>     // for std::iterator_traits
-
-#include <boost/assert.hpp>
-#include <boost/static_assert.hpp>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_same.hpp>
-
-#include <boost/algorithm/searching/detail/bm_traits.hpp>
-#include <boost/algorithm/searching/detail/debugging.hpp>
-
-namespace boost { namespace algorithm {
-
-/*
-    A templated version of the boyer-moore searching algorithm.
-    
-References:
-    http://www.cs.utexas.edu/users/moore/best-ideas/string-searching/
-    http://www.cs.utexas.edu/~moore/publications/fstrpos.pdf
-    
-Explanations:
-    http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
-    http://www.movsd.com/bm.htm
-    http://www.cs.ucdavis.edu/~gusfield/cs224f09/bnotes.pdf
-
-The Boyer-Moore search algorithm uses two tables, a "bad character" table
-to tell how far to skip ahead when it hits a character that is not in the pattern,
-and a "good character" table to tell how far to skip ahead when it hits a
-mismatch on a character that _is_ in the pattern.
-
-Requirements:
-        * Random access iterators
-        * The two iterator types (patIter and corpusIter) must 
-            "point to" the same underlying type and be comparable.
-        * Additional requirements may be imposed by the skip table, such as:
-        ** Numeric type (array-based skip table)
-        ** Hashable type (map-based skip table)
-*/
-
-    template <typename patIter, typename traits = detail::BM_traits<patIter> >
-    class boyer_moore {
-        typedef typename std::iterator_traits<patIter>::difference_type difference_type;
-    public:
-        boyer_moore ( patIter first, patIter last ) 
-                : pat_first ( first ), pat_last ( last ),
-                  k_pattern_length ( std::distance ( pat_first, pat_last )),
-                  skip_ ( k_pattern_length, -1 ),
-                  suffix_ ( k_pattern_length + 1 )
-            {
-            this->build_skip_table   ( first, last );
-            this->build_suffix_table ( first, last );
-            }
-            
-        ~boyer_moore () {}
-        
-        /// \fn operator ( corpusIter corpus_first, corpusIter corpus_last )
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        /// 
-        /// \param corpus_first The start of the data to search (Random Access Iterator)
-        /// \param corpus_last  One past the end of the data to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        operator () ( corpusIter corpus_first, corpusIter corpus_last ) const {
-            BOOST_STATIC_ASSERT (( boost::is_same<
-                                    typename std::iterator_traits<patIter>::value_type, 
-                                    typename std::iterator_traits<corpusIter>::value_type>::value ));
-
-            if ( corpus_first == corpus_last ) return std::make_pair(corpus_last, corpus_last);   // if nothing to search, we didn't find it!
-            if (    pat_first ==    pat_last ) return std::make_pair(corpus_first, corpus_first); // empty pattern matches at start
-
-            const difference_type k_corpus_length  = std::distance ( corpus_first, corpus_last );
-        //  If the pattern is larger than the corpus, we can't find it!
-            if ( k_corpus_length < k_pattern_length ) 
-                return std::make_pair(corpus_last, corpus_last);
-
-        //  Do the search 
-            return this->do_search ( corpus_first, corpus_last );
-            }
-            
-        template <typename Range>
-        std::pair<typename boost::range_iterator<Range>::type, typename boost::range_iterator<Range>::type>
-        operator () ( Range &r ) const {
-            return (*this) (boost::begin(r), boost::end(r));
-            }
-
-    private:
-/// \cond DOXYGEN_HIDE
-        patIter pat_first, pat_last;
-        const difference_type k_pattern_length;
-        typename traits::skip_table_t skip_;
-        std::vector <difference_type> suffix_;
-
-        /// \fn do_search ( corpusIter corpus_first, corpusIter corpus_last )
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        ///
-        /// \param corpus_first The start of the data to search (Random Access Iterator)
-        /// \param corpus_last  One past the end of the data to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        do_search ( corpusIter corpus_first, corpusIter corpus_last ) const {
-        /*  ---- Do the matching ---- */
-            corpusIter curPos = corpus_first;
-            const corpusIter lastPos = corpus_last - k_pattern_length;
-            difference_type j, k, m;
-
-            while ( curPos <= lastPos ) {
-        /*  while ( std::distance ( curPos, corpus_last ) >= k_pattern_length ) { */
-            //  Do we match right where we are?
-                j = k_pattern_length;
-                while ( pat_first [j-1] == curPos [j-1] ) {
-                    j--;
-                //  We matched - we're done!
-                    if ( j == 0 )
-                        return std::make_pair(curPos, curPos + k_pattern_length);
-                    }
-                
-            //  Since we didn't match, figure out how far to skip forward
-                k = skip_ [ curPos [ j - 1 ]];
-                m = j - k - 1;
-                if ( k < j && m > suffix_ [ j ] )
-                    curPos += m;
-                else
-                    curPos += suffix_ [ j ];
-                }
-        
-            return std::make_pair(corpus_last, corpus_last);     // We didn't find anything
-            }
-
-
-        void build_skip_table ( patIter first, patIter last ) {
-            for ( std::size_t i = 0; first != last; ++first, ++i )
-                skip_.insert ( *first, i );
-            }
-        
-
-        template<typename Iter, typename Container>
-        void compute_bm_prefix ( Iter first, Iter last, Container &prefix ) {
-            const std::size_t count = std::distance ( first, last );
-            BOOST_ASSERT ( count > 0 );
-            BOOST_ASSERT ( prefix.size () == count );
-                            
-            prefix[0] = 0;
-            std::size_t k = 0;
-            for ( std::size_t i = 1; i < count; ++i ) {
-                BOOST_ASSERT ( k < count );
-                while ( k > 0 && ( first[k] != first[i] )) {
-                    BOOST_ASSERT ( k < count );
-                    k = prefix [ k - 1 ];
-                    }
-                    
-                if ( first[k] == first[i] )
-                    k++;
-                prefix [ i ] = k;
-                }
-            }
-
-        void build_suffix_table ( patIter first, patIter last ) {
-            const std::size_t count = (std::size_t) std::distance ( first, last );
-            
-            if ( count > 0 ) {  // non-empty pattern
-                std::vector<typename std::iterator_traits<patIter>::value_type> reversed(count);
-                (void) std::reverse_copy ( first, last, reversed.begin ());
-                
-                std::vector<difference_type> prefix (count);
-                compute_bm_prefix ( first, last, prefix );
-        
-                std::vector<difference_type> prefix_reversed (count);
-                compute_bm_prefix ( reversed.begin (), reversed.end (), prefix_reversed );
-                
-                for ( std::size_t i = 0; i <= count; i++ )
-                    suffix_[i] = count - prefix [count-1];
-         
-                for ( std::size_t i = 0; i < count; i++ ) {
-                    const std::size_t     j = count - prefix_reversed[i];
-                    const difference_type k = i -     prefix_reversed[i] + 1;
-         
-                    if (suffix_[j] > k)
-                        suffix_[j] = k;
-                    }
-                }
-            }
-/// \endcond
-        };
-
-
-/*  Two ranges as inputs gives us four possibilities; with 2,3,3,4 parameters
-    Use a bit of TMP to disambiguate the 3-argument templates */
-
-/// \fn boyer_moore_search ( corpusIter corpus_first, corpusIter corpus_last, 
-///       patIter pat_first, patIter pat_last )
-/// \brief Searches the corpus for the pattern.
-/// 
-/// \param corpus_first The start of the data to search (Random Access Iterator)
-/// \param corpus_last  One past the end of the data to search
-/// \param pat_first    The start of the pattern to search for (Random Access Iterator)
-/// \param pat_last     One past the end of the data to search for
-///
-    template <typename patIter, typename corpusIter>
-    std::pair<corpusIter, corpusIter> boyer_moore_search ( 
-                  corpusIter corpus_first, corpusIter corpus_last, 
-                  patIter pat_first, patIter pat_last )
-    {
-        boyer_moore<patIter> bm ( pat_first, pat_last );
-        return bm ( corpus_first, corpus_last );
-    }
-
-    template <typename PatternRange, typename corpusIter>
-    std::pair<corpusIter, corpusIter> boyer_moore_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        boyer_moore<pattern_iterator> bm ( boost::begin(pattern), boost::end (pattern));
-        return bm ( corpus_first, corpus_last );
-    }
-    
-    template <typename patIter, typename CorpusRange>
-    typename boost::disable_if_c<
-        boost::is_same<CorpusRange, patIter>::value, 
-        std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type> >
-    ::type
-    boyer_moore_search ( CorpusRange &corpus, patIter pat_first, patIter pat_last )
-    {
-        boyer_moore<patIter> bm ( pat_first, pat_last );
-        return bm (boost::begin (corpus), boost::end (corpus));
-    }
-    
-    template <typename PatternRange, typename CorpusRange>
-    std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type>
-    boyer_moore_search ( CorpusRange &corpus, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        boyer_moore<pattern_iterator> bm ( boost::begin(pattern), boost::end (pattern));
-        return bm (boost::begin (corpus), boost::end (corpus));
-    }
-
-
-    //  Creator functions -- take a pattern range, return an object
-    template <typename Range>
-    boost::algorithm::boyer_moore<typename boost::range_iterator<const Range>::type>
-    make_boyer_moore ( const Range &r ) {
-        return boost::algorithm::boyer_moore
-            <typename boost::range_iterator<const Range>::type> (boost::begin(r), boost::end(r));
-        }
-    
-    template <typename Range>
-    boost::algorithm::boyer_moore<typename boost::range_iterator<Range>::type>
-    make_boyer_moore ( Range &r ) {
-        return boost::algorithm::boyer_moore
-            <typename boost::range_iterator<Range>::type> (boost::begin(r), boost::end(r));
-        }
-
-}}
-
-#endif  //  BOOST_ALGORITHM_BOYER_MOORE_SEARCH_HPP
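// Illustrative usage sketch (not part of this change): exercising the
// iterator-based overload of boyer_moore_search deleted above, which returns
// an iterator pair in this version of the header.
#include <boost/algorithm/searching/boyer_moore.hpp>
#include <cassert>
#include <iterator>
#include <string>
#include <utility>

void boyer_moore_example() {
    const std::string corpus  = "the quick brown fox";
    const std::string pattern = "brown";
    std::pair<std::string::const_iterator, std::string::const_iterator> hit =
        boost::algorithm::boyer_moore_search(corpus.begin(), corpus.end(),
                                             pattern.begin(), pattern.end());
    assert(hit.first != corpus.end());                        // found a match
    assert(std::distance(corpus.begin(), hit.first) == 10);   // at "brown"
}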
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore_horspool.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore_horspool.hpp
deleted file mode 100644
index aacb5cb..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/searching/boyer_moore_horspool.hpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef BOOST_ALGORITHM_BOYER_MOORE_HORSPOOOL_SEARCH_HPP
-#define BOOST_ALGORITHM_BOYER_MOORE_HORSPOOOL_SEARCH_HPP
-
-#include <iterator>     // for std::iterator_traits
-
-#include <boost/assert.hpp>
-#include <boost/static_assert.hpp>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_same.hpp>
-
-#include <boost/algorithm/searching/detail/bm_traits.hpp>
-#include <boost/algorithm/searching/detail/debugging.hpp>
-
-// #define  BOOST_ALGORITHM_BOYER_MOORE_HORSPOOL_DEBUG_HPP
-
-namespace boost { namespace algorithm {
-
-/*
-    A templated version of the boyer-moore-horspool searching algorithm.
-    
-    Requirements:
-        * Random access iterators
-        * The two iterator types (patIter and corpusIter) must 
-            "point to" the same underlying type.
-        * Additional requirements may be imposed by the skip table, such as:
-        ** Numeric type (array-based skip table)
-        ** Hashable type (map-based skip table)
-
-http://www-igm.univ-mlv.fr/%7Elecroq/string/node18.html
-
-*/
-
-    template <typename patIter, typename traits = detail::BM_traits<patIter> >
-    class boyer_moore_horspool {
-        typedef typename std::iterator_traits<patIter>::difference_type difference_type;
-    public:
-        boyer_moore_horspool ( patIter first, patIter last ) 
-                : pat_first ( first ), pat_last ( last ),
-                  k_pattern_length ( std::distance ( pat_first, pat_last )),
-                  skip_ ( k_pattern_length, k_pattern_length ) {
-                  
-        //  Build the skip table
-            std::size_t i = 0;
-            if ( first != last )    // empty pattern?
-                for ( patIter iter = first; iter != last-1; ++iter, ++i )
-                    skip_.insert ( *iter, k_pattern_length - 1 - i );
-#ifdef BOOST_ALGORITHM_BOYER_MOORE_HORSPOOL_DEBUG_HPP
-            skip_.PrintSkipTable ();
-#endif
-            }
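            // Worked example (illustrative, not from the original source): for
            // the pattern "abcab" (length 5) the loop visits the first four
            // characters and stores their distance to the final position,
            // giving skip_['a'] == 1, skip_['b'] == 3, skip_['c'] == 2 and the
            // default of 5 for every other character.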
-            
-        ~boyer_moore_horspool () {}
-        
-        /// \fn operator ( corpusIter corpus_first, corpusIter corpus_last)
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        /// 
-        /// \param corpus_first The start of the data to search (Random Access Iterator)
-        /// \param corpus_last  One past the end of the data to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        operator () ( corpusIter corpus_first, corpusIter corpus_last ) const {
-            BOOST_STATIC_ASSERT (( boost::is_same<
-                typename std::iterator_traits<patIter>::value_type, 
-                typename std::iterator_traits<corpusIter>::value_type>::value ));
-
-            if ( corpus_first == corpus_last ) return std::make_pair(corpus_last, corpus_last);   // if nothing to search, we didn't find it!
-            if (    pat_first ==    pat_last ) return std::make_pair(corpus_first, corpus_first); // empty pattern matches at start
-
-            const difference_type k_corpus_length  = std::distance ( corpus_first, corpus_last );
-        //  If the pattern is larger than the corpus, we can't find it!
-            if ( k_corpus_length < k_pattern_length )
-                return std::make_pair(corpus_last, corpus_last);
-    
-        //  Do the search 
-            return this->do_search ( corpus_first, corpus_last );
-            }
-            
-        template <typename Range>
-        std::pair<typename boost::range_iterator<Range>::type, typename boost::range_iterator<Range>::type>
-        operator () ( Range &r ) const {
-            return (*this) (boost::begin(r), boost::end(r));
-            }
-
-    private:
-/// \cond DOXYGEN_HIDE
-        patIter pat_first, pat_last;
-        const difference_type k_pattern_length;
-        typename traits::skip_table_t skip_;
-
-        /// \fn do_search ( corpusIter corpus_first, corpusIter corpus_last )
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        /// 
-        /// \param corpus_first The start of the data to search (Random Access Iterator)
-        /// \param corpus_last  One past the end of the data to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        do_search ( corpusIter corpus_first, corpusIter corpus_last ) const {
-            corpusIter curPos = corpus_first;
-            const corpusIter lastPos = corpus_last - k_pattern_length;
-            while ( curPos <= lastPos ) {
-            //  Do we match right where we are?
-                std::size_t j = k_pattern_length - 1;
-                while ( pat_first [j] == curPos [j] ) {
-                //  We matched - we're done!
-                    if ( j == 0 )
-                        return std::make_pair(curPos, curPos + k_pattern_length);
-                    j--;
-                    }
-        
-                curPos += skip_ [ curPos [ k_pattern_length - 1 ]];
-                }
-            
-            return std::make_pair(corpus_last, corpus_last);
-            }
-/// \endcond
-        };
-
-/*  Two ranges as inputs gives us four possibilities; with 2,3,3,4 parameters
-    Use a bit of TMP to disambiguate the 3-argument templates */
-
-/// \fn boyer_moore_horspool_search ( corpusIter corpus_first, corpusIter corpus_last, 
-///       patIter pat_first, patIter pat_last )
-/// \brief Searches the corpus for the pattern.
-/// 
-/// \param corpus_first The start of the data to search (Random Access Iterator)
-/// \param corpus_last  One past the end of the data to search
-/// \param pat_first    The start of the pattern to search for (Random Access Iterator)
-/// \param pat_last     One past the end of the data to search for
-///
-    template <typename patIter, typename corpusIter>
-    std::pair<corpusIter, corpusIter> boyer_moore_horspool_search ( 
-                  corpusIter corpus_first, corpusIter corpus_last, 
-                  patIter pat_first, patIter pat_last )
-    {
-        boyer_moore_horspool<patIter> bmh ( pat_first, pat_last );
-        return bmh ( corpus_first, corpus_last );
-    }
-
-    template <typename PatternRange, typename corpusIter>
-    std::pair<corpusIter, corpusIter> boyer_moore_horspool_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        boyer_moore_horspool<pattern_iterator> bmh ( boost::begin(pattern), boost::end (pattern));
-        return bmh ( corpus_first, corpus_last );
-    }
-    
-    template <typename patIter, typename CorpusRange>
-    typename boost::disable_if_c<
-        boost::is_same<CorpusRange, patIter>::value, 
-        std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type> >
-    ::type
-    boyer_moore_horspool_search ( CorpusRange &corpus, patIter pat_first, patIter pat_last )
-    {
-        boyer_moore_horspool<patIter> bmh ( pat_first, pat_last );
-        return bmh (boost::begin (corpus), boost::end (corpus));
-    }
-    
-    template <typename PatternRange, typename CorpusRange>
-    std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type>
-    boyer_moore_horspool_search ( CorpusRange &corpus, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        boyer_moore_horspool<pattern_iterator> bmh ( boost::begin(pattern), boost::end (pattern));
-        return bmh (boost::begin (corpus), boost::end (corpus));
-    }
-
-
-    //  Creator functions -- take a pattern range, return an object
-    template <typename Range>
-    boost::algorithm::boyer_moore_horspool<typename boost::range_iterator<const Range>::type>
-    make_boyer_moore_horspool ( const Range &r ) {
-        return boost::algorithm::boyer_moore_horspool
-            <typename boost::range_iterator<const Range>::type> (boost::begin(r), boost::end(r));
-        }
-    
-    template <typename Range>
-    boost::algorithm::boyer_moore_horspool<typename boost::range_iterator<Range>::type>
-    make_boyer_moore_horspool ( Range &r ) {
-        return boost::algorithm::boyer_moore_horspool
-            <typename boost::range_iterator<Range>::type> (boost::begin(r), boost::end(r));
-        }
-
-}}
-
-#endif  //  BOOST_ALGORITHM_BOYER_MOORE_HORSPOOOL_SEARCH_HPP
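// Illustrative usage sketch (not part of this change): building a reusable
// boyer_moore_horspool searcher object once and running it over a corpus.
#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
#include <iostream>
#include <string>
#include <utility>

void boyer_moore_horspool_example() {
    const std::string pattern = "needle";
    boost::algorithm::boyer_moore_horspool<std::string::const_iterator>
        searcher(pattern.begin(), pattern.end());

    const std::string corpus = "haystack with a needle in it";
    std::pair<std::string::const_iterator, std::string::const_iterator> hit =
        searcher(corpus.begin(), corpus.end());
    if (hit.first != corpus.end())
        std::cout << "match at offset " << (hit.first - corpus.begin()) << '\n';
}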
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/bm_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/bm_traits.hpp
deleted file mode 100644
index 1214363..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/bm_traits.hpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef BOOST_ALGORITHM_SEARCH_DETAIL_BM_TRAITS_HPP
-#define BOOST_ALGORITHM_SEARCH_DETAIL_BM_TRAITS_HPP
-
-#include <climits>      // for CHAR_BIT
-#include <vector>
-#include <iterator>     // for std::iterator_traits
-
-#include <boost/type_traits/make_unsigned.hpp>
-#include <boost/type_traits/is_integral.hpp>
-#include <boost/type_traits/remove_pointer.hpp>
-#include <boost/type_traits/remove_const.hpp>
-
-#include <boost/array.hpp>
-#ifdef BOOST_NO_CXX11_HDR_UNORDERED_MAP
-#include <boost/unordered_map.hpp>
-#else
-#include <unordered_map>
-#endif
-
-#include <boost/algorithm/searching/detail/debugging.hpp>
-
-namespace boost { namespace algorithm { namespace detail {
-
-//
-//  Default implementations of the skip tables for B-M and B-M-H
-//
-    template<typename key_type, typename value_type, bool /*useArray*/> class skip_table;
-
-//  General case for data searching other than bytes; use a map
-    template<typename key_type, typename value_type>
-    class skip_table<key_type, value_type, false> {
-    private:
-#ifdef BOOST_NO_CXX11_HDR_UNORDERED_MAP
-        typedef boost::unordered_map<key_type, value_type> skip_map;
-#else
-        typedef std::unordered_map<key_type, value_type> skip_map;
-#endif
-        const value_type k_default_value;
-        skip_map skip_;
-        
-    public:
-        skip_table ( std::size_t patSize, value_type default_value ) 
-            : k_default_value ( default_value ), skip_ ( patSize ) {}
-        
-        void insert ( key_type key, value_type val ) {
-            skip_ [ key ] = val;    // Would skip_.insert (val) be better here?
-            }
-
-        value_type operator [] ( key_type key ) const {
-            typename skip_map::const_iterator it = skip_.find ( key );
-            return it == skip_.end () ? k_default_value : it->second;
-            }
-            
-        void PrintSkipTable () const {
-            std::cout << "BM(H) Skip Table <unordered_map>:" << std::endl;
-            for ( typename skip_map::const_iterator it = skip_.begin (); it != skip_.end (); ++it )
-                if ( it->second != k_default_value )
-                    std::cout << "  " << it->first << ": " << it->second << std::endl;
-            std::cout << std::endl;
-            }
-        };
-        
-    
-//  Special case small numeric values; use an array
-    template<typename key_type, typename value_type>
-    class skip_table<key_type, value_type, true> {
-    private:
-        typedef typename boost::make_unsigned<key_type>::type unsigned_key_type;
-        typedef boost::array<value_type, 1U << (CHAR_BIT * sizeof(key_type))> skip_map;
-        skip_map skip_;
-        const value_type k_default_value;
-    public:
-        skip_table ( std::size_t /*patSize*/, value_type default_value ) : k_default_value ( default_value ) {
-            std::fill_n ( skip_.begin(), skip_.size(), default_value );
-            }
-        
-        void insert ( key_type key, value_type val ) {
-            skip_ [ static_cast<unsigned_key_type> ( key ) ] = val;
-            }
-
-        value_type operator [] ( key_type key ) const {
-            return skip_ [ static_cast<unsigned_key_type> ( key ) ];
-            }
-
-        void PrintSkipTable () const {
-            std::cout << "BM(H) Skip Table <boost:array>:" << std::endl;
-            for ( typename skip_map::const_iterator it = skip_.begin (); it != skip_.end (); ++it )
-                if ( *it != k_default_value )
-                    std::cout << "  " << std::distance (skip_.begin (), it) << ": " << *it << std::endl;
-            std::cout << std::endl;
-            }
-        };
-
-    template<typename Iterator>
-    struct BM_traits {
-        typedef typename std::iterator_traits<Iterator>::difference_type value_type;
-        typedef typename std::iterator_traits<Iterator>::value_type key_type;
-        typedef boost::algorithm::detail::skip_table<key_type, value_type, 
-                boost::is_integral<key_type>::value && (sizeof(key_type)==1)> skip_table_t;
-        };
-
-}}} // namespaces
-
-#endif  //  BOOST_ALGORITHM_SEARCH_DETAIL_BM_TRAITS_HPP
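// Illustrative sketch (not part of this change): BM_traits picks the
// array-backed skip table only for one-byte integral key types; wider or
// non-integral keys fall back to the (unordered_)map specialization.
#include <boost/algorithm/searching/detail/bm_traits.hpp>

namespace bm_traits_example {
    // char keys: is_integral && sizeof == 1  ->  skip_table<..., true> (boost::array)
    typedef boost::algorithm::detail::BM_traits<const char*>::skip_table_t byte_table;
    // int keys: sizeof > 1                   ->  skip_table<..., false> (hash map)
    typedef boost::algorithm::detail::BM_traits<const int*>::skip_table_t  wide_table;
}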
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/debugging.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/debugging.hpp
deleted file mode 100644
index 3996e0f..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/searching/detail/debugging.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef BOOST_ALGORITHM_SEARCH_DETAIL_DEBUG_HPP
-#define BOOST_ALGORITHM_SEARCH_DETAIL_DEBUG_HPP
-
-#include <iostream>
-/// \cond DOXYGEN_HIDE
-
-namespace boost { namespace algorithm { namespace detail {
-
-//  Debugging support
-    template <typename Iter>
-    void PrintTable ( Iter first, Iter last ) {
-        std::cout << std::distance ( first, last ) << ": { ";
-        for ( Iter iter = first; iter != last; ++iter )
-            std::cout << *iter << " ";
-        std::cout << "}" << std::endl;
-        }
-    
-}}}
-/// \endcond
-
-#endif  //  BOOST_ALGORITHM_SEARCH_DETAIL_DEBUG_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/searching/knuth_morris_pratt.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/searching/knuth_morris_pratt.hpp
deleted file mode 100644
index 5b5b64a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/searching/knuth_morris_pratt.hpp
+++ /dev/null
@@ -1,263 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef BOOST_ALGORITHM_KNUTH_MORRIS_PRATT_SEARCH_HPP
-#define BOOST_ALGORITHM_KNUTH_MORRIS_PRATT_SEARCH_HPP
-
-#include <vector>
-#include <iterator>     // for std::iterator_traits
-
-#include <boost/assert.hpp>
-#include <boost/static_assert.hpp>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/utility/enable_if.hpp>
-#include <boost/type_traits/is_same.hpp>
-
-#include <boost/algorithm/searching/detail/debugging.hpp>
-
-// #define  BOOST_ALGORITHM_KNUTH_MORRIS_PRATT_DEBUG
-
-namespace boost { namespace algorithm {
-
-// #define  NEW_KMP
-
-/*
-    A templated version of the Knuth-Morris-Pratt searching algorithm.
-    
-    Requirements:
-        * Random-access iterators
-        * The two iterator types (patIter and corpusIter) must "point to" the same underlying type.
-
-    http://en.wikipedia.org/wiki/Knuth-Morris-Pratt_algorithm
-    http://www.inf.fh-flensburg.de/lang/algorithmen/pattern/kmpen.htm
-*/
-
-    template <typename patIter>
-    class knuth_morris_pratt {
-        typedef typename std::iterator_traits<patIter>::difference_type difference_type;
-    public:
-        knuth_morris_pratt ( patIter first, patIter last ) 
-                : pat_first ( first ), pat_last ( last ), 
-                  k_pattern_length ( std::distance ( pat_first, pat_last )),
-                  skip_ ( k_pattern_length + 1 ) {
-#ifdef NEW_KMP
-            preKmp ( pat_first, pat_last );
-#else
-            init_skip_table ( pat_first, pat_last );
-#endif
-#ifdef BOOST_ALGORITHM_KNUTH_MORRIS_PRATT_DEBUG
-            detail::PrintTable ( skip_.begin (), skip_.end ());
-#endif
-            }
-            
-        ~knuth_morris_pratt () {}
-        
-        /// \fn operator ( corpusIter corpus_first, corpusIter corpus_last )
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        /// 
-        /// \param corpus_first The start of the data to search (Random Access Iterator)
-        /// \param corpus_last  One past the end of the data to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        operator () ( corpusIter corpus_first, corpusIter corpus_last ) const {
-            BOOST_STATIC_ASSERT (( boost::is_same<
-                typename std::iterator_traits<patIter>::value_type, 
-                typename std::iterator_traits<corpusIter>::value_type>::value ));
-
-            if ( corpus_first == corpus_last ) return std::make_pair(corpus_last, corpus_last);   // if nothing to search, we didn't find it!
-            if (    pat_first ==    pat_last ) return std::make_pair(corpus_first, corpus_first); // empty pattern matches at start
-
-            const difference_type k_corpus_length = std::distance ( corpus_first, corpus_last );
-        //  If the pattern is larger than the corpus, we can't find it!
-            if ( k_corpus_length < k_pattern_length ) 
-                return std::make_pair(corpus_last, corpus_last);
-
-            return do_search ( corpus_first, corpus_last, k_corpus_length );
-            }
-    
-        template <typename Range>
-        std::pair<typename boost::range_iterator<Range>::type, typename boost::range_iterator<Range>::type>
-        operator () ( Range &r ) const {
-            return (*this) (boost::begin(r), boost::end(r));
-            }
-
-    private:
-/// \cond DOXYGEN_HIDE
-        patIter pat_first, pat_last;
-        const difference_type k_pattern_length;
-        std::vector <difference_type> skip_;
-
-        /// \fn do_search ( corpusIter corpus_first, corpusIter corpus_last, difference_type k_corpus_length )
-        /// \brief Searches the corpus for the pattern that was passed into the constructor
-        /// 
-        /// \param corpus_first    The start of the data to search (Random Access Iterator)
-        /// \param corpus_last     One past the end of the data to search
-        /// \param k_corpus_length The length of the corpus to search
-        ///
-        template <typename corpusIter>
-        std::pair<corpusIter, corpusIter>
-        do_search ( corpusIter corpus_first, corpusIter corpus_last, 
-                                                difference_type k_corpus_length ) const {
-            difference_type match_start = 0;  // position in the corpus that we're matching
-            
-#ifdef NEW_KMP
-            int patternIdx = 0;
-            while ( match_start < k_corpus_length ) {
-                while ( patternIdx > -1 && pat_first[patternIdx] != corpus_first [match_start] )
-                    patternIdx = skip_ [patternIdx]; //<--- Shifting the pattern on mismatch
-
-                patternIdx++;
-                match_start++; //<--- corpus is always increased by 1
-
-                if ( patternIdx >= (int) k_pattern_length ) {
-                    corpusIter match_first = corpus_first + match_start - patternIdx;
-                    return std::make_pair(match_first, match_first + k_pattern_length);
-                    }
-                }
-            
-#else
-//  At this point, we know:
-//          k_pattern_length <= k_corpus_length
-//          for all elements of skip, it holds -1 .. k_pattern_length
-//      
-//          In the loop, we have the following invariants
-//              idx is in the range 0 .. k_pattern_length
-//              match_start is in the range 0 .. k_corpus_length - k_pattern_length + 1
-
-            const difference_type last_match = k_corpus_length - k_pattern_length;
-            difference_type idx = 0;          // position in the pattern we're comparing
-
-            while ( match_start <= last_match ) {
-                while ( pat_first [ idx ] == corpus_first [ match_start + idx ] ) {
-                    if ( ++idx == k_pattern_length )
-                        return std::make_pair(corpus_first + match_start, corpus_first + match_start + k_pattern_length);
-                    }
-            //  Figure out where to start searching again
-           //   assert ( idx - skip_ [ idx ] > 0 ); // we're always moving forward
-                match_start += idx - skip_ [ idx ];
-                idx = skip_ [ idx ] >= 0 ? skip_ [ idx ] : 0;
-           //   assert ( idx >= 0 && idx < k_pattern_length );
-                }
-#endif
-                
-        //  We didn't find anything
-            return std::make_pair(corpus_last, corpus_last);
-            }
-    
-
-        void preKmp ( patIter first, patIter last ) {
-           const difference_type count = std::distance ( first, last );
-        
-           difference_type i, j;
-        
-           i = 0;
-           j = skip_[0] = -1;
-           while (i < count) {
-              while (j > -1 && first[i] != first[j])
-                 j = skip_[j];
-              i++;
-              j++;
-              if (first[i] == first[j])
-                 skip_[i] = skip_[j];
-              else
-                 skip_[i] = j;
-           }
-        }
-
-
-        void init_skip_table ( patIter first, patIter last ) {
-            const difference_type count = std::distance ( first, last );
-    
-            difference_type j;
-            skip_ [ 0 ] = -1;
-            for ( int i = 1; i <= count; ++i ) {
-                j = skip_ [ i - 1 ];
-                while ( j >= 0 ) {
-                    if ( first [ j ] == first [ i - 1 ] )
-                        break;
-                    j = skip_ [ j ];
-                    }
-                skip_ [ i ] = j + 1;
-                }
-            }
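            // Worked example (illustrative, not from the original source): for
            // the pattern "ababc" the table built above is
            // skip_ == {-1, 0, 0, 1, 2, 0}; skip_[i] tells do_search how much
            // of the pattern is still matched after a mismatch at position i.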
-/// \endcond
-        };
-
-
-/*  Two ranges as inputs gives us four possibilities; with 2,3,3,4 parameters
-    Use a bit of TMP to disambiguate the 3-argument templates */
-
-/// \fn knuth_morris_pratt_search ( corpusIter corpus_first, corpusIter corpus_last, 
-///       patIter pat_first, patIter pat_last )
-/// \brief Searches the corpus for the pattern.
-/// 
-/// \param corpus_first The start of the data to search (Random Access Iterator)
-/// \param corpus_last  One past the end of the data to search
-/// \param pat_first    The start of the pattern to search for (Random Access Iterator)
-/// \param pat_last     One past the end of the data to search for
-///
-    template <typename patIter, typename corpusIter>
-    std::pair<corpusIter, corpusIter> knuth_morris_pratt_search ( 
-                  corpusIter corpus_first, corpusIter corpus_last, 
-                  patIter pat_first, patIter pat_last )
-    {
-        knuth_morris_pratt<patIter> kmp ( pat_first, pat_last );
-        return kmp ( corpus_first, corpus_last );
-    }
-
-    template <typename PatternRange, typename corpusIter>
-    std::pair<corpusIter, corpusIter> knuth_morris_pratt_search ( 
-        corpusIter corpus_first, corpusIter corpus_last, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        knuth_morris_pratt<pattern_iterator> kmp ( boost::begin(pattern), boost::end (pattern));
-        return kmp ( corpus_first, corpus_last );
-    }
-    
-    template <typename patIter, typename CorpusRange>
-    typename boost::disable_if_c<
-        boost::is_same<CorpusRange, patIter>::value, 
-        std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type> >
-    ::type
-    knuth_morris_pratt_search ( CorpusRange &corpus, patIter pat_first, patIter pat_last )
-    {
-        knuth_morris_pratt<patIter> kmp ( pat_first, pat_last );
-        return kmp (boost::begin (corpus), boost::end (corpus));
-    }
-    
-    template <typename PatternRange, typename CorpusRange>
-    std::pair<typename boost::range_iterator<CorpusRange>::type, typename boost::range_iterator<CorpusRange>::type>
-    knuth_morris_pratt_search ( CorpusRange &corpus, const PatternRange &pattern )
-    {
-        typedef typename boost::range_iterator<const PatternRange>::type pattern_iterator;
-        knuth_morris_pratt<pattern_iterator> kmp ( boost::begin(pattern), boost::end (pattern));
-        return kmp (boost::begin (corpus), boost::end (corpus));
-    }
-
-
-    //  Creator functions -- take a pattern range, return an object
-    template <typename Range>
-    boost::algorithm::knuth_morris_pratt<typename boost::range_iterator<const Range>::type>
-    make_knuth_morris_pratt ( const Range &r ) {
-        return boost::algorithm::knuth_morris_pratt
-            <typename boost::range_iterator<const Range>::type> (boost::begin(r), boost::end(r));
-        }
-    
-    template <typename Range>
-    boost::algorithm::knuth_morris_pratt<typename boost::range_iterator<Range>::type>
-    make_knuth_morris_pratt ( Range &r ) {
-        return boost::algorithm::knuth_morris_pratt
-            <typename boost::range_iterator<Range>::type> (boost::begin(r), boost::end(r));
-        }
-}}
-
-#endif  // BOOST_ALGORITHM_KNUTH_MORRIS_PRATT_SEARCH_HPP
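// Illustrative usage sketch (not part of this change): the four-argument
// overload of knuth_morris_pratt_search deleted above.
#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
#include <cassert>
#include <string>
#include <utility>

void knuth_morris_pratt_example() {
    const std::string corpus  = "abacababc";
    const std::string pattern = "ababc";
    std::pair<std::string::const_iterator, std::string::const_iterator> hit =
        boost::algorithm::knuth_morris_pratt_search(
            corpus.begin(), corpus.end(), pattern.begin(), pattern.end());
    assert(hit.first  == corpus.begin() + 4);   // "ababc" starts at offset 4
    assert(hit.second == hit.first + 5);        // one past the match
}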
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/sort_subrange.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/sort_subrange.hpp
deleted file mode 100644
index 7fb2cb5..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/sort_subrange.hpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
- Revision history:
-   28 Sep 2015 mtc First version
-   
-*/
-
-/// \file sort_subrange.hpp
-/// \brief Sort a subrange
-/// \author Marshall Clow
-///
-/// Suggested by Sean Parent in his CppCon 2015 keynote
-
-#ifndef BOOST_ALGORITHM_SORT_SUBRANGE_HPP
-#define BOOST_ALGORITHM_SORT_SUBRANGE_HPP
-
-#include <functional>       // For std::less
-#include <iterator>         // For std::iterator_traits
-#include <algorithm>        // For nth_element and partial_sort
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost { namespace algorithm {
-
-/// \fn sort_subrange ( T const& val, 
-///               Iterator first,     Iterator last, 
-///               Iterator sub_first, Iterator sub_last, 
-///               Pred p )
-/// \brief Sort the subrange [sub_first, sub_last) that is inside
-///     the range [first, last) as if you had sorted the entire range.
-/// 
-/// \param first       The start of the larger range
-/// \param last        The end of the larger range
-/// \param sub_first   The start of the sub range
-/// \param sub_last    The end of the sub range
-/// \param p           A predicate to use to compare the values.
-///                        p ( a, b ) returns a boolean.
-///
-  template<typename Iterator, typename Pred> 
-  void sort_subrange (
-  	Iterator first,     Iterator last, 
-  	Iterator sub_first, Iterator sub_last,
-  	Pred p)
-  {
-  	if (sub_first == sub_last) return; // the empty sub-range is already sorted.
-  	
-  	if (sub_first != first) { // no front partition needed when the sub-range already starts at 'first'
-  		(void) std::nth_element(first, sub_first, last, p);
-  		++sub_first;
-  		}
-  	std::partial_sort(sub_first, sub_last, last, p);
-  }
-
-
-
-  template<typename Iterator> 
-  void sort_subrange (Iterator first, Iterator last, Iterator sub_first, Iterator sub_last)
-  {
-  	typedef typename std::iterator_traits<Iterator>::value_type value_type;
-  	return sort_subrange(first, last, sub_first, sub_last, std::less<value_type>());
-  }
-
-/// range versions?
-
-
-/// \fn partition_subrange ( T const& val, 
-///               Iterator first,     Iterator last, 
-///               Iterator sub_first, Iterator sub_last, 
-///               Pred p )
-/// \brief Gather the elements of the subrange [sub_first, sub_last) that is 
-///     inside the range [first, last) as if you had sorted the entire range.
-/// 
-/// \param first       The start of the larger range
-/// \param last        The end of the larger range
-/// \param sub_first   The start of the sub range
-/// \param sub_last    The end of the sub range
-/// \param p           A predicate to use to compare the values.
-///                        p ( a, b ) returns a boolean.
-///
-  template<typename Iterator, typename Pred> 
-  void partition_subrange (
-  	Iterator first,     Iterator last, 
-  	Iterator sub_first, Iterator sub_last,
-  	Pred p)
-  {
-  	if (sub_first != first) {
-  		(void) std::nth_element(first, sub_first, last, p);
-  		++sub_first;
-  		}
-  	
-  	if (sub_last != last)
-  		(void) std::nth_element(sub_first, sub_last, last, p);
-  }
-
-  template<typename Iterator> 
-  void partition_subrange (Iterator first, Iterator last, Iterator sub_first, Iterator sub_last)
-  {
-  	typedef typename std::iterator_traits<Iterator>::value_type value_type;
-  	return partition_subrange(first, last, sub_first, sub_last, std::less<value_type>());
-  }
-
-}}
-
-#endif // BOOST_ALGORITHM_SORT_SUBRANGE_HPP
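// Illustrative usage sketch (not part of this change): sort_subrange leaves
// the requested window holding exactly the elements it would hold if the
// whole range had been sorted, in sorted order.
#include <boost/algorithm/sort_subrange.hpp>
#include <cassert>
#include <vector>

void sort_subrange_example() {
    std::vector<int> v;
    for (int i = 10; i > 0; --i) v.push_back(i);              // 10 9 8 ... 1
    boost::algorithm::sort_subrange(v.begin(), v.end(),
                                    v.begin() + 3, v.begin() + 6);
    assert(v[3] == 4 && v[4] == 5 && v[5] == 6);              // as in the full sort
}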
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string.hpp
deleted file mode 100644
index 0771517..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//  Boost string_algo library string_algo.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2004.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_ALGO_HPP
-#define BOOST_STRING_ALGO_HPP
-
-/*! \file
-    Cumulative include for string_algo library
-*/
-
-#include <boost/algorithm/string/std_containers_traits.hpp>
-#include <boost/algorithm/string/trim.hpp>
-#include <boost/algorithm/string/case_conv.hpp>
-#include <boost/algorithm/string/predicate.hpp>
-#include <boost/algorithm/string/find.hpp>
-#include <boost/algorithm/string/split.hpp>
-#include <boost/algorithm/string/join.hpp>
-#include <boost/algorithm/string/replace.hpp>
-#include <boost/algorithm/string/erase.hpp>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/find_iterator.hpp>
-
-
-#endif  // BOOST_STRING_ALGO_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/case_conv.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/case_conv.hpp
deleted file mode 100644
index 683340b..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/case_conv.hpp
+++ /dev/null
@@ -1,176 +0,0 @@
-//  Boost string_algo library case_conv.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CASE_CONV_HPP
-#define BOOST_STRING_CASE_CONV_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <algorithm>
-#include <locale>
-#include <boost/iterator/transform_iterator.hpp>
-
-#include <boost/range/as_literal.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/value_type.hpp>
-
-#include <boost/algorithm/string/detail/case_conv.hpp>
-
-/*! \file
-    Defines sequence case-conversion algorithms.
-    Algorithms convert each element in the input sequence to the
-    desired case using provided locales.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  to_lower  -----------------------------------------------//
-
-        //! Convert to lower case
-        /*!
-            Each element of the input sequence is converted to lower
-            case. The result is a copy of the input converted to lower case.
-            It is returned as a sequence or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input range
-            \param Loc A locale used for conversion
-            \return 
-                An output iterator pointing just after the last inserted character or
-                a copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-                
-        */
-        template<typename OutputIteratorT, typename RangeT>
-        inline OutputIteratorT 
-        to_lower_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::detail::transform_range_copy( 
-               Output,
-               ::boost::as_literal(Input),
-               ::boost::algorithm::detail::to_lowerF<
-                    typename range_value<RangeT>::type >(Loc));
-        }
-
-        //! Convert to lower case
-        /*!
-            \overload
-        */
-        template<typename SequenceT>
-        inline SequenceT to_lower_copy( 
-            const SequenceT& Input, 
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::detail::transform_range_copy<SequenceT>(
-                Input,
-                ::boost::algorithm::detail::to_lowerF<
-                    typename range_value<SequenceT>::type >(Loc));
-        }
-
-        //! Convert to lower case
-        /*!
-            Each element of the input sequence is converted to lower
-            case. The input sequence is modified in-place.
-
-            \param Input A range
-            \param Loc a locale used for conversion
-        */
-        template<typename WritableRangeT>
-        inline void to_lower( 
-            WritableRangeT& Input, 
-            const std::locale& Loc=std::locale())
-        {
-            ::boost::algorithm::detail::transform_range(
-                ::boost::as_literal(Input),
-                ::boost::algorithm::detail::to_lowerF<
-                    typename range_value<WritableRangeT>::type >(Loc));
-        }
-        
-//  to_upper  -----------------------------------------------//
-
-        //! Convert to upper case
-        /*!
-            Each element of the input sequence is converted to upper
-            case. The result is a copy of the input converted to upper case.
-            It is returned as a sequence or copied to the output iterator
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input range
-            \param Loc A locale used for conversion
-            \return 
-                An output iterator pointing just after the last inserted character or
-                a copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<typename OutputIteratorT, typename RangeT>
-        inline OutputIteratorT 
-        to_upper_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::detail::transform_range_copy( 
-               Output,
-               ::boost::as_literal(Input),
-               ::boost::algorithm::detail::to_upperF<
-                    typename range_value<RangeT>::type >(Loc));
-        }
-
-        //! Convert to upper case
-        /*!
-            \overload
-        */
-        template<typename SequenceT>
-        inline SequenceT to_upper_copy( 
-            const SequenceT& Input, 
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::detail::transform_range_copy<SequenceT>(
-                Input,
-                ::boost::algorithm::detail::to_upperF<
-                    typename range_value<SequenceT>::type >(Loc));
-        }
-
-        //! Convert to upper case
-        /*!
-            Each element of the input sequence is converted to upper
-            case. The input sequence is modified in-place.
-
-            \param Input An input range
-            \param Loc a locale used for conversion
-        */
-        template<typename WritableRangeT>
-        inline void to_upper( 
-            WritableRangeT& Input, 
-            const std::locale& Loc=std::locale())
-        {
-            ::boost::algorithm::detail::transform_range(
-                ::boost::as_literal(Input),
-                ::boost::algorithm::detail::to_upperF<
-                    typename range_value<WritableRangeT>::type >(Loc));
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::to_lower;
-    using algorithm::to_lower_copy;
-    using algorithm::to_upper;
-    using algorithm::to_upper_copy;
-
-} // namespace boost
-
-#endif  // BOOST_STRING_CASE_CONV_HPP
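// Illustrative usage sketch (not part of this change): the in-place and
// copying case-conversion overloads deleted above.
#include <boost/algorithm/string/case_conv.hpp>
#include <cassert>
#include <string>

void case_conv_example() {
    std::string s = "Hello, World";
    boost::algorithm::to_upper(s);                                  // in place
    assert(s == "HELLO, WORLD");
    const std::string lower = boost::algorithm::to_lower_copy(s);   // copying
    assert(lower == "hello, world");
}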
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/classification.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/classification.hpp
deleted file mode 100644
index ca43602..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/classification.hpp
+++ /dev/null
@@ -1,312 +0,0 @@
-//  Boost string_algo library classification.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CLASSIFICATION_HPP
-#define BOOST_STRING_CLASSIFICATION_HPP
-
-#include <algorithm>
-#include <locale>
-#include <boost/range/value_type.hpp>
-#include <boost/range/as_literal.hpp>
-#include <boost/algorithm/string/detail/classification.hpp>
-#include <boost/algorithm/string/predicate_facade.hpp>
-
-
-/*! \file
-    Classification predicates are included in the library to give 
-    some more convenience when using algorithms like \c trim() and \c all(). 
-    They wrap functionality of STL classification functions ( e.g. \c std::isspace() )
-    into generic functors. 
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  classification functor generator -------------------------------------//
-
-        //! is_classified predicate
-        /*!
-            Construct the \c is_classified predicate. This predicate holds if the input is
-            of specified \c std::ctype category.
-
-            \param Type A \c std::ctype category
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF
-        is_classified(std::ctype_base::mask Type, const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(Type, Loc);
-        }
-
-        //! is_space predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::space category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate
-        */
-        inline detail::is_classifiedF 
-        is_space(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::space, Loc);
-        }
-
-        //! is_alnum predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::alnum category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_alnum(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::alnum, Loc);
-        }
-
-        //! is_alpha predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::alpha category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_alpha(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::alpha, Loc);
-        }
-
-        //! is_cntrl predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::cntrl category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_cntrl(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::cntrl, Loc);
-        }
-
-        //! is_digit predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::digit category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_digit(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::digit, Loc);
-        }
-
-        //! is_graph predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::graph category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF
-        is_graph(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::graph, Loc);
-        }
-
-        //! is_lower predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::lower category.   
-
-            \param Loc A locale used for classification
-            \return An instance of \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_lower(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::lower, Loc);
-        }
-
-        //! is_print predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::print category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_print(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::print, Loc);
-        }
-
-        //! is_punct predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::punct category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_punct(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::punct, Loc);
-        }
-
-        //! is_upper predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::upper category.   
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_upper(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::upper, Loc);
-        }
-
-        //! is_xdigit predicate
-        /*!
-            Construct the \c is_classified predicate for the \c ctype_base::xdigit category.  
-
-            \param Loc A locale used for classification
-            \return An instance of the \c is_classified predicate 
-        */
-        inline detail::is_classifiedF 
-        is_xdigit(const std::locale& Loc=std::locale())
-        {
-            return detail::is_classifiedF(std::ctype_base::xdigit, Loc);
-        }
-
-        //! is_any_of predicate
-        /*!
-            Construct the \c is_any_of predicate. The predicate holds if the input
-            is included in the specified set of characters.
-
-            \param Set A set of characters to be recognized
-            \return An instance of the \c is_any_of predicate 
-        */
-        template<typename RangeT>
-        inline detail::is_any_ofF<
-            BOOST_STRING_TYPENAME range_value<RangeT>::type> 
-        is_any_of( const RangeT& Set )
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_set(boost::as_literal(Set));
-            return detail::is_any_ofF<BOOST_STRING_TYPENAME range_value<RangeT>::type>(lit_set); 
-        }
-
-        //! is_from_range predicate
-        /*!
-            Construct the \c is_from_range predicate. The predicate holds if the input
-            is included in the specified range. (i.e. From <= Ch <= To )
-
-            \param From The start of the range
-            \param To The end of the range
-            \return An instance of the \c is_from_range predicate 
-        */
-        template<typename CharT>
-        inline detail::is_from_rangeF<CharT> is_from_range(CharT From, CharT To)
-        {
-            return detail::is_from_rangeF<CharT>(From,To); 
-        }
-        
-        // predicate combinators ---------------------------------------------------//
-
-        //! predicate 'and' composition predicate
-        /*!
-            Construct the \c class_and predicate. This predicate can be used
-            to logically combine two classification predicates. \c class_and holds,
-            if both predicates return true.
-
-            \param Pred1 The first predicate
-            \param Pred2 The second predicate
-            \return An instance of the \c class_and predicate     
-        */
-        template<typename Pred1T, typename Pred2T>
-        inline detail::pred_andF<Pred1T, Pred2T>
-        operator&&( 
-            const predicate_facade<Pred1T>& Pred1, 
-            const predicate_facade<Pred2T>& Pred2 )
-        {    
-            // Doing the static_cast with the pointer instead of the reference
-            // is a workaround for some compilers which have problems with
-            // static_cast's of template references, i.e. CW8. /grafik/
-            return detail::pred_andF<Pred1T,Pred2T>(
-                *static_cast<const Pred1T*>(&Pred1), 
-                *static_cast<const Pred2T*>(&Pred2) );
-        }
-
-        //! predicate 'or' composition predicate
-        /*!
-            Construct the \c class_or predicate. This predicate can be used
-            to logically combine two classification predicates. \c class_or holds,
-            if at least one of the predicates returns true.
-
-            \param Pred1 The first predicate
-            \param Pred2 The second predicate
-            \return An instance of the \c class_or predicate     
-        */
-        template<typename Pred1T, typename Pred2T>
-        inline detail::pred_orF<Pred1T, Pred2T>
-        operator||( 
-            const predicate_facade<Pred1T>& Pred1, 
-            const predicate_facade<Pred2T>& Pred2 )
-        {    
-            // Doing the static_cast with the pointer instead of the reference
-            // is a workaround for some compilers which have problems with
-            // static_cast's of template references, i.e. CW8. /grafik/
-            return detail::pred_orF<Pred1T,Pred2T>(
-                *static_cast<const Pred1T*>(&Pred1), 
-                *static_cast<const Pred2T*>(&Pred2));
-        }
-
-        //! predicate negation operator
-        /*!
-            Construct the \c class_not predicate. This predicate represents a negation. 
-            \c class_not holds if the predicate returns false.
-
-            \param Pred The predicate to be negated
-            \return An instance of the \c class_not predicate     
-        */
-        template<typename PredT>
-        inline detail::pred_notF<PredT>
-        operator!( const predicate_facade<PredT>& Pred )
-        {
-            // Doing the static_cast with the pointer instead of the reference
-            // is a workaround for some compilers which have problems with
-            // static_cast's of template references, i.e. CW8. /grafik/
-            return detail::pred_notF<PredT>(*static_cast<const PredT*>(&Pred)); 
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::is_classified;
-    using algorithm::is_space;
-    using algorithm::is_alnum;
-    using algorithm::is_alpha;
-    using algorithm::is_cntrl;
-    using algorithm::is_digit;
-    using algorithm::is_graph;
-    using algorithm::is_lower;
-    using algorithm::is_upper;
-    using algorithm::is_print;
-    using algorithm::is_punct;
-    using algorithm::is_xdigit;
-    using algorithm::is_any_of;
-    using algorithm::is_from_range;
-
-} // namespace boost
-
-#endif  // BOOST_STRING_CLASSIFICATION_HPP
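// Illustrative usage sketch (not part of this change): classification
// predicates are plain functors over characters and can be combined with the
// operator&&, operator|| and operator! overloads deleted above.
#include <boost/algorithm/string/classification.hpp>
#include <cassert>

void classification_example() {
    assert(boost::algorithm::is_space()(' '));
    assert(boost::algorithm::is_any_of(",;")(';'));
    assert((boost::algorithm::is_alnum() || boost::algorithm::is_space())('a'));
    assert((!boost::algorithm::is_digit())('x'));
}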
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/compare.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/compare.hpp
deleted file mode 100644
index 734303a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/compare.hpp
+++ /dev/null
@@ -1,199 +0,0 @@
-//  Boost string_algo library compare.hpp header file  -------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_COMPARE_HPP
-#define BOOST_STRING_COMPARE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <locale>
-
-/*! \file
-    Defines element comparison predicates. Many algorithms in this library can
-    take an additional argument with a predicate used to compare elements.
-    This makes it possible, for instance, to have case insensitive versions
-    of the algorithms.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-        //  is_equal functor  -----------------------------------------------//
-
-        //! is_equal functor
-        /*!
-            The standard STL equal_to only handles comparison between arguments
-            of the same type. This is a less restrictive version which wraps operator ==.
-        */
-        struct is_equal
-        {
-            //! Function operator
-            /*!
-                Compare two operands for equality
-            */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                return Arg1==Arg2;
-            }
-        };
-
-        //! case insensitive version of is_equal
-        /*!
-            Case insensitive comparison predicate. Comparison is done using
-            specified locales.
-        */
-        struct is_iequal
-        {
-            //! Constructor
-            /*!
-                \param Loc locales used for comparison
-            */
-            is_iequal( const std::locale& Loc=std::locale() ) :
-                m_Loc( Loc ) {}
-
-            //! Function operator
-            /*!
-                Compare two operands. Case is ignored.
-            */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x564) && !defined(_USE_OLD_RW_STL)
-                    return std::toupper(Arg1)==std::toupper(Arg2);
-                #else
-                    return std::toupper<T1>(Arg1,m_Loc)==std::toupper<T2>(Arg2,m_Loc);
-                #endif
-            }
-
-        private:
-            std::locale m_Loc;
-        };
-
-        //  is_less functor  -----------------------------------------------//
-
-        //! is_less functor
-        /*!
-            Convenient version of standard std::less. The operation is templated, so the
-            exact types do not need to be specified at construction.
-         */
-        struct is_less
-        {
-            //! Functor operation
-            /*!
-                Compare two operands using the < operator
-             */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                return Arg1<Arg2;
-            }
-        };
-
-
-        //! case insensitive version of is_less
-        /*!
-            Case insensitive comparison predicate. Comparison is done using
-            specified locales.
-        */
-        struct is_iless
-        {
-            //! Constructor
-            /*!
-                \param Loc locales used for comparison
-            */
-            is_iless( const std::locale& Loc=std::locale() ) :
-                m_Loc( Loc ) {}
-
-            //! Function operator
-            /*!
-                Compare two operands. Case is ignored.
-            */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x564) && !defined(_USE_OLD_RW_STL)
-                    return std::toupper(Arg1)<std::toupper(Arg2);
-                #else
-                    return std::toupper<T1>(Arg1,m_Loc)<std::toupper<T2>(Arg2,m_Loc);
-                #endif
-            }
-
-        private:
-            std::locale m_Loc;
-        };
-
-        //  is_not_greater functor  -----------------------------------------------//
-
-        //! is_not_greater functor
-        /*!
-            Convenient version of standard std::less_equal. The operation is templated, so the
-            exact types do not need to be specified at construction.
-         */
-        struct is_not_greater
-        {
-            //! Functor operation
-            /*!
-                Compare two operands using the <= operator
-             */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                return Arg1<=Arg2;
-            }
-        };
-
-
-        //! case insensitive version of is_not_greater
-        /*!
-            Case insensitive comparison predicate. Comparison is done using
-            specified locales.
-        */
-        struct is_not_igreater
-        {
-            //! Constructor
-            /*!
-                \param Loc locales used for comparison
-            */
-            is_not_igreater( const std::locale& Loc=std::locale() ) :
-                m_Loc( Loc ) {}
-
-            //! Function operator
-            /*!
-                Compare two operands. Case is ignored.
-            */
-            template< typename T1, typename T2 >
-                bool operator()( const T1& Arg1, const T2& Arg2 ) const
-            {
-                #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x564) && !defined(_USE_OLD_RW_STL)
-                    return std::toupper(Arg1)<=std::toupper(Arg2);
-                #else
-                    return std::toupper<T1>(Arg1,m_Loc)<=std::toupper<T2>(Arg2,m_Loc);
-                #endif
-            }
-
-        private:
-            std::locale m_Loc;
-        };
-
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::is_equal;
-    using algorithm::is_iequal;
-    using algorithm::is_less;
-    using algorithm::is_iless;
-    using algorithm::is_not_greater;
-    using algorithm::is_not_igreater;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_COMPARE_HPP
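For context, a minimal sketch of how these comparison functors are consumed (assuming the deleted compare.hpp is still reachable from an upstream Boost install); is_iequal is a drop-in binary predicate for standard algorithms:

#include <boost/algorithm/string/compare.hpp>
#include <algorithm>
#include <iostream>
#include <string>

int main() {
    const std::string a = "Hello";
    const std::string b = "hELLO";
    boost::algorithm::is_iequal ieq;  // uses the default std::locale()
    const bool same = a.size() == b.size() &&
                      std::equal(a.begin(), a.end(), b.begin(), ieq);
    std::cout << std::boolalpha << same << '\n';  // prints: true
    return 0;
}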
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/concept.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/concept.hpp
deleted file mode 100644
index 17e8349..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/concept.hpp
+++ /dev/null
@@ -1,83 +0,0 @@
-//  Boost string_algo library concept.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CONCEPT_HPP
-#define BOOST_STRING_CONCEPT_HPP
-
-#include <boost/concept_check.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-/*! \file 
-    Defines concepts used in string_algo library
-*/
-
-namespace boost {
-    namespace algorithm {
-
-        //! Finder concept
-        /*!
-            Defines the Finder concept. Finder is a functor which selects
-            an arbitrary part of a string. Search is performed on
-            the range specified by starting and ending iterators.
-
-            Result of the find operation must be convertible to iterator_range.
-        */
-        template<typename FinderT, typename IteratorT>
-        struct FinderConcept
-        {
-        private:
-            typedef iterator_range<IteratorT> range;
-        public:
-            void constraints()
-            {
-                // Operation
-                r=(*pF)(i,i);
-            }
-        private:
-            range r;
-            IteratorT i;
-            FinderT* pF;    
-        }; // Finder_concept
-
-        
-        //! Formatter concept
-        /*!
-            Defines the Formatter concept. Formatter is a functor, which
-            takes a result from a finder operation and transforms it
-            in a specific way.
-
-            Result must be a container supported by container_traits, 
-            or a reference to it.
-        */
-        template<typename FormatterT, typename FinderT, typename IteratorT>
-        struct FormatterConcept
-        {
-        public:
-            void constraints()
-            {
-                // Operation
-                ::boost::begin((*pFo)( (*pF)(i,i) ));
-                ::boost::end((*pFo)( (*pF)(i,i) ));
-            }
-        private:
-            IteratorT i;
-            FinderT* pF;
-            FormatterT *pFo;
-        }; // FormatterConcept;
-
-    } // namespace algorithm
-} // namespace boost
-
-
-
-
-#endif  // BOOST_STRING_CONCEPT_HPP
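A sketch of a functor modelling the Finder concept documented above; the name first_char_finder is purely illustrative and not part of the library:

#include <boost/range/iterator_range_core.hpp>

// Selects the first element of [begin, end); returns an empty range on empty input.
struct first_char_finder {
    template <typename IteratorT>
    boost::iterator_range<IteratorT> operator()(IteratorT begin, IteratorT end) const {
        if (begin == end)
            return boost::iterator_range<IteratorT>(end, end);
        IteratorT next = begin;
        ++next;
        return boost::iterator_range<IteratorT>(begin, next);
    }
};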
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/config.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/config.hpp
deleted file mode 100644
index 559750a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/config.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//  Boost string_algo library config.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CONFIG_HPP
-#define BOOST_STRING_CONFIG_HPP
-
-#include <boost/config.hpp>
-#include <boost/detail/workaround.hpp>
-
-#ifdef BOOST_STRING_DEDUCED_TYPENAME
-#   error "macro already defined!"
-#endif
-
-#define BOOST_STRING_TYPENAME BOOST_DEDUCED_TYPENAME
-
-// Metrowerks workaround
-#if BOOST_WORKAROUND(__MWERKS__, <= 0x3003) // 8.x
-#pragma parse_func_templ off
-#endif
-
-#endif  // BOOST_STRING_CONFIG_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/constants.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/constants.hpp
deleted file mode 100644
index 6ed70ef..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/constants.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//  Boost string_algo library constants.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CONSTANTS_HPP
-#define BOOST_STRING_CONSTANTS_HPP
-
-namespace boost {
-    namespace algorithm {
-
-    //! Token compression mode 
-    /*!
-        Specifies token compression mode for the token_finder.
-    */
-    enum token_compress_mode_type
-    {
-        token_compress_on,    //!< Compress adjacent tokens
-        token_compress_off  //!< Do not compress adjacent tokens
-    };
-    
-    } // namespace algorithm
-
-    // pull the names to the boost namespace
-    using algorithm::token_compress_on;
-    using algorithm::token_compress_off;
-
-} // namespace boost
-
-#endif  // BOOST_STRING_CONSTANTS_HPP
-
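The compression mode above is consumed by the split/token-finder algorithms; a typical call (sketch assuming the public split.hpp and classification.hpp headers) looks like:

#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <string>
#include <vector>

int main() {
    std::string csv = "a,,b,c";
    std::vector<std::string> parts;
    // token_compress_on folds the run of commas into a single separator.
    boost::algorithm::split(parts, csv, boost::algorithm::is_any_of(","),
                            boost::algorithm::token_compress_on);
    // parts now holds {"a", "b", "c"}; with token_compress_off it would hold {"a", "", "b", "c"}.
    return 0;
}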
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/case_conv.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/case_conv.hpp
deleted file mode 100644
index 233912c..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/case_conv.hpp
+++ /dev/null
@@ -1,127 +0,0 @@
-//  Boost string_algo library string_funct.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CASE_CONV_DETAIL_HPP
-#define BOOST_STRING_CASE_CONV_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <locale>
-#include <functional>
-
-#include <boost/type_traits/make_unsigned.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  case conversion functors -----------------------------------------------//
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
-#pragma warning(push)
-#pragma warning(disable:4512) //assignment operator could not be generated
-#endif
-
-            // a tolower functor
-            template<typename CharT>
-            struct to_lowerF
-            {
-                typedef CharT argument_type;
-                typedef CharT result_type;
-                // Constructor
-                to_lowerF( const std::locale& Loc ) : m_Loc( &Loc ) {}
-
-                // Operation
-                CharT operator ()( CharT Ch ) const
-                {
-                    #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x564) && !defined(_USE_OLD_RW_STL)
-                        return std::tolower( static_cast<typename boost::make_unsigned <CharT>::type> ( Ch ));
-                    #else
-                        return std::tolower<CharT>( Ch, *m_Loc );
-                    #endif
-                }
-            private:
-                const std::locale* m_Loc;
-            };
-
-            // a toupper functor
-            template<typename CharT>
-            struct to_upperF
-            {
-                typedef CharT argument_type;
-                typedef CharT result_type;
-                // Constructor
-                to_upperF( const std::locale& Loc ) : m_Loc( &Loc ) {}
-
-                // Operation
-                CharT operator ()( CharT Ch ) const
-                {
-                    #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x564) && !defined(_USE_OLD_RW_STL)
-                        return std::toupper( static_cast<typename boost::make_unsigned <CharT>::type> ( Ch ));
-                    #else
-                        return std::toupper<CharT>( Ch, *m_Loc );
-                    #endif
-                }
-            private:
-                const std::locale* m_Loc;
-            };
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
-#pragma warning(pop)
-#endif
-
-// algorithm implementation -------------------------------------------------------------------------
-
-            // Transform a range
-            template<typename OutputIteratorT, typename RangeT, typename FunctorT>
-            OutputIteratorT transform_range_copy(
-                OutputIteratorT Output,
-                const RangeT& Input,
-                FunctorT Functor)
-            {
-                return std::transform( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    Output,
-                    Functor);
-            }
-
-            // Transform a range (in-place)
-            template<typename RangeT, typename FunctorT>
-            void transform_range(
-                const RangeT& Input,
-                FunctorT Functor)
-            {
-                std::transform( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    ::boost::begin(Input),
-                    Functor);
-            }
-
-            template<typename SequenceT, typename RangeT, typename FunctorT>
-            inline SequenceT transform_range_copy( 
-                const RangeT& Input, 
-                FunctorT Functor)
-            {
-                return SequenceT(
-                    ::boost::make_transform_iterator(
-                        ::boost::begin(Input),
-                        Functor),
-                    ::boost::make_transform_iterator(
-                        ::boost::end(Input), 
-                        Functor));
-            }
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_CASE_CONV_DETAIL_HPP
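The detail helpers above are thin wrappers over std::transform; the public case-conversion algorithms reduce to essentially this pattern (standard-library-only sketch, the function name is illustrative):

#include <algorithm>
#include <locale>
#include <string>

// Locale-aware lower-casing of a copy of the input, mirroring transform_range_copy + to_lowerF.
std::string to_lower_copy_sketch(const std::string& input,
                                 const std::locale& loc = std::locale()) {
    std::string out(input.size(), '\0');
    std::transform(input.begin(), input.end(), out.begin(),
                   [&loc](char ch) { return std::tolower(ch, loc); });
    return out;
}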
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/classification.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/classification.hpp
deleted file mode 100644
index 704d9d2..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/classification.hpp
+++ /dev/null
@@ -1,353 +0,0 @@
-//  Boost string_algo library classification.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-// 
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_CLASSIFICATION_DETAIL_HPP
-#define BOOST_STRING_CLASSIFICATION_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <algorithm>
-#include <functional>
-#include <locale>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/algorithm/string/predicate_facade.hpp>
-#include <boost/type_traits/remove_const.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  classification functors -----------------------------------------------//
-
-   // is_classified functor
-            struct is_classifiedF :
-                public predicate_facade<is_classifiedF>
-            {
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor from a locale
-                is_classifiedF(std::ctype_base::mask Type, std::locale const & Loc = std::locale()) :
-                    m_Type(Type), m_Locale(Loc) {}
-                // Operation
-                template<typename CharT>
-                bool operator()( CharT Ch ) const
-                {
-                    return std::use_facet< std::ctype<CharT> >(m_Locale).is( m_Type, Ch );
-                }
-
-                #if defined(__BORLANDC__) && (__BORLANDC__ >= 0x560) && (__BORLANDC__ <= 0x582) && !defined(_USE_OLD_RW_STL)
-                    template<>
-                    bool operator()( char const Ch ) const
-                    {
-                        return std::use_facet< std::ctype<char> >(m_Locale).is( m_Type, Ch );
-                    }
-                #endif
-
-            private:
-                std::ctype_base::mask m_Type;
-                std::locale m_Locale;
-            };
-
-
-            // is_any_of functor
-            /*
-                returns true if the value is from the specified set
-            */
-            template<typename CharT>
-            struct is_any_ofF :
-                public predicate_facade<is_any_ofF<CharT> >
-            {
-            private:
-                // set cannot operate on const value-type
-                typedef typename ::boost::remove_const<CharT>::type set_value_type;
-
-            public:     
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor
-                template<typename RangeT>
-                is_any_ofF( const RangeT& Range ) : m_Size(0)
-                {
-                    // Prepare storage
-                    m_Storage.m_dynSet=0;
-
-                    std::size_t Size=::boost::distance(Range);
-                    m_Size=Size;
-                    set_value_type* Storage=0;
-
-                    if(use_fixed_storage(m_Size))
-                    {
-                        // Use fixed storage
-                        Storage=&m_Storage.m_fixSet[0];
-                    }
-                    else
-                    {
-                        // Use dynamic storage
-                        m_Storage.m_dynSet=new set_value_type[m_Size];
-                        Storage=m_Storage.m_dynSet;
-                    }
-
-                    // Copy the set into the selected storage and sort it for binary_search
-                    ::std::copy(::boost::begin(Range), ::boost::end(Range), Storage);
-                    ::std::sort(Storage, Storage+m_Size);
-                }
-
-                // Copy constructor
-                is_any_ofF(const is_any_ofF& Other) : m_Size(Other.m_Size)
-                {
-                    // Prepare storage
-                    m_Storage.m_dynSet=0;               
-                    const set_value_type* SrcStorage=0;
-                    set_value_type* DestStorage=0;
-
-                    if(use_fixed_storage(m_Size))
-                    {
-                        // Use fixed storage
-                        DestStorage=&m_Storage.m_fixSet[0];
-                        SrcStorage=&Other.m_Storage.m_fixSet[0];
-                    }
-                    else
-                    {
-                        // Use dynamic storage
-                        m_Storage.m_dynSet=new set_value_type[m_Size];
-                        DestStorage=m_Storage.m_dynSet;
-                        SrcStorage=Other.m_Storage.m_dynSet;
-                    }
-
-                    // Copy the data into the selected storage
-                    ::std::memcpy(DestStorage, SrcStorage, sizeof(set_value_type)*m_Size);
-                }
-
-                // Destructor
-                ~is_any_ofF()
-                {
-                    if(!use_fixed_storage(m_Size) && m_Storage.m_dynSet!=0)
-                    {
-                        delete [] m_Storage.m_dynSet;
-                    }
-                }
-
-                // Assignment
-                is_any_ofF& operator=(const is_any_ofF& Other)
-                {
-                    // Handle self assignment
-                    if(this==&Other) return *this;
-
-                    // Prepare storage             
-                    const set_value_type* SrcStorage;
-                    set_value_type* DestStorage;
-
-                    if(use_fixed_storage(Other.m_Size))
-                    {
-                        // Use fixed storage
-                        DestStorage=&m_Storage.m_fixSet[0];
-                        SrcStorage=&Other.m_Storage.m_fixSet[0];
-
-                        // Delete old storage if was present
-                        if(!use_fixed_storage(m_Size) && m_Storage.m_dynSet!=0)
-                        {
-                            delete [] m_Storage.m_dynSet;
-                        }
-
-                        // Set new size
-                        m_Size=Other.m_Size;
-                    }
-                    else
-                    {
-                        // Other uses dynamic storage
-                        SrcStorage=Other.m_Storage.m_dynSet;
-
-                        // Check what kind of storage are we using right now
-                        if(use_fixed_storage(m_Size))
-                        {
-                            // Using fixed storage, allocate new
-                            set_value_type* pTemp=new set_value_type[Other.m_Size];
-                            DestStorage=pTemp;
-                            m_Storage.m_dynSet=pTemp;
-                            m_Size=Other.m_Size;
-                        }
-                        else
-                        {
-                            // Using dynamic storage, check if can reuse
-                            if(m_Storage.m_dynSet!=0 && m_Size>=Other.m_Size && m_Size<Other.m_Size*2)
-                            {
-                                // Reuse the current storage
-                                DestStorage=m_Storage.m_dynSet;
-                                m_Size=Other.m_Size;
-                            }
-                            else
-                            {
-                                // Allocate the new one
-                                set_value_type* pTemp=new set_value_type[Other.m_Size];
-                                DestStorage=pTemp;
-                        
-                                // Delete old storage if necessary
-                                if(m_Storage.m_dynSet!=0)
-                                {
-                                    delete [] m_Storage.m_dynSet;
-                                }
-                                // Store the new storage
-                                m_Storage.m_dynSet=pTemp;
-                                // Set new size
-                                m_Size=Other.m_Size;
-                            }
-                        }
-                    }
-
-                    // Copy the data
-                    ::std::memcpy(DestStorage, SrcStorage, sizeof(set_value_type)*m_Size);
-
-                    return *this;
-                }
-
-                // Operation
-                template<typename Char2T>
-                bool operator()( Char2T Ch ) const
-                {
-                    const set_value_type* Storage=
-                        (use_fixed_storage(m_Size))
-                        ? &m_Storage.m_fixSet[0]
-                        : m_Storage.m_dynSet;
-
-                    return ::std::binary_search(Storage, Storage+m_Size, Ch);
-                }
-            private:
-                // check if the size is eligible for fixed storage
-                static bool use_fixed_storage(std::size_t size)
-                {
-                    return size<=sizeof(set_value_type*)*2;
-                }
-
-
-            private:
-                // storage
-                // The storage actually used is selected at runtime based on the set size
-                union
-                {
-                    set_value_type* m_dynSet;
-                    set_value_type m_fixSet[sizeof(set_value_type*)*2];
-                } 
-                m_Storage;
-        
-                // storage size
-                ::std::size_t m_Size;
-            };
-
-            // is_from_range functor
-            /*
-                returns true if the value is from the specified range.
-                (i.e. From<=x && x<=To)
-            */
-            template<typename CharT>
-            struct is_from_rangeF :
-                public predicate_facade< is_from_rangeF<CharT> >
-            {
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor
-                is_from_rangeF( CharT From, CharT To ) : m_From(From), m_To(To) {}
-
-                // Operation
-                template<typename Char2T>
-                bool operator()( Char2T Ch ) const
-                {
-                    return ( m_From <= Ch ) && ( Ch <= m_To );
-                }
-
-            private:
-                CharT m_From;
-                CharT m_To;
-            };
-
-            // class_and composition predicate
-            template<typename Pred1T, typename Pred2T>
-            struct pred_andF :
-                public predicate_facade< pred_andF<Pred1T,Pred2T> >
-            {
-            public:
-
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor
-                pred_andF( Pred1T Pred1, Pred2T Pred2 ) :
-                    m_Pred1(Pred1), m_Pred2(Pred2) {}
-
-                // Operation
-                template<typename CharT>
-                bool operator()( CharT Ch ) const
-                {
-                    return m_Pred1(Ch) && m_Pred2(Ch);
-                }
-
-            private:
-                Pred1T m_Pred1;
-                Pred2T m_Pred2;
-            };
-
-            // class_or composition predicate
-            template<typename Pred1T, typename Pred2T>
-            struct pred_orF :
-                public predicate_facade< pred_orF<Pred1T,Pred2T> >
-            {
-            public:
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor
-                pred_orF( Pred1T Pred1, Pred2T Pred2 ) :
-                    m_Pred1(Pred1), m_Pred2(Pred2) {}
-
-                // Operation
-                template<typename CharT>
-                bool operator()( CharT Ch ) const
-                {
-                    return m_Pred1(Ch) || m_Pred2(Ch);
-                }
-
-            private:
-                Pred1T m_Pred1;
-                Pred2T m_Pred2;
-            };
-
-            // class_not composition predicate
-            template< typename PredT >
-            struct pred_notF :
-                public predicate_facade< pred_notF<PredT> >
-            {
-            public:
-                // Boost.ResultOf support
-                typedef bool result_type;
-
-                // Constructor
-                pred_notF( PredT Pred ) : m_Pred(Pred) {}
-
-                // Operation
-                template<typename CharT>
-                bool operator()( CharT Ch ) const
-                {
-                    return !m_Pred(Ch);
-                }
-
-            private:
-                PredT m_Pred;
-            };
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_CLASSIFICATION_DETAIL_HPP
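These functors sit behind the public classification predicates; composing them with trim is a typical use (sketch assuming the public classification.hpp and trim.hpp headers):

#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <iostream>
#include <string>

int main() {
    std::string s = "  42  ";
    // pred_orF backs operator||: trim everything that is a space/tab or a digit.
    boost::algorithm::trim_if(s, boost::algorithm::is_any_of(" \t") || boost::algorithm::is_digit());
    std::cout << '[' << s << "]\n";  // prints: [] since every character matched the predicate
    return 0;
}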
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format.hpp
deleted file mode 100644
index b398750..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format.hpp
+++ /dev/null
@@ -1,204 +0,0 @@
-//  Boost string_algo library find_format.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-// 
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_FORMAT_DETAIL_HPP
-#define BOOST_STRING_FIND_FORMAT_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/const_iterator.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/algorithm/string/detail/find_format_store.hpp>
-#include <boost/algorithm/string/detail/replace_storage.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-// find_format_copy (iterator variant) implementation -------------------------------//
-
-           template< 
-                typename OutputIteratorT,
-                typename InputT,
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline OutputIteratorT find_format_copy_impl2(
-                OutputIteratorT Output,
-                const InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult,
-                const FormatResultT& FormatResult )
-            {       
-                typedef find_format_store<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<InputT>::type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-
-                if ( !M )
-                {
-                    // Match not found - return original sequence
-                    Output = std::copy( ::boost::begin(Input), ::boost::end(Input), Output );
-                    return Output;
-                }
-
-                // Copy the beginning of the sequence
-                Output = std::copy( ::boost::begin(Input), ::boost::begin(M), Output );
-                // Format find result
-                // Copy formatted result
-                Output = std::copy( ::boost::begin(M.format_result()), ::boost::end(M.format_result()), Output );
-                // Copy the rest of the sequence
-                Output = std::copy( M.end(), ::boost::end(Input), Output );
-
-                return Output;
-            }
-
-            template< 
-                typename OutputIteratorT,
-                typename InputT,
-                typename FormatterT,
-                typename FindResultT >
-            inline OutputIteratorT find_format_copy_impl(
-                OutputIteratorT Output,
-                const InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult )
-            {   
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    return ::boost::algorithm::detail::find_format_copy_impl2( 
-                        Output,
-                        Input,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                } else {
-                    return std::copy( ::boost::begin(Input), ::boost::end(Input), Output );
-                }
-            }
-
- 
-// find_format_copy implementation --------------------------------------------------//
-
-           template< 
-                typename InputT, 
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline InputT find_format_copy_impl2(
-                const InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult,
-                const FormatResultT& FormatResult)
-            {
-                typedef find_format_store<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<InputT>::type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-
-                if ( !M )
-                {
-                    // Match not found - return original sequence
-                    return InputT( Input );
-                }
-
-                InputT Output;
-                // Copy the beginning of the sequence
-                boost::algorithm::detail::insert( Output, ::boost::end(Output), ::boost::begin(Input), M.begin() );
-                // Copy formatted result
-                boost::algorithm::detail::insert( Output, ::boost::end(Output), M.format_result() );
-                // Copy the rest of the sequence
-                boost::algorithm::detail::insert( Output, ::boost::end(Output), M.end(), ::boost::end(Input) );
-
-                return Output;
-            }
-
-            template< 
-                typename InputT, 
-                typename FormatterT,
-                typename FindResultT >
-            inline InputT find_format_copy_impl(
-                const InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult)
-            {
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    return ::boost::algorithm::detail::find_format_copy_impl2(
-                        Input,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                } else {
-                    return Input;
-                }
-            }
-
- // replace implementation ----------------------------------------------------//
-        
-            template<
-                typename InputT,
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline void find_format_impl2( 
-                InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult,
-                const FormatResultT& FormatResult)
-            {
-                typedef find_format_store<
-                    BOOST_STRING_TYPENAME 
-                        range_iterator<InputT>::type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-
-                if ( !M )
-                {
-                    // Match not found - leave the input unchanged
-                    return;
-                }
-
-                // Replace match
-                ::boost::algorithm::detail::replace( Input, M.begin(), M.end(), M.format_result() );
-            }
-
-            template<
-                typename InputT,
-                typename FormatterT,
-                typename FindResultT >
-            inline void find_format_impl( 
-                InputT& Input,
-                FormatterT Formatter,
-                const FindResultT& FindResult)
-            {
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    ::boost::algorithm::detail::find_format_impl2(
-                        Input,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                }
-            }
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FIND_FORMAT_DETAIL_HPP
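These routines back the public find_format family; a single in-place replacement built from a finder and a formatter (sketch assuming the public find_format.hpp, finder.hpp and formatter.hpp headers) looks like:

#include <boost/algorithm/string/find_format.hpp>
#include <boost/algorithm/string/finder.hpp>
#include <boost/algorithm/string/formatter.hpp>
#include <iostream>
#include <string>

int main() {
    std::string text = "the dog chased the dog";
    // Replace only the first occurrence of "dog" with "cat".
    boost::algorithm::find_format(text,
                                  boost::algorithm::first_finder("dog"),
                                  boost::algorithm::const_formatter("cat"));
    std::cout << text << '\n';  // the cat chased the dog
    return 0;
}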
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_all.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_all.hpp
deleted file mode 100644
index 52930c8..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_all.hpp
+++ /dev/null
@@ -1,273 +0,0 @@
-//  Boost string_algo library find_format_all.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_FORMAT_ALL_DETAIL_HPP
-#define BOOST_STRING_FIND_FORMAT_ALL_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/const_iterator.hpp>
-#include <boost/range/value_type.hpp>
-#include <boost/algorithm/string/detail/find_format_store.hpp>
-#include <boost/algorithm/string/detail/replace_storage.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-// find_format_all_copy (iterator variant) implementation ---------------------------//
-
-           template< 
-                typename OutputIteratorT,
-                typename InputT,
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline OutputIteratorT find_format_all_copy_impl2(
-                OutputIteratorT Output,
-                const InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                const FindResultT& FindResult,
-                const FormatResultT& FormatResult )
-            {       
-                typedef BOOST_STRING_TYPENAME 
-                    range_const_iterator<InputT>::type input_iterator_type; 
-
-                typedef find_format_store<
-                        input_iterator_type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-
-                // Initialize last match
-                input_iterator_type LastMatch=::boost::begin(Input);
-
-                // Iterate through all matches
-                while( M )
-                {
-                    // Copy the beginning of the sequence
-                    Output = std::copy( LastMatch, M.begin(), Output );
-                    // Copy formatted result
-                    Output = std::copy( ::boost::begin(M.format_result()), ::boost::end(M.format_result()), Output );
-
-                    // Proceed to the next match
-                    LastMatch=M.end();
-                    M=Finder( LastMatch, ::boost::end(Input) );
-                }
-
-                // Copy the rest of the sequence
-                Output = std::copy( LastMatch, ::boost::end(Input), Output );
-
-                return Output;
-            }
-
-            template< 
-                typename OutputIteratorT,
-                typename InputT,
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT >
-            inline OutputIteratorT find_format_all_copy_impl(
-                OutputIteratorT Output,
-                const InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                const FindResultT& FindResult )
-            {   
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    return ::boost::algorithm::detail::find_format_all_copy_impl2( 
-                        Output,
-                        Input,
-                        Finder,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                } else {
-                    return std::copy( ::boost::begin(Input), ::boost::end(Input), Output );
-                }
-            }
-
- // find_format_all_copy implementation ----------------------------------------------//
-
-           template< 
-                typename InputT, 
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline InputT find_format_all_copy_impl2(
-                const InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                const FindResultT& FindResult,
-                const FormatResultT& FormatResult)
-            {
-                typedef BOOST_STRING_TYPENAME 
-                    range_const_iterator<InputT>::type input_iterator_type; 
-
-                typedef find_format_store<
-                        input_iterator_type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-
-                // Initialize last match
-                input_iterator_type LastMatch=::boost::begin(Input);
-
-                // Output temporary
-                InputT Output;
-
-                // Iterate through all matches
-                while( M )
-                {
-                    // Copy the beginning of the sequence
-                    boost::algorithm::detail::insert( Output, ::boost::end(Output), LastMatch, M.begin() );
-                    // Copy formatted result
-                    boost::algorithm::detail::insert( Output, ::boost::end(Output), M.format_result() );
-
-                    // Proceed to the next match
-                    LastMatch=M.end();
-                    M=Finder( LastMatch, ::boost::end(Input) );
-                }
-
-                // Copy the rest of the sequence
-                ::boost::algorithm::detail::insert( Output, ::boost::end(Output), LastMatch, ::boost::end(Input) );
-
-                return Output;
-            }
-
-            template< 
-                typename InputT, 
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT >
-            inline InputT find_format_all_copy_impl(
-                const InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                const FindResultT& FindResult)
-            {
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    return ::boost::algorithm::detail::find_format_all_copy_impl2(
-                        Input,
-                        Finder,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                } else {
-                    return Input;
-                }
-            }
-
- // find_format_all implementation ------------------------------------------------//
-        
-            template<
-                typename InputT,
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT,
-                typename FormatResultT >
-            inline void find_format_all_impl2( 
-                InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                FindResultT FindResult,
-                FormatResultT FormatResult)
-            {
-                typedef BOOST_STRING_TYPENAME 
-                    range_iterator<InputT>::type input_iterator_type; 
-                typedef find_format_store<
-                        input_iterator_type, 
-                        FormatterT,
-                        FormatResultT > store_type;
-
-                // Create store for the find result
-                store_type M( FindResult, FormatResult, Formatter );
-          
-                // Instantiate replacement storage
-                std::deque<
-                    BOOST_STRING_TYPENAME range_value<InputT>::type> Storage;
-
-                // Initialize replacement iterators
-                input_iterator_type InsertIt=::boost::begin(Input);
-                input_iterator_type SearchIt=::boost::begin(Input);
-                
-                while( M )
-                {
-                    // process the segment
-                    InsertIt=process_segment( 
-                        Storage,
-                        Input,
-                        InsertIt,
-                        SearchIt,
-                        M.begin() );
-                    
-                    // Adjust search iterator
-                    SearchIt=M.end();
-
-                    // Copy formatted replace to the storage
-                    ::boost::algorithm::detail::copy_to_storage( Storage, M.format_result() );
-
-                    // Find range for a next match
-                    M=Finder( SearchIt, ::boost::end(Input) );
-                }
-
-                // process the last segment
-                InsertIt=::boost::algorithm::detail::process_segment( 
-                    Storage,
-                    Input,
-                    InsertIt,
-                    SearchIt,
-                    ::boost::end(Input) );
-                
-                if ( Storage.empty() )
-                {
-                    // Truncate input
-                    ::boost::algorithm::detail::erase( Input, InsertIt, ::boost::end(Input) );
-                }
-                else
-                {
-                    // Copy remaining data to the end of input
-                    ::boost::algorithm::detail::insert( Input, ::boost::end(Input), Storage.begin(), Storage.end() );
-                }
-            }
-
-            template<
-                typename InputT,
-                typename FinderT,
-                typename FormatterT,
-                typename FindResultT >
-            inline void find_format_all_impl( 
-                InputT& Input,
-                FinderT Finder,
-                FormatterT Formatter,
-                FindResultT FindResult)
-            {
-                if( ::boost::algorithm::detail::check_find_result(Input, FindResult) ) {
-                    ::boost::algorithm::detail::find_format_all_impl2(
-                        Input,
-                        Finder,
-                        Formatter,
-                        FindResult,
-                        Formatter(FindResult) );
-                }
-            }
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FIND_FORMAT_ALL_DETAIL_HPP
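The *_all variants above drive every-occurrence replacement; replace_all is essentially this call (same header assumptions as the previous sketch):

#include <boost/algorithm/string/find_format.hpp>
#include <boost/algorithm/string/finder.hpp>
#include <boost/algorithm/string/formatter.hpp>
#include <iostream>
#include <string>

int main() {
    std::string text = "the dog chased the dog";
    // Replace every occurrence of "dog" with "cat".
    boost::algorithm::find_format_all(text,
                                      boost::algorithm::first_finder("dog"),
                                      boost::algorithm::const_formatter("cat"));
    std::cout << text << '\n';  // the cat chased the cat
    return 0;
}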
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_store.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_store.hpp
deleted file mode 100644
index b9f4a88..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_format_store.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//  Boost string_algo library find_format_store.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_FORMAT_STORE_DETAIL_HPP
-#define BOOST_STRING_FIND_FORMAT_STORE_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/range/iterator_range_core.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  temporary format and find result storage --------------------------------//
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
-#pragma warning(push)
-#pragma warning(disable:4512) //assignment operator could not be generated
-#endif
-            template< 
-                typename ForwardIteratorT,
-                typename FormatterT,
-                typename FormatResultT >
-            class find_format_store : 
-                public iterator_range<ForwardIteratorT>
-            {
-            public:
-                // typedefs
-                typedef iterator_range<ForwardIteratorT> base_type;
-                typedef FormatterT  formatter_type;
-                typedef FormatResultT format_result_type;
-                
-            public:
-                // Construction
-                find_format_store( 
-                        const base_type& FindResult,
-                        const format_result_type& FormatResult,
-                        const formatter_type& Formatter ) :
-                    base_type(FindResult),
-                    m_FormatResult(FormatResult),
-                    m_Formatter(Formatter) {}
-
-                // Assignment
-                template< typename FindResultT >
-                find_format_store& operator=( FindResultT FindResult )
-                {
-                    iterator_range<ForwardIteratorT>::operator=(FindResult);
-                    if( !this->empty() ) {
-                        m_FormatResult=m_Formatter(FindResult);
-                    }
-                    
-                    return *this;
-                }
-
-                // Retrieve format result
-                const format_result_type& format_result()
-                {   
-                    return m_FormatResult;
-                }
-
-            private:
-                format_result_type m_FormatResult;
-                const formatter_type& m_Formatter;
-            };
-
-            template<typename InputT, typename FindResultT>
-            bool check_find_result(InputT&, FindResultT& FindResult)
-            {
-                typedef BOOST_STRING_TYPENAME 
-                    range_const_iterator<InputT>::type input_iterator_type; 
-                iterator_range<input_iterator_type> ResultRange(FindResult);
-                return !ResultRange.empty();
-            }
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
-#pragma warning(pop)
-#endif
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FIND_FORMAT_STORE_DETAIL_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_iterator.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_iterator.hpp
deleted file mode 100644
index 4f90a98..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/find_iterator.hpp
+++ /dev/null
@@ -1,87 +0,0 @@
-//  Boost string_algo library find_iterator.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_ITERATOR_DETAIL_HPP
-#define BOOST_STRING_FIND_ITERATOR_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-#include <boost/iterator/iterator_categories.hpp>
-#include <boost/function.hpp>
-
-namespace boost {
-    namespace algorithm { 
-        namespace detail {
-
-//  find_iterator base -----------------------------------------------//
-
-            // Find iterator base
-            template<typename IteratorT>
-            class find_iterator_base
-            {
-            protected:
-                // typedefs
-                typedef IteratorT input_iterator_type;
-                typedef iterator_range<IteratorT> match_type;
-                typedef function2<
-                    match_type, 
-                    input_iterator_type, 
-                    input_iterator_type> finder_type;
-                
-            protected:
-            // Protected construction/destruction
-
-                // Default constructor
-                find_iterator_base() {}
-                // Copy construction
-                find_iterator_base( const find_iterator_base& Other ) :
-                    m_Finder(Other.m_Finder) {}
-                
-                // Constructor
-                template<typename FinderT>
-                find_iterator_base( FinderT Finder, int ) :
-                    m_Finder(Finder) {}
-
-                // Destructor
-                ~find_iterator_base() {}
-
-                // Find operation
-                match_type do_find( 
-                    input_iterator_type Begin,
-                    input_iterator_type End ) const
-                {
-                    if (!m_Finder.empty())
-                    {
-                        return m_Finder(Begin,End);
-                    }
-                    else
-                    {
-                        return match_type(End,End);
-                    }
-                }
-
-                // Check
-                bool is_null() const
-                {
-                    return m_Finder.empty();
-                }
-
-            private:
-                // Finder
-                finder_type m_Finder;
-            };
-
-       } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_FIND_ITERATOR_DETAIL_HPP
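find_iterator_base underlies the public find_iterator; iterating over every match of a pattern (sketch assuming the public find_iterator.hpp and finder.hpp headers) looks like:

#include <boost/algorithm/string/find_iterator.hpp>
#include <boost/algorithm/string/finder.hpp>
#include <iostream>
#include <string>

int main() {
    std::string text = "ab__ab__ab";
    typedef boost::algorithm::find_iterator<std::string::iterator> find_it;
    // A default-constructed find_iterator is the past-the-end marker.
    for (find_it it = boost::algorithm::make_find_iterator(text, boost::algorithm::first_finder("ab"));
         it != find_it(); ++it) {
        // *it is an iterator_range pointing into text.
        std::cout << std::string(it->begin(), it->end()) << '\n';
    }
    return 0;
}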
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder.hpp
deleted file mode 100644
index a2a9582..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder.hpp
+++ /dev/null
@@ -1,639 +0,0 @@
-//  Boost string_algo library finder.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FINDER_DETAIL_HPP
-#define BOOST_STRING_FINDER_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/algorithm/string/constants.hpp>
-#include <boost/detail/iterator.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/empty.hpp>
-#include <boost/range/as_literal.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-
-//  find first functor -----------------------------------------------//
-
-            // find a subsequence in the sequence ( functor )
-            /*
-                Returns a pair <begin,end> marking the subsequence in the sequence.
-                If the find fails, functor returns <End,End>
-            */
-            template<typename SearchIteratorT,typename PredicateT>
-            struct first_finderF
-            {
-                typedef SearchIteratorT search_iterator_type;
-
-                // Construction
-                template< typename SearchT >
-                first_finderF( const SearchT& Search, PredicateT Comp ) :
-                    m_Search(::boost::begin(Search), ::boost::end(Search)), m_Comp(Comp) {}
-                first_finderF(
-                        search_iterator_type SearchBegin,
-                        search_iterator_type SearchEnd,
-                        PredicateT Comp ) :
-                    m_Search(SearchBegin, SearchEnd), m_Comp(Comp) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-                    typedef ForwardIteratorT input_iterator_type;
-
-                    // Outer loop
-                    for(input_iterator_type OuterIt=Begin;
-                        OuterIt!=End;
-                        ++OuterIt)
-                    {
-                        // Sanity check
-                        if( boost::empty(m_Search) )
-                            return result_type( End, End );
-
-                        input_iterator_type InnerIt=OuterIt;
-                        search_iterator_type SubstrIt=m_Search.begin();
-                        for(;
-                            InnerIt!=End && SubstrIt!=m_Search.end();
-                            ++InnerIt,++SubstrIt)
-                        {
-                            if( !( m_Comp(*InnerIt,*SubstrIt) ) )
-                                break;
-                        }
-
-                        // Substring matching succeeded
-                        if ( SubstrIt==m_Search.end() )
-                            return result_type( OuterIt, InnerIt );
-                    }
-
-                    return result_type( End, End );
-                }
-
-            private:
-                iterator_range<search_iterator_type> m_Search;
-                PredicateT m_Comp;
-            };
-
-//  find last functor -----------------------------------------------//
-
-            // find the last match of a subsequence in the sequence ( functor )
-            /*
-                Returns a pair <begin,end> marking the subsequence in the sequence.
-                If the find fails, returns <End,End>
-            */
-            template<typename SearchIteratorT, typename PredicateT>
-            struct last_finderF
-            {
-                typedef SearchIteratorT search_iterator_type;
-                typedef first_finderF<
-                    search_iterator_type,
-                    PredicateT> first_finder_type;
-
-                // Construction
-                template< typename SearchT >
-                last_finderF( const SearchT& Search, PredicateT Comp ) :
-                    m_Search(::boost::begin(Search), ::boost::end(Search)), m_Comp(Comp) {}
-                last_finderF(
-                        search_iterator_type SearchBegin,
-                        search_iterator_type SearchEnd,
-                        PredicateT Comp ) :
-                    m_Search(SearchBegin, SearchEnd), m_Comp(Comp) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-
-                    if( boost::empty(m_Search) )
-                        return result_type( End, End );
-
-                    typedef BOOST_STRING_TYPENAME boost::detail::
-                        iterator_traits<ForwardIteratorT>::iterator_category category;
-
-                    return findit( Begin, End, category() );
-                }
-
-            private:
-                // forward iterator
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                findit(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End,
-                    std::forward_iterator_tag ) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-
-                    first_finder_type first_finder(
-                        m_Search.begin(), m_Search.end(), m_Comp );
-
-                    result_type M=first_finder( Begin, End );
-                    result_type Last=M;
-
-                    while( M )
-                    {
-                        Last=M;
-                        M=first_finder( ::boost::end(M), End );
-                    }
-
-                    return Last;
-                }
-
-                // bidirectional iterator
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                findit(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End,
-                    std::bidirectional_iterator_tag ) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-                    typedef ForwardIteratorT input_iterator_type;
-
-                    // Outer loop
-                    for(input_iterator_type OuterIt=End;
-                        OuterIt!=Begin; )
-                    {
-                        input_iterator_type OuterIt2=--OuterIt;
-
-                        input_iterator_type InnerIt=OuterIt2;
-                        search_iterator_type SubstrIt=m_Search.begin();
-                        for(;
-                            InnerIt!=End && SubstrIt!=m_Search.end();
-                            ++InnerIt,++SubstrIt)
-                        {
-                            if( !( m_Comp(*InnerIt,*SubstrIt) ) )
-                                break;
-                        }
-
-                        // Substring matching succeeded
-                        if( SubstrIt==m_Search.end() )
-                            return result_type( OuterIt2, InnerIt );
-                    }
-
-                    return result_type( End, End );
-                }
-
-            private:
-                iterator_range<search_iterator_type> m_Search;
-                PredicateT m_Comp;
-            };
-
-//  find n-th functor -----------------------------------------------//
-
-            // find the n-th match of a subsequence in the sequence ( functor )
-            /*
-                Returns a pair <begin,end> marking the subsequence in the sequence.
-                If the find fails, returns <End,End>
-            */
-            template<typename SearchIteratorT, typename PredicateT>
-            struct nth_finderF
-            {
-                typedef SearchIteratorT search_iterator_type;
-                typedef first_finderF<
-                    search_iterator_type,
-                    PredicateT> first_finder_type;
-                typedef last_finderF<
-                    search_iterator_type,
-                    PredicateT> last_finder_type;
-
-                // Construction
-                template< typename SearchT >
-                nth_finderF(
-                        const SearchT& Search,
-                        int Nth,
-                        PredicateT Comp) :
-                    m_Search(::boost::begin(Search), ::boost::end(Search)),
-                    m_Nth(Nth),
-                    m_Comp(Comp) {}
-                nth_finderF(
-                        search_iterator_type SearchBegin,
-                        search_iterator_type SearchEnd,
-                        int Nth,
-                        PredicateT Comp) :
-                    m_Search(SearchBegin, SearchEnd),
-                    m_Nth(Nth),
-                    m_Comp(Comp) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    if(m_Nth>=0)
-                    {
-                        return find_forward(Begin, End, m_Nth);
-                    }
-                    else
-                    {
-                        return find_backward(Begin, End, -m_Nth);
-                    }
-
-                }
-
-            private:
-                // Implementation helpers
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                find_forward(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End,
-                    unsigned int N) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-
-                    // Sanity check
-                    if( boost::empty(m_Search) )
-                        return result_type( End, End );
-
-                    // Instantiate find functor
-                    first_finder_type first_finder(
-                        m_Search.begin(), m_Search.end(), m_Comp );
-
-                    result_type M( Begin, Begin );
-
-                    for( unsigned int n=0; n<=N; ++n )
-                    {
-                        // find next match
-                        M=first_finder( ::boost::end(M), End );
-
-                        if ( !M )
-                        {
-                            // Subsequence not found, return
-                            return M;
-                        }
-                    }
-
-                    return M;
-                }
-
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                find_backward(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End,
-                    unsigned int N) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-
-                    // Sanity check
-                    if( boost::empty(m_Search) )
-                        return result_type( End, End );
-
-                    // Instantiate find functor
-                    last_finder_type last_finder(
-                        m_Search.begin(), m_Search.end(), m_Comp );
-
-                    result_type M( End, End );
-
-                    for( unsigned int n=1; n<=N; ++n )
-                    {
-                        // find next match
-                        M=last_finder( Begin, ::boost::begin(M) );
-
-                        if ( !M )
-                        {
-                            // Subsequence not found, return
-                            return M;
-                        }
-                    }
-
-                    return M;
-                }
-
-
-            private:
-                iterator_range<search_iterator_type> m_Search;
-                int m_Nth;
-                PredicateT m_Comp;
-            };
-
-//  find head/tail implementation helpers ---------------------------//
-
-            template<typename ForwardIteratorT>
-                iterator_range<ForwardIteratorT>
-            find_head_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N,
-                std::forward_iterator_tag )
-            {
-                typedef ForwardIteratorT input_iterator_type;
-                typedef iterator_range<ForwardIteratorT> result_type;
-
-                input_iterator_type It=Begin;
-                for(
-                    unsigned int Index=0;
-                    Index<N && It!=End; ++Index,++It ) {};
-
-                return result_type( Begin, It );
-            }
-
-            template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-            find_head_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N,
-                std::random_access_iterator_tag )
-            {
-                typedef iterator_range<ForwardIteratorT> result_type;
-
-                if ( (End<=Begin) || ( static_cast<unsigned int>(End-Begin) < N ) )
-                    return result_type( Begin, End );
-
-                return result_type(Begin,Begin+N);
-            }
-
-            // Find head implementation
-            template<typename ForwardIteratorT>
-                iterator_range<ForwardIteratorT>
-            find_head_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N )
-            {
-                typedef BOOST_STRING_TYPENAME boost::detail::
-                    iterator_traits<ForwardIteratorT>::iterator_category category;
-
-                return ::boost::algorithm::detail::find_head_impl( Begin, End, N, category() );
-            }
-
-            template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-            find_tail_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N,
-                std::forward_iterator_tag )
-            {
-                typedef ForwardIteratorT input_iterator_type;
-                typedef iterator_range<ForwardIteratorT> result_type;
-
-                unsigned int Index=0;
-                input_iterator_type It=Begin;
-                input_iterator_type It2=Begin;
-
-                // Advance It2 by N increments
-                for( Index=0; Index<N && It2!=End; ++Index,++It2 ) {};
-
-                // Advance It, It2 to the end
-                for(; It2!=End; ++It,++It2 ) {};
-
-                return result_type( It, It2 );
-            }
-
-            template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-            find_tail_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N,
-                std::bidirectional_iterator_tag )
-            {
-                typedef ForwardIteratorT input_iterator_type;
-                typedef iterator_range<ForwardIteratorT> result_type;
-
-                input_iterator_type It=End;
-                for(
-                    unsigned int Index=0;
-                    Index<N && It!=Begin; ++Index,--It ) {};
-
-                return result_type( It, End );
-            }
-
-            template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-            find_tail_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N,
-                std::random_access_iterator_tag )
-            {
-                typedef iterator_range<ForwardIteratorT> result_type;
-
-                if ( (End<=Begin) || ( static_cast<unsigned int>(End-Begin) < N ) )
-                    return result_type( Begin, End );
-
-                return result_type( End-N, End );
-            }
-
-            // Find tail implementation
-            template< typename ForwardIteratorT >
-            iterator_range<ForwardIteratorT>
-            find_tail_impl(
-                ForwardIteratorT Begin,
-                ForwardIteratorT End,
-                unsigned int N )
-            {
-                typedef BOOST_STRING_TYPENAME boost::detail::
-                    iterator_traits<ForwardIteratorT>::iterator_category category;
-
-                return ::boost::algorithm::detail::find_tail_impl( Begin, End, N, category() );
-            }
-
-
-
-//  find head functor -----------------------------------------------//
-
-
-            // find a head in the sequence ( functor )
-            /*
-                This functor finds a head of the specified range. For
-                a specified N, the head is a subsequence of the N starting
-                elements of the range.
-            */
-            struct head_finderF
-            {
-                // Construction
-                head_finderF( int N ) : m_N(N) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    if(m_N>=0)
-                    {
-                        return ::boost::algorithm::detail::find_head_impl( Begin, End, m_N );
-                    }
-                    else
-                    {
-                        iterator_range<ForwardIteratorT> Res=
-                            ::boost::algorithm::detail::find_tail_impl( Begin, End, -m_N );
-
-                        return ::boost::make_iterator_range(Begin, Res.begin());
-                    }
-                }
-
-            private:
-                int m_N;
-            };
-
-//  find tail functor -----------------------------------------------//
-
-
-            // find a tail in the sequence ( functor )
-            /*
-                This functor finds a tail of the specified range. For
-                a specified N, the tail is a subsequence of the N trailing
-                elements of the range.
-            */
-            struct tail_finderF
-            {
-                // Construction
-                tail_finderF( int N ) : m_N(N) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    if(m_N>=0)
-                    {
-                        return ::boost::algorithm::detail::find_tail_impl( Begin, End, m_N );
-                    }
-                    else
-                    {
-                        iterator_range<ForwardIteratorT> Res=
-                            ::boost::algorithm::detail::find_head_impl( Begin, End, -m_N );
-
-                        return ::boost::make_iterator_range(Res.end(), End);
-                    }
-                }
-
-            private:
-                int m_N;
-            };
-
-//  find token functor -----------------------------------------------//
-
-            // find a token in a sequence ( functor )
-            /*
-                This find functor finds a token specified by a predicate
-                in a sequence. It is equivalent to the std::find algorithm,
-                except that it returns a range instead of a single
-                iterator.
-
-                If bCompress is set to true, adjacent matching tokens are
-                concatenated into one match.
-            */
-            template< typename PredicateT >
-            struct token_finderF
-            {
-                // Construction
-                token_finderF(
-                    PredicateT Pred,
-                    token_compress_mode_type eCompress=token_compress_off ) :
-                        m_Pred(Pred), m_eCompress(eCompress) {}
-
-                // Operation
-                template< typename ForwardIteratorT >
-                iterator_range<ForwardIteratorT>
-                operator()(
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) const
-                {
-                    typedef iterator_range<ForwardIteratorT> result_type;
-
-                    ForwardIteratorT It=std::find_if( Begin, End, m_Pred );
-
-                    if( It==End )
-                    {
-                        return result_type( End, End );
-                    }
-                    else
-                    {
-                        ForwardIteratorT It2=It;
-
-                        if( m_eCompress==token_compress_on )
-                        {
-                            // Find first non-matching character
-                            while( It2!=End && m_Pred(*It2) ) ++It2;
-                        }
-                        else
-                        {
-                            // Advance by one position
-                            ++It2;
-                        }
-
-                        return result_type( It, It2 );
-                    }
-                }
-
-            private:
-                PredicateT m_Pred;
-                token_compress_mode_type m_eCompress;
-            };
-
-//  find range functor -----------------------------------------------//
-
-            // find a range in the sequence ( functor )
-            /*
-                This functor does not actually perform any find operation.
-                It always returns the given iterator range as a result.
-            */
-            template<typename ForwardIterator1T>
-            struct range_finderF
-            {
-                typedef ForwardIterator1T input_iterator_type;
-                typedef iterator_range<input_iterator_type> result_type;
-
-                // Construction
-                range_finderF(
-                    input_iterator_type Begin,
-                    input_iterator_type End ) : m_Range(Begin, End) {}
-
-                range_finderF(const iterator_range<input_iterator_type>& Range) :
-                    m_Range(Range) {}
-
-                // Operation
-                template< typename ForwardIterator2T >
-                iterator_range<ForwardIterator2T>
-                operator()(
-                    ForwardIterator2T,
-                    ForwardIterator2T ) const
-                {
-#if BOOST_WORKAROUND( __MWERKS__, <= 0x3003 ) 
-                    return iterator_range<const ForwardIterator2T>(this->m_Range);
-#else
-                    return m_Range;
-#endif
-                }
-
-            private:
-                iterator_range<input_iterator_type> m_Range;
-            };
-
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FINDER_DETAIL_HPP
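
The first/last/nth finders in the header above all reduce to the same outer/inner scan shown in first_finderF. A std-only sketch of that scan, parameterized on a comparison predicate (names are illustrative, not the Boost API):

#include <cctype>
#include <iostream>
#include <string>
#include <utility>

// Mirrors first_finderF::operator(): return the first window of [begin,end)
// that matches the whole pattern under the predicate, or <end,end> on failure.
template <typename It, typename SIt, typename Pred>
std::pair<It, It> find_first_range(It begin, It end, SIt s_begin, SIt s_end, Pred comp) {
    if (s_begin == s_end) return {end, end};                 // empty pattern: treat as no match
    for (It outer = begin; outer != end; ++outer) {
        It inner = outer;
        SIt sub = s_begin;
        while (inner != end && sub != s_end && comp(*inner, *sub)) { ++inner; ++sub; }
        if (sub == s_end) return {outer, inner};             // whole pattern matched here
    }
    return {end, end};
}

int main() {
    std::string text = "hello world", pattern = "WORLD";
    auto r = find_first_range(text.begin(), text.end(), pattern.begin(), pattern.end(),
                              [](char a, char b) { return std::tolower(a) == std::tolower(b); });
    std::cout << std::string(r.first, r.second) << "\n";     // prints "world"
}
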
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder_regex.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder_regex.hpp
deleted file mode 100644
index 9cb01cf..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/finder_regex.hpp
+++ /dev/null
@@ -1,122 +0,0 @@
-//  Boost string_algo library find_regex.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FINDER_REGEX_DETAIL_HPP
-#define BOOST_STRING_FINDER_REGEX_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/regex.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  regex find functor -----------------------------------------------//
-
-            // regex search result
-            template<typename IteratorT>
-            struct regex_search_result : 
-                public iterator_range<IteratorT>
-            {
-                typedef regex_search_result<IteratorT> type;
-                typedef iterator_range<IteratorT> base_type;
-                typedef BOOST_STRING_TYPENAME base_type::value_type value_type;
-                typedef BOOST_STRING_TYPENAME base_type::difference_type difference_type;
-                typedef BOOST_STRING_TYPENAME base_type::const_iterator const_iterator;
-                typedef BOOST_STRING_TYPENAME base_type::iterator iterator;
-                typedef boost::match_results<iterator> match_results_type;
-
-                // Construction
-
-                // Construction from the match result
-                regex_search_result( const match_results_type& MatchResults ) :
-                    base_type( MatchResults[0].first, MatchResults[0].second ),
-                    m_MatchResults( MatchResults ) {}
-                
-                // Construction of empty match. End iterator has to be specified
-                regex_search_result( IteratorT End ) :
-                    base_type( End, End ) {}
-
-                regex_search_result( const regex_search_result& Other ) :
-                    base_type( Other.begin(), Other.end() ),
-                    m_MatchResults( Other.m_MatchResults ) {}
-
-                // Assignment
-                regex_search_result& operator=( const regex_search_result& Other )
-                {
-                    base_type::operator=( Other );
-                    m_MatchResults=Other.m_MatchResults;
-                    return *this;
-                }
-
-                // Match result retrieval
-                const match_results_type& match_results() const
-                {
-                    return m_MatchResults;
-                }
-
-            private:
-                // Saved match result
-                match_results_type m_MatchResults;
-            };
-
-            // find_regex
-            /*
-                Regex based search functor
-            */
-            template<typename RegExT>
-            struct find_regexF
-            {
-                typedef RegExT regex_type;
-                typedef const RegExT& regex_reference_type;
-                    
-                // Construction
-                find_regexF( regex_reference_type Rx, match_flag_type MatchFlags = match_default ) : 
-                    m_Rx(Rx), m_MatchFlags(MatchFlags) {}   
-
-                // Operation
-                template< typename ForwardIteratorT >
-                regex_search_result<ForwardIteratorT>
-                operator()( 
-                    ForwardIteratorT Begin, 
-                    ForwardIteratorT End ) const
-                {
-                    typedef ForwardIteratorT input_iterator_type;
-                    typedef regex_search_result<ForwardIteratorT> result_type;
-
-                    // instantiate match result
-                    match_results<input_iterator_type> result;
-                    // search for a match
-                    if ( ::boost::regex_search( Begin, End, result, m_Rx, m_MatchFlags ) )
-                    {
-                        // construct a result
-                        return result_type( result );
-                    }
-                    else
-                    {
-                        // empty result
-                        return result_type( End );
-                    }
-                }
-
-            private:
-                regex_reference_type m_Rx; // Regexp
-                match_flag_type m_MatchFlags;     // match flags
-            };
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FINDER_REGEX_DETAIL_HPP
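
find_regexF in the header above is a thin wrapper: run a regex search and hand back the matched range (keeping the match_results for later formatting). A std::regex sketch of the same shape, with illustrative names:

#include <iostream>
#include <regex>
#include <string>
#include <utility>

// Mirrors find_regexF::operator(): search [begin,end) and return the matched
// range, or the empty range <end,end> when nothing matches.
std::pair<std::string::const_iterator, std::string::const_iterator>
find_regex_range(std::string::const_iterator begin,
                 std::string::const_iterator end,
                 const std::regex& rx) {
    std::smatch m;
    if (std::regex_search(begin, end, m, rx)) {
        return {m[0].first, m[0].second};
    }
    return {end, end};
}

int main() {
    const std::string s = "order id: 4217, status ok";
    auto r = find_regex_range(s.begin(), s.end(), std::regex("[0-9]+"));
    std::cout << std::string(r.first, r.second) << "\n";  // prints "4217"
}
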
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter.hpp
deleted file mode 100644
index c071822..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter.hpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//  Boost string_algo library formatter.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FORMATTER_DETAIL_HPP
-#define BOOST_STRING_FORMATTER_DETAIL_HPP
-
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/const_iterator.hpp>
-
-#include <boost/algorithm/string/detail/util.hpp>
-
-//  generic replace functors -----------------------------------------------//
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  const format functor ----------------------------------------------------//
-
-            // constant format functor
-            template<typename RangeT>
-            struct const_formatF
-            {
-            private:
-                typedef BOOST_STRING_TYPENAME
-                    range_const_iterator<RangeT>::type format_iterator;
-                typedef iterator_range<format_iterator> result_type;
-            
-            public:
-                // Construction
-                const_formatF(const RangeT& Format) :
-                    m_Format(::boost::begin(Format), ::boost::end(Format)) {}
-
-                // Operation
-#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x564))
-                template<typename Range2T>
-                result_type& operator()(const Range2T&)
-                {
-                    return m_Format;
-                }
-#endif
-
-                template<typename Range2T>
-                const result_type& operator()(const Range2T&) const
-                {
-                    return m_Format;
-                }
-
-            private:
-                result_type m_Format;
-            };
-
-//  identity format functor ----------------------------------------------------//
-
-            // identity format functor
-            template<typename RangeT>
-            struct identity_formatF
-            {
-                // Operation
-                template< typename Range2T >
-                const RangeT& operator()(const Range2T& Replace) const
-                {
-                    return RangeT(::boost::begin(Replace), ::boost::end(Replace));
-                }
-            };
-
-//  empty format functor ( used by erase ) ------------------------------------//
-        
-            // empty format functor
-            template< typename CharT >
-            struct empty_formatF
-            {
-                template< typename ReplaceT >
-                empty_container<CharT> operator()(const ReplaceT&) const
-                {
-                    return empty_container<CharT>();
-                }
-            };
-
-//  dissect format functor ----------------------------------------------------//
-
-            // dissect format functor
-            template<typename FinderT>
-            struct dissect_formatF
-            {
-            public:
-                // Construction
-                dissect_formatF(FinderT Finder) :
-                  m_Finder(Finder) {}
-
-                  // Operation
-                  template<typename RangeT>
-                  inline iterator_range< 
-                      BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type>
-                  operator()(const RangeT& Replace) const
-                  {
-                      return m_Finder(::boost::begin(Replace), ::boost::end(Replace));
-                  }
-
-            private:
-                FinderT m_Finder;
-            };
-
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FORMATTER_DETAIL_HPP
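
The formatters in the header above are interchangeable "what to substitute" policies: const_formatF ignores the match and returns a fixed string, while empty_formatF substitutes nothing, which is how erase is expressed as replace-with-empty. A std-only sketch of that split, with a tiny replace-all driver (illustrative names, not the Boost find_format machinery):

#include <functional>
#include <iostream>
#include <string>

// A formatter maps the matched text to its replacement.
using Formatter = std::function<std::string(const std::string& match)>;

// Constant formatter: always substitute the same text (cf. const_formatF).
Formatter const_format(std::string fmt) {
    return [fmt](const std::string&) { return fmt; };
}

// Empty formatter: substitute nothing, i.e. erase the match (cf. empty_formatF).
Formatter empty_format() {
    return [](const std::string&) { return std::string(); };
}

// Minimal replace-all driver to show the formatters in use.
std::string replace_all(std::string input, const std::string& what, const Formatter& fmt) {
    if (what.empty()) return input;                  // nothing to search for
    std::string out;
    std::size_t pos = 0, hit;
    while ((hit = input.find(what, pos)) != std::string::npos) {
        out.append(input, pos, hit - pos);           // copy text before the match
        out += fmt(input.substr(hit, what.size()));  // formatter decides the substitution
        pos = hit + what.size();
    }
    out.append(input, pos, std::string::npos);       // copy the tail
    return out;
}

int main() {
    std::cout << replace_all("a-b-c", "-", const_format(" / ")) << "\n";  // "a / b / c"
    std::cout << replace_all("a-b-c", "-", empty_format()) << "\n";       // "abc"
}
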
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter_regex.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter_regex.hpp
deleted file mode 100644
index 5f26407..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/formatter_regex.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-//  Boost string_algo library formatter_regex.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FORMATTER_REGEX_DETAIL_HPP
-#define BOOST_STRING_FORMATTER_REGEX_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <string>
-#include <boost/regex.hpp>
-#include <boost/algorithm/string/detail/finder_regex.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  regex format functor -----------------------------------------//
-
-            // regex format functor
-            template<typename StringT>
-            struct regex_formatF
-            {
-            private:
-                typedef StringT result_type;
-                typedef BOOST_STRING_TYPENAME StringT::value_type char_type;
-
-            public:
-                // Construction
-                regex_formatF( const StringT& Fmt, match_flag_type Flags=format_default ) :
-                    m_Fmt(Fmt), m_Flags( Flags ) {}
-
-                template<typename InputIteratorT>
-                result_type operator()( 
-                    const regex_search_result<InputIteratorT>& Replace ) const
-                {
-                    if ( Replace.empty() )
-                    {
-                        return result_type();
-                    }
-                    else
-                    {
-                        return Replace.match_results().format( m_Fmt, m_Flags );                      
-                    }
-                }
-            private:
-                const StringT& m_Fmt;
-                match_flag_type m_Flags;
-            };
-
-        
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_FORMATTER_REGEX_DETAIL_HPP
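
regex_formatF above delegates to match_results::format, so back-references such as $1 in the format string are expanded from the captured groups. std::match_results offers the same facility; a short sketch:

#include <iostream>
#include <regex>
#include <string>

int main() {
    const std::string s = "2024-05-17";
    std::smatch m;
    if (std::regex_search(s, m, std::regex(R"((\d{4})-(\d{2})-(\d{2}))"))) {
        // Same idea as regex_formatF: expand a format string against the match.
        std::cout << m.format("$3/$2/$1") << "\n";  // prints "17/05/2024"
    }
}
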
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/predicate.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/predicate.hpp
deleted file mode 100644
index 5acf3cc..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/predicate.hpp
+++ /dev/null
@@ -1,77 +0,0 @@
-//  Boost string_algo library predicate.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_PREDICATE_DETAIL_HPP
-#define BOOST_STRING_PREDICATE_DETAIL_HPP
-
-#include <iterator>
-#include <boost/algorithm/string/find.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  ends_with predicate implementation ----------------------------------//
-
-            template< 
-                typename ForwardIterator1T, 
-                typename ForwardIterator2T,
-                typename PredicateT>
-            inline bool ends_with_iter_select( 
-                ForwardIterator1T Begin, 
-                ForwardIterator1T End, 
-                ForwardIterator2T SubBegin,
-                ForwardIterator2T SubEnd,
-                PredicateT Comp,
-                std::bidirectional_iterator_tag)
-            {
-                ForwardIterator1T it=End;
-                ForwardIterator2T pit=SubEnd;
-                for(;it!=Begin && pit!=SubBegin;)
-                {
-                    if( !(Comp(*(--it),*(--pit))) )
-                        return false;
-                }
-
-                return pit==SubBegin;
-            }
-
-            template< 
-                typename ForwardIterator1T, 
-                typename ForwardIterator2T,
-                typename PredicateT>
-            inline bool ends_with_iter_select( 
-                ForwardIterator1T Begin, 
-                ForwardIterator1T End, 
-                ForwardIterator2T SubBegin,
-                ForwardIterator2T SubEnd,
-                PredicateT Comp,
-                std::forward_iterator_tag)
-            {
-                if ( SubBegin==SubEnd )
-                {
-                    // empty subsequence check
-                    return true;
-                }
-
-                iterator_range<ForwardIterator1T> Result
-                    =last_finder( 
-                        ::boost::make_iterator_range(SubBegin, SubEnd),
-                        Comp)(Begin, End);
-
-                return !Result.empty() && Result.end()==End;
-            }
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_PREDICATE_DETAIL_HPP
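
The bidirectional ends_with_iter_select above walks both sequences backwards in lock-step and succeeds only if the whole suffix is consumed. A std-only sketch with a usage example (illustrative names):

#include <cctype>
#include <iostream>
#include <string>

// Mirrors ends_with_iter_select (bidirectional case): compare from the back.
template <typename BiIt1, typename BiIt2, typename Pred>
bool ends_with_range(BiIt1 begin, BiIt1 end, BiIt2 sub_begin, BiIt2 sub_end, Pred comp) {
    BiIt1 it = end;
    BiIt2 pit = sub_end;
    while (it != begin && pit != sub_begin) {
        if (!comp(*--it, *--pit)) return false;  // mismatch while walking backwards
    }
    return pit == sub_begin;                     // true iff the whole suffix was matched
}

int main() {
    std::string s = "report.CSV", suffix = ".csv";
    bool hit = ends_with_range(s.begin(), s.end(), suffix.begin(), suffix.end(),
                               [](char a, char b) { return std::tolower(a) == std::tolower(b); });
    std::cout << std::boolalpha << hit << "\n";  // prints "true"
}
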
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/replace_storage.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/replace_storage.hpp
deleted file mode 100644
index db35e4c..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/replace_storage.hpp
+++ /dev/null
@@ -1,159 +0,0 @@
-//  Boost string_algo library replace_storage.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_REPLACE_STORAGE_DETAIL_HPP
-#define BOOST_STRING_REPLACE_STORAGE_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <algorithm>
-#include <boost/mpl/bool.hpp>
-#include <boost/algorithm/string/sequence_traits.hpp>
-#include <boost/algorithm/string/detail/sequence.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  storage handling routines -----------------------------------------------//
-            
-            template< typename StorageT, typename OutputIteratorT >
-            inline OutputIteratorT move_from_storage(
-                StorageT& Storage,
-                OutputIteratorT DestBegin,
-                OutputIteratorT DestEnd )
-            {
-                OutputIteratorT OutputIt=DestBegin;
-                
-                while( !Storage.empty() && OutputIt!=DestEnd )
-                {
-                    *OutputIt=Storage.front();
-                    Storage.pop_front();
-                    ++OutputIt;
-                }
-
-                return OutputIt;
-            }
-
-            template< typename StorageT, typename WhatT >
-            inline void copy_to_storage(
-                StorageT& Storage,
-                const WhatT& What )
-            {
-                Storage.insert( Storage.end(), ::boost::begin(What), ::boost::end(What) );
-            }
-
-
-//  process segment routine -----------------------------------------------//
-
-            template< bool HasStableIterators >
-            struct process_segment_helper
-            {
-                // Optimized version of process_segment for generic sequence
-                template< 
-                    typename StorageT,
-                    typename InputT,
-                    typename ForwardIteratorT >
-                ForwardIteratorT operator()(
-                    StorageT& Storage,
-                    InputT& /*Input*/,
-                    ForwardIteratorT InsertIt,
-                    ForwardIteratorT SegmentBegin,
-                    ForwardIteratorT SegmentEnd )
-                {
-                    // Copy data from the storage until the beginning of the segment
-                    ForwardIteratorT It=::boost::algorithm::detail::move_from_storage( Storage, InsertIt, SegmentBegin );
-
-                    // 3 cases are possible :
-                    //   a) Storage is empty, It==SegmentBegin
-                    //   b) Storage is empty, It!=SegmentBegin
-                    //   c) Storage is not empty
-
-                    if( Storage.empty() )
-                    {
-                        if( It==SegmentBegin )
-                        {
-                            // Case a) everything is grand, just return end of segment
-                            return SegmentEnd;
-                        }
-                        else
-                        {
-                            // Case b) move the segment backwards
-                            return std::copy( SegmentBegin, SegmentEnd, It );
-                        }
-                    }
-                    else
-                    {
-                        // Case c) -> shift the segment to the left and keep the overlap in the storage
-                        while( It!=SegmentEnd )
-                        {
-                            // Store value into storage
-                            Storage.push_back( *It );
-                            // Get the top from the storage and put it here
-                            *It=Storage.front();
-                            Storage.pop_front();
-
-                            // Advance
-                            ++It;
-                        }
-
-                        return It;
-                    }
-                }
-            };
-
-            template<>
-            struct process_segment_helper< true >
-            {
-                // Optimized version of process_segment for list-like sequence
-                template< 
-                    typename StorageT,
-                    typename InputT,
-                    typename ForwardIteratorT >
-                ForwardIteratorT operator()(
-                    StorageT& Storage,
-                    InputT& Input,
-                    ForwardIteratorT InsertIt,
-                    ForwardIteratorT SegmentBegin,
-                    ForwardIteratorT SegmentEnd )
-
-                {
-                    // Call replace to do the job
-                    ::boost::algorithm::detail::replace( Input, InsertIt, SegmentBegin, Storage );
-                    // Empty the storage
-                    Storage.clear();
-                    // Iterators were not changed, simply return the end of segment
-                    return SegmentEnd;
-                }
-            };
-
-            // Process one segment in the replace_all algorithm
-            template< 
-                typename StorageT,
-                typename InputT,
-                typename ForwardIteratorT >
-            inline ForwardIteratorT process_segment(
-                StorageT& Storage,
-                InputT& Input,
-                ForwardIteratorT InsertIt,
-                ForwardIteratorT SegmentBegin,
-                ForwardIteratorT SegmentEnd )
-            {
-                return 
-                    process_segment_helper< 
-                        has_stable_iterators<InputT>::value>()(
-                                Storage, Input, InsertIt, SegmentBegin, SegmentEnd );
-            }
-            
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-#endif  // BOOST_STRING_REPLACE_STORAGE_DETAIL_HPP
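
The storage helpers above implement a sliding in-place rewrite: pending replacement characters wait in a container and are drained into positions the scan has already passed. A minimal std::deque sketch of copy_to_storage / move_from_storage (illustrative names):

#include <deque>
#include <iostream>
#include <string>

// Mirrors copy_to_storage(): queue up replacement text.
void copy_to_storage(std::deque<char>& storage, const std::string& what) {
    storage.insert(storage.end(), what.begin(), what.end());
}

// Mirrors move_from_storage(): drain queued characters into [dest_begin,dest_end),
// stopping when either the storage or the destination window runs out.
std::string::iterator move_from_storage(std::deque<char>& storage,
                                        std::string::iterator dest_begin,
                                        std::string::iterator dest_end) {
    auto out = dest_begin;
    while (!storage.empty() && out != dest_end) {
        *out++ = storage.front();
        storage.pop_front();
    }
    return out;
}

int main() {
    std::string buf = "__________";
    std::deque<char> storage;
    copy_to_storage(storage, "hello");
    auto it = move_from_storage(storage, buf.begin(), buf.end());
    std::cout << buf << " (wrote " << (it - buf.begin()) << " chars)\n";  // "hello_____ (wrote 5 chars)"
}
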
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/sequence.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/sequence.hpp
deleted file mode 100644
index dc47409..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/sequence.hpp
+++ /dev/null
@@ -1,200 +0,0 @@
-//  Boost string_algo library sequence.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_DETAIL_SEQUENCE_HPP
-#define BOOST_STRING_DETAIL_SEQUENCE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/mpl/logical.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-
-#include <boost/algorithm/string/sequence_traits.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  insert helpers  -------------------------------------------------//
-        
-            template< typename InputT, typename ForwardIteratorT >
-            inline void insert(
-                InputT& Input,
-                BOOST_STRING_TYPENAME InputT::iterator At,
-                ForwardIteratorT Begin,
-                ForwardIteratorT End )
-            {
-                Input.insert( At, Begin, End );
-            }
-
-            template< typename InputT, typename InsertT >
-            inline void insert(
-                InputT& Input,
-                BOOST_STRING_TYPENAME InputT::iterator At,
-                const InsertT& Insert )
-            {
-                ::boost::algorithm::detail::insert( Input, At, ::boost::begin(Insert), ::boost::end(Insert) );
-            }
-           
-//  erase helper  ---------------------------------------------------//
-
-            // Erase a range in the sequence
-            /*
-                Returns the iterator pointing just after the erased subrange
-            */
-            template< typename InputT >
-            inline typename InputT::iterator erase(
-                InputT& Input,
-                BOOST_STRING_TYPENAME InputT::iterator From,
-                BOOST_STRING_TYPENAME InputT::iterator To )
-            {
-                return Input.erase( From, To );
-            }
-
-//  replace helper implementation  ----------------------------------//
-
-            // Optimized version of replace for generic sequence containers
-            // Assumption: insert and erase are expensive
-            template< bool HasConstTimeOperations >
-            struct replace_const_time_helper
-            {
-                template< typename InputT, typename ForwardIteratorT >
-                void operator()(
-                    InputT& Input,
-                    BOOST_STRING_TYPENAME InputT::iterator From,
-                    BOOST_STRING_TYPENAME InputT::iterator To,
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End )
-                {
-                    // Copy data to the container ( as much as possible )
-                    ForwardIteratorT InsertIt=Begin;
-                    BOOST_STRING_TYPENAME InputT::iterator InputIt=From;
-                    for(; InsertIt!=End && InputIt!=To; InsertIt++, InputIt++ )
-                    {
-                        *InputIt=*InsertIt;
-                    }
-
-                    if ( InsertIt!=End )
-                    {
-                        // Replace sequence is longer, insert it
-                        Input.insert( InputIt, InsertIt, End );
-                    }
-                    else
-                    {
-                        if ( InputIt!=To )
-                        {
-                            // Replace sequence is shorter, erase the rest
-                            Input.erase( InputIt, To );
-                        }
-                    }
-                }
-            };
-
-            template<>
-            struct replace_const_time_helper< true >
-            {
-                // Const-time erase and insert methods -> use them
-                template< typename InputT, typename ForwardIteratorT >
-                void operator()(
-                    InputT& Input,
-                    BOOST_STRING_TYPENAME InputT::iterator From,
-                    BOOST_STRING_TYPENAME InputT::iterator To,
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) 
-                {
-                    BOOST_STRING_TYPENAME InputT::iterator At=Input.erase( From, To );
-                    if ( Begin!=End )
-                    {
-                        if(!Input.empty())
-                        {
-                            Input.insert( At, Begin, End );
-                        }
-                        else
-                        {
-                            Input.insert( Input.begin(), Begin, End );
-                        }
-                    }
-                }
-            };
-
-            // No native replace method
-            template< bool HasNative >
-            struct replace_native_helper
-            {
-                template< typename InputT, typename ForwardIteratorT >
-                void operator()(
-                    InputT& Input,
-                    BOOST_STRING_TYPENAME InputT::iterator From,
-                    BOOST_STRING_TYPENAME InputT::iterator To,
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End ) 
-                {
-                    replace_const_time_helper< 
-                        boost::mpl::and_<
-                            has_const_time_insert<InputT>,
-                            has_const_time_erase<InputT> >::value >()(
-                        Input, From, To, Begin, End );
-                }
-            };
-
-            // Container has native replace method
-            template<>
-            struct replace_native_helper< true >
-            {
-                template< typename InputT, typename ForwardIteratorT >
-                void operator()(
-                    InputT& Input,
-                    BOOST_STRING_TYPENAME InputT::iterator From,
-                    BOOST_STRING_TYPENAME InputT::iterator To,
-                    ForwardIteratorT Begin,
-                    ForwardIteratorT End )
-                {
-                    Input.replace( From, To, Begin, End );
-                }
-            };
-
-//  replace helper  -------------------------------------------------//
-        
-            template< typename InputT, typename ForwardIteratorT >
-            inline void replace(
-                InputT& Input,
-                BOOST_STRING_TYPENAME InputT::iterator From,
-                BOOST_STRING_TYPENAME InputT::iterator To,
-                ForwardIteratorT Begin,
-                ForwardIteratorT End )
-            {
-                replace_native_helper< has_native_replace<InputT>::value >()(
-                    Input, From, To, Begin, End );
-            }
-
-            template< typename InputT, typename InsertT >
-            inline void replace(
-                InputT& Input,
-                BOOST_STRING_TYPENAME InputT::iterator From,
-                BOOST_STRING_TYPENAME InputT::iterator To,
-                const InsertT& Insert )
-            {
-                if(From!=To)
-                {
-                    ::boost::algorithm::detail::replace( Input, From, To, ::boost::begin(Insert), ::boost::end(Insert) );
-                }
-                else
-                {
-                    ::boost::algorithm::detail::insert( Input, From, ::boost::begin(Insert), ::boost::end(Insert) );
-                }
-            }
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_DETAIL_SEQUENCE_HPP
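
replace() in the header above dispatches to the cheapest strategy the container supports: a native replace member when available, otherwise overwrite-then-insert-or-erase. A std-only sketch of the generic fallback (cf. replace_const_time_helper<false>), with illustrative names:

#include <iostream>
#include <iterator>
#include <list>
#include <string>

// Generic replace of [from,to) with [begin,end): overwrite the overlap, then
// insert the surplus or erase the leftover.
template <typename Container, typename FwdIt>
void replace_range(Container& input,
                   typename Container::iterator from, typename Container::iterator to,
                   FwdIt begin, FwdIt end) {
    auto in_it = from;
    auto rep_it = begin;
    for (; rep_it != end && in_it != to; ++rep_it, ++in_it) {
        *in_it = *rep_it;                   // overwrite as far as both ranges allow
    }
    if (rep_it != end) {
        input.insert(in_it, rep_it, end);   // replacement longer: insert the rest
    } else if (in_it != to) {
        input.erase(in_it, to);             // replacement shorter: drop the leftover
    }
}

int main() {
    std::list<char> text{'a', 'X', 'X', 'd'};
    std::string rep = "bc!";
    replace_range(text, std::next(text.begin()), std::next(text.begin(), 3),
                  rep.begin(), rep.end());
    std::cout << std::string(text.begin(), text.end()) << "\n";  // prints "abc!d"
}
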
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/trim.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/trim.hpp
deleted file mode 100644
index 1233e49..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/trim.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//  Boost string_algo library trim.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_TRIM_DETAIL_HPP
-#define BOOST_STRING_TRIM_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/detail/iterator.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  trim iterator helper -----------------------------------------------//
-
-            template< typename ForwardIteratorT, typename PredicateT >
-            inline ForwardIteratorT trim_end_iter_select( 
-                ForwardIteratorT InBegin, 
-                ForwardIteratorT InEnd, 
-                PredicateT IsSpace,
-                std::forward_iterator_tag )
-            {
-                ForwardIteratorT TrimIt=InBegin;
-
-                for( ForwardIteratorT It=InBegin; It!=InEnd; ++It )
-                {
-                    if ( !IsSpace(*It) ) 
-                    {
-                        TrimIt=It;
-                        ++TrimIt;
-                    }
-                }
-
-                return TrimIt;
-            }
-
-            template< typename ForwardIteratorT, typename PredicateT >
-            inline ForwardIteratorT trim_end_iter_select( 
-                ForwardIteratorT InBegin, 
-                ForwardIteratorT InEnd, 
-                PredicateT IsSpace,
-                std::bidirectional_iterator_tag )
-            {
-                for( ForwardIteratorT It=InEnd; It!=InBegin;  )
-                {
-                    if ( !IsSpace(*(--It)) )
-                        return ++It;
-                }
-
-                return InBegin;
-            }
-            // Search for the first non-matching character from the beginning of the sequence
-            template< typename ForwardIteratorT, typename PredicateT >
-            inline ForwardIteratorT trim_begin( 
-                ForwardIteratorT InBegin, 
-                ForwardIteratorT InEnd, 
-                PredicateT IsSpace )
-            {
-                ForwardIteratorT It=InBegin;
-                for(; It!=InEnd; ++It )
-                {
-                    if (!IsSpace(*It))
-                        return It;
-                }
-
-                return It;
-            }
-
-            // Search for the first non-matching character from the end of the sequence
-            template< typename ForwardIteratorT, typename PredicateT >
-            inline ForwardIteratorT trim_end( 
-                ForwardIteratorT InBegin, 
-                ForwardIteratorT InEnd, 
-                PredicateT IsSpace )
-            {
-                typedef BOOST_STRING_TYPENAME boost::detail::
-                    iterator_traits<ForwardIteratorT>::iterator_category category;
-
-                return ::boost::algorithm::detail::trim_end_iter_select( InBegin, InEnd, IsSpace, category() );
-            }
-
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_TRIM_DETAIL_HPP
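
trim_begin/trim_end above only locate the first and last non-matching characters; trimming is then a matter of slicing between those two iterators. A std-only sketch plus usage (illustrative names):

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Mirrors trim_begin()/trim_end() for a bidirectional range, then slices.
std::string trim_copy(const std::string& in) {
    auto is_space = [](unsigned char c) { return std::isspace(c) != 0; };
    auto first = std::find_if_not(in.begin(), in.end(), is_space);           // cf. trim_begin
    auto last  = std::find_if_not(in.rbegin(), in.rend(), is_space).base();  // cf. trim_end
    return (first < last) ? std::string(first, last) : std::string();
}

int main() {
    std::cout << "[" << trim_copy("  padded text \t\n") << "]\n";  // prints "[padded text]"
}
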
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/util.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/util.hpp
deleted file mode 100644
index 7844b67..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/detail/util.hpp
+++ /dev/null
@@ -1,107 +0,0 @@
-//  Boost string_algo library util.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_UTIL_DETAIL_HPP
-#define BOOST_STRING_UTIL_DETAIL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <functional>
-#include <boost/range/iterator_range_core.hpp>
-
-namespace boost {
-    namespace algorithm {
-        namespace detail {
-
-//  empty container  -----------------------------------------------//
-
-            //  empty_container 
-            /*
-                This class represents an always-empty container,
-                containing elements of type CharT.
-
-                It is supposed to be used in a const version only
-            */
-            template< typename CharT >
-            struct empty_container 
-            {
-                typedef empty_container<CharT> type;        
-                typedef CharT value_type;
-                typedef std::size_t size_type;
-                typedef std::ptrdiff_t difference_type;
-                typedef const value_type& reference;
-                typedef const value_type& const_reference;
-                typedef const value_type* iterator;
-                typedef const value_type* const_iterator;
-
-                
-                // Operations
-                const_iterator begin() const
-                {
-                    return reinterpret_cast<const_iterator>(0);
-                }
-
-                const_iterator end() const
-                {
-                    return reinterpret_cast<const_iterator>(0);
-                }
-
-                bool empty() const
-                {
-                    // An always-empty container is, by definition, empty.
-                    return true;
-                }
-
-                size_type size() const
-                {
-                    return 0;
-                }
-            };
-    
-//  bounded copy algorithm  -----------------------------------------------//
-
-            // Bounded version of the std::copy algorithm
-            template<typename InputIteratorT, typename OutputIteratorT>
-            inline OutputIteratorT bounded_copy(
-                InputIteratorT First, 
-                InputIteratorT Last, 
-                OutputIteratorT DestFirst,
-                OutputIteratorT DestLast )
-            {
-                InputIteratorT InputIt=First;
-                OutputIteratorT OutputIt=DestFirst;
-                for(; InputIt!=Last && OutputIt!=DestLast; InputIt++, OutputIt++ )
-                {
-                    *OutputIt=*InputIt;
-                }
-
-                return OutputIt;
-            }
-
-//  iterator range utilities -----------------------------------------//
-
-            // copy range functor
-            template< 
-                typename SeqT, 
-                typename IteratorT=BOOST_STRING_TYPENAME SeqT::const_iterator >
-            struct copy_iterator_rangeF
-            {
-                typedef iterator_range<IteratorT> argument_type;
-                typedef SeqT result_type;
-                SeqT operator()( const iterator_range<IteratorT>& Range ) const
-                {
-                    return copy_range<SeqT>(Range);
-                }
-            };
-
-        } // namespace detail
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_UTIL_DETAIL_HPP
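
The bounded_copy helper in the removed util.hpp above is essentially std::copy with an extra stop condition on the destination range. A small self-contained sketch of the same behaviour (hypothetical name, not the Boost detail function):

#include <cassert>
#include <string>

// Copy [first, last) into [dest_first, dest_last), stopping when either
// range is exhausted; returns one past the last element written.
template <typename InIt, typename OutIt>
OutIt bounded_copy_sketch(InIt first, InIt last, OutIt dest_first, OutIt dest_last) {
    for (; first != last && dest_first != dest_last; ++first, ++dest_first) {
        *dest_first = *first;
    }
    return dest_first;
}

int main() {
    const std::string src = "abcdefgh";
    char buf[4] = {};
    char* out_end = bounded_copy_sketch(src.begin(), src.end(), buf, buf + 3);
    assert(std::string(buf, out_end) == "abc");   // copy stopped at the bound
}
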
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/erase.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/erase.hpp
deleted file mode 100644
index 6883790..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/erase.hpp
+++ /dev/null
@@ -1,844 +0,0 @@
-//  Boost string_algo library erase.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_ERASE_HPP
-#define BOOST_STRING_ERASE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/const_iterator.hpp>
-
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-
-/*! \file
-    Defines various erase algorithms. Each algorithm removes
-    part(s) of the input according to a searching criteria.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  erase_range -------------------------------------------------------//
-
-        //! Erase range algorithm
-        /*!
-            Remove the given range from the input. The result is a modified copy of 
-            the input. It is returned as a sequence or copied to the output iterator.
-    
-            \param Output An output iterator to which the result will be copied
-            \param Input An input sequence
-            \param SearchRange A range in the input to be removed
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<typename OutputIteratorT, typename RangeT>
-        inline OutputIteratorT erase_range_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_const_iterator<RangeT>::type>& SearchRange )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase range algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT>
-        inline SequenceT erase_range_copy( 
-            const SequenceT& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_const_iterator<SequenceT>::type>& SearchRange )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase range algorithm
-        /*!
-            Remove the given range from the input.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param SearchRange A range in the input to be removed
-        */
-        template<typename SequenceT>
-        inline void erase_range( 
-            SequenceT& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_iterator<SequenceT>::type>& SearchRange )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
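
Since this change drops the vendored copies of these headers, the short usage sketches added below are for reference only and assume Boost.StringAlgo from a regular system Boost installation; they are illustrations, not part of the patch. erase_range_copy returns a trimmed copy, while erase_range edits the sequence in place:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    const std::string s = "abcdef";
    // Remove the range [begin()+1, begin()+4), i.e. "bcd", from a copy.
    std::string r = boost::algorithm::erase_range_copy(
        s, boost::make_iterator_range(s.begin() + 1, s.begin() + 4));
    assert(r == "aef");

    // In-place variant on a mutable string.
    std::string t = "abcdef";
    boost::algorithm::erase_range(
        t, boost::make_iterator_range(t.begin() + 1, t.begin() + 4));
    assert(t == "aef");
}
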
-//  erase_first  --------------------------------------------------------//
-
-        //! Erase first algorithm
-        /*!
-            Remove the first occurrence of the substring from the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-            
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT erase_first_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase first algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT erase_first_copy( 
-            const SequenceT& Input,
-            const RangeT& Search )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase first algorithm
-        /*!
-            Remove the first occurrence of the substring from the input. 
-            The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for. 
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void erase_first( 
-            SequenceT& Input,
-            const RangeT& Search )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-//  erase_first ( case insensitive ) ------------------------------------//
-
-        //! Erase first algorithm ( case insensitive )
-        /*!
-            Remove the first occurrence of the substring from the input. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT ierase_first_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase first algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT ierase_first_copy( 
-            const SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase first algorithm ( case insensitive )
-        /*!
-            Remove the first occurrence of the substring from the input. 
-            The input sequence is modified in-place. Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void ierase_first( 
-            SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
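
A brief sketch of the first-occurrence erasers documented above, under the same system-Boost assumption:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    // Copying variant: only the first "na" is removed.
    assert(boost::algorithm::erase_first_copy(std::string("banana"), "na") == "bana");

    // Case-insensitive, in-place variant (default locale).
    std::string s = "Hello World";
    boost::algorithm::ierase_first(s, "HELLO ");
    assert(s == "World");
}
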
-//  erase_last  --------------------------------------------------------//
-
-        //! Erase last algorithm
-        /*!
-            Remove the last occurrence of the substring from the input. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for.
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT erase_last_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase last algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT erase_last_copy( 
-            const SequenceT& Input,
-            const RangeT& Search )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase last algorithm
-        /*!
-            Remove the last occurrence of the substring from the input. 
-            The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void erase_last( 
-            SequenceT& Input,
-            const RangeT& Search )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-//  erase_last ( case insensitive ) ------------------------------------//
-
-        //! Erase last algorithm ( case insensitive )
-        /*!
-            Remove the last occurrence of the substring from the input. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT ierase_last_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase last algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT ierase_last_copy( 
-            const SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase last algorithm ( case insensitive )
-        /*!
-            Remove the last occurrence of the substring from the input. 
-            The input sequence is modified in-place. Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void ierase_last( 
-            SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
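
The last-occurrence variants mirror the first-occurrence ones; a small sketch:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    // Only the last "na" is removed.
    assert(boost::algorithm::erase_last_copy(std::string("banana"), "na") == "bana");

    // Case-insensitive copy, comparing with the default locale.
    assert(boost::algorithm::ierase_last_copy(std::string("AbcABC"), "abc") == "Abc");
}
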
-//  erase_nth --------------------------------------------------------------------//
-
-        //! Erase nth algorithm
-        /*!
-            Remove the Nth occurrence of the substring in the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT erase_nth_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            int Nth )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase nth algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT erase_nth_copy( 
-            const SequenceT& Input,
-            const RangeT& Search,
-            int Nth )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase nth algorithm
-        /*!
-            Remove the Nth occurrence of the substring in the input.
-            The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for. 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void erase_nth( 
-            SequenceT& Input,
-            const RangeT& Search,
-            int Nth )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-//  erase_nth ( case insensitive ) ---------------------------------------------//
-
-        //! Erase nth algorithm ( case insensitive )
-        /*!
-            Remove the Nth occurrence of the substring in the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator. 
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for.
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT ierase_nth_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            int Nth,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase nth algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT ierase_nth_copy( 
-            const SequenceT& Input,
-            const RangeT& Search,
-            int Nth,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
-                empty_formatter(Input) );
-        }
-
-        //! Erase nth algorithm
-        /*!
-            Remove the Nth occurrence of the substring in the input.
-            The input sequence is modified in-place. Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for. 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void ierase_nth( 
-            SequenceT& Input,
-            const RangeT& Search,
-            int Nth,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-
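
As documented above, the Nth variants use a zero-based index and a negative index counts matches from the end. For example (system Boost assumed):

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    const std::string s = "a-b-c-d";
    // Nth == 1 removes the second "-" (zero-based index).
    assert(boost::algorithm::erase_nth_copy(s, "-", 1) == "a-bc-d");
    // Negative Nth counts from the end: -1 removes the last "-".
    assert(boost::algorithm::erase_nth_copy(s, "-", -1) == "a-b-cd");
}
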
-//  erase_all  --------------------------------------------------------//
-
-        //! Erase all algorithm
-        /*!
-            Remove all the occurrences of the string from the input. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-                        
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input sequence
-            \param Search A substring to be searched for. 
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT erase_all_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search )
-        {
-            return ::boost::algorithm::find_format_all_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase all algorithm
-        /*!
-            \overload
-        */  
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT erase_all_copy( 
-            const SequenceT& Input,
-            const RangeT& Search )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase all algorithm
-        /*!
-            Remove all the occurrences of the string from the input. 
-            The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for. 
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void erase_all( 
-            SequenceT& Input,
-            const RangeT& Search )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-//  erase_all ( case insensitive ) ------------------------------------//
-
-        //! Erase all algorithm ( case insensitive )
-        /*!
-            Remove all the occurrences of the string from the input. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator. 
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT ierase_all_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_all_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase all algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT ierase_all_copy( 
-            const SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
-        //! Erase all algorithm ( case insensitive )
-        /*!
-            Remove all the occurrences of the string from the input. 
-            The input sequence is modified in-place. Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for. 
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void ierase_all( 
-            SequenceT& Input,
-            const RangeT& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::empty_formatter(Input) );
-        }
-
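
erase_all removes every occurrence in one pass, and ierase_all does the same case-insensitively; a brief sketch under the same assumption:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    // Strip every space from a copy.
    assert(boost::algorithm::erase_all_copy(std::string("a b c d"), " ") == "abcd");

    // Case-insensitive, in-place removal of every "ab"/"Ab"/"aB"/"AB".
    std::string s = "abXAbYaB";
    boost::algorithm::ierase_all(s, "ab");
    assert(s == "XY");
}
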
-//  erase_head --------------------------------------------------------------------//
-
-        //! Erase head algorithm
-        /*!
-            Remove the head from the input. The head is a prefix of a sequence of given size. 
-            If the sequence is shorter then required, the whole string is 
-            considered to be the head. The result is a modified copy of the input. 
-            It is returned as a sequence or copied to the output iterator.
-            
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param N Length of the head.
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename RangeT>
-        inline OutputIteratorT erase_head_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            int N )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase head algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT>
-        inline SequenceT erase_head_copy( 
-            const SequenceT& Input,
-            int N )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase head algorithm
-        /*!
-            Remove the head from the input. The head is a prefix of a sequence of given size. 
-            If the sequence is shorter then required, the whole string is 
-            considered to be the head. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param N Length of the head
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-        */
-        template<typename SequenceT>
-        inline void erase_head( 
-            SequenceT& Input,
-            int N )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-//  erase_tail --------------------------------------------------------------------//
-
-        //! Erase tail algorithm
-        /*!
-            Remove the tail from the input. The tail is a suffix of a sequence of given size. 
-            If the sequence is shorter then required, the whole string is 
-            considered to be the tail. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param N Length of the tail.                 
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-            
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename RangeT>
-        inline OutputIteratorT erase_tail_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            int N )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase tail algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT>
-        inline SequenceT erase_tail_copy( 
-            const SequenceT& Input,
-            int N )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase tail algorithm
-        /*!
-            Remove the tail from the input. The tail is a suffix of a sequence of given size. 
-            If the sequence is shorter then required, the whole string is
-            considered to be the tail. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param N Length of the tail
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-        */
-        template<typename SequenceT>
-        inline void erase_tail( 
-            SequenceT& Input,
-            int N )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
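
The head/tail erasers take a character count rather than a pattern; per the notes above, a negative N erases size(Input)-|N| characters, so only the last (head) or first (tail) |N| characters survive. A sketch:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    const std::string path = "file.txt";
    // Positive N: drop exactly N characters from that end.
    assert(boost::algorithm::erase_head_copy(path, 5) == "txt");
    assert(boost::algorithm::erase_tail_copy(path, 4) == "file");
    // Negative N: the erased head/tail is size(Input)-|N| characters long.
    assert(boost::algorithm::erase_head_copy(path, -3) == "txt");
    assert(boost::algorithm::erase_tail_copy(path, -4) == "file");
}
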
-    } // namespace algorithm
-
-    // pull names into the boost namespace
-    using algorithm::erase_range_copy;
-    using algorithm::erase_range;
-    using algorithm::erase_first_copy;
-    using algorithm::erase_first;
-    using algorithm::ierase_first_copy;
-    using algorithm::ierase_first;
-    using algorithm::erase_last_copy;
-    using algorithm::erase_last;
-    using algorithm::ierase_last_copy;
-    using algorithm::ierase_last;
-    using algorithm::erase_nth_copy;
-    using algorithm::erase_nth;
-    using algorithm::ierase_nth_copy;
-    using algorithm::ierase_nth;
-    using algorithm::erase_all_copy;
-    using algorithm::erase_all;
-    using algorithm::ierase_all_copy;
-    using algorithm::ierase_all;
-    using algorithm::erase_head_copy;
-    using algorithm::erase_head;
-    using algorithm::erase_tail_copy;
-    using algorithm::erase_tail;
-
-} // namespace boost
-
-
-#endif  // BOOST_ERASE_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/find.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/find.hpp
deleted file mode 100644
index f2c2926..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/find.hpp
+++ /dev/null
@@ -1,334 +0,0 @@
-//  Boost string_algo library find.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_HPP
-#define BOOST_STRING_FIND_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/compare.hpp>
-#include <boost/algorithm/string/constants.hpp>
-
-/*! \file
-    Defines a set of find algorithms. The algorithms are searching
-    for a substring of the input. The result is given as an \c iterator_range
-    delimiting the substring.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  Generic find -----------------------------------------------//
-
-        //! Generic find algorithm
-        /*!
-            Search the input using the given finder.
-
-            \param Input A string which will be searched.
-            \param Finder Finder object used for searching.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c RangeT::iterator or 
-                \c RangeT::const_iterator, depending on the constness of 
-                the input parameter.
-        */
-        template<typename RangeT, typename FinderT>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        find( 
-            RangeT& Input, 
-            const FinderT& Finder)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            return Finder(::boost::begin(lit_input),::boost::end(lit_input));
-        }
-
-//  find_first  -----------------------------------------------//
-
-        //! Find first algorithm
-        /*!
-            Search for the first occurrence of the substring in the input. 
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c RangeT::iterator or 
-                \c RangeT::const_iterator, depending on the constness of 
-                the input parameter.
-
-              \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        find_first( 
-            Range1T& Input, 
-            const Range2T& Search)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::first_finder(Search));
-        }
-
-        //! Find first algorithm ( case insensitive )
-        /*!
-            Search for the first occurrence of the substring in the input. 
-            Searching is case insensitive.
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \param Loc A locale used for case insensitive comparison
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        ifind_first( 
-            Range1T& Input, 
-            const Range2T& Search,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::first_finder(Search,is_iequal(Loc)));
-        }
-
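
These finders return an iterator_range into the searched string rather than an index; an empty range signals "not found". A sketch, again assuming a system Boost installation:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <iterator>
#include <string>

int main() {
    std::string s = "one two three";
    boost::iterator_range<std::string::iterator> r =
        boost::algorithm::find_first(s, "two");
    assert(!r.empty());
    // Offset of the match inside the original string.
    assert(std::distance(s.begin(), r.begin()) == 4);

    // Case-insensitive variant; an empty range means no match.
    assert(!boost::algorithm::ifind_first(s, "THREE").empty());
    assert(boost::algorithm::find_first(s, "four").empty());
}
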
-//  find_last  -----------------------------------------------//
-
-        //! Find last algorithm
-        /*!
-            Search for the last occurrence of the substring in the input. 
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        find_last( 
-            Range1T& Input, 
-            const Range2T& Search)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::last_finder(Search));
-        }
-
-        //! Find last algorithm ( case insensitive )
-        /*!
-            Search for the last match a string in the input. 
-            Searching is case insensitive.
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \param Loc A locale used for case insensitive comparison
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-        
-            \note This function provides the strong exception-safety guarantee    
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        ifind_last( 
-            Range1T& Input, 
-            const Range2T& Search,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::last_finder(Search, is_iequal(Loc)));
-        }
-
-//  find_nth ----------------------------------------------------------------------//
-
-        //! Find n-th algorithm 
-        /*!
-            Search for the n-th (zero-indexed) occurrence of the substring in the 
-            input.         
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \param Nth An index (zero-indexed) of the match to be found.
-                For negative N, the matches are counted from the end of string.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        find_nth( 
-            Range1T& Input, 
-            const Range2T& Search,
-            int Nth)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::nth_finder(Search,Nth));
-        }
-
-        //! Find n-th algorithm ( case insensitive ).
-        /*!
-            Search for the n-th (zero-indexed) occurrence of the substring in the 
-            input. Searching is case insensitive.
-            
-            \param Input A string which will be searched.
-            \param Search A substring to be searched for.
-            \param Nth An index (zero-indexed) of the match to be found. 
-                For negative N, the matches are counted from the end of string.
-            \param Loc A locale used for case insensitive comparison
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<Range1T>::type>
-        ifind_nth( 
-            Range1T& Input, 
-            const Range2T& Search,
-            int Nth,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::nth_finder(Search,Nth,is_iequal(Loc)));
-        }
-
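
find_nth is zero-based and accepts a negative index to count from the end, like the erase_nth family; for example:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <iterator>
#include <string>

int main() {
    std::string s = "a.b.c.d";
    // The second "." (index 1, zero-based) sits at offset 3.
    boost::iterator_range<std::string::iterator> r =
        boost::algorithm::find_nth(s, ".", 1);
    assert(std::distance(s.begin(), r.begin()) == 3);

    // Negative index: -1 is the last ".".
    assert(std::distance(s.begin(),
                         boost::algorithm::find_nth(s, ".", -1).begin()) == 5);
}
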
-//  find_head ----------------------------------------------------------------------//
-
-        //! Find head algorithm
-        /*!
-            Get the head of the input. Head is a prefix of the string of the 
-            given size. If the input is shorter then required, whole input is considered 
-            to be the head.
-
-            \param Input An input string
-            \param N Length of the head
-                For N>=0, at most N characters are extracted.
-                For N<0, at most size(Input)-|N| characters are extracted.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c Range1T::iterator or 
-                \c Range1T::const_iterator, depending on the constness of 
-                the input parameter.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename RangeT>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        find_head( 
-            RangeT& Input, 
-            int N)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::head_finder(N));
-        }
-
-//  find_tail ----------------------------------------------------------------------//
-
-        //! Find tail algorithm
-        /*!
-            Get the tail of the input. Tail is a suffix of the string of the 
-            given size. If the input is shorter then required, whole input is considered 
-            to be the tail.
-
-            \param Input An input string
-            \param N Length of the tail. 
-                For N>=0, at most N characters are extracted.
-                For N<0, at most size(Input)-|N| characters are extracted.
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c RangeT::iterator or 
-                \c RangeT::const_iterator, depending on the constness of 
-                the input parameter.
-
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename RangeT>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        find_tail( 
-            RangeT& Input, 
-            int N)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::tail_finder(N));
-        }
-
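
find_head and find_tail clamp to the input length, and a negative N yields all but the last (head) or first (tail) |N| characters, as documented above. A brief sketch:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    std::string s = "abcdef";

    boost::iterator_range<std::string::iterator> head = boost::algorithm::find_head(s, 2);
    assert(std::string(head.begin(), head.end()) == "ab");

    boost::iterator_range<std::string::iterator> tail = boost::algorithm::find_tail(s, 2);
    assert(std::string(tail.begin(), tail.end()) == "ef");

    // Negative N: all but the last |N| characters form the head.
    boost::iterator_range<std::string::iterator> most = boost::algorithm::find_head(s, -2);
    assert(std::string(most.begin(), most.end()) == "abcd");
}
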
-//  find_token --------------------------------------------------------------------//
-
-        //! Find token algorithm
-        /*!
-            Look for a given token in the string. Token is a character that matches the
-            given predicate.
-            If the "token compress mode" is enabled, adjacent tokens are considered to be one match.
-            
-            \param Input A input string.
-            \param Pred A unary predicate to identify a token
-            \param eCompress Enable/Disable compressing of adjacent tokens
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c RangeT::iterator or 
-                \c RangeT::const_iterator, depending on the constness of 
-                the input parameter.
-        
-            \note This function provides the strong exception-safety guarantee    
-        */
-        template<typename RangeT, typename PredicateT>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        find_token( 
-            RangeT& Input,
-            PredicateT Pred,
-            token_compress_mode_type eCompress=token_compress_off)
-        {
-            return ::boost::algorithm::find(Input, ::boost::algorithm::token_finder(Pred, eCompress));
-        }
-
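
find_token pairs naturally with the classification predicates (is_digit, is_space, ...), and token_compress_on merges a run of adjacent matching characters into one range; for instance:

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    std::string s = "abc123def";

    // With compression, the whole run "123" is reported as a single token.
    boost::iterator_range<std::string::iterator> tok =
        boost::algorithm::find_token(s, boost::algorithm::is_digit(),
                                     boost::algorithm::token_compress_on);
    assert(std::string(tok.begin(), tok.end()) == "123");

    // Without compression, only the first matching character is returned.
    tok = boost::algorithm::find_token(s, boost::algorithm::is_digit());
    assert(std::string(tok.begin(), tok.end()) == "1");
}
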
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::find;
-    using algorithm::find_first;
-    using algorithm::ifind_first;
-    using algorithm::find_last;
-    using algorithm::ifind_last;
-    using algorithm::find_nth;
-    using algorithm::ifind_nth;
-    using algorithm::find_head;
-    using algorithm::find_tail;
-    using algorithm::find_token;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_FIND_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/find_format.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/find_format.hpp
deleted file mode 100644
index 0e84a4e..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/find_format.hpp
+++ /dev/null
@@ -1,287 +0,0 @@
-//  Boost string_algo library find_format.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_FORMAT_HPP
-#define BOOST_STRING_FIND_FORMAT_HPP
-
-#include <deque>
-#include <boost/detail/iterator.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/const_iterator.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/concept.hpp>
-#include <boost/algorithm/string/detail/find_format.hpp>
-#include <boost/algorithm/string/detail/find_format_all.hpp>
-
-/*! \file
-    Defines generic replace algorithms. Each algorithm replaces
-    part(s) of the input. The part to be replaced is looked up using a Finder object.
-    Result of finding is then used by a Formatter object to generate the replacement.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-// generic replace  -----------------------------------------------------------------//
-
-        //! Generic replace algorithm
-        /*!
-            Use the Finder to search for a substring. Use the Formatter to format
-            this substring and replace it in the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-    
-            \param Output An output iterator to which the result will be copied
-            \param Input An input sequence
-            \param Finder A Finder object used to search for a match to be replaced
-            \param Formatter A Formatter object used to format a match
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT,
-            typename FinderT,
-            typename FormatterT>
-        inline OutputIteratorT find_format_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            FinderT Finder,
-            FormatterT Formatter )
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT((
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type>
-                ));
-
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            return detail::find_format_copy_impl(
-                Output,
-                lit_input,
-                Formatter,
-                Finder( ::boost::begin(lit_input), ::boost::end(lit_input) ) );
-        }
-
-        //! Generic replace algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename FinderT,
-            typename FormatterT>
-        inline SequenceT find_format_copy(
-            const SequenceT& Input,
-            FinderT Finder,
-            FormatterT Formatter )
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT((
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-
-            return detail::find_format_copy_impl(
-                Input,
-                Formatter,
-                Finder(::boost::begin(Input), ::boost::end(Input)));
-        }
-
-        //! Generic replace algorithm
-        /*!
-            Use the Finder to search for a substring. Use the Formatter to format
-            this substring and replace it in the input. The input is modified in-place.
-
-            \param Input An input sequence
-            \param Finder A Finder object used to search for a match to be replaced
-            \param Formatter A Formatter object used to format a match
-        */
-        template<
-            typename SequenceT,
-            typename FinderT,
-            typename FormatterT>
-        inline void find_format( 
-            SequenceT& Input,
-            FinderT Finder,
-            FormatterT Formatter)
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT(( 
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-
-            detail::find_format_impl(
-                Input,
-                Formatter,
-                Finder(::boost::begin(Input), ::boost::end(Input)));
-        }
-
-
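
find_format is the building block the erase_*/replace_* front ends are layered on: any Finder can be paired with any Formatter. A sketch that replaces the first match with a constant string (const_formatter is the stock formatter from Boost.StringAlgo; system Boost assumed):

#include <boost/algorithm/string.hpp>
#include <cassert>
#include <string>

int main() {
    std::string s = "red fish, red car";
    // Replace the first "red" with "blue": equivalent to replace_first().
    boost::algorithm::find_format(
        s,
        boost::algorithm::first_finder("red"),
        boost::algorithm::const_formatter("blue"));
    assert(s == "blue fish, red car");
}
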
-//  find_format_all generic ----------------------------------------------------------------//
-
-        //! Generic replace all algorithm
-        /*!
-            Use the Finder to search for a substring. Use the Formatter to format
-            this substring and replace it in the input. Repeat this for all matching
-            substrings.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input sequence
-            \param Finder A Finder object used to search for a match to be replaced
-            \param Formatter A Formatter object used to format a match
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT,
-            typename FinderT,
-            typename FormatterT>
-        inline OutputIteratorT find_format_all_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            FinderT Finder,
-            FormatterT Formatter)
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT(( 
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT(( 
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type>
-                ));
-
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            return detail::find_format_all_copy_impl(
-                Output,
-                lit_input,
-                Finder,
-                Formatter,
-                Finder(::boost::begin(lit_input), ::boost::end(lit_input)));
-        }
-
-        //! Generic replace all algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename FinderT,
-            typename FormatterT >
-        inline SequenceT find_format_all_copy(
-            const SequenceT& Input,
-            FinderT Finder,
-            FormatterT Formatter )
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT((
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-
-            return detail::find_format_all_copy_impl(
-                Input,
-                Finder,
-                Formatter,
-                Finder( ::boost::begin(Input), ::boost::end(Input) ) );
-        }
-
-        //! Generic replace all algorithm
-        /*!
-            Use the Finder to search for a substring. Use the Formatter to format
-            this substring and replace it in the input. Repeat this for all matching
-            substrings. The input is modified in-place.
-
-            \param Input An input sequence
-            \param Finder A Finder object used to search for a match to be replaced
-            \param Formatter A Formatter object used to format a match
-        */
-        template<
-            typename SequenceT,
-            typename FinderT,
-            typename FormatterT >
-        inline void find_format_all( 
-            SequenceT& Input,
-            FinderT Finder,
-            FormatterT Formatter )
-        {
-            // Concept check
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-            BOOST_CONCEPT_ASSERT((
-                FormatterConcept<
-                    FormatterT,
-                    FinderT,BOOST_STRING_TYPENAME range_const_iterator<SequenceT>::type>
-                ));
-
-            detail::find_format_all_impl(
-                Input,
-                Finder,
-                Formatter,
-                Finder(::boost::begin(Input), ::boost::end(Input)));
-
-        }
-
-    } // namespace algorithm
-
-    // pull the names to the boost namespace
-    using algorithm::find_format_copy;
-    using algorithm::find_format;
-    using algorithm::find_format_all_copy;
-    using algorithm::find_format_all;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_FIND_FORMAT_HPP
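For reference, a minimal illustrative sketch (not taken from this tree, and assuming a Boost install remains on the include path) of how the generic find_format_all interface declared above is typically combined with a Finder and a Formatter; first_finder and const_formatter come from the finder.hpp and formatter.hpp headers removed further down:

    #include <iostream>
    #include <string>
    #include <boost/algorithm/string/find_format.hpp>
    #include <boost/algorithm/string/finder.hpp>
    #include <boost/algorithm/string/formatter.hpp>

    int main() {
        std::string text = "red fish, red boat";
        // Replace every substring matched by the finder with the constant format.
        boost::algorithm::find_format_all(
            text,
            boost::algorithm::first_finder("red"),
            boost::algorithm::const_formatter("blue"));
        std::cout << text << '\n';  // prints "blue fish, blue boat"
    }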
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/find_iterator.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/find_iterator.hpp
deleted file mode 100644
index 5a52d92..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/find_iterator.hpp
+++ /dev/null
@@ -1,388 +0,0 @@
-//  Boost string_algo library find_iterator.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2004.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FIND_ITERATOR_HPP
-#define BOOST_STRING_FIND_ITERATOR_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-#include <boost/iterator/iterator_categories.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/detail/find_iterator.hpp>
-
-/*! \file
-    Defines find iterator classes. Find iterator repeatedly applies a Finder
-    to the specified input string to search for matches. Dereferencing
-    the iterator yields the current match or a range between the last and the current
-    match depending on the iterator used.
-*/
-
-namespace boost {
-    namespace algorithm { 
-
-//  find_iterator -----------------------------------------------//
-
-        //! find_iterator
-        /*!    
-            Find iterator encapsulates a Finder and allows
-            for incremental searching in a string.
-            Each increment moves the iterator to the next match.
-
-            Find iterator is a readable forward traversal iterator.
-
-            Dereferencing the iterator yields an iterator_range delimiting
-            the current match.
-        */
-        template<typename IteratorT>
-        class find_iterator : 
-            public iterator_facade<
-                find_iterator<IteratorT>,
-                const iterator_range<IteratorT>,
-                forward_traversal_tag >,
-            private detail::find_iterator_base<IteratorT>
-        {
-        private:
-            // facade support
-            friend class ::boost::iterator_core_access;
-
-        private:
-        // typedefs
-
-            typedef detail::find_iterator_base<IteratorT> base_type;
-            typedef BOOST_STRING_TYPENAME 
-                base_type::input_iterator_type input_iterator_type;
-            typedef BOOST_STRING_TYPENAME 
-                base_type::match_type match_type;
-
-        public:
-            //! Default constructor
-            /*!
-                Construct null iterator. All null iterators are equal.
-
-                \post eof()==true
-            */
-            find_iterator() {}
-
-            //! Copy constructor
-            /*!
-                Construct a copy of the find_iterator
-            */
-            find_iterator( const find_iterator& Other ) :
-                base_type(Other),
-                m_Match(Other.m_Match),
-                m_End(Other.m_End) {}
-
-            //! Constructor
-            /*!
-                Construct new find_iterator for a given finder
-                and a range.
-            */
-            template<typename FinderT>
-            find_iterator(
-                    IteratorT Begin,
-                    IteratorT End,
-                    FinderT Finder ) :
-                detail::find_iterator_base<IteratorT>(Finder,0),
-                m_Match(Begin,Begin),
-                m_End(End)
-            {
-                increment();
-            }
-
-            //! Constructor
-            /*!
-                Construct new find_iterator for a given finder
-                and a range.
-            */
-            template<typename FinderT, typename RangeT>
-            find_iterator(
-                    RangeT& Col,
-                    FinderT Finder ) :
-                detail::find_iterator_base<IteratorT>(Finder,0)
-            {
-                iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_col(::boost::as_literal(Col));
-                m_Match=::boost::make_iterator_range(::boost::begin(lit_col), ::boost::begin(lit_col));
-                m_End=::boost::end(lit_col);
-
-                increment();
-            }
-
-        private:
-        // iterator operations
-
-            // dereference
-            const match_type& dereference() const
-            {
-                return m_Match;
-            }
-
-            // increment
-            void increment()
-            {
-                m_Match=this->do_find(m_Match.end(),m_End);
-            }
-
-            // comparison
-            bool equal( const find_iterator& Other ) const
-            {
-                bool bEof=eof();
-                bool bOtherEof=Other.eof();
-
-                return bEof || bOtherEof ? bEof==bOtherEof :
-                    (
-                        m_Match==Other.m_Match &&
-                        m_End==Other.m_End 
-                    );
-            }
-
-        public:
-        // operations
-
-            //! Eof check
-            /*!
-                Check the eof condition. Eof condition means that
-                there is nothing more to be searched i.e. find_iterator
-                is after the last match.
-            */
-            bool eof() const
-            {
-                return 
-                    this->is_null() || 
-                    ( 
-                        m_Match.begin() == m_End &&
-                        m_Match.end() == m_End
-                    );
-            }
-
-        private:
-        // Attributes
-            match_type m_Match;
-            input_iterator_type m_End;
-        };
-
-        //! find iterator construction helper
-        /*!
-         *    Construct a find iterator to iterate through the specified string
-         */
-        template<typename RangeT, typename FinderT>
-        inline find_iterator< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        make_find_iterator(
-            RangeT& Collection,
-            FinderT Finder)
-        {
-            return find_iterator<BOOST_STRING_TYPENAME range_iterator<RangeT>::type>(
-                Collection, Finder);
-        }
-
-//  split iterator -----------------------------------------------//
-
-        //! split_iterator
-        /*!    
-            Split iterator encapsulates a Finder and allows
-            for incremental searching in a string.
-            Unlike the find iterator, split iterator iterates
-            through gaps between matches.
-
-            Split iterator is a readable forward traversal iterator.
-
-            Dereferencing the iterator yields an iterator_range delimiting
-            the current match.
-        */
-        template<typename IteratorT>
-        class split_iterator : 
-            public iterator_facade<
-                split_iterator<IteratorT>,
-                const iterator_range<IteratorT>,
-                forward_traversal_tag >,
-            private detail::find_iterator_base<IteratorT>
-        {
-        private:
-            // facade support
-            friend class ::boost::iterator_core_access;
-
-        private:
-        // typedefs
-
-            typedef detail::find_iterator_base<IteratorT> base_type;
-            typedef BOOST_STRING_TYPENAME 
-                base_type::input_iterator_type input_iterator_type;
-            typedef BOOST_STRING_TYPENAME 
-                base_type::match_type match_type;
-
-        public:
-            //! Default constructor
-            /*!
-                Construct null iterator. All null iterators are equal.
-    
-                \post eof()==true
-            */
-            split_iterator() :
-                m_Next(),
-                m_End(),
-                m_bEof(true)
-            {}
-
-            //! Copy constructor
-            /*!
-                Construct a copy of the split_iterator
-            */
-            split_iterator( const split_iterator& Other ) :
-                base_type(Other),
-                m_Match(Other.m_Match),
-                m_Next(Other.m_Next),
-                m_End(Other.m_End),
-                m_bEof(Other.m_bEof)
-            {}
-
-            //! Constructor
-            /*!
-                Construct new split_iterator for a given finder
-                and a range.
-            */
-            template<typename FinderT>
-            split_iterator(
-                    IteratorT Begin,
-                    IteratorT End,
-                    FinderT Finder ) :
-                detail::find_iterator_base<IteratorT>(Finder,0),
-                m_Match(Begin,Begin),
-                m_Next(Begin),
-                m_End(End),
-                m_bEof(false)
-            {
-                // force the correct behavior for empty sequences and yield at least one token
-                if(Begin!=End)
-                {
-                    increment();
-                }
-            }
-            //! Constructor
-            /*!
-                Construct new split_iterator for a given finder
-                and a collection.
-            */
-            template<typename FinderT, typename RangeT>
-            split_iterator(
-                    RangeT& Col,
-                    FinderT Finder ) :
-                detail::find_iterator_base<IteratorT>(Finder,0),
-                m_bEof(false)
-            {
-                iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_col(::boost::as_literal(Col));
-                m_Match=make_iterator_range(::boost::begin(lit_col), ::boost::begin(lit_col));
-                m_Next=::boost::begin(lit_col);
-                m_End=::boost::end(lit_col);
-
-                // force the correct behavior for empty sequences and yield at least one token
-                if(m_Next!=m_End)
-                {
-                    increment();
-                }
-            }
-
-
-        private:
-        // iterator operations
-
-            // dereference
-            const match_type& dereference() const
-            {
-                return m_Match;
-            }
-
-            // increment
-            void increment()
-            {
-                match_type FindMatch=this->do_find( m_Next, m_End );
-
-                if(FindMatch.begin()==m_End && FindMatch.end()==m_End)
-                {
-                    if(m_Match.end()==m_End)
-                    {
-                        // Mark iterator as eof
-                        m_bEof=true;
-                    }
-                }
-
-                m_Match=match_type( m_Next, FindMatch.begin() );
-                m_Next=FindMatch.end();
-            }
-
-            // comparison
-            bool equal( const split_iterator& Other ) const
-            {
-                bool bEof=eof();
-                bool bOtherEof=Other.eof();
-
-                return bEof || bOtherEof ? bEof==bOtherEof :
-                    (
-                        m_Match==Other.m_Match &&
-                        m_Next==Other.m_Next &&
-                        m_End==Other.m_End
-                    );
-            }
-
-        public:
-        // operations
-
-            //! Eof check
-            /*!
-                Check the eof condition. Eof condition means that
-                there is nothing more to be searched i.e. find_iterator
-                is after the last match.
-            */
-            bool eof() const
-            {
-                return this->is_null() || m_bEof;
-            }
-
-        private:
-        // Attributes
-            match_type m_Match;
-            input_iterator_type m_Next;
-            input_iterator_type m_End;
-            bool m_bEof;
-        };
-
-        //! split iterator construction helper
-        /*!
-         *    Construct a split iterator to iterate through the specified collection
-         */
-        template<typename RangeT, typename FinderT>
-        inline split_iterator< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-        make_split_iterator(
-            RangeT& Collection,
-            FinderT Finder)
-        {
-            return split_iterator<BOOST_STRING_TYPENAME range_iterator<RangeT>::type>(
-                Collection, Finder);
-        }
-
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::find_iterator;
-    using algorithm::make_find_iterator;
-    using algorithm::split_iterator;
-    using algorithm::make_split_iterator;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_FIND_ITERATOR_HPP
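As a quick illustration of the iterator interface removed above, a sketch that walks over every match via make_find_iterator (first_finder comes from the finder.hpp header removed below; illustrative only):

    #include <iostream>
    #include <string>
    #include <boost/algorithm/string/find_iterator.hpp>
    #include <boost/algorithm/string/finder.hpp>

    int main() {
        std::string text = "one two one three one";
        typedef boost::algorithm::find_iterator<std::string::iterator> find_iter;
        // A default-constructed find_iterator is the end-of-search ("eof") iterator.
        for (find_iter it = boost::algorithm::make_find_iterator(
                 text, boost::algorithm::first_finder("one"));
             it != find_iter(); ++it) {
            // Dereferencing yields an iterator_range delimiting the current match.
            std::cout << std::string(it->begin(), it->end()) << '\n';
        }
    }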
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/finder.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/finder.hpp
deleted file mode 100644
index 61f6e41..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/finder.hpp
+++ /dev/null
@@ -1,266 +0,0 @@
-//  Boost string_algo library finder.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FINDER_HPP
-#define BOOST_STRING_FINDER_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/const_iterator.hpp>
-
-#include <boost/algorithm/string/constants.hpp>
-#include <boost/algorithm/string/detail/finder.hpp>
-#include <boost/algorithm/string/compare.hpp>
-
-/*! \file
-    Defines Finder generators. A Finder object is a functor which is able to
-    find a substring matching specific criteria in the input.
-    Finders are used as pluggable components for the replace, find
-    and split facilities. This header contains generator functions
-    for the finders provided in this library.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  Finder generators ------------------------------------------//
-        
-        //! "First" finder 
-        /*!
-            Construct the \c first_finder. The finder searches for the first
-            occurrence of the string in a given input.
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param Search A substring to be searched for.
-            \return An instance of the \c first_finder object
-        */
-        template<typename RangeT>
-        inline detail::first_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            is_equal>
-        first_finder( const RangeT& Search )
-        {
-            return 
-                detail::first_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                        is_equal>( ::boost::as_literal(Search), is_equal() ) ;
-        }
-
-        //! "First" finder
-        /*!
-            \overload
-        */
-        template<typename RangeT,typename PredicateT>
-        inline detail::first_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            PredicateT>
-        first_finder( 
-            const RangeT& Search, PredicateT Comp )
-        {
-            return 
-                detail::first_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                    PredicateT>( ::boost::as_literal(Search), Comp );
-        }
-
-        //! "Last" finder
-        /*!
-            Construct the \c last_finder. The finder searches for the last
-            occurrence of the string in a given input.
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param Search A substring to be searched for.
-            \return An instance of the \c last_finder object
-        */
-        template<typename RangeT>
-        inline detail::last_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            is_equal>
-        last_finder( const RangeT& Search )
-        {
-            return 
-                detail::last_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                    is_equal>( ::boost::as_literal(Search), is_equal() );
-        }
-        //! "Last" finder
-        /*!
-            \overload
-        */
-        template<typename RangeT, typename PredicateT>
-        inline detail::last_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            PredicateT>
-        last_finder( const RangeT& Search, PredicateT Comp )
-        {
-            return 
-                detail::last_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                    PredicateT>( ::boost::as_literal(Search), Comp ) ;
-        }
-
-        //! "Nth" finder
-        /*!
-            Construct the \c nth_finder. The finder searches for the n-th (zero-indexed)
-            occurrence of the string in a given input.
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param Search A substring to be searched for.
-            \param Nth An index of the match to be found
-            \return An instance of the \c nth_finder object
-        */
-        template<typename RangeT>
-        inline detail::nth_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            is_equal>
-        nth_finder( 
-            const RangeT& Search, 
-            int Nth)
-        {
-            return 
-                detail::nth_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                    is_equal>( ::boost::as_literal(Search), Nth, is_equal() ) ;
-        }
-        //! "Nth" finder
-        /*!
-            \overload
-        */
-        template<typename RangeT, typename PredicateT>
-        inline detail::nth_finderF<
-            BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type,
-            PredicateT>
-        nth_finder( 
-            const RangeT& Search, 
-            int Nth, 
-            PredicateT Comp )
-        {
-            return 
-                detail::nth_finderF<
-                    BOOST_STRING_TYPENAME 
-                        range_const_iterator<RangeT>::type,
-                    PredicateT>( ::boost::as_literal(Search), Nth, Comp );
-        }
-
-        //! "Head" finder
-        /*!
-            Construct the \c head_finder. The finder returns a head of a given
-            input. The head is a prefix of a string up to n elements in
-            size. If an input has less than n elements, the whole input is
-            considered the head.
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param N The size of the head
-            \return An instance of the \c head_finder object
-        */
-        inline detail::head_finderF
-        head_finder( int N )
-        {
-            return detail::head_finderF(N);
-        }
-        
-        //! "Tail" finder
-        /*!
-            Construct the \c tail_finder. The finder returns a tail of a given
-            input. The tail is a suffix of a string up to n elements in
-            size. If an input has less than n elements, the whole input is
-            considered the tail.
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param N The size of the tail
-            \return An instance of the \c tail_finder object
-        */
-        inline detail::tail_finderF
-        tail_finder( int N )
-        {
-            return detail::tail_finderF(N);
-        }
-
-        //! "Token" finder
-        /*!
-            Construct the \c token_finder. The finder searches for a token 
-            specified by a predicate. It is similar to the std::find_if
-            algorithm, with the exception that it returns a range
-            instead of a single iterator.
-
-            If "compress token mode" is enabled, adjacent matching tokens are 
-            concatenated into one match. Thus the finder can be used to 
-            search for continuous segments of characters satisfying the 
-            given predicate.
-
-            The result is given as an \c iterator_range delimiting the match.
-
-            \param Pred An element selection predicate
-            \param eCompress Compress flag
-            \return An instance of the \c token_finder object
-        */
-        template< typename PredicateT >
-        inline detail::token_finderF<PredicateT>
-        token_finder( 
-            PredicateT Pred, 
-            token_compress_mode_type eCompress=token_compress_off )
-        {
-            return detail::token_finderF<PredicateT>( Pred, eCompress );
-        }
-
-        //! "Range" finder
-        /*!
-            Construct the \c range_finder. The finder does not perform 
-            any operation. It simply returns the given range for 
-            any input. 
-
-            \param Begin Beginning of the range
-            \param End End of the range
-            \return An instance of the \c range_finder object
-        */
-        template< typename ForwardIteratorT >
-        inline detail::range_finderF<ForwardIteratorT>
-        range_finder(
-            ForwardIteratorT Begin,
-            ForwardIteratorT End )
-        {
-            return detail::range_finderF<ForwardIteratorT>( Begin, End );
-        }
-
-        //! "Range" finder
-        /*!       
-            \overload
-        */
-        template< typename ForwardIteratorT >
-        inline detail::range_finderF<ForwardIteratorT>
-        range_finder( iterator_range<ForwardIteratorT> Range )
-        {
-            return detail::range_finderF<ForwardIteratorT>( Range );
-        }
-
-    } // namespace algorithm
-
-    // pull the names to the boost namespace
-    using algorithm::first_finder;
-    using algorithm::last_finder;
-    using algorithm::nth_finder;
-    using algorithm::head_finder;
-    using algorithm::tail_finder;
-    using algorithm::token_finder;
-    using algorithm::range_finder;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_FINDER_HPP
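A finder produced by these generators is simply a functor over an iterator range, as the contains() implementation later in this diff also relies on; a brief illustrative sketch of calling one directly:

    #include <iostream>
    #include <string>
    #include <boost/algorithm/string/finder.hpp>

    int main() {
        std::string text = "a-b-c-d";
        // nth_finder is zero-indexed: Nth == 1 selects the second occurrence of "-".
        boost::iterator_range<std::string::iterator> match =
            boost::algorithm::nth_finder("-", 1)(text.begin(), text.end());
        std::cout << "second '-' at offset "
                  << (match.begin() - text.begin()) << '\n';  // prints 3
    }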
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/formatter.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/formatter.hpp
deleted file mode 100644
index de8681b..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/formatter.hpp
+++ /dev/null
@@ -1,120 +0,0 @@
-//  Boost string_algo library formatter.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_FORMATTER_HPP
-#define BOOST_STRING_FORMATTER_HPP
-
-#include <boost/detail/iterator.hpp>
-#include <boost/range/value_type.hpp>
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/detail/formatter.hpp>
-
-/*! \file
-    Defines Formatter generators. Formatter is a functor which formats
-    a string according to given parameters. A Formatter works
-    in conjunction with a Finder. A Finder can provide additional information
-    for a specific Formatter. An example of such a cooperation is regex_finder
-    and regex_formatter.
-
-    Formatters are used as pluggable components for replace facilities. 
-    This header contains generator functions for the Formatters provided in this library.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-// generic formatters  ---------------------------------------------------------------//
-
-        //! Constant formatter
-        /*!
-            Constructs a \c const_formatter. Const formatter always returns
-            the same value, regardless of the parameter.
-
-            \param Format A predefined value used as a result for formatting
-            \return An instance of the \c const_formatter object.
-        */
-        template<typename RangeT>
-        inline detail::const_formatF<
-            iterator_range<
-                BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> >
-        const_formatter(const RangeT& Format)
-        {
-            return detail::const_formatF<
-                iterator_range<
-                    BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> >(::boost::as_literal(Format));
-        }
-
-        //! Identity formatter
-        /*!
-            Constructs an \c identity_formatter. Identity formatter always returns
-            the parameter.
-
-            \return An instance of the \c identity_formatter object.
-        */
-        template<typename RangeT>
-        inline detail::identity_formatF<
-            iterator_range<
-                BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> >
-        identity_formatter()
-        {
-            return detail::identity_formatF<
-                iterator_range<
-                    BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> >();
-        }
-
-        //! Empty formatter
-        /*!
-            Constructs an \c empty_formatter. Empty formatter always returns an empty
-            sequence. 
-
-            \param Input container used to select a correct value_type for the
-                         resulting empty_container<>.
-            \return An instance of the \c empty_formatter object.
-        */
-        template<typename RangeT>
-        inline detail::empty_formatF< 
-            BOOST_STRING_TYPENAME range_value<RangeT>::type>
-        empty_formatter(const RangeT&)
-        {
-            return detail::empty_formatF<
-                BOOST_STRING_TYPENAME range_value<RangeT>::type>();
-        }
-
-        //! Dissect formatter
-        /*!
-            Constructs a \c dissect_formatter. Dissect formatter uses a specified finder
-            to extract a portion of the formatted sequence. The first finder's match is returned
-            as the result.
-
-            \param Finder a finder used to select a portion of the formatted sequence
-            \return An instance of the \c dissect_formatter object.
-        */
-        template<typename FinderT>
-        inline detail::dissect_formatF< FinderT >
-        dissect_formatter(const FinderT& Finder)
-        {
-            return detail::dissect_formatF<FinderT>(Finder);
-        }
-
-
-    } // namespace algorithm
-
-    // pull the names to the boost namespace
-    using algorithm::const_formatter;
-    using algorithm::identity_formatter;
-    using algorithm::empty_formatter;
-    using algorithm::dissect_formatter;
-
-} // namespace boost
-
-
-#endif  // BOOST_FORMATTER_HPP
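For completeness, an illustrative sketch of two of the formatters above plugged into find_format_all_copy from the find_format.hpp header removed earlier (assuming Boost stays available via another dependency path):

    #include <iostream>
    #include <string>
    #include <boost/algorithm/string/find_format.hpp>
    #include <boost/algorithm/string/finder.hpp>
    #include <boost/algorithm/string/formatter.hpp>

    int main() {
        const std::string text = "error: disk full";
        namespace ba = boost::algorithm;

        // const_formatter substitutes a fixed string for every match ...
        std::cout << ba::find_format_all_copy(text, ba::first_finder(" "),
                                              ba::const_formatter("_")) << '\n';
        // ... while empty_formatter simply erases the matches.
        std::cout << ba::find_format_all_copy(text, ba::first_finder(" "),
                                              ba::empty_formatter(text)) << '\n';
    }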
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/iter_find.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/iter_find.hpp
deleted file mode 100644
index 10424ab..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/iter_find.hpp
+++ /dev/null
@@ -1,193 +0,0 @@
-//  Boost string_algo library iter_find.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_ITER_FIND_HPP
-#define BOOST_STRING_ITER_FIND_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <algorithm>
-#include <iterator>
-#include <boost/iterator/transform_iterator.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/value_type.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/concept.hpp>
-#include <boost/algorithm/string/find_iterator.hpp>
-#include <boost/algorithm/string/detail/util.hpp>
-
-/*! \file
-    Defines generic split algorithms. Split algorithms can be 
-    used to divide a sequence into several parts according
-    to a given criterion. The result is given as a 'container
-    of containers' whose elements are copies of or references
-    to the extracted parts.
-
-    There are two algorithms provided. One iterates over matching
-    substrings, the other one over the gaps between these matches.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  iterate find ---------------------------------------------------//
-
-        //! Iter find algorithm
-        /*!
-            This algorithm executes a given finder in iteration on the input,
-            until the end of input is reached, or no match is found.
-            Iteration is done using built-in find_iterator, so the real 
-            searching is performed only when needed.
-            In each iteration, a new match is found and added to the result.
-
-            \param Result A 'container of containers' to hold the result of the search.
-                Both the outer and the inner container must have a constructor taking a pair
-                of iterators as an argument.
-                A typical type of the result is
-                    \c std::vector<boost::iterator_range<iterator>>
-                (each element of such a vector will contain a range delimiting
-                a match).
-            \param Input A container which will be searched.
-            \param Finder A Finder object used for searching
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-        */
-        template< 
-            typename SequenceSequenceT,
-            typename RangeT,
-            typename FinderT >
-        inline SequenceSequenceT&
-        iter_find(
-            SequenceSequenceT& Result,
-            RangeT& Input,
-            FinderT Finder )
-        {
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<
-                    FinderT,
-                    BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-                ));
-
-            iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_iterator<RangeT>::type input_iterator_type;
-            typedef find_iterator<input_iterator_type> find_iterator_type;
-            typedef detail::copy_iterator_rangeF<
-                BOOST_STRING_TYPENAME 
-                    range_value<SequenceSequenceT>::type,
-                input_iterator_type> copy_range_type;
-            
-            input_iterator_type InputEnd=::boost::end(lit_input);
-
-            typedef transform_iterator<copy_range_type, find_iterator_type>
-                transform_iter_type;
-    
-            transform_iter_type itBegin=
-                ::boost::make_transform_iterator( 
-                    find_iterator_type( ::boost::begin(lit_input), InputEnd, Finder ),
-                    copy_range_type());
-            
-            transform_iter_type itEnd=
-                ::boost::make_transform_iterator( 
-                    find_iterator_type(),
-                    copy_range_type());
-
-            SequenceSequenceT Tmp(itBegin, itEnd);
-                        
-            Result.swap(Tmp);
-            return Result;
-        }
-
-//  iterate split ---------------------------------------------------//
-
-        //! Split find algorithm
-        /*!
-            This algorithm executes a given finder in iteration on the input,
-            until the end of input is reached, or no match is found.
-            Iteration is done using built-in find_iterator, so the real 
-            searching is performed only when needed.
-            Each match is used as a separator of segments. These segments are then
-            returned in the result.
-
-            \param Result A 'container of containers' to hold the result of the search.
-                Both the outer and the inner container must have a constructor taking a pair
-                of iterators as an argument.
-                A typical type of the result is
-                    \c std::vector<boost::iterator_range<iterator>>
-                (each element of such a vector will contain a range delimiting
-                a match).
-            \param Input A container which will be searched.
-            \param Finder A finder object used for searching
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-        */
-        template< 
-            typename SequenceSequenceT,
-            typename RangeT,
-            typename FinderT >
-        inline SequenceSequenceT&
-        iter_split(
-            SequenceSequenceT& Result,
-            RangeT& Input,
-            FinderT Finder )
-        {
-            BOOST_CONCEPT_ASSERT((
-                FinderConcept<FinderT,
-                BOOST_STRING_TYPENAME range_iterator<RangeT>::type>
-                ));
-
-            iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_iterator<RangeT>::type input_iterator_type;
-            typedef split_iterator<input_iterator_type> find_iterator_type;
-            typedef detail::copy_iterator_rangeF<
-                BOOST_STRING_TYPENAME 
-                    range_value<SequenceSequenceT>::type,
-                input_iterator_type> copy_range_type;
-            
-            input_iterator_type InputEnd=::boost::end(lit_input);
-
-            typedef transform_iterator<copy_range_type, find_iterator_type>
-                transform_iter_type;
-    
-            transform_iter_type itBegin=
-                ::boost::make_transform_iterator( 
-                    find_iterator_type( ::boost::begin(lit_input), InputEnd, Finder ),
-                    copy_range_type() );
-
-            transform_iter_type itEnd=
-                ::boost::make_transform_iterator( 
-                    find_iterator_type(),
-                    copy_range_type() );
-            
-            SequenceSequenceT Tmp(itBegin, itEnd);
-
-            Result.swap(Tmp);
-            return Result;
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::iter_find;
-    using algorithm::iter_split;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_ITER_FIND_HPP
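A short illustrative sketch of iter_split filling a standard container with copies of the segments, as the documentation above describes (first_finder comes from the finder.hpp header removed earlier):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>
    #include <boost/algorithm/string/iter_find.hpp>
    #include <boost/algorithm/string/finder.hpp>

    int main() {
        std::string text = "2019-05-04";
        std::vector<std::string> parts;

        // Every match of "-" acts as a separator; the segments are copied into parts.
        boost::algorithm::iter_split(parts, text, boost::algorithm::first_finder("-"));

        for (std::size_t i = 0; i < parts.size(); ++i)
            std::cout << parts[i] << '\n';  // prints 2019, 05, 04
    }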
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/join.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/join.hpp
deleted file mode 100644
index b871eb4..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/join.hpp
+++ /dev/null
@@ -1,145 +0,0 @@
-//  Boost string_algo library join.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_JOIN_HPP
-#define BOOST_STRING_JOIN_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/algorithm/string/detail/sequence.hpp>
-#include <boost/range/value_type.hpp>
-#include <boost/range/as_literal.hpp>
-
-/*! \file
-    Defines join algorithm. 
-
-    Join algorithm is a counterpart to split algorithms.
-    It joins strings from a 'list' by adding a user-defined separator.
-    Additionally there is a version that allows simple filtering
-    by providing a predicate.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  join --------------------------------------------------------------//
-
-        //! Join algorithm
-        /*!
-            This algorithm joins all strings in a 'list' into one long string.
-            Segments are concatenated by given separator.
-
-            \param Input A container that holds the input strings. It must be a container-of-containers.
-            \param Separator A string that will separate the joined segments.
-            \return Concatenated string.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< typename SequenceSequenceT, typename Range1T>
-        inline typename range_value<SequenceSequenceT>::type 
-        join(
-            const SequenceSequenceT& Input,
-            const Range1T& Separator)
-        {
-            // Define working types
-            typedef typename range_value<SequenceSequenceT>::type ResultT;
-            typedef typename range_const_iterator<SequenceSequenceT>::type InputIteratorT;
-
-            // Parse input
-            InputIteratorT itBegin=::boost::begin(Input);
-            InputIteratorT itEnd=::boost::end(Input);
-
-            // Construct container to hold the result
-            ResultT Result;
-            
-            // Append first element
-            if(itBegin!=itEnd)
-            {
-                detail::insert(Result, ::boost::end(Result), *itBegin);
-                ++itBegin;
-            }
-
-            for(;itBegin!=itEnd; ++itBegin)
-            {
-                // Add separator
-                detail::insert(Result, ::boost::end(Result), ::boost::as_literal(Separator));
-                // Add element
-                detail::insert(Result, ::boost::end(Result), *itBegin);
-            }
-
-            return Result;
-        }
-
-// join_if ----------------------------------------------------------//
-
-        //! Conditional join algorithm
-        /*!
-            This algorithm joins all strings in a 'list' into one long string.
-            Segments are concatenated by given separator. Only segments that
-            satisfy the predicate will be added to the result.
-
-            \param Input A container that holds the input strings. It must be a container-of-containers.
-            \param Separator A string that will separate the joined segments.
-            \param Pred A segment selection predicate
-            \return Concatenated string.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< typename SequenceSequenceT, typename Range1T, typename PredicateT>
-        inline typename range_value<SequenceSequenceT>::type 
-        join_if(
-            const SequenceSequenceT& Input,
-            const Range1T& Separator,
-            PredicateT Pred)
-        {
-            // Define working types
-            typedef typename range_value<SequenceSequenceT>::type ResultT;
-            typedef typename range_const_iterator<SequenceSequenceT>::type InputIteratorT;
-
-            // Parse input
-            InputIteratorT itBegin=::boost::begin(Input);
-            InputIteratorT itEnd=::boost::end(Input);
-
-            // Construct container to hold the result
-            ResultT Result;
-
-            // Roll to the first element that will be added
-            while(itBegin!=itEnd && !Pred(*itBegin)) ++itBegin;
-            // Add this element
-            if(itBegin!=itEnd)
-            {
-                detail::insert(Result, ::boost::end(Result), *itBegin);
-                ++itBegin;
-            }
-
-            for(;itBegin!=itEnd; ++itBegin)
-            {
-                if(Pred(*itBegin))
-                {
-                    // Add separator
-                    detail::insert(Result, ::boost::end(Result), ::boost::as_literal(Separator));
-                    // Add element
-                    detail::insert(Result, ::boost::end(Result), *itBegin);
-                }
-            }
-
-            return Result;
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::join;
-    using algorithm::join_if;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_JOIN_HPP
-
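A minimal illustrative sketch of join and join_if as documented above:

    #include <iostream>
    #include <string>
    #include <vector>
    #include <boost/algorithm/string/join.hpp>

    static bool not_empty(const std::string& s) { return !s.empty(); }

    int main() {
        std::vector<std::string> parts;
        parts.push_back("usr");
        parts.push_back("");
        parts.push_back("local");
        parts.push_back("bin");

        // join keeps every segment; join_if filters each segment with the predicate first.
        std::cout << boost::algorithm::join(parts, "/") << '\n';               // usr//local/bin
        std::cout << boost::algorithm::join_if(parts, "/", not_empty) << '\n'; // usr/local/bin
    }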
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate.hpp
deleted file mode 100644
index 0879829..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate.hpp
+++ /dev/null
@@ -1,475 +0,0 @@
-//  Boost string_algo library predicate.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_PREDICATE_HPP
-#define BOOST_STRING_PREDICATE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/const_iterator.hpp>
-#include <boost/range/as_literal.hpp>
-#include <boost/range/iterator_range_core.hpp>
-
-#include <boost/algorithm/string/compare.hpp>
-#include <boost/algorithm/string/find.hpp>
-#include <boost/algorithm/string/detail/predicate.hpp>
-
-/*! \file boost/algorithm/string/predicate.hpp
-    Defines string-related predicates. 
-    The predicates determine whether a substring is contained in the input string 
-    under various conditions: a string starts with the substring, ends with the 
-    substring, simply contains the substring, or whether both strings are equal.
-    Additionally, the algorithm \c all() checks whether all elements of a container satisfy a
-    condition.
-
-    All predicates provide the strong exception guarantee.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  starts_with predicate  -----------------------------------------------//
-
-        //! 'Starts with' predicate
-        /*!
-            This predicate holds when the test string is a prefix of the Input.
-            In other words, if the input starts with the test.
-            When the optional predicate is specified, it is used for character-wise
-            comparison.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Comp An element comparison predicate
-            \return The result of the test
-
-              \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T, typename PredicateT>
-            inline bool starts_with( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            PredicateT Comp)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range1T>::type> lit_input(::boost::as_literal(Input));
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range2T>::type> lit_test(::boost::as_literal(Test));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<Range1T>::type Iterator1T;
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<Range2T>::type Iterator2T;
-
-            Iterator1T InputEnd=::boost::end(lit_input);
-            Iterator2T TestEnd=::boost::end(lit_test);
-
-            Iterator1T it=::boost::begin(lit_input);
-            Iterator2T pit=::boost::begin(lit_test);
-            for(;
-                it!=InputEnd && pit!=TestEnd;
-                ++it,++pit)
-            {
-                if( !(Comp(*it,*pit)) )
-                    return false;
-            }
-
-            return pit==TestEnd;
-        }
-
-        //! 'Starts with' predicate
-        /*!
-            \overload
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool starts_with( 
-            const Range1T& Input, 
-            const Range2T& Test)
-        {
-            return ::boost::algorithm::starts_with(Input, Test, is_equal());
-        }
-
-        //! 'Starts with' predicate ( case insensitive )
-        /*!
-            This predicate holds when the test string is a prefix of the Input.
-            In other words, if the input starts with the test.
-            Elements are compared case insensitively.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Loc A locale used for case insensitive comparison
-            \return The result of the test
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool istarts_with( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::starts_with(Input, Test, is_iequal(Loc));
-        }
-
-
-//  ends_with predicate  -----------------------------------------------//
-
-        //! 'Ends with' predicate
-        /*!
-            This predicate holds when the test string is a suffix of the Input.
-            In other words, if the input ends with the test.
-            When the optional predicate is specified, it is used for character-wise
-            comparison.
-
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Comp An element comparison predicate
-            \return The result of the test
-
-              \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T, typename PredicateT>
-        inline bool ends_with( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            PredicateT Comp)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range1T>::type> lit_input(::boost::as_literal(Input));
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range2T>::type> lit_test(::boost::as_literal(Test));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<Range1T>::type Iterator1T;
-            typedef BOOST_STRING_TYPENAME boost::detail::
-                iterator_traits<Iterator1T>::iterator_category category;
-
-            return detail::
-                ends_with_iter_select( 
-                    ::boost::begin(lit_input), 
-                    ::boost::end(lit_input), 
-                    ::boost::begin(lit_test), 
-                    ::boost::end(lit_test), 
-                    Comp,
-                    category());
-        }
-
-
-        //! 'Ends with' predicate
-        /*!
-            \overload
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool ends_with( 
-            const Range1T& Input, 
-            const Range2T& Test)
-        {
-            return ::boost::algorithm::ends_with(Input, Test, is_equal());
-        }
-
-        //! 'Ends with' predicate ( case insensitive )
-        /*!
-            This predicate holds when the test container is a suffix of the Input.
-            In other words, if the input ends with the test.
-            Elements are compared case insensitively.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Loc A locale used for case insensitive comparison
-            \return The result of the test
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool iends_with( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::ends_with(Input, Test, is_iequal(Loc));
-        }
-
-//  contains predicate  -----------------------------------------------//
-
-        //! 'Contains' predicate
-        /*!
-            This predicate holds when the test container is contained in the Input.
-            When the optional predicate is specified, it is used for character-wise
-            comparison.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Comp An element comparison predicate
-            \return The result of the test
-
-               \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T, typename PredicateT>
-        inline bool contains( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            PredicateT Comp)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range1T>::type> lit_input(::boost::as_literal(Input));
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range2T>::type> lit_test(::boost::as_literal(Test));
-
-            if (::boost::empty(lit_test))
-            {
-                // Empty range is contained always
-                return true;
-            }
-            
-            // Use the temporary variable to make VACPP happy
-            bool bResult=(::boost::algorithm::first_finder(lit_test,Comp)(::boost::begin(lit_input), ::boost::end(lit_input)));
-            return bResult;
-        }
-
-        //! 'Contains' predicate
-        /*!
-            \overload
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool contains( 
-            const Range1T& Input, 
-            const Range2T& Test)
-        {
-            return ::boost::algorithm::contains(Input, Test, is_equal());
-        }
-
-        //! 'Contains' predicate ( case insensitive )
-        /*!
-            This predicate holds when the test container is contained in the Input.
-            Elements are compared case insensitively.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Loc A locale used for case insensitive comparison
-            \return The result of the test
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool icontains( 
-            const Range1T& Input, 
-            const Range2T& Test, 
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::contains(Input, Test, is_iequal(Loc));
-        }
-
-//  equals predicate  -----------------------------------------------//
-
-        //! 'Equals' predicate
-        /*!
-            This predicate holds when the test container is equal to the
-            input container i.e. all elements in both containers are same.
-            When the optional predicate is specified, it is used for character-wise
-            comparison.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Comp An element comparison predicate
-            \return The result of the test
-
-            \note This is a two-way version of \c std::equal algorithm
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T, typename PredicateT>
-        inline bool equals( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            PredicateT Comp)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range1T>::type> lit_input(::boost::as_literal(Input));
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range2T>::type> lit_test(::boost::as_literal(Test));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<Range1T>::type Iterator1T;
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<Range2T>::type Iterator2T;
-                
-            Iterator1T InputEnd=::boost::end(lit_input);
-            Iterator2T TestEnd=::boost::end(lit_test);
-
-            Iterator1T it=::boost::begin(lit_input);
-            Iterator2T pit=::boost::begin(lit_test);
-            for(;
-                it!=InputEnd && pit!=TestEnd;
-                ++it,++pit)
-            {
-                if( !(Comp(*it,*pit)) )
-                    return false;
-            }
-
-            return  (pit==TestEnd) && (it==InputEnd);
-        }
-
-        //! 'Equals' predicate
-        /*!
-            \overload
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool equals( 
-            const Range1T& Input, 
-            const Range2T& Test)
-        {
-            return ::boost::algorithm::equals(Input, Test, is_equal());
-        }
-
-        //! 'Equals' predicate ( case insensitive )
-        /*!
-            This predicate holds when the test container is equal to the
-            input container i.e. all elements in both containers are same.
-            Elements are compared case insensitively.
-
-            \param Input An input sequence
-            \param Test A test sequence
-            \param Loc A locale used for case insensitive comparison
-            \return The result of the test
-
-            \note This is a two-way version of \c std::equal algorithm
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename Range1T, typename Range2T>
-        inline bool iequals( 
-            const Range1T& Input, 
-            const Range2T& Test,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::equals(Input, Test, is_iequal(Loc));
-        }
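An illustrative sketch of the predicate family defined so far in this header (starts_with, iends_with, icontains), assuming a Boost install remains on the include path:

    #include <iostream>
    #include <string>
    #include <boost/algorithm/string/predicate.hpp>

    int main() {
        const std::string name = "Sensor.CSV";
        namespace ba = boost::algorithm;

        std::cout << std::boolalpha
                  << ba::starts_with(name, "Sensor") << '\n'  // true
                  << ba::iends_with(name, ".csv") << '\n'     // true (case-insensitive)
                  << ba::icontains(name, "sor.c") << '\n';    // true
    }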
-
-// lexicographical_compare predicate -----------------------------//
-
-        //! Lexicographical compare predicate
-        /*!
-             This predicate is an overload of std::lexicographical_compare
-             for range arguments
-
-             It checks whether the first argument is lexicographically less
-             than the second one.
-
-             If the optional predicate is specified, it is used for character-wise
-             comparison
-
-             \param Arg1 First argument 
-             \param Arg2 Second argument
-             \param Pred Comparison predicate
-             \return The result of the test
-
-             \note This function provides the strong exception-safety guarantee
-         */
-        template<typename Range1T, typename Range2T, typename PredicateT>
-        inline bool lexicographical_compare(
-            const Range1T& Arg1,
-            const Range2T& Arg2,
-            PredicateT Pred)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range1T>::type> lit_arg1(::boost::as_literal(Arg1));
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<Range2T>::type> lit_arg2(::boost::as_literal(Arg2));
-
-            return std::lexicographical_compare(
-                ::boost::begin(lit_arg1),
-                ::boost::end(lit_arg1),
-                ::boost::begin(lit_arg2),
-                ::boost::end(lit_arg2),
-                Pred);
-        }
-
-        //! Lexicographical compare predicate
-        /*!
-            \overload
-         */
-        template<typename Range1T, typename Range2T>
-        inline bool lexicographical_compare(
-            const Range1T& Arg1,
-            const Range2T& Arg2)
-        {
-            return ::boost::algorithm::lexicographical_compare(Arg1, Arg2, is_less());
-        }
-
-        //! Lexicographical compare predicate (case-insensitive)
-        /*!
-            This predicate is an overload of std::lexicographical_compare
-            for range arguments.
-            It checks whether the first argument is lexicographically less
-            than the second one.
-            Elements are compared case insensitively.
-
-
-             \param Arg1 First argument 
-             \param Arg2 Second argument
-             \param Loc A locale used for case insensitive comparison
-             \return The result of the test
-
-             \note This function provides the strong exception-safety guarantee
-         */
-        template<typename Range1T, typename Range2T>
-        inline bool ilexicographical_compare(
-            const Range1T& Arg1,
-            const Range2T& Arg2,
-            const std::locale& Loc=std::locale())
-        {
-            return ::boost::algorithm::lexicographical_compare(Arg1, Arg2, is_iless(Loc));
-        }
-        
-
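A short sketch of how the range-based lexicographical_compare overloads above are typically called; the literals are illustrative, not taken from the deleted header:

    #include <boost/algorithm/string/predicate.hpp>

    void compare_example() {
        bool lt  = boost::algorithm::lexicographical_compare("abc", "abd");   // true: 'c' < 'd'
        bool ilt = boost::algorithm::ilexicographical_compare("ABC", "abd");  // true: case-insensitive
        (void)lt; (void)ilt;
    }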
-//  all predicate  -----------------------------------------------//
-
-        //! 'All' predicate
-        /*!
-            This predicate holds if all the elements of the input satisfy a given
-            condition, represented by the predicate.
-            
-            \param Input An input sequence
-            \param Pred A predicate
-            \return The result of the test
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename RangeT, typename PredicateT>
-        inline bool all( 
-            const RangeT& Input, 
-            PredicateT Pred)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            typedef BOOST_STRING_TYPENAME 
-                range_const_iterator<RangeT>::type Iterator1T;
-
-            Iterator1T InputEnd=::boost::end(lit_input);
-            for( Iterator1T It=::boost::begin(lit_input); It!=InputEnd; ++It)
-            {
-                if (!Pred(*It))
-                    return false;
-            }
-            
-            return true;
-        }
-
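An illustrative sketch of the all() predicate above, combined with the classification predicates from boost/algorithm/string/classification.hpp (that header is assumed from the standard Boost layout, it is not part of this diff):

    #include <boost/algorithm/string/predicate.hpp>
    #include <boost/algorithm/string/classification.hpp>

    void all_example() {
        bool digits = boost::algorithm::all("12345", boost::algorithm::is_digit());  // true
        bool lower  = boost::algorithm::all("abc1", boost::algorithm::is_lower());   // false: '1' fails
        (void)digits; (void)lower;
    }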
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::starts_with;
-    using algorithm::istarts_with;
-    using algorithm::ends_with;
-    using algorithm::iends_with;
-    using algorithm::contains;
-    using algorithm::icontains;
-    using algorithm::equals;
-    using algorithm::iequals;
-    using algorithm::all;
-    using algorithm::lexicographical_compare;
-    using algorithm::ilexicographical_compare;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_PREDICATE_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate_facade.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate_facade.hpp
deleted file mode 100644
index a9753fc..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/predicate_facade.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//  Boost string_algo library predicate_facade.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_PREDICATE_FACADE_HPP
-#define BOOST_STRING_PREDICATE_FACADE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-/*
- \file boost/algorithm/string/predicate_facade.hpp
- This file contains predicate_facade definition. This template class is used
- to identify classification predicates, so they can be combined using
- composition operators.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  predicate facade ------------------------------------------------------//
-
-        //! Predicate facade
-        /*!
-            This class makes it possible to recognize classification
-            predicates, so that they can be combined using
-            composition operators.
-            Every classification predicate must be derived from this class.
-        */
-        template<typename Derived>
-        struct predicate_facade {};
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_PREDICATE_FACADE_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/regex.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/regex.hpp
deleted file mode 100644
index a6c7c60..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/regex.hpp
+++ /dev/null
@@ -1,646 +0,0 @@
-//  Boost string_algo library regex.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_REGEX_HPP
-#define BOOST_STRING_REGEX_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/regex.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/as_literal.hpp>
-
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/regex_find_format.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-#include <boost/algorithm/string/iter_find.hpp>
-
-/*! \file
-    Defines regex variants of the algorithms. 
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  find_regex  -----------------------------------------------//
-
-        //! Find regex algorithm
-        /*!
-            Search for a substring matching the given regex in the input.
-            
-            \param Input A container which will be searched.
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return 
-                An \c iterator_range delimiting the match. 
-                Returned iterator is either \c RangeT::iterator or 
-                \c RangeT::const_iterator, depending on the constness of 
-                the input parameter.
-
-              \note This function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename RangeT, 
-            typename CharT, 
-            typename RegexTraitsT>
-        inline iterator_range< 
-            BOOST_STRING_TYPENAME range_iterator<RangeT>::type >
-        find_regex( 
-            RangeT& Input, 
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_iterator<RangeT>::type> lit_input(::boost::as_literal(Input));
-
-            return ::boost::algorithm::regex_finder(Rx,Flags)(
-                ::boost::begin(lit_input), ::boost::end(lit_input) );
-        }
-
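A minimal sketch of calling find_regex as declared above; the variable names and the sample text are made up for illustration:

    #include <boost/algorithm/string/regex.hpp>
    #include <boost/regex.hpp>
    #include <string>

    void find_regex_example() {
        std::string text = "order #42 shipped";
        // The returned range delimits the first match inside 'text'; empty if nothing matched.
        boost::iterator_range<std::string::iterator> m =
            boost::algorithm::find_regex(text, boost::regex("#\\d+"));
        (void)m;  // here m would delimit "#42"
    }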
-//  replace_regex --------------------------------------------------------------------//
-
-        //! Replace regex algorithm
-        /*!
-            Search for a substring matching the given regex and format it with 
-            the specified format.             
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Rx A regular expression
-            \param Format Regex format definition
-            \param Flags Regex options
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input   
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline OutputIteratorT replace_regex_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Output,
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
-        //! Replace regex algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline SequenceT replace_regex_copy( 
-            const SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
-        //! Replace regex algorithm
-        /*!
-            Search for a substring matching the given regex and format it with 
-            the specified format. The input string is modified in-place.
-
-            \param Input An input string
-            \param Rx A regular expression
-            \param Format Regex format definition
-            \param Flags Regex options
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline void replace_regex( 
-            SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            ::boost::algorithm::find_format( 
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
-//  replace_all_regex --------------------------------------------------------------------//
-
-        //! Replace all regex algorithm
-        /*!
-            Format all substrings matching the given regex with the specified format. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Rx A regular expression
-            \param Format Regex format definition
-            \param Flags Regex options
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input     
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline OutputIteratorT replace_all_regex_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Output,
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
-        //! Replace all regex algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline SequenceT replace_all_regex_copy( 
-            const SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
-        //! Replace all regex algorithm
-        /*!
-            Format all substrings matching the given regex with the specified format. 
-            The input string is modified in-place.
-
-            \param Input An input string
-            \param Rx A regular expression
-            \param Format Regex format definition
-            \param Flags Regex options            
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT,
-            typename FormatStringTraitsT, typename FormatStringAllocatorT >
-        inline void replace_all_regex( 
-            SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            const std::basic_string<CharT, FormatStringTraitsT, FormatStringAllocatorT>& Format,
-            match_flag_type Flags=match_default | format_default )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::regex_formatter( Format, Flags ) );
-        }
-
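A usage sketch for the replace_regex family above, in-place variant. Note that these signatures take the format as a std::basic_string, so a bare char literal would not be deduced; the sample data is illustrative:

    #include <boost/algorithm/string/regex.hpp>
    #include <boost/regex.hpp>
    #include <string>

    void replace_regex_example() {
        std::string s = "a1b22c333";
        // Replace every run of digits with "#"; wrap the format in std::string explicitly.
        boost::algorithm::replace_all_regex(s, boost::regex("[0-9]+"), std::string("#"));
        // s == "a#b#c#"
    }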
-//  erase_regex --------------------------------------------------------------------//
-
-        //! Erase regex algorithm
-        /*!
-            Remove a substring matching the given regex from the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.                        
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return An output iterator pointing just after the last inserted character or
-                       a modified copy of the input    
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-       */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT, 
-            typename CharT, 
-            typename RegexTraitsT >
-        inline OutputIteratorT erase_regex_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase regex algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT >
-        inline SequenceT erase_regex_copy( 
-            const SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input, 
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase regex algorithm
-        /*!
-            Remove a substring matching the given regex from the input.
-            The input string is modified in-place.
-
-            \param Input An input string
-            \param Rx A regular expression
-            \param Flags Regex options
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT >
-        inline void erase_regex( 
-            SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-//  erase_all_regex --------------------------------------------------------------------//
-
-        //! Erase all regex algorithm
-        /*!
-            Erase all substrings matching the given regex from the input.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input                        
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename OutputIteratorT,
-            typename RangeT, 
-            typename CharT, 
-            typename RegexTraitsT >
-        inline OutputIteratorT erase_all_regex_copy(
-            OutputIteratorT Output,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::find_format_all_copy(
-                Output,
-                Input,
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase all regex algorithm
-        /*!
-            \overload
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT >
-        inline SequenceT erase_all_regex_copy( 
-            const SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input, 
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-        //! Erase all regex algorithm
-        /*!
-            Erase all substrings matching the given regex from the input.
-            The input string is modified in-place.
-
-            \param Input An input string
-            \param Rx A regular expression
-            \param Flags Regex options
-        */
-        template< 
-            typename SequenceT, 
-            typename CharT, 
-            typename RegexTraitsT>
-        inline void erase_all_regex( 
-            SequenceT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input, 
-                ::boost::algorithm::regex_finder( Rx, Flags ),
-                ::boost::algorithm::empty_formatter( Input ) );
-        }
-
-//  find_all_regex ------------------------------------------------------------------//
-
-        //! Find all regex algorithm
-        /*!
-            This algorithm finds all substrings matching the given regex
-            in the input.             
-            
-            Each part is copied and added as a new element to the output container.
-            Thus the result container must be able to hold copies
-            of the matches (in a compatible structure like std::string) or
-            references to them (e.g. using the iterator range class).
-            Examples of such a container are \c std::vector<std::string>
-            or \c std::list<boost::iterator_range<std::string::iterator>>
-
-            \param Result A container that can hold copies of references to the substrings.
-            \param Input A container which will be searched.
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-
-             \note This function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename SequenceSequenceT, 
-            typename RangeT,         
-            typename CharT, 
-            typename RegexTraitsT >
-        inline SequenceSequenceT& find_all_regex(
-            SequenceSequenceT& Result,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::iter_find(
-                Result,
-                Input,
-                ::boost::algorithm::regex_finder(Rx,Flags) );         
-        }
-
-//  split_regex ------------------------------------------------------------------//
-
-        //! Split regex algorithm
-        /*! 
-            Tokenize the input. This function is similar to C strtok. The input
-            sequence is split into tokens separated by separators; a separator
-            is every match of the given regex.
-            Each part is copied and added as a new element to the output container.
-            Thus the result container must be able to hold copies
-            of the matches (in a compatible structure like std::string) or
-            references to them (e.g. using the iterator range class).
-            Examples of such a container are \c std::vector<std::string>
-            or \c std::list<boost::iterator_range<std::string::iterator>>
-    
-            \param Result A container that can hold copies of references to the substrings.          
-            \param Input A container which will be searched.
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-
-               \note This function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename SequenceSequenceT, 
-            typename RangeT,         
-            typename CharT, 
-            typename RegexTraitsT >
-        inline SequenceSequenceT& split_regex(
-            SequenceSequenceT& Result,
-            const RangeT& Input,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            return ::boost::algorithm::iter_split(
-                Result,
-                Input,
-                ::boost::algorithm::regex_finder(Rx,Flags) );         
-        }
-
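A compact sketch of split_regex as declared above (find_all_regex is called the same way, but keeps the matches instead of the pieces between them); the sample input is illustrative:

    #include <boost/algorithm/string/regex.hpp>
    #include <boost/regex.hpp>
    #include <string>
    #include <vector>

    void split_regex_example() {
        std::string csv = "alpha, beta,gamma";
        std::vector<std::string> tokens;
        // Every match of the regex acts as a separator; prior content of 'tokens' is overwritten.
        boost::algorithm::split_regex(tokens, csv, boost::regex(",\\s*"));
        // tokens == {"alpha", "beta", "gamma"}
    }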
-//  join_if ------------------------------------------------------------------//
-
-#ifndef BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-
-        //! Conditional join algorithm
-        /*!
-            This algorithm joins all strings in a 'list' into one long string.
-            Segments are concatenated by the given separator. Only segments that
-            match the given regular expression will be added to the result.
-
-            This is a specialization of join_if algorithm.
-
-            \param Input A container that holds the input strings. It must be a container-of-containers.
-            \param Separator A string that will separate the joined segments.
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return Concatenated string.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename SequenceSequenceT, 
-            typename Range1T,             
-            typename CharT, 
-            typename RegexTraitsT >
-        inline typename range_value<SequenceSequenceT>::type 
-        join_if(
-            const SequenceSequenceT& Input,
-            const Range1T& Separator,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            // Define working types
-            typedef typename range_value<SequenceSequenceT>::type ResultT;
-            typedef typename range_const_iterator<SequenceSequenceT>::type InputIteratorT;
-
-            // Parse input
-            InputIteratorT itBegin=::boost::begin(Input);
-            InputIteratorT itEnd=::boost::end(Input);
-
-            // Construct container to hold the result
-            ResultT Result;
-
-
-            // Roll to the first element that will be added
-            while(
-                itBegin!=itEnd && 
-                !::boost::regex_match(::boost::begin(*itBegin), ::boost::end(*itBegin), Rx, Flags)) ++itBegin;
-
-            // Add this element
-            if(itBegin!=itEnd)
-            {
-                detail::insert(Result, ::boost::end(Result), *itBegin);
-                ++itBegin;
-            }
-
-            for(;itBegin!=itEnd; ++itBegin)
-            {
-                if(::boost::regex_match(::boost::begin(*itBegin), ::boost::end(*itBegin), Rx, Flags))
-                {
-                    // Add separator
-                    detail::insert(Result, ::boost::end(Result), ::boost::as_literal(Separator));
-                    // Add element
-                    detail::insert(Result, ::boost::end(Result), *itBegin);
-                }
-            }
-
-            return Result;
-        }
-
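A minimal sketch of the regex-filtered join_if above; the element values are made up, and note that regex_match must match each element in its entirety for it to be kept:

    #include <boost/algorithm/string/regex.hpp>
    #include <boost/regex.hpp>
    #include <string>
    #include <vector>

    void join_if_example() {
        std::vector<std::string> lines;
        lines.push_back("error: disk full");
        lines.push_back("info: ok");
        lines.push_back("error: timeout");
        // Keep only elements fully matching the regex, concatenated with "; ".
        std::string errors =
            boost::algorithm::join_if(lines, "; ", boost::regex("error:.*"));
        // errors == "error: disk full; error: timeout"
    }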
-#else  // BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-
-        //! Conditional join algorithm
-        /*!
-            This algorithm joins all strings in a 'list' into one long string.
-            Segments are concatenated by the given separator. Only segments that
-            match the given regular expression will be added to the result.
-
-            This is a specialization of join_if algorithm.
-
-            \param Input A container that holds the input strings. It must be a container-of-containers.
-            \param Separator A string that will separate the joined segments.
-            \param Rx A regular expression
-            \param Flags Regex options
-            \return Concatenated string.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< 
-            typename SequenceSequenceT, 
-            typename Range1T,             
-            typename CharT, 
-            typename RegexTraitsT >
-        inline typename range_value<SequenceSequenceT>::type 
-        join_if_regex(
-            const SequenceSequenceT& Input,
-            const Range1T& Separator,
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type Flags=match_default )
-        {
-            // Define working types
-            typedef typename range_value<SequenceSequenceT>::type ResultT;
-            typedef typename range_const_iterator<SequenceSequenceT>::type InputIteratorT;
-
-            // Parse input
-            InputIteratorT itBegin=::boost::begin(Input);
-            InputIteratorT itEnd=::boost::end(Input);
-
-            // Construct container to hold the result
-            ResultT Result;
-
-
-            // Roll to the first element that will be added
-            while(
-                itBegin!=itEnd && 
-                !::boost::regex_match(::boost::begin(*itBegin), ::boost::end(*itBegin), Rx, Flags)) ++itBegin;
-
-            // Add this element
-            if(itBegin!=itEnd)
-            {
-                detail::insert(Result, ::boost::end(Result), *itBegin);
-                ++itBegin;
-            }
-
-            for(;itBegin!=itEnd; ++itBegin)
-            {
-                if(::boost::regex_match(::boost::begin(*itBegin), ::boost::end(*itBegin), Rx, Flags))
-                {
-                    // Add separator
-                    detail::insert(Result, ::boost::end(Result), ::boost::as_literal(Separator));
-                    // Add element
-                    detail::insert(Result, ::boost::end(Result), *itBegin);
-                }
-            }
-
-            return Result;
-        }
-
-
-#endif // BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-
-    } // namespace algorithm
-
-    // pull names into the boost namespace
-    using algorithm::find_regex;
-    using algorithm::replace_regex;
-    using algorithm::replace_regex_copy;
-    using algorithm::replace_all_regex;
-    using algorithm::replace_all_regex_copy;
-    using algorithm::erase_regex;
-    using algorithm::erase_regex_copy;
-    using algorithm::erase_all_regex;
-    using algorithm::erase_all_regex_copy;
-    using algorithm::find_all_regex;
-    using algorithm::split_regex;
-
-#ifndef BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-    using algorithm::join_if;
-#else  // BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-    using algorithm::join_if_regex;
-#endif // BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_REGEX_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/regex_find_format.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/regex_find_format.hpp
deleted file mode 100644
index 409afc2..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/regex_find_format.hpp
+++ /dev/null
@@ -1,90 +0,0 @@
-//  Boost string_algo library regex_find_format.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_REGEX_FIND_FORMAT_HPP
-#define BOOST_STRING_REGEX_FIND_FORMAT_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/regex.hpp>
-#include <boost/algorithm/string/detail/finder_regex.hpp>
-#include <boost/algorithm/string/detail/formatter_regex.hpp>
-
-/*! \file
-    Defines the \c regex_finder and \c regex_formatter generators. These two functors
-    are designed to work together. \c regex_formatter uses additional information
-    about a match contained in the regex_finder search result.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  regex_finder  -----------------------------------------------//
-
-        //! "Regex" finder 
-        /*!
-            Construct the \c regex_finder. The finder uses the regex engine to search
-            for a match.
-            Result is given in \c regex_search_result. This is an extension
-            of the iterator_range. In addition it contains match results 
-            from the \c regex_search algorithm.
-
-            \param Rx A regular expression
-            \param MatchFlags Regex search options
-            \return An instance of the \c regex_finder object
-        */
-        template< 
-            typename CharT, 
-            typename RegexTraitsT>
-        inline detail::find_regexF< basic_regex<CharT, RegexTraitsT> >
-        regex_finder(
-            const basic_regex<CharT, RegexTraitsT>& Rx,
-            match_flag_type MatchFlags=match_default )
-        {
-            return detail::
-                find_regexF< 
-                    basic_regex<CharT, RegexTraitsT> >( Rx, MatchFlags );
-        }
-
-//  regex_formatter  --------------------------------------------//
-
-        //! Regex formatter
-        /*!
-            Construct the \c regex_formatter. Regex formatter uses the regex engine to
-            format a match found by the \c regex_finder. 
-            This formatter is designed to closely cooperate with \c regex_finder.
-
-            \param Format Regex format definition
-            \param Flags Format flags
-            \return An instance of the \c regex_formatter functor
-        */
-       template< 
-            typename CharT, 
-            typename TraitsT, typename AllocT >
-        inline detail::regex_formatF< std::basic_string< CharT, TraitsT, AllocT > >
-        regex_formatter( 
-            const std::basic_string<CharT, TraitsT, AllocT>& Format,
-            match_flag_type Flags=format_default )
-        {
-            return 
-                detail::regex_formatF< std::basic_string<CharT, TraitsT, AllocT> >(
-                    Format,
-                    Flags );
-        }
-
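A sketch of how the regex_finder/regex_formatter pair above is typically fed into find_format_all (declared in boost/algorithm/string/find_format.hpp, which is assumed from the standard Boost layout); the data and the $1 format string are illustrative:

    #include <boost/algorithm/string/regex_find_format.hpp>
    #include <boost/algorithm/string/find_format.hpp>
    #include <boost/regex.hpp>
    #include <string>

    void finder_formatter_example() {
        std::string s = "id=123;id=456";
        // regex_finder locates each match; regex_formatter expands $N sub-match references.
        boost::algorithm::find_format_all(
            s,
            boost::algorithm::regex_finder(boost::regex("id=(\\d+)")),
            boost::algorithm::regex_formatter(std::string("ID:$1")));
        // s == "ID:123;ID:456"
    }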
-    } // namespace algorithm
-
-    // pull the names to the boost namespace
-    using algorithm::regex_finder;
-    using algorithm::regex_formatter;
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_REGEX_FIND_FORMAT_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/replace.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/replace.hpp
deleted file mode 100644
index 2adb031..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/replace.hpp
+++ /dev/null
@@ -1,926 +0,0 @@
-//  Boost string_algo library replace.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_REPLACE_HPP
-#define BOOST_STRING_REPLACE_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/range/iterator_range_core.hpp>
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/iterator.hpp>
-#include <boost/range/const_iterator.hpp>
-
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-#include <boost/algorithm/string/compare.hpp>
-
-/*! \file
-    Defines various replace algorithms. Each algorithm replaces
-    part(s) of the input according to a set of search-and-replace criteria.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  replace_range --------------------------------------------------------------------//
-
-        //! Replace range algorithm
-        /*!
-            Replace the given range in the input string.
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param SearchRange A range in the input to be substituted
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT replace_range_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_const_iterator<Range1T>::type>& SearchRange,
-            const Range2T& Format)
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::const_formatter(Format));
-        }
-
-        //! Replace range algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT replace_range_copy( 
-            const SequenceT& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_const_iterator<SequenceT>::type>& SearchRange,
-            const RangeT& Format)
-        {
-            return ::boost::algorithm::find_format_copy(
-                Input,
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::const_formatter(Format));
-        }
-
-        //! Replace range algorithm
-        /*!
-            Replace the given range in the input string. 
-            The input sequence is modified in-place.
-
-            \param Input An input string
-            \param SearchRange A range in the input to be substituted
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void replace_range( 
-            SequenceT& Input,
-            const iterator_range<
-                BOOST_STRING_TYPENAME 
-                    range_iterator<SequenceT>::type>& SearchRange,
-            const RangeT& Format)
-        {
-            ::boost::algorithm::find_format(
-                Input,
-                ::boost::algorithm::range_finder(SearchRange),
-                ::boost::algorithm::const_formatter(Format));
-        }
-
-//  replace_first --------------------------------------------------------------------//
-
-        //! Replace first algorithm
-        /*!
-            Replace the first match of the search substring in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT replace_first_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format)
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace first algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT replace_first_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace first algorithm
-        /*!
-            Replace the first match of the search substring in the input 
-            with the format string. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void replace_first( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-//  replace_first ( case insensitive ) ---------------------------------------------//
-
-        //! Replace first algorithm ( case insensitive )
-        /*!
-            Replace the first match of the search substring in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT ireplace_first_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace first algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range2T, typename Range1T>
-        inline SequenceT ireplace_first_copy( 
-            const SequenceT& Input,
-            const Range2T& Search,
-            const Range1T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace first algorithm ( case insensitive )
-        /*!
-            Replace the first match of the search substring in the input 
-            with the format string. Input sequence is modified in-place.
-            Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void ireplace_first( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
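A minimal sketch of the in-place replace_first/ireplace_first variants above; the header path follows the standard Boost layout and the strings are illustrative:

    #include <boost/algorithm/string/replace.hpp>
    #include <string>

    void replace_first_example() {
        std::string s = "Hello hello";
        boost::algorithm::replace_first(s, "hello", "world");   // s == "Hello world"
        boost::algorithm::ireplace_first(s, "HELLO", "Hi");     // case-insensitive: s == "Hi world"
    }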
-//  replace_last --------------------------------------------------------------------//
-
-        //! Replace last algorithm
-        /*!
-            Replace the last match of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input            
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT replace_last_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace last algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT replace_last_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace last algorithm
-        /*!
-            Replace the last match of the search string in the input 
-            with the format string. Input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void replace_last( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::last_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-//  replace_last ( case insensitive ) -----------------------------------------------//
-
-        //! Replace last algorithm ( case insensitive )
-        /*!
-            Replace the last match of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input  
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT ireplace_last_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace last algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT ireplace_last_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace last algorithm ( case insensitive )
-        /*!
-            Replace the last match of the search string in the input 
-            with the format string. The input sequence is modified in-place.
-            Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void ireplace_last( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::last_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-//  replace_nth --------------------------------------------------------------------//
-
-        //! Replace nth algorithm
-        /*!
-            Replace an Nth (zero-indexed) match of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT replace_nth_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            int Nth,
-            const Range3T& Format )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace nth algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT replace_nth_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            int Nth,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace nth algorithm
-        /*!
-            Replace an Nth (zero-indexed) match of the search string in the input 
-            with the format string. Input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void replace_nth( 
-            SequenceT& Input,
-            const Range1T& Search,
-            int Nth,
-            const Range2T& Format )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
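A short sketch of replace_nth above, showing the zero-based index and the negative "count from the end" form; the sample string is made up for illustration:

    #include <boost/algorithm/string/replace.hpp>
    #include <string>

    void replace_nth_example() {
        std::string path = "a.b.c.d";
        boost::algorithm::replace_nth(path, ".", 1, "-");    // zero-based: second '.' -> "a.b-c.d"
        boost::algorithm::replace_nth(path, ".", -1, "_");   // negative: last '.' -> "a.b-c_d"
    }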
-//  replace_nth ( case insensitive ) -----------------------------------------------//
-        
-        //! Replace nth algorithm ( case insensitive )
-        /*!
-            Replace an Nth (zero-indexed) match of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input            
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-       */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT ireplace_nth_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            int Nth,
-            const Range3T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc) ),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace nth algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT ireplace_nth_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            int Nth,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace nth algorithm ( case insensitive )
-        /*!
-            Replace an Nth (zero-indexed) match of the search string in the input 
-            with the format string. Input sequence is modified in-place.
-            Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Nth An index of the match to be replaced. The index is 0-based.
-                For negative N, matches are counted from the end of string.
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void ireplace_nth( 
-            SequenceT& Input,
-            const Range1T& Search,
-            int Nth,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-//  replace_all --------------------------------------------------------------------//
-
-        //! Replace all algorithm
-        /*!
-            Replace all occurrences of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input 
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT replace_all_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format )
-        {
-            return ::boost::algorithm::find_format_all_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace all algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT replace_all_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input,
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace all algorithm
-        /*!
-            Replace all occurrences of the search string in the input 
-            with the format string. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void replace_all( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input, 
-                ::boost::algorithm::first_finder(Search),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-        
-//  replace_all ( case insensitive ) -----------------------------------------------//
-
-        //! Replace all algorithm ( case insensitive )
-        /*!
-            Replace all occurrences of the search string in the input 
-            with the format string. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            Searching is case insensitive.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input 
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T,
-            typename Range3T>
-        inline OutputIteratorT ireplace_all_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            const Range2T& Search,
-            const Range3T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_all_copy(
-                Output,
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace all algorithm ( case insensitive )
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline SequenceT ireplace_all_copy( 
-            const SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::find_format_all_copy( 
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace all algorithm ( case insensitive )
-        /*!
-            Replace all occurrences of the search string in the input 
-            with the format string. The input sequence is modified in-place.
-            Searching is case insensitive.
-
-            \param Input An input string
-            \param Search A substring to be searched for 
-            \param Format A substitute string
-            \param Loc A locale used for case insensitive comparison
-        */
-        template<typename SequenceT, typename Range1T, typename Range2T>
-        inline void ireplace_all( 
-            SequenceT& Input,
-            const Range1T& Search,
-            const Range2T& Format,
-            const std::locale& Loc=std::locale() )
-        {
-            ::boost::algorithm::find_format_all( 
-                Input, 
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc)),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-        
-//  replace_head --------------------------------------------------------------------//
-
-        //! Replace head algorithm
-        /*!
-            Replace the head of the input with the given format string. 
-            The head is a prefix of a string of given size. 
-            If the sequence is shorter than required, the whole string is 
-            considered to be the head. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-            
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param N Length of the head.
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                a modified copy of the input  
-
-            \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT replace_head_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            int N,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace head algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT replace_head_copy( 
-            const SequenceT& Input,
-            int N,
-            const RangeT& Format )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace head algorithm
-        /*!
-            Replace the head of the input with the given format string. 
-            The head is a prefix of a string of given size. 
-            If the sequence is shorter than required, the whole string is 
-            considered to be the head. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param N Length of the head.
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void replace_head( 
-            SequenceT& Input,
-            int N,
-            const RangeT& Format )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::head_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-//  replace_tail --------------------------------------------------------------------//
-
-        //! Replace tail algorithm
-        /*!
-            Replace the tail of the input with the given format string. 
-            The tail is a suffix of a string of given size. 
-            If the sequence is shorter than required, the whole string is 
-            considered to be the tail. 
-            The result is a modified copy of the input. It is returned as a sequence 
-            or copied to the output iterator.
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input string
-            \param N Length of the tail.
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \param Format A substitute string
-            \return An output iterator pointing just after the last inserted character or
-                    a modified copy of the input   
-
-              \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<
-            typename OutputIteratorT,
-            typename Range1T, 
-            typename Range2T>
-        inline OutputIteratorT replace_tail_copy(
-            OutputIteratorT Output,
-            const Range1T& Input,
-            int N,
-            const Range2T& Format )
-        {
-            return ::boost::algorithm::find_format_copy(
-                Output,
-                Input,
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace tail algorithm
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT replace_tail_copy( 
-            const SequenceT& Input,
-            int N,
-            const RangeT& Format )
-        {
-            return ::boost::algorithm::find_format_copy( 
-                Input,
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-        //! Replace tail algorithm
-        /*!
-            Replace the tail of the input with the given format sequence. 
-            The tail is a suffix of a string of given size. 
-            If the sequence is shorter than required, the whole string is 
-            considered to be the tail. The input sequence is modified in-place.
-
-            \param Input An input string
-            \param N Length of the tail.
-                For N>=0, at most N characters are extracted.
-                For N<0, size(Input)-|N| characters are extracted.
-            \param Format A substitute string
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void replace_tail( 
-            SequenceT& Input,
-            int N,
-            const RangeT& Format )
-        {
-            ::boost::algorithm::find_format( 
-                Input, 
-                ::boost::algorithm::tail_finder(N),
-                ::boost::algorithm::const_formatter(Format) );
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::replace_range_copy;
-    using algorithm::replace_range;
-    using algorithm::replace_first_copy;
-    using algorithm::replace_first;
-    using algorithm::ireplace_first_copy;
-    using algorithm::ireplace_first;
-    using algorithm::replace_last_copy;
-    using algorithm::replace_last;
-    using algorithm::ireplace_last_copy;
-    using algorithm::ireplace_last;
-    using algorithm::replace_nth_copy;
-    using algorithm::replace_nth;
-    using algorithm::ireplace_nth_copy;
-    using algorithm::ireplace_nth;
-    using algorithm::replace_all_copy;
-    using algorithm::replace_all;
-    using algorithm::ireplace_all_copy;
-    using algorithm::ireplace_all;
-    using algorithm::replace_head_copy;
-    using algorithm::replace_head;
-    using algorithm::replace_tail_copy;
-    using algorithm::replace_tail;
-
-} // namespace boost
-
-#endif  // BOOST_REPLACE_HPP
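// A minimal usage sketch for the replace algorithms shown above, assuming the
// upstream Boost string_algo release still provides
// <boost/algorithm/string/replace.hpp>. The string values are illustrative only.
#include <boost/algorithm/string/replace.hpp>
#include <iostream>
#include <string>

int main() {
    std::string s = "one two two three";
    boost::replace_all(s, "two", "2");                            // in-place: "one 2 2 three"
    std::string t = boost::ireplace_nth_copy(s, "THREE", 0, "3"); // case-insensitive copy
    std::cout << s << '\n' << t << '\n';                          // "one 2 2 three" / "one 2 2 3"
    return 0;
}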
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/sequence_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/sequence_traits.hpp
deleted file mode 100644
index be151f8..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/sequence_traits.hpp
+++ /dev/null
@@ -1,120 +0,0 @@
-//  Boost string_algo library sequence_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_SEQUENCE_TRAITS_HPP
-#define BOOST_STRING_SEQUENCE_TRAITS_HPP
-
-#include <boost/config.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/algorithm/string/yes_no_type.hpp>
-
-/*! \file
-    Traits defined in this header are used by various algorithms to achieve
-    better performance for specific containers.
-    Traits provide fail-safe defaults. If a container supports some of these
-    features, it is possible to specialize the specific trait for this container.
-    For compilers lacking the necessary support, it is possible to define an override for a
-    specific tester function.
-
-    Due to a language restriction, it is not currently possible to define specializations for
-    STL containers without including the corresponding header. To decrease the overhead
-    needed by this inclusion, the user can selectively include a specialization
-    header for a specific container. These headers are located in the
-    boost/algorithm/string/std directory. Alternatively, the
-    boost/algorithm/string/std_containers_traits.hpp header can be included; it contains
-    specializations for all STL containers.
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  sequence traits  -----------------------------------------------//
-
-
-        //! Native replace trait
-        /*!
-            This trait specifies that the sequence has a \c std::string like replace method
-        */
-        template< typename T >
-        class has_native_replace
-        {
-
-        public:
-#    if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = false };
-#    else
-            BOOST_STATIC_CONSTANT(bool, value=false);
-#    endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-
-
-            typedef mpl::bool_<has_native_replace<T>::value> type;
-        };
-
-
-        //! Stable iterators trait
-        /*!
-            This trait specifies that the sequence has stable iterators. It means
-            that operations like insert/erase/replace do not invalidate iterators.
-        */
-        template< typename T >
-        class has_stable_iterators
-        {
-        public:
-#    if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = false };
-#    else
-            BOOST_STATIC_CONSTANT(bool, value=false);
-#    endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-
-            typedef mpl::bool_<has_stable_iterators<T>::value> type;
-        };
-
-
-        //! Const time insert trait
-        /*!
-            This trait specifies that the sequence's insert method has
-            constant time complexity.
-        */
-        template< typename T >
-        class has_const_time_insert
-        {
-        public:
-#    if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = false };
-#    else
-            BOOST_STATIC_CONSTANT(bool, value=false);
-#    endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-
-            typedef mpl::bool_<has_const_time_insert<T>::value> type;
-        };
-
-
-        //! Const time erase trait
-        /*!
-            This trait specifies that the sequence's erase method has
-            constant time complexity.
-        */
-        template< typename T >
-        class has_const_time_erase
-        {
-        public:
-#    if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = false };
-#    else
-            BOOST_STATIC_CONSTANT(bool, value=false);
-#    endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-
-            typedef mpl::bool_<has_const_time_erase<T>::value> type;
-        };
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_SEQUENCE_TRAITS_HPP
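// A minimal sketch of how these traits are intended to be specialized for a
// container type, mirroring the std::list specializations that follow in
// list_traits.hpp. MyStableVector is a hypothetical container used only for
// illustration.
#include <boost/algorithm/string/sequence_traits.hpp>

template<typename T> class MyStableVector { /* hypothetical container */ };

namespace boost { namespace algorithm {

    // Declare that MyStableVector's iterators survive insert/erase.
    template<typename T>
    class has_stable_iterators< MyStableVector<T> >
    {
    public:
        BOOST_STATIC_CONSTANT(bool, value = true);
        typedef mpl::bool_<value> type;
    };

} } // namespace boost::algorithm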
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/split.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/split.hpp
deleted file mode 100644
index cae712c..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/split.hpp
+++ /dev/null
@@ -1,163 +0,0 @@
-//  Boost string_algo library split.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2006.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_SPLIT_HPP
-#define BOOST_STRING_SPLIT_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/algorithm/string/iter_find.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/compare.hpp>
-
-/*! \file
-    Defines basic split algorithms. 
-    Split algorithms can be used to divide a string
-    into several parts according to given criteria.
-    
-    Each part is copied and added as a new element to the
-    output container.
-    Thus the result container must be able to hold copies
-    of the matches (in a compatible structure like std::string) or
-    references to them (e.g. using the iterator range class).
-    Examples of such a container are \c std::vector<std::string>
-    or \c std::list<boost::iterator_range<std::string::iterator>>
-*/
-
-namespace boost {
-    namespace algorithm {
-
-//  find_all  ------------------------------------------------------------//
-
-        //! Find all algorithm
-        /*!
-            This algorithm finds all occurrences of the search string
-            in the input.
-            
-            Each part is copied and added as a new element to the
-            output container.
-            Thus the result container must be able to hold copies
-            of the matches (in a compatible structure like std::string) or
-            references to them (e.g. using the iterator range class).
-            Examples of such a container are \c std::vector<std::string>
-            or \c std::list<boost::iterator_range<std::string::iterator>>
-
-            \param Result A container that can hold copies of references to the substrings
-            \param Input A container which will be searched.
-            \param Search A substring to be searched for.
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< typename SequenceSequenceT, typename Range1T, typename Range2T >
-        inline SequenceSequenceT& find_all(
-            SequenceSequenceT& Result,
-            Range1T& Input,
-            const Range2T& Search)
-        {
-            return ::boost::algorithm::iter_find(
-                Result,
-                Input,
-                ::boost::algorithm::first_finder(Search) );        
-        }
-
-        //! Find all algorithm ( case insensitive ) 
-        /*!
-            This algorithm finds all occurrences of the search string
-            in the input. 
-            Each part is copied and added as a new element to the
-            output container. Thus the result container must be able to hold copies
-            of the matches (in a compatible structure like std::string) or
-            references to them (e.g. using the iterator range class).
-            Examples of such a container are \c std::vector<std::string>
-            or \c std::list<boost::iterator_range<std::string::iterator>>
-
-            Searching is case insensitive.
-
-            \param Result A container that can hold copies of references to the substrings
-            \param Input A container which will be searched.
-            \param Search A substring to be searched for.
-            \param Loc A locale used for case insensitive comparison
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< typename SequenceSequenceT, typename Range1T, typename Range2T >
-        inline SequenceSequenceT& ifind_all(
-            SequenceSequenceT& Result,
-            Range1T& Input,
-            const Range2T& Search,
-            const std::locale& Loc=std::locale() )
-        {
-            return ::boost::algorithm::iter_find(
-                Result,
-                Input,
-                ::boost::algorithm::first_finder(Search, is_iequal(Loc) ) );        
-        }
-
-
-//  tokenize  -------------------------------------------------------------//
-
-        //! Split algorithm
-        /*! 
-            Tokenize expression. This function is equivalent to C strtok. The input
-            sequence is split into tokens, separated by separators. Separators 
-            are given by means of the predicate.
-
-            Each part is copied and added as a new element to the
-            output container.
-            Thus the result container must be able to hold copies
-            of the matches (in a compatible structure like std::string) or
-            references to them (e.g. using the iterator range class).
-            Examples of such a container are \c std::vector<std::string>
-            or \c std::list<boost::iterator_range<std::string::iterator>>
-    
-            \param Result A container that can hold copies of references to the substrings          
-            \param Input A container which will be searched.
-            \param Pred A predicate to identify separators. This predicate is 
-                supposed to return true if a given element is a separator.
-            \param eCompress If the eCompress argument is set to token_compress_on, adjacent 
-                separators are merged together. Otherwise, every two separators
-                delimit a token.
-            \return A reference to the result
-
-            \note Prior content of the result will be overwritten.
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template< typename SequenceSequenceT, typename RangeT, typename PredicateT >
-        inline SequenceSequenceT& split(
-            SequenceSequenceT& Result,
-            RangeT& Input,
-            PredicateT Pred,
-            token_compress_mode_type eCompress=token_compress_off )
-        {
-            return ::boost::algorithm::iter_split(
-                Result,
-                Input,
-                ::boost::algorithm::token_finder( Pred, eCompress ) );         
-        }
-
-    } // namespace algorithm
-
-    // pull names to the boost namespace
-    using algorithm::find_all;
-    using algorithm::ifind_all;
-    using algorithm::split;    
-
-} // namespace boost
-
-
-#endif  // BOOST_STRING_SPLIT_HPP
-
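// A minimal usage sketch for the split algorithms shown above, assuming the
// upstream Boost headers <boost/algorithm/string/split.hpp> and
// <boost/algorithm/string/classification.hpp> remain available.
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::string csv = "a,b,,c";
    std::vector<std::string> parts;
    // token_compress_on merges the adjacent commas, yielding "a", "b", "c".
    boost::split(parts, csv, boost::is_any_of(","), boost::token_compress_on);
    for (const std::string& p : parts) std::cout << p << '\n';
    return 0;
}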
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/list_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/std/list_traits.hpp
deleted file mode 100644
index a3cf7bb..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/list_traits.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//  Boost string_algo library list_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_STD_LIST_TRAITS_HPP
-#define BOOST_STRING_STD_LIST_TRAITS_HPP
-
-#include <boost/algorithm/string/yes_no_type.hpp>
-#include <list>
-#include <boost/algorithm/string/sequence_traits.hpp>
-
-namespace boost {
-    namespace algorithm {
-
-//  std::list<> traits  -----------------------------------------------//
-
-
-        // stable iterators trait
-        template<typename T, typename AllocT>
-        class has_stable_iterators< ::std::list<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_stable_iterators<T>::value> type;
-        };
-
-        // const time insert trait
-        template<typename T, typename AllocT>
-        class has_const_time_insert< ::std::list<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_const_time_insert<T>::value> type;
-        };
-
-        // const time erase trait
-        template<typename T, typename AllocT>
-        class has_const_time_erase< ::std::list<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_const_time_erase<T>::value> type;
-        };
-
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_STD_LIST_TRAITS_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/rope_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/std/rope_traits.hpp
deleted file mode 100644
index 637059a..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/rope_traits.hpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//  Boost string_algo library rope_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_STD_ROPE_TRAITS_HPP
-#define BOOST_STRING_STD_ROPE_TRAITS_HPP
-
-#include <boost/algorithm/string/yes_no_type.hpp>
-#include <rope>
-#include <boost/algorithm/string/sequence_traits.hpp>
-
-namespace boost {
-    namespace algorithm {
-
-//  SGI's std::rope<> traits  -----------------------------------------------//
-
-    
-    // native replace trait
-        template<typename T, typename TraitsT, typename AllocT>
-        class has_native_replace< std::rope<T,TraitsT,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<value> type;     
-        };
-
-    // stable iterators trait
-        template<typename T, typename TraitsT, typename AllocT>
-        class has_stable_iterators< std::rope<T,TraitsT,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<value> type;     
-        };
-
-    // const time insert trait
-        template<typename T, typename TraitsT, typename AllocT>
-        class has_const_time_insert< std::rope<T,TraitsT,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<value> type;     
-        };
-
-    // const time erase trait
-        template<typename T, typename TraitsT, typename AllocT>
-        class has_const_time_erase< std::rope<T,TraitsT,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<value> type;     
-        };
-
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_STD_ROPE_TRAITS_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/slist_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/std/slist_traits.hpp
deleted file mode 100644
index c30b93c..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/slist_traits.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-//  Boost string_algo library slist_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003. 
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_STD_SLIST_TRAITS_HPP
-#define BOOST_STRING_STD_SLIST_TRAITS_HPP
-
-#include <boost/algorithm/string/config.hpp>
-#include <boost/algorithm/string/yes_no_type.hpp>
-#include BOOST_SLIST_HEADER 
-#include <boost/algorithm/string/sequence_traits.hpp>
-
-namespace boost {
-    namespace algorithm {
-
-//  SGI's std::slist<> traits  -----------------------------------------------//
-
-
-    // stable iterators trait
-        template<typename T, typename AllocT>
-        class has_stable_iterators< BOOST_STD_EXTENSION_NAMESPACE::slist<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_stable_iterators<T>::value> type;
-        };
-
-    // const time insert trait
-        template<typename T, typename AllocT>
-        class has_const_time_insert< BOOST_STD_EXTENSION_NAMESPACE::slist<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_const_time_insert<T>::value> type;
-        };
-
-    // const time erase trait
-        template<typename T, typename AllocT>
-        class has_const_time_erase< BOOST_STD_EXTENSION_NAMESPACE::slist<T,AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true };
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            typedef mpl::bool_<has_const_time_erase<T>::value> type;
-        };
-
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_STD_SLIST_TRAITS_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/string_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/std/string_traits.hpp
deleted file mode 100644
index c940830..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/std/string_traits.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//  Boost string_algo library string_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_STD_STRING_TRAITS_HPP
-#define BOOST_STRING_STD_STRING_TRAITS_HPP
-
-#include <boost/algorithm/string/yes_no_type.hpp>
-#include <string>
-#include <boost/algorithm/string/sequence_traits.hpp>
-
-namespace boost {
-    namespace algorithm {
-
-//  std::basic_string<> traits  -----------------------------------------------//
-
-
-    // native replace trait
-        template<typename T, typename TraitsT, typename AllocT>
-        class has_native_replace< std::basic_string<T, TraitsT, AllocT> >
-        {
-        public:
-#if BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-            enum { value = true } ;
-#else
-            BOOST_STATIC_CONSTANT(bool, value=true);
-#endif // BOOST_WORKAROUND( __IBMCPP__, <= 600 )
-
-        typedef mpl::bool_<has_native_replace<T>::value> type;
-        };
-
-
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_STD_STRING_TRAITS_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/std_containers_traits.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/std_containers_traits.hpp
deleted file mode 100644
index 3f02246..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/std_containers_traits.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//  Boost string_algo library std_containers_traits.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
-#define BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
-
-/*!\file 
-    This file includes sequence traits for stl containers.
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/string/std/string_traits.hpp>
-#include <boost/algorithm/string/std/list_traits.hpp>
-
-#ifdef BOOST_HAS_SLIST
-#   include <boost/algorithm/string/std/slist_traits.hpp>
-#endif
-
-#endif  // BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/trim.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/trim.hpp
deleted file mode 100644
index e740d57..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/trim.hpp
+++ /dev/null
@@ -1,398 +0,0 @@
-//  Boost string_algo library trim.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_TRIM_HPP
-#define BOOST_STRING_TRIM_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/range/begin.hpp>
-#include <boost/range/end.hpp>
-#include <boost/range/const_iterator.hpp>
-#include <boost/range/as_literal.hpp>
-#include <boost/range/iterator_range_core.hpp>
-
-#include <boost/algorithm/string/detail/trim.hpp>
-#include <boost/algorithm/string/classification.hpp>
-#include <locale>
-
-/*! \file
-    Defines trim algorithms.
-    Trim algorithms are used to remove trailing and leading spaces from a 
-    sequence (string). Space is recognized using given locales.
-
-    Parametric (\c _if) variants use a predicate (functor) to select which characters
-    are to be trimmed. 
-    Functions take a selection predicate as a parameter, which is used to determine 
-    whether a character is a space. Common predicates are provided in classification.hpp header.
-
-*/
-
-namespace boost {
-    namespace algorithm {
-
-    //  left trim  -----------------------------------------------//
-
-
-        //! Left trim - parametric
-        /*!
-            Remove all leading spaces from the input. 
-            The supplied predicate is used to determine which characters are considered spaces.
-            The result is a trimmed copy of the input. It is returned as a sequence 
-            or copied to the output iterator
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input range
-            \param IsSpace A unary predicate identifying spaces
-            \return 
-                An output iterator pointing just after the last inserted character or
-                a copy of the input
-
-               \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<typename OutputIteratorT, typename RangeT, typename PredicateT>
-        inline OutputIteratorT trim_left_copy_if( 
-            OutputIteratorT Output,
-            const RangeT& Input,
-            PredicateT IsSpace)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_range(::boost::as_literal(Input));
-
-            std::copy( 
-                ::boost::algorithm::detail::trim_begin( 
-                    ::boost::begin(lit_range), 
-                    ::boost::end(lit_range), 
-                    IsSpace ),
-                ::boost::end(lit_range),
-                Output);
-
-            return Output;
-        }
-
-        //! Left trim - parametric
-        /*!
-            \overload
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline SequenceT trim_left_copy_if(const SequenceT& Input, PredicateT IsSpace)
-        {
-            return SequenceT( 
-                ::boost::algorithm::detail::trim_begin( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    IsSpace ),
-                ::boost::end(Input));
-        }
-
-        //! Left trim - parametric
-        /*!
-            Remove all leading spaces from the input. 
-            The result is a trimmed copy of the input.
-
-            \param Input An input sequence
-            \param Loc a locale used for 'space' classification
-            \return A trimmed copy of the input
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename SequenceT>
-        inline SequenceT trim_left_copy(const SequenceT& Input, const std::locale& Loc=std::locale())
-        {
-            return            
-                ::boost::algorithm::trim_left_copy_if(
-                    Input, 
-                    is_space(Loc));
-        }
-
-        //! Left trim
-        /*!
-            Remove all leading spaces from the input. The supplied predicate is 
-            used to determine which characters are considered spaces.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param IsSpace A unary predicate identifying spaces
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline void trim_left_if(SequenceT& Input, PredicateT IsSpace)
-        {
-            Input.erase( 
-                ::boost::begin(Input),
-                ::boost::algorithm::detail::trim_begin( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    IsSpace));
-        }
-
-        //! Left trim
-        /*!
-            Remove all leading spaces from the input.
-            The Input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-        */
-        template<typename SequenceT>
-        inline void trim_left(SequenceT& Input, const std::locale& Loc=std::locale())
-        {
-            ::boost::algorithm::trim_left_if( 
-                Input, 
-                is_space(Loc));
-        }
-
-    //  right trim  -----------------------------------------------//
-
-        //! Right trim - parametric
-        /*!
-            Remove all trailing spaces from the input.             
-            The supplied predicate is used to determine which characters are considered spaces.
-            The result is a trimmed copy of the input. It is returned as a sequence 
-            or copied to the output iterator
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input range
-            \param IsSpace A unary predicate identifying spaces
-            \return 
-                An output iterator pointing just after the last inserted character or
-                a copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<typename OutputIteratorT, typename RangeT, typename PredicateT>
-        inline OutputIteratorT trim_right_copy_if( 
-            OutputIteratorT Output,
-            const RangeT& Input,
-            PredicateT IsSpace )
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_range(::boost::as_literal(Input));
-         
-            std::copy( 
-                ::boost::begin(lit_range),
-                ::boost::algorithm::detail::trim_end( 
-                    ::boost::begin(lit_range), 
-                    ::boost::end(lit_range), 
-                    IsSpace ),
-                Output );
-
-            return Output;
-        }
-
-        //! Right trim - parametric
-        /*!
-            \overload
-         */
-        template<typename SequenceT, typename PredicateT>
-        inline SequenceT trim_right_copy_if(const SequenceT& Input, PredicateT IsSpace)
-        {
-            return SequenceT( 
-                ::boost::begin(Input),
-                ::boost::algorithm::detail::trim_end( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    IsSpace)
-                );
-        }
-
-        //! Right trim
-        /*!
-            Remove all trailing spaces from the input. 
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-            \return A trimmed copy of the input
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename SequenceT>
-        inline SequenceT trim_right_copy(const SequenceT& Input, const std::locale& Loc=std::locale())
-        {
-            return 
-                ::boost::algorithm::trim_right_copy_if( 
-                    Input, 
-                    is_space(Loc));
-        }
-
-            
-        //! Right trim - parametric
-        /*!
-            Remove all trailing spaces from the input.
-            The supplied predicate is used to determine which characters are considered spaces.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param IsSpace A unary predicate identifying spaces
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline void trim_right_if(SequenceT& Input, PredicateT IsSpace)
-        {
-            Input.erase(
-                ::boost::algorithm::detail::trim_end( 
-                    ::boost::begin(Input), 
-                    ::boost::end(Input), 
-                    IsSpace ),
-                ::boost::end(Input)
-                );
-        }
-
-
-        //! Right trim
-        /*!
-            Remove all trailing spaces from the input. 
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-        */
-        template<typename SequenceT>
-        inline void trim_right(SequenceT& Input, const std::locale& Loc=std::locale())
-        {
-            ::boost::algorithm::trim_right_if(
-                Input, 
-                is_space(Loc) );
-        }
-
-    //  both side trim  -----------------------------------------------//
-
-        //! Trim - parametric
-        /*!
-            Remove all trailing and leading spaces from the input. 
-            The supplied predicate is used to determine which characters are considered spaces.
-            The result is a trimmed copy of the input. It is returned as a sequence 
-            or copied to the output iterator
-
-            \param Output An output iterator to which the result will be copied
-            \param Input An input range
-            \param IsSpace A unary predicate identifying spaces
-            \return 
-                An output iterator pointing just after the last inserted character or
-                a copy of the input
-
-             \note The second variant of this function provides the strong exception-safety guarantee
-        */
-        template<typename OutputIteratorT, typename RangeT, typename PredicateT>
-        inline OutputIteratorT trim_copy_if( 
-            OutputIteratorT Output,
-            const RangeT& Input,
-            PredicateT IsSpace)
-        {
-            iterator_range<BOOST_STRING_TYPENAME range_const_iterator<RangeT>::type> lit_range(::boost::as_literal(Input));
-
-            BOOST_STRING_TYPENAME 
-                range_const_iterator<RangeT>::type TrimEnd=
-                ::boost::algorithm::detail::trim_end( 
-                    ::boost::begin(lit_range), 
-                    ::boost::end(lit_range), 
-                    IsSpace);
-
-            std::copy( 
-                detail::trim_begin( 
-                    ::boost::begin(lit_range), TrimEnd, IsSpace),
-                TrimEnd,
-                Output
-                );
-
-            return Output;
-        }
-
-        //! Trim - parametric
-        /*!
-            \overload
-         */
-        template<typename SequenceT, typename PredicateT>
-        inline SequenceT trim_copy_if(const SequenceT& Input, PredicateT IsSpace)
-        {
-            BOOST_STRING_TYPENAME 
-                range_const_iterator<SequenceT>::type TrimEnd=
-                    ::boost::algorithm::detail::trim_end( 
-                        ::boost::begin(Input), 
-                        ::boost::end(Input), 
-                        IsSpace);
-
-            return SequenceT( 
-                detail::trim_begin( 
-                    ::boost::begin(Input), 
-                    TrimEnd, 
-                    IsSpace),
-                TrimEnd
-                );
-        }
-
-        //! Trim
-        /*!
-            Remove all leading and trailing spaces from the input. 
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-            \return A trimmed copy of the input
-
-            \note This function provides the strong exception-safety guarantee
-        */
-        template<typename SequenceT>
-        inline SequenceT trim_copy( const SequenceT& Input, const std::locale& Loc=std::locale() )
-        {
-            return
-                ::boost::algorithm::trim_copy_if(
-                    Input, 
-                    is_space(Loc) );
-        }
-     
-        //! Trim
-        /*!
-            Remove all leading and trailing spaces from the input. 
-            The supplied predicate is used to determine which characters are considered spaces.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param IsSpace A unary predicate identifying spaces
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline void trim_if(SequenceT& Input, PredicateT IsSpace)
-        {
-            ::boost::algorithm::trim_right_if( Input, IsSpace );
-            ::boost::algorithm::trim_left_if( Input, IsSpace );
-        }
-
-        //! Trim
-        /*!
-            Remove all leading and trailing spaces from the input. 
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-        */
-        template<typename SequenceT>
-        inline void trim(SequenceT& Input, const std::locale& Loc=std::locale())
-        {
-            ::boost::algorithm::trim_if(
-                Input, 
-                is_space( Loc ) );
-        }
-
-    } // namespace algorithm 
-
-    // pull names to the boost namespace
-    using algorithm::trim_left;
-    using algorithm::trim_left_if;
-    using algorithm::trim_left_copy;
-    using algorithm::trim_left_copy_if;
-    using algorithm::trim_right;
-    using algorithm::trim_right_if;
-    using algorithm::trim_right_copy;
-    using algorithm::trim_right_copy_if;
-    using algorithm::trim;
-    using algorithm::trim_if;
-    using algorithm::trim_copy;
-    using algorithm::trim_copy_if;
-
-} // namespace boost
-
-#endif  // BOOST_STRING_TRIM_HPP
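// A minimal usage sketch for the trim algorithms shown above, assuming the
// upstream Boost header <boost/algorithm/string/trim.hpp> remains available.
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <iostream>
#include <string>

int main() {
    std::string s = "  hello world \t";
    std::string copy = boost::trim_copy(s);            // "hello world"; s is unchanged
    boost::trim_right_if(s, boost::is_any_of(" \t"));  // in-place, trailing characters only
    std::cout << '[' << copy << "] [" << s << "]\n";   // "[hello world] [  hello world]"
    return 0;
}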
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/trim_all.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/trim_all.hpp
deleted file mode 100644
index a616f7f..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/trim_all.hpp
+++ /dev/null
@@ -1,217 +0,0 @@
-//  Boost string_algo library trim_all.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_TRIM_ALL_HPP
-#define BOOST_STRING_TRIM_ALL_HPP
-
-#include <boost/algorithm/string/config.hpp>
-
-#include <boost/algorithm/string/trim.hpp>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <locale>
-
-/*! \file
-    Defines trim_all algorithms.
-    
-    Just like \c trim, \c trim_all removes all trailing and leading spaces from a 
-    sequence (string). In addition, spaces in the middle of the sequence are truncated
-    to just one character. Space is recognized using given locales.
-
-    \c trim_fill acts as trim_all, but the spaces in the middle are replaced with 
-    a user-defined sequence of characters.
-
-    Parametric (\c _if) variants use a predicate (functor) to select which characters
-    are to be trimmed. 
-    Functions take a selection predicate as a parameter, which is used to determine 
-    whether a character is a space. Common predicates are provided in classification.hpp header.
-
-*/
-
-namespace boost {
-    namespace algorithm {
-
-        // multi line trim  ----------------------------------------------- //
-
-        //! Trim All - parametric
-        /*!
-            Remove all leading and trailing spaces from the input and
-            compress all other spaces to a single character.
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-             \param IsSpace A unary predicate identifying spaces
-            \return A trimmed copy of the input
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline SequenceT trim_all_copy_if(const SequenceT& Input, PredicateT IsSpace)
-        {
-            return 
-                ::boost::find_format_all_copy(      
-                    ::boost::trim_copy_if(Input, IsSpace),
-                    ::boost::token_finder(IsSpace, ::boost::token_compress_on),
-                    ::boost::dissect_formatter(::boost::head_finder(1)));
-        }
-
-
-        //! Trim All
-        /*!
-            Remove all leading and trailing spaces from the input and
-            compress all other spaces to a single character.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param IsSpace A unary predicate identifying spaces
-        */
-        template<typename SequenceT, typename PredicateT>
-        inline void trim_all_if(SequenceT& Input, PredicateT IsSpace)
-        {
-            ::boost::trim_if(Input, IsSpace);
-            ::boost::find_format_all(       
-                Input,          
-                ::boost::token_finder(IsSpace, ::boost::token_compress_on),
-                ::boost::dissect_formatter(::boost::head_finder(1)));
-        }
-
-
-        //! Trim All
-        /*!
-            Remove all leading and trailing spaces from the input and
-            compress all other spaces to a single character.
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-            \return A trimmed copy of the input
-        */
-        template<typename SequenceT>
-        inline SequenceT trim_all_copy(const SequenceT& Input, const std::locale& Loc =std::locale())
-        {
-            return trim_all_copy_if(Input, ::boost::is_space(Loc));
-        }
-
-
-        //! Trim All
-        /*!
-            Remove all leading and trailing spaces from the input and
-            compress all other spaces to a single character.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Loc A locale used for 'space' classification
-        */
-        template<typename SequenceT>
-        inline void trim_all(SequenceT& Input, const std::locale& Loc =std::locale())
-        {
-            trim_all_if(Input, ::boost::is_space(Loc));
-        }
-
-
-        //! Trim Fill - parametric
-        /*!
-            Remove all leading and trailing spaces from the input and
-            replace every block of consecutive spaces with a fill string
-            defined by the user.
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-            \param Fill A string used to fill the inner spaces
-            \param IsSpace A unary predicate identifying spaces
-            \return A trimmed copy of the input
-        */
-        template<typename SequenceT, typename RangeT, typename PredicateT>
-        inline SequenceT trim_fill_copy_if(const SequenceT& Input, const RangeT& Fill, PredicateT IsSpace)
-        {
-            return 
-                ::boost::find_format_all_copy(      
-                    ::boost::trim_copy_if(Input, IsSpace),
-                    ::boost::token_finder(IsSpace, ::boost::token_compress_on),
-                    ::boost::const_formatter(::boost::as_literal(Fill)));
-        }
-
-
-        //! Trim Fill
-        /*!
-            Remove all leading and trailing spaces from the input and
-            replace every block of consecutive spaces with a fill string
-            defined by the user.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Fill A string used to fill the inner spaces
-            \param IsSpace A unary predicate identifying spaces
-        */
-        template<typename SequenceT, typename RangeT, typename PredicateT>
-        inline void trim_fill_if(SequenceT& Input, const RangeT& Fill, PredicateT IsSpace)
-        {
-            ::boost::trim_if(Input, IsSpace);
-            ::boost::find_format_all(       
-                Input,          
-                ::boost::token_finder(IsSpace, ::boost::token_compress_on),
-                ::boost::const_formatter(::boost::as_literal(Fill)));
-        }
-
-
-        //! Trim Fill
-        /*!
-            Remove all leading and trailing spaces from the input and
-            replace every block of consecutive spaces with a fill string
-            defined by the user.
-            The result is a trimmed copy of the input
-
-            \param Input An input sequence
-            \param Fill A string used to fill the inner spaces
-            \param Loc A locale used for 'space' classification
-            \return A trimmed copy of the input
-        */
-        template<typename SequenceT, typename RangeT>
-        inline SequenceT trim_fill_copy(const SequenceT& Input, const RangeT& Fill, const std::locale& Loc =std::locale())
-        {
-            return trim_fill_copy_if(Input, Fill, ::boost::is_space(Loc));
-        }
-
-
-        //! Trim Fill
-        /*!
-            Remove all leading and trailing spaces from the input and
-            replace every block of consecutive spaces with a fill string
-            defined by the user.
-            The input sequence is modified in-place.
-
-            \param Input An input sequence
-            \param Fill A string used to fill the inner spaces
-            \param Loc A locale used for 'space' classification
-        */
-        template<typename SequenceT, typename RangeT>
-        inline void trim_fill(SequenceT& Input, const RangeT& Fill, const std::locale& Loc =std::locale())
-        {
-            trim_fill_if(Input, Fill, ::boost::is_space(Loc));
-        }
-
-
-    } // namespace algorithm    
-
-    // pull names to the boost namespace
-    using algorithm::trim_all;
-    using algorithm::trim_all_if;
-    using algorithm::trim_all_copy;
-    using algorithm::trim_all_copy_if;
-    using algorithm::trim_fill;
-    using algorithm::trim_fill_if;
-    using algorithm::trim_fill_copy;
-    using algorithm::trim_fill_copy_if;
-
-} // namespace boost
-
-#endif  // BOOST_STRING_TRIM_ALL_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string/yes_no_type.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string/yes_no_type.hpp
deleted file mode 100644
index b76cc6c..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string/yes_no_type.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//  Boost string_algo library yes_no_type.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_YES_NO_TYPE_DETAIL_HPP
-#define BOOST_STRING_YES_NO_TYPE_DETAIL_HPP
-
-namespace boost {
-    namespace algorithm {
-
-        // taken from boost mailing-list
-        // when yes_no_type will become officially
-        // a part of boost distribution, this header
-        // will be deprecated
-        template<int I> struct size_descriptor 
-        {
-            typedef char (& type)[I];
-        }; 
-
-        typedef size_descriptor<1>::type yes_type;
-        typedef size_descriptor<2>::type no_type;
-
-    } // namespace algorithm
-} // namespace boost
-
-
-#endif  // BOOST_STRING_YES_NO_TYPE_DETAIL_HPP
diff --git a/third_party/boostorg/algorithm/include/boost/algorithm/string_regex.hpp b/third_party/boostorg/algorithm/include/boost/algorithm/string_regex.hpp
deleted file mode 100644
index 791aa18..0000000
--- a/third_party/boostorg/algorithm/include/boost/algorithm/string_regex.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//  Boost string_algo library string_regex.hpp header file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2004.
-//
-// Distributed under the Boost Software License, Version 1.0.
-//    (See accompanying file LICENSE_1_0.txt or copy at
-//          http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org/ for updates, documentation, and revision history.
-
-#ifndef BOOST_STRING_ALGO_REGEX_HPP
-#define BOOST_STRING_ALGO_REGEX_HPP
-
-/*! \file
-    Cumulative include for string_algo library.
-    In addition to string.hpp contains also regex-related stuff.
-*/
-
-#include <boost/regex.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/algorithm/string/regex.hpp>
-
-#endif  // BOOST_STRING_ALGO_REGEX_HPP
diff --git a/third_party/boostorg/algorithm/index.html b/third_party/boostorg/algorithm/index.html
deleted file mode 100644
index b0b01ed..0000000
--- a/third_party/boostorg/algorithm/index.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<html>
-<head>
-<meta http-equiv="refresh" content="0; URL=doc/html/index.html">
-</head>
-<body>
-Automatic redirection failed, please go to
-<a href="doc/html/index.html">doc/html/index.html</a> &nbsp;<hr>
-<p>© Copyright Marshall Clow, 2012</p>
-<p>Distributed under the Boost Software License, Version 1.0. (See accompanying 
-file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy 
-at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
-</body>
-</html>
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/meta/libraries.json b/third_party/boostorg/algorithm/meta/libraries.json
deleted file mode 100644
index 43304e2..0000000
--- a/third_party/boostorg/algorithm/meta/libraries.json
+++ /dev/null
@@ -1,47 +0,0 @@
-[
-    {
-        "key": "algorithm",
-        "name": "Algorithm",
-        "authors": [
-            "Marshall Clow"
-        ],
-        "description": "A collection of useful generic algorithms.",
-        "category": [
-            "Algorithms"
-        ],
-        "maintainers": [
-            "Marshall Clow <marshall -at- idio.com>"
-        ]
-    },
-    {
-        "key": "algorithm/minmax",
-        "name": "Min-Max",
-        "authors": [
-            "Hervé Brönnimann"
-        ],
-        "description": "Standard library extensions for simultaneous min/max and min/max element computations.",
-        "documentation": "minmax/",
-        "category": [
-            "Algorithms"
-        ],
-        "maintainers": [
-            "Marshall Clow <marshall -at- idio.com>"
-        ]
-    },
-    {
-        "key": "algorithm/string",
-        "name": "String Algo",
-        "authors": [
-            "Pavol Droba"
-        ],
-        "description": "String algorithms library.",
-        "documentation": "string/",
-        "category": [
-            "Algorithms",
-            "String"
-        ],
-        "maintainers": [
-            "Marshall Clow <marshall -at- idio.com>"
-        ]
-    }
-]
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/minmax/doc/minmax_benchs.html b/third_party/boostorg/algorithm/minmax/doc/minmax_benchs.html
deleted file mode 100644
index 1db516d..0000000
--- a/third_party/boostorg/algorithm/minmax/doc/minmax_benchs.html
+++ /dev/null
@@ -1,524 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<html>
-<head>
-   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-   <meta name="GENERATOR" content="Mozilla/4.77 [en] (X11; U; Linux 2.2.19 i686) [Netscape]">
-   <meta name="Author" content="Herve Bronnimann">
-   <meta name="Description" content="Small library to propose minmax_element algorithm.">
-   <title>Boost minmax library</title>
-</head>
-<body text="#000000" bgcolor="#FFFFFF" link="#0000EE" vlink="#551A8B" alink="#FF0000">
-
-<center>
-<h1>
-Minmax_element Performance</h1></center>
-
-<h3>
-<a NAME="Performance"></a><b>About performance</b></h3>
-Of course, there are many factors that affect the performance of an algorithm.
-The number of comparisons is only one factor; others include branch prediction, pipelining,
-locality of reference (affects cache efficiency), etc.  In practice,
-we observe that when the iterator type is a pointer,
-<tt>boost::minmax_element</tt>
-is only a tad slower than
-<tt>std::min_element</tt>, and is even faster
-than
-<tt>boost::first_min_last_max_element</tt>! This is even more true
-for slower iterators (<tt>list&lt;>::iterator</tt> or
-<tt>map&lt;>::iterator</tt>
-for instance). The following experiments were conducted on a Pentium III
-500 Mhz running Linux and compiled with g++, version 2.95.2, flags -O3.
-In the tables, we use different distributions: <i>Identical</i> means that
-all the elements are identical, <i>2-valued</i> means that we replace the
-second half of the identical elements by a distinct element, <i>increasing</i>
-means that all the elements are distinct and in increasing order, <i>decreasing</i>
-is the reverse, and <i>random</i> is produced by random_shuffle.
-<br>
-The program that created these tables is included in the distribution,
-under <a href="../example/minmax_timer.cpp">minmax_timer.cpp</a>
-<br> 
-<center><table BORDER NOSAVE >
-<tr NOSAVE>
-<td NOSAVE><b>vector&lt;int>::iterator</b></td>
-
-<td>Identical</td>
-
-<td>2-valued</td>
-
-<td>Increasing</td>
-
-<td>Decreasing</td>
-
-<td>Random</td>
-</tr>
-
-<tr>
-<td>std::min_element</td>
-
-<td>23.26M/s</td>
-
-<td>23.26M/s</td>
-
-<td>23.15M/s</td>
-
-<td>22.94M/s</td>
-
-<td>22.94M/s</td>
-</tr>
-
-<tr>
-<td>std::max_element</td>
-
-<td>23.26M/s</td>
-
-<td>23.26M/s</td>
-
-<td>23.15M/s</td>
-
-<td>22.94M/s</td>
-
-<td>22.62M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_element</td>
-
-<td>23.15M/s</td>
-
-<td>23.04M/s</td>
-
-<td>23.04M/s</td>
-
-<td>22.94M/s</td>
-
-<td>22.83M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_element</td>
-
-<td>23.26M/s</td>
-
-<td>23.26M/s</td>
-
-<td>23.26M/s</td>
-
-<td>22.83M/s</td>
-
-<td>16.23M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_max_element</td>
-
-<td>23.15M/s</td>
-
-<td>23.26M/s</td>
-
-<td>23.15M/s</td>
-
-<td>23.04M/s</td>
-
-<td>22.93M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_max_element</td>
-
-<td>23.26M/s</td>
-
-<td>23.15M/s</td>
-
-<td>23.15M/s</td>
-
-<td>22.94M/s</td>
-
-<td>16.18M/s</td>
-</tr>
-
-<tr>
-<td>boost::minmax_element</td>
-
-<td>21.83M/s</td>
-
-<td>21.83M/s</td>
-
-<td>21.83M/s</td>
-
-<td>21.55M/s</td>
-
-<td>17.79M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_last_max_element</td>
-
-<td>18.52M/s</td>
-
-<td>18.38M/s</td>
-
-<td>18.38M/s</td>
-
-<td>18.94M/s</td>
-
-<td>16.29M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_first_max_element</td>
-
-<td>20.08M/s</td>
-
-<td>20.83M/s</td>
-
-<td>20.75M/s</td>
-
-<td>19.76M/s</td>
-
-<td>15.87M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_last_max_element</td>
-
-<td>18.66M/s</td>
-
-<td>19.69M/s</td>
-
-<td>19.69M/s</td>
-
-<td>19.23M/s</td>
-
-<td>15.77M/s</td>
-</tr>
-
-<caption ALIGN=BOTTOM>Number of elements per second for standard vector
-container iterators</caption>
-</table></center>
-
-<center><table BORDER NOSAVE >
-<tr NOSAVE>
-<td NOSAVE><b>list&lt;int>::iterator</b></td>
-
-<td>Identical</td>
-
-<td>2-valued</td>
-
-<td>Increasing</td>
-
-<td>Decreasing</td>
-
-<td>Random</td>
-</tr>
-
-<tr>
-<td>std::min_element</td>
-
-<td>5.8M/s</td>
-
-<td>5.8M/s</td>
-
-<td>5.80M/s</td>
-
-<td>5.73M/s</td>
-
-<td>5.73M/s</td>
-</tr>
-
-<tr>
-<td>std::max_element</td>
-
-<td>5.81M/s</td>
-
-<td>5.81M/s</td>
-
-<td>5.78M/s</td>
-
-<td>5.73M/s</td>
-
-<td>5.75M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_element</td>
-
-<td>5.81M/s</td>
-
-<td>5.81M/s</td>
-
-<td>5.79M/s</td>
-
-<td>5.75M/s</td>
-
-<td>5.73M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_element</td>
-
-<td>5.81M/s</td>
-
-<td>5.80M/s</td>
-
-<td>5.79M/s</td>
-
-<td>5.73M/s</td>
-
-<td>5.03M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_max_element</td>
-
-<td>5.81M/s</td>
-
-<td>5.80M/s</td>
-
-<td>5.78M/s</td>
-
-<td>5.74M/s</td>
-
-<td>5.73M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_max_element</td>
-
-<td>5.81M/s</td>
-
-<td>5.80M/s</td>
-
-<td>5.79M/s</td>
-
-<td>5.73M/s</td>
-
-<td>5.07M/s</td>
-</tr>
-
-<tr>
-<td>boost::minmax_element</td>
-
-<td>5.68M/s</td>
-
-<td>5.80M/s</td>
-
-<td>5.66M/s</td>
-
-<td>5.74M/s</td>
-
-<td>5.30M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_last_max_element</td>
-
-<td>5.79M/s</td>
-
-<td>5.81M/s</td>
-
-<td>5.78M/s</td>
-
-<td>5.73M/s</td>
-
-<td>5.04M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_first_max_element</td>
-
-<td>5.69M/s</td>
-
-<td>5.79M/s</td>
-
-<td>5.69M/s</td>
-
-<td>5.73M/s</td>
-
-<td>4.84M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_last_max_element</td>
-
-<td>5.61M/s</td>
-
-<td>5.79M/s</td>
-
-<td>5.64M/s</td>
-
-<td>5.74M/s</td>
-
-<td>4.75M/s</td>
-</tr>
-
-<caption ALIGN=BOTTOM>Runtimes for standard list container iterators</caption>
-</table></center>
-
-<center><table BORDER NOSAVE >
-<tr NOSAVE>
-<td NOSAVE><b>multiset&lt;int>::iterator</b></td>
-
-<td>Identical</td>
-
-<td>2-valued</td>
-
-<td>Increasing</td>
-
-<td>Decreasing</td>
-
-<td>Random</td>
-</tr>
-
-<tr>
-<td>std::min_element</td>
-
-<td>4.03M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.02M/s</td>
-
-<td>4.04M/s</td>
-
-<td>2.97M/s</td>
-</tr>
-
-<tr>
-<td>std::max_element</td>
-
-<td>4.02M/s</td>
-
-<td>4.02M/s</td>
-
-<td>4.01M/s</td>
-
-<td>4.02M/s</td>
-
-<td>2.96M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_element</td>
-
-<td>4.01M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.03M/s</td>
-
-<td>4.04M/s</td>
-
-<td>3.01M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_element</td>
-
-<td>4.03M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.04M/s</td>
-
-<td>3.00M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_max_element</td>
-
-<td>4.04M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.06M/s</td>
-
-<td>3.01M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_max_element</td>
-
-<td>4.04M/s</td>
-
-<td>4.04M/s</td>
-
-<td>4.03M/s</td>
-
-<td>4.04M/s</td>
-
-<td>3.00M/s</td>
-</tr>
-
-<tr>
-<td>boost::minmax_element</td>
-
-<td>3.98M/s</td>
-
-<td>3.99M/s</td>
-
-<td>3.98M/s</td>
-
-<td>3.99M/s</td>
-
-<td>3.00M/s</td>
-</tr>
-
-<tr>
-<td>boost::first_min_last_max_element</td>
-
-<td>3.99M/s</td>
-
-<td>3.98M/s</td>
-
-<td>3.97M/s</td>
-
-<td>3.99M/s</td>
-
-<td>2.99M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_first_max_element</td>
-
-<td>3.97M/s</td>
-
-<td>3.98M/s</td>
-
-<td>3.96M/s</td>
-
-<td>3.98M/s</td>
-
-<td>3.00M/s</td>
-</tr>
-
-<tr>
-<td>boost::last_min_last_max_element</td>
-
-<td>4.00M/s</td>
-
-<td>4.00M/s</td>
-
-<td>4.00M/s</td>
-
-<td>4.02M/s</td>
-
-<td>2.97M/s</td>
-</tr>
-
-<caption ALIGN=BOTTOM>Runtimes for standard set/multiset container iterators</caption>
-</table></center>
-
-<hr SIZE="6">
-<br>Last modified 2004-06-28
-<p><font face="Arial,Helvetica"><font size=-1>&copy; Copyright Herv&eacute;
-Br&ouml;nnimann, Polytechnic University, 2002--2004. 
-Use, modification, and distribution is subject to the Boost Software
-License, Version 1.0. (See accompanying file <a href="../../../../LICENSE_1_0.txt">License_1_0.txt</a> or copy at
-<a href="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</a>)
-</font></font>
-</body>
-</html>
diff --git a/third_party/boostorg/algorithm/minmax/doc/minmax_synopsis.html b/third_party/boostorg/algorithm/minmax/doc/minmax_synopsis.html
deleted file mode 100644
index 1651a13..0000000
--- a/third_party/boostorg/algorithm/minmax/doc/minmax_synopsis.html
+++ /dev/null
@@ -1,127 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<html>
-<head>
-   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-   <meta name="GENERATOR" content="Mozilla/4.77 [en] (X11; U; Linux 2.2.19 i686) [Netscape]">
-   <meta name="Author" content="Herve Bronnimann">
-   <meta name="Description" content="Small library to propose minmax_element algorithm.">
-   <title>Boost minmax library synopsis</title>
-</head>
-<body text="#000000" bgcolor="#FFFFFF" link="#0000EE" vlink="#551A8B" alink="#FF0000">
-
-<center>
-<h1>
-Minmax_element complete synopsis</h1></center>
-
-<h3>
-Synopsis of <tt>&lt;boost/algorithm/minmax.hpp></tt></h3>
-
-<pre>#include &lt;boost/tuple/tuple.hpp>
-
-namespace boost {
-
-  template &lt;class T>
-  tuple&lt;T const&amp;, T const&amp;>
-  minmax(const T&amp; a, const T&amp; b);
-
-  template &lt;class T, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  tuple&lt;T const&amp;, T const&amp;>
-  minmax(const T&amp; a, const T&amp; b, BinaryPredicate comp);
-
-}
-</pre>
-
-<h3>
-Synopsis of <tt>&lt;boost/algorithm/minmax_element.hpp></tt></h3>
-
-<pre>#include &lt;utility> //for std::pair
-
-namespace boost {
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  minmax_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  minmax_element(ForwardIterator first, ForwardIterator last,
-                 BinaryPredicate comp);
-
-  // Variants
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  ForwardIterator first_min_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  ForwardIterator first_min_element(ForwardIterator first, ForwardIterator last,
-                                    BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  ForwardIterator last_min_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  ForwardIterator last_min_element(ForwardIterator first, ForwardIterator last,
-                                   BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  ForwardIterator first_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  ForwardIterator first_max_element(ForwardIterator first, ForwardIterator last,
-                                    BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  ForwardIterator last_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  ForwardIterator last_max_element(ForwardIterator first, ForwardIterator last,
-                                   BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  first_min_first_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  first_min_first_max_element(ForwardIterator first, ForwardIterator last,
-                             BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  first_min_last_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  first_min_last_max_element(ForwardIterator first, ForwardIterator last,
-                             BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  last_min_first_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  last_min_first_max_element(ForwardIterator first, ForwardIterator last,
-                             BinaryPredicate comp);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  last_min_last_max_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  last_min_last_max_element(ForwardIterator first, ForwardIterator last,
-                            BinaryPredicate comp);
-
-}</pre>
-
-<hr SIZE="6">
-<br>Last modified 2002-07-01
-<p><font face="Arial,Helvetica"><font size=-1>&copy; Copyright Herv&eacute;
-Br&ouml;nnimann, Polytechnic University, 2002--2004. 
-Use, modification, and distribution is subject to the Boost Software
-License, Version 1.0. (See accompanying file <a href="../../../../LICENSE_1_0.txt">License_1_0.txt</a> or copy at
-<a href="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</a>)
-</font></font>
-</body>
-</html>
diff --git a/third_party/boostorg/algorithm/minmax/example/Jamfile b/third_party/boostorg/algorithm/minmax/example/Jamfile
deleted file mode 100644
index d8650e0..0000000
--- a/third_party/boostorg/algorithm/minmax/example/Jamfile
+++ /dev/null
@@ -1,12 +0,0 @@
-#  Boost.Minmax Library Example Jamfile
-#
-#  Copyright (C) 2002--2004, Herve Bronnimann
-#
-# Use, modification, and distribution is subject to the Boost Software
-# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-#
-
-exe minmax_ex : minmax_ex.cpp ;
-exe minmax_timer : minmax_timer.cpp ;
-
diff --git a/third_party/boostorg/algorithm/minmax/example/minmax_ex.cpp b/third_party/boostorg/algorithm/minmax/example/minmax_ex.cpp
deleted file mode 100644
index d77820b..0000000
--- a/third_party/boostorg/algorithm/minmax/example/minmax_ex.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <list>
-#include <algorithm>
-#include <cstdlib>
-#include <cassert>
-#include <iostream>
-#include <iterator>
-
-#include <boost/algorithm/minmax.hpp>
-#include <boost/algorithm/minmax_element.hpp>
-
-int main()
-{
-  using namespace std;
-
-  // Demonstrating minmax()
-  boost::tuple<int const&, int const&> result1 = boost::minmax(1, 0);
-  assert( result1.get<0>() == 0 );
-  assert( result1.get<1>() == 1 );
-
-
-  // Demonstrating minmax_element()
-  list<int> L;
-  typedef list<int>::const_iterator iterator;
-  generate_n(front_inserter(L), 1000, rand);
-  pair< iterator, iterator > result2 = boost::minmax_element(L.begin(), L.end());
-
-  cout << "The smallest element is " << *(result2.first) << endl;
-  cout << "The largest element is  " << *(result2.second) << endl;
-
-  assert( result2.first  == std::min_element(L.begin(), L.end()) );
-  assert( result2.second == std::max_element(L.begin(), L.end()) );
-}
diff --git a/third_party/boostorg/algorithm/minmax/example/minmax_timer.cpp b/third_party/boostorg/algorithm/minmax/example/minmax_timer.cpp
deleted file mode 100644
index 0ab51a8..0000000
--- a/third_party/boostorg/algorithm/minmax/example/minmax_timer.cpp
+++ /dev/null
@@ -1,212 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <utility>
-#include <functional>
-#include <algorithm>
-#include <numeric>
-#include <iterator>
-#include <vector>
-#include <list>
-#include <set>
-#include <iostream>
-// What's the proper BOOST_ flag for <iomanip.h> vs <ios>
-#include <iomanip>
-
-#include <boost/timer.hpp>
-#include <boost/algorithm/minmax.hpp>
-
-template <class T1, class T2>
-void tie(std::pair<T1, T2> p, T1& min, T2& max)
-{
-  min = p.first; max = p.second;
-}
-
-template <class Value>
-struct less_count : std::less<Value> {
-  less_count(less_count<Value> const& lc) : _M_counter(lc._M_counter) {}
-  less_count(int& counter) : _M_counter(counter) {}
-  bool operator()(Value const& a, Value const& b) const {
-    ++_M_counter;
-    return std::less<Value>::operator()(a,b);
-  }
-  void reset() {
-    _M_counter = 0;
-  }
-private:
-  int& _M_counter;
-};
-
-inline int opt_min_count(int n) {
-  return (n==0) ? 0 : n-1;
-}
-inline int opt_minmax_count(int n) {
-  if (n < 2) return 0;
-  if (n == 2) return 1;
-  return (n%2 == 0) ? 3*(n/2)-1 : 3*(n/2)+1;
-}
-inline int opt_boost_minmax_count(int n) {
-  if (n < 2) return 0;
-  if (n == 2) return 1;
-  return (n%2 == 0) ? 3*(n/2)-2 : 3*(n/2);
-}
-
-int repeats = 10;
-
-#define TIMER( n, cmd , cmdname ) \
-  t.restart(); \
-  for (int i=0; i<repeats; ++i) { cmd ; } \
-  std::cout << "    " << std::setprecision(4) \
-            << (double)n*repeats/t.elapsed()/1.0E6 \
-            << "M items/sec  " << cmdname << "\n"
-
-#define CTIMER( n, cmd , cmdname, count, opt ) \
-  t.restart(); lc.reset(); \
-  for (int i=0; i<repeats; ++i) { cmd ; } \
-  std::cout << "    " << std::setprecision(4) \
-            << (double)n*repeats/t.elapsed()/1.0E6 \
-            << "M items/sec  " << cmdname \
-            << " ("<< (count)/repeats << " vs " << opt << ")\n"
-
-template <class CIterator>
-void test_minmax_element(CIterator first, CIterator last, int n, char* name)
-{
-  typedef typename std::iterator_traits<CIterator>::value_type vtype;
-  boost::timer t;
-
-  std::cout << "  ON " << name << " WITH OPERATOR<()\n";
-  TIMER( n, std::min_element(first, last),
-    "std::min_element" << name << "");
-  TIMER( n, std::max_element(first, last),
-    "std::max_element" << name << "");
-  TIMER( n, boost::first_min_element(first, last),
-    "boost::first_min_element" << name << "");
-  TIMER( n, boost::last_min_element(first, last),
-    "boost::last_min_element" << name << " ");
-  TIMER( n, boost::first_max_element(first, last),
-    "boost::first_max_element" << name << "");
-  TIMER( n, boost::last_max_element(first, last),
-    "boost::last_max_element" << name << " ");
-  TIMER( n, boost::minmax_element(first, last),
-    "boost::minmax_element" << name << "    ");
-  TIMER( n, boost::first_min_last_max_element(first, last),
-    "boost::first_min_last_max_element" << name << "");
-  TIMER( n, boost::last_min_first_max_element(first, last),
-    "boost::last_min_first_max_element" << name << "");
-  TIMER( n, boost::last_min_last_max_element(first, last),
-    "boost::last_min_last_max_element" << name << " ");
-
-  #define pred std::bind2nd( std::greater<vtype>(), vtype(10) )
-  TIMER( n, boost::min_element_if(first, last, pred),
-    "boost::min_element_if" << name << "");
-  TIMER( n, boost::max_element_if(first, last, pred),
-    "boost::max_element_if" << name << "");
-  TIMER( n, std::min_element(boost::make_filter_iterator(first, last, pred),
-                             boost::make_filter_iterator(last, last, pred)),
-    "std::min_element_with_filter_iterator" << name << "");
-  TIMER( n, std::max_element(boost::make_filter_iterator(first, last, pred),
-                             boost::make_filter_iterator(last, last, pred)),
-    "std::max_element_if_with_filter_iterator" << name << "");
-  #undef pred
-
-  int counter = 0;
-  less_count<vtype> lc(counter);
-  std::cout << "  ON " << name << " WITH LESS<> AND COUNTING COMPARISONS\n";
-  CTIMER( n, std::min_element(first, last, lc),
-    "std::min_element" << name << "        ",
-    counter, opt_min_count(n) );
-  CTIMER( n, std::max_element(first, last, lc),
-    "std::max_element" << name << "        ",
-    counter, opt_min_count(n) );
-  CTIMER( n, boost::first_min_element(first, last, lc),
-    "boost::first_min_element" << name << "",
-    counter, opt_min_count(n) );
-  CTIMER( n, boost::last_min_element(first, last, lc),
-    "boost::last_max_element" << name << " ",
-    counter, opt_min_count(n) );
-  CTIMER( n, boost::first_max_element(first, last, lc),
-    "boost::first_min_element" << name << "",
-    counter, opt_min_count(n) );
-  CTIMER( n, boost::last_max_element(first, last, lc),
-    "boost::last_max_element" << name << " ",
-    counter, opt_min_count(n) );
-  CTIMER( n, boost::minmax_element(first, last, lc),
-    "boost::minmax_element" << name << "   ",
-    counter, opt_minmax_count(n) );
-  CTIMER( n, boost::first_min_last_max_element(first, last, lc),
-    "boost::first_min_last_max_element" << name << "",
-    counter, opt_boost_minmax_count(n) );
-  CTIMER( n, boost::last_min_first_max_element(first, last, lc),
-    "boost::last_min_first_max_element" << name << "",
-    counter, opt_boost_minmax_count(n) );
-  CTIMER( n, boost::last_min_last_max_element(first, last, lc),
-    "boost::last_min_last_max_element" << name << " ",
-    counter, opt_minmax_count(n) );
-}
-
-template <class Container, class Iterator, class Value>
-void test_container(Iterator first, Iterator last, int n, char* name)
-{
-  Container c(first, last);
-  typename Container::iterator fit(c.begin()), lit(c.end());
-  test_minmax_element(fit, lit, n, name);
-}
-
-template <class Iterator>
-void test_range(Iterator first, Iterator last, int n)
-{
-  typedef typename std::iterator_traits<Iterator>::value_type Value;
-  // Test various containers with these values
-  test_container< std::vector<Value>, Iterator, Value >(first, last, n, "<vector>");
-#ifndef ONLY_VECTOR
-  test_container< std::list<Value>,   Iterator, Value >(first, last, n, "<list>  ");
-  test_container< std::multiset<Value>,    Iterator, Value >(first, last, n, "<set>   ");
-#endif
-}
-
-template <class Value>
-void test(int n)
-{
-  // Populate test vector with identical values
-  std::cout << "IDENTICAL VALUES...   \n";
-  std::vector<Value> test_vector(n, Value(1));
-  typename std::vector<Value>::iterator first( test_vector.begin() );
-  typename std::vector<Value>::iterator last( test_vector.end() );
-  test_range(first, last, n);
-
-  // Populate test vector with two values
-  std::cout << "TWO DISTINCT VALUES...\n";
-  typename std::vector<Value>::iterator middle( first + n/2 );
-  std::fill(middle, last, Value(2));
-  test_range(first, last, n);
-
-  // Populate test vector with increasing values
-  std::cout << "INCREASING VALUES...  \n";
-  std::fill(first, last, Value(1));
-  std::partial_sum(first, last, first);
-  test_range(first, last, n);
-
-  // Populate test vector with decreasing values
-  std::cout << "DECREASING VALUES...  \n";
-  std::reverse(first, last);
-  test_range(first, last, n);
-
-  // Populate test vector with random values
-  std::cout << "RANDOM VALUES...      \n";
-  std::random_shuffle(first, last);
-  test_range(first, last, n);
-}
-
-int
-main(int argc, char** argv)
-{
-  int n = 100;
-  if (argc > 1) n = atoi(argv[1]);
-  if (argc > 2) repeats = atoi(argv[2]);
-
-  test<int>(n);
-
-  return 0;
-}
diff --git a/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element.fuzz.cpp b/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element.fuzz.cpp
deleted file mode 100644
index 63b6a9b..0000000
--- a/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element.fuzz.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//  (C) Copyright Marshall Clow 2018
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <iterator> // for std::distance
-#include <cassert>  // for assert
-
-#include <boost/algorithm/minmax_element.hpp>
-#include <boost/algorithm/cxx11/none_of.hpp>
-
-//	Fuzzing tests for:
-//
-//		template <class ForwardIterator>
-//		std::pair<ForwardIterator,ForwardIterator>
-//		minmax_element(ForwardIterator first, ForwardIterator last);
-//
-//		template <class ForwardIterator, class BinaryPredicate>
-//		std::pair<ForwardIterator,ForwardIterator>
-//		minmax_element(ForwardIterator first, ForwardIterator last,
-//	               		BinaryPredicate comp);
-
-
-bool greater(uint8_t lhs, uint8_t rhs) { return lhs > rhs; }
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t sz) {
-	typedef std::pair<const uint8_t *, const uint8_t *> result_t;
-	if (sz == 0) return 0; // we need at least one element
-	
-	{
-//	Find the min and max
-	result_t result = boost::minmax_element(data, data + sz);
-
-//	The iterators have to be in the sequence - and not at the end!
-	assert(std::distance(data, result.first)  < sz);
-	assert(std::distance(data, result.second) < sz);
-	
-//	the minimum element can't be bigger than the max element
-	uint8_t min_value = *result.first;
-	uint8_t max_value = *result.second;
-	
-	assert(min_value <= max_value);
-
-//	None of the elements in the sequence can be less than the min, nor greater than the max
-	for (size_t i = 0; i < sz; ++i) {
-		assert(min_value <= data[i]);
-		assert(data[i] <= max_value);
-		}
-
-//	We returned the first min element, and the first max element
-	assert(boost::algorithm::none_of_equal(data, result.first,  min_value));
-	assert(boost::algorithm::none_of_equal(data, result.second, max_value));
-	}
-	
-	{
-//	Find the min and max
-	result_t result = boost::minmax_element(data, data + sz, greater);
-
-//	The iterators have to be in the sequence - and not at the end!
-	assert(std::distance(data, result.first)  < sz);
-	assert(std::distance(data, result.second) < sz);
-
-//	the minimum element can't be bigger than the max element
-	uint8_t min_value = *result.first;
-	uint8_t max_value = *result.second;
-	
-	assert (!greater(max_value, min_value));
-
-//	None of the elements in the sequence can be less than the min, nor greater than the max
-	for (size_t i = 0; i < sz; ++i) {
-		assert(!greater(data[i], min_value));
-		assert(!greater(max_value, data[i]));
-		}
-
-//	We returned the first min element, and the first max element
-	assert(boost::algorithm::none_of_equal(data, result.first,  min_value));
-	assert(boost::algorithm::none_of_equal(data, result.second, max_value));
-	}
-
-  return 0;
-}
diff --git a/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element_variants.fuzz.cpp b/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element_variants.fuzz.cpp
deleted file mode 100644
index ba517e2..0000000
--- a/third_party/boostorg/algorithm/minmax/fuzzing/minmax_element_variants.fuzz.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//  (C) Copyright Marshall Clow 2018
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <iterator> // for std::distance
-#include <cassert>  // for assert
-
-#include <boost/algorithm/minmax_element.hpp>
-#include <boost/algorithm/cxx11/none_of.hpp>
-
-//	Fuzzing tests for:
-//
-//		template <class ForwardIterator>
-//		std::pair<ForwardIterator,ForwardIterator>
-//		first_min_first_max_element(ForwardIterator first, ForwardIterator last);
-//
-//		template <class ForwardIterator, class BinaryPredicate>
-//		std::pair<ForwardIterator,ForwardIterator>
-//		first_min_first_max_element(ForwardIterator first, ForwardIterator last,
-//	               		BinaryPredicate comp);
-//
-//	identical signatures for:
-//		first_min_last_max_element
-//		last_min_first_max_element
-//		last_min_last_max_element
-
-bool greater(uint8_t lhs, uint8_t rhs) { return lhs > rhs; }
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t sz) {
-	typedef std::pair<const uint8_t *, const uint8_t *> result_t;
-	const uint8_t * const dend = data + sz;
-	if (sz == 0) return 0; // we need at least one element
-	
-	{
-//	Find the min and max
-	result_t resultff = boost::first_min_first_max_element(data, dend);
-	result_t resultfl = boost::first_min_last_max_element (data, dend);
-	result_t resultlf = boost::last_min_first_max_element (data, dend);
-	result_t resultll = boost::last_min_last_max_element  (data, dend);
-
-//	The iterators have to be in the sequence - and not at the end!
-	assert(std::distance(data, resultff.first)  < sz);
-	assert(std::distance(data, resultff.second) < sz);
-	assert(std::distance(data, resultfl.first)  < sz);
-	assert(std::distance(data, resultfl.second) < sz);
-	assert(std::distance(data, resultlf.first)  < sz);
-	assert(std::distance(data, resultlf.second) < sz);
-	assert(std::distance(data, resultll.first)  < sz);
-	assert(std::distance(data, resultll.second) < sz);
-	
-//	the minimum element can't be bigger than the max element
-
-//	Did we find the same min value and max value?
-	uint8_t min_value = *resultff.first;
-	uint8_t max_value = *resultff.second;
-	assert(min_value <= max_value);
-
-//	Each variant should have found the same min/max values
-	assert(*resultff.first  == min_value);
-	assert(*resultfl.first  == min_value);
-	assert(*resultlf.first  == min_value);
-	assert(*resultll.first  == min_value);
-
-	assert(*resultff.second == max_value);
-	assert(*resultfl.second == max_value);
-	assert(*resultlf.second == max_value);
-	assert(*resultll.second == max_value);
-
-//	None of the elements in the sequence can be less than the min, nor greater than the max
-	for (size_t i = 0; i < sz; ++i) {
-		assert(min_value <= data[i]);
-		assert(data[i] <= max_value);
-		}
-
-//	Make sure we returned the "right" first and last element
-	assert(boost::algorithm::none_of_equal(data, resultff.first,     min_value));
-	assert(boost::algorithm::none_of_equal(data, resultfl.first,     min_value));
-	assert(boost::algorithm::none_of_equal(resultlf.first + 1, dend, min_value));
-	assert(boost::algorithm::none_of_equal(resultll.first + 1, dend, min_value));
-
-	assert(boost::algorithm::none_of_equal(data, resultff.second,     max_value));
-	assert(boost::algorithm::none_of_equal(resultfl.second + 1, dend, max_value));
-	assert(boost::algorithm::none_of_equal(data, resultlf.second,     max_value));
-	assert(boost::algorithm::none_of_equal(resultll.second + 1, dend, max_value));
-	}
-	
-	{
-//	Find the min and max
-	result_t resultff = boost::first_min_first_max_element(data, dend, greater);
-	result_t resultfl = boost::first_min_last_max_element (data, dend, greater);
-	result_t resultlf = boost::last_min_first_max_element (data, dend, greater);
-	result_t resultll = boost::last_min_last_max_element  (data, dend, greater);
-
-//	The iterators have to be in the sequence - and not at the end!
-	assert(std::distance(data, resultff.first)  < sz);
-	assert(std::distance(data, resultff.second) < sz);
-	assert(std::distance(data, resultfl.first)  < sz);
-	assert(std::distance(data, resultfl.second) < sz);
-	assert(std::distance(data, resultlf.first)  < sz);
-	assert(std::distance(data, resultlf.second) < sz);
-	assert(std::distance(data, resultll.first)  < sz);
-	assert(std::distance(data, resultll.second) < sz);
-
-//	the minimum element can't be bigger than the max element
-	uint8_t min_value = *resultff.first;
-	uint8_t max_value = *resultff.second;
-	
-	assert (!greater(max_value, min_value));
-
-//	Each variant should have found the same min/max values
-	assert(*resultff.first  == min_value);
-	assert(*resultfl.first  == min_value);
-	assert(*resultlf.first  == min_value);
-	assert(*resultll.first  == min_value);
-
-	assert(*resultff.second == max_value);
-	assert(*resultfl.second == max_value);
-	assert(*resultlf.second == max_value);
-	assert(*resultll.second == max_value);
-
-//	None of the elements in the sequence can be less than the min, nor greater than the max
-	for (size_t i = 0; i < sz; ++i) {
-		assert(!greater(data[i], min_value));
-		assert(!greater(max_value, data[i]));
-		}
-
-//	We returned the first min element, and the first max element
-	assert(boost::algorithm::none_of_equal(data, resultff.first,     min_value));
-	assert(boost::algorithm::none_of_equal(data, resultfl.first,     min_value));
-	assert(boost::algorithm::none_of_equal(resultlf.first + 1, dend, min_value));
-	assert(boost::algorithm::none_of_equal(resultll.first + 1, dend, min_value));
-
-	assert(boost::algorithm::none_of_equal(data, resultff.second,     max_value));
-	assert(boost::algorithm::none_of_equal(resultfl.second + 1, dend, max_value));
-	assert(boost::algorithm::none_of_equal(data, resultlf.second,     max_value));
-	assert(boost::algorithm::none_of_equal(resultll.second + 1, dend, max_value));
-	}
-
-  return 0;
-}
diff --git a/third_party/boostorg/algorithm/minmax/index.html b/third_party/boostorg/algorithm/minmax/index.html
deleted file mode 100644
index 72a5116..0000000
--- a/third_party/boostorg/algorithm/minmax/index.html
+++ /dev/null
@@ -1,532 +0,0 @@
-<!DOCTYPE html public "-//w3c//dtd html 4.0 transitional//en">
-<HTML>
-<HEAD>
-   <LINK REL="stylesheet" TYPE="text/css" HREF="../../../boost.css">
-   <META http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-   <META name="GENERATOR" content="Mozilla/4.77 [en] (X11; U; Linux 2.2.19 i686) [Netscape]">
-   <META name="Author" content="Herve Bronnimann">
-   <META name="Description" content="Small library to propose minmax_element algorithm.">
-   <title>Boost Minmax library</title>
-</HEAD>
-<body text="#000000" bgcolor="#FFFFFF" link="#0000EE" vlink="#551A8B" alink="#FF0000">
-
-<h2><img src="../../../boost.png" WIDTH="276" HEIGHT="86">Header &lt;<A
-HREF="../../../boost/algorithm/minmax.hpp">boost/algorithm/minmax.hpp</A>&gt; </H2>
-
-<quote>
-<b>
-<a href="#minmax_element">Motivation</a><br>
-<a href="#synopsis">Synopsis</a><br>
-<a href="#description">Function templates description</a><br>
-<a href="#definition">Definition</a><br>
-<a href="#reqs">Requirements on types</a><br>
-<a href="#precond">Preconditions</a><br>
-<a href="#postcond">Postconditions</a><br>
-<a href="#complexity">Complexity</a><br>
-<a href="#example">Example</a><br>
-<a href="#notes">Notes</a><br>
-<a href="#rationale">Rationale</a><br>
-<a href="#perf">Note about performance</a><br>
-<a href="#acks">Acknowledgements</a>
-</b>
-</quote>
-
-
-<a name="minmax_element">
-<h3>
-Motivation</h3>
-
-<p>The minmax library is composed of two headers, <a
-href="../../../boost/algorithm/minmax.hpp">&lt;boost/algorithm/minmax.hpp></a>
-and <a
-href="../../../boost/algorithm/minmax_element.hpp">&lt;boost/algorithm/minmax_element.hpp></a>.
-(See <a href="#two_headers">rationale</a>.)
-The problem solved by this library is that simultaneous min and max
-computation requires
-only one comparison, but using <tt>std::min</tt> and <tt>std::max</tt>
-forces two comparisons. Worse, to compute the minimum and
-maximum elements of a range of <tt>n</tt> elements requires only
-<tt>3n/2+1</tt> comparisons, instead of the <tt>2n</tt> (in two passes)
-forced by <tt>std::min_element</tt> and <tt>std::max_element</tt>.
-I always thought it is a waste to have to call two functions to compute the
-extent of a range, performing two passes over the input, when one should
-be enough. The present library solves both problems.</p>
-
-<p>The first file implements the function templates
-<tt>minmax</tt>
-as straightforward extensions of the C++
-standard. As it returns a pair of <tt>const&amp;</tt>, we must use the <a
-href="../../tuple/index.html">Boost.tuple</a> library to construct such
-pairs. (Please note: the intent is not to fix the known defects of
-<tt>std::min</tt>
-and <tt>std::max</tt>, but to add one more algorithm that combines both; see the
-<a href="#no-fix">rationale</a>.)</p>
-
-<p>The second file implements the function templates
-<tt>minmax_element</tt>. In a
-second part, it also proposes variants that can usually not be computed by
-the minmax algorithm, and which are more flexible in case some elements are equal.
-Those variants could have been also provided with policy-based design,
-but I ruled against that (see <a href="#no-policy">rationale</a>).
-</p>
-
-<p>If you are interested about
-<a href="doc/minmax_benchs.html">performance</a>,
-you will see that <tt>minmax_element</tt> is just slightly less efficient
-than a single <tt>min_element</tt> or <tt>max_element</tt>, and thus
-twice as efficient as two separate calls to <tt>min_element</tt> and
-<tt>max_element</tt>. From a
-theoretical standpoint,
-all the <tt>minmax_element</tt> functions perform at most
-<tt>3n/2+1</tt>
-comparisons and exactly n increments of the
-<tt>ForwardIterator</tt>.</p>
-</a>
-
-<a name="synopsis">
-<h3>
-Synopsis of <tt>&lt;boost/algorithm/minmax.hpp></tt></h3>
-
-<pre>#include &lt;boost/tuple/tuple.hpp>
-
-namespace boost {
-
-  template &lt;class T>
-  tuple&lt;T const&amp;, T const&amp;>
-  minmax(const T&amp; a, const T&amp; b);
-
-  template &lt;class T, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  tuple&lt;T const&amp;, T const&amp;>
-  minmax(const T&amp; a, const T&amp; b, BinaryPredicate comp);
-
-}
-</pre>
-
-<h3>
-Synopsis of <tt>&lt;boost/algorithm/minmax_element.hpp></tt></h3>
-
-<pre>#include &lt;utility> // for std::pair
-
-namespace boost {
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  minmax_element(ForwardIterator first, ForwardIterator last);
-
-  template &lt;class <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">ForwardIterator</a>, class <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>>
-  std::pair&lt;ForwardIterator,ForwardIterator>
-  minmax_element(ForwardIterator first, ForwardIterator last,
-                 BinaryPredicate comp);
-
-}
-</pre>
-
-In addition, there are a bunch of extensions which specify
-which element(s) you want to pick in case of equal elements. They are:
-<ul>
-<li><tt>first_min_element</tt> and <tt>last_min_element</tt></li>
-<li><tt>first_max_element</tt> and <tt>last_max_element</tt></li>
-<li><tt>first_min_first_max_element</tt>,
-<tt>first_min_last_max_element</tt>,
-<tt>last_min_first_max_element</tt>, and
-<tt>last_min_last_max_element</tt></li>
-</ul>
-I won't bore you with the complete synopsis, they have exactly the same
-declaration as their corresponding <tt>_element</tt> function. Still,
-you can find the complete synopsis <a href="doc/minmax_synopsis.html">here</a>.
-</a>
-
-<a name="description">
-<h3>
-Function templates description</h3>
-The <tt>minmax</tt> algorithm returns a pair <tt>p</tt> containing either
-<i>(a,b)</i>
-or <i>(b,a)</i>, such that <tt>p.first&lt;p.second</tt> in the first version,
-or <tt>comp(p.first,p.second)</tt> in the second version. If the elements
-are equivalent, the pair <i>(a,b) </i>is returned. <a href="#Note1">[1]</a>
-<p>The <tt>minmax_element </tt>is semantically equivalent to <tt>first_min_first_max_element</tt>.
-<p><tt>First_min_element</tt> and <tt>first_max_element</tt> find the smallest
-and largest elements in the range <tt>[first, last)</tt>. If there are
-several instance of these elements, the first one is returned. They are
-identical to
-<tt>std::min_element</tt> and <tt>std::max_element</tt> and
-are only included in this library for symmetry.
-<p><tt>Last_min_element</tt> and <tt>last_max_element</tt> find the smallest
-and largest elements in the range <tt>[first, last)</tt>. They are almost
-identical to
-<tt>std::min_element</tt> and <tt>std::max_element</tt>, except
-that they return the last instance of the smallest and largest element,
-respectively (and not the first, as <tt>first_min_element</tt> and
-<tt>first_max_element</tt> would).
-<p>The family of algorithms comprising <tt>first_min_first_max_element</tt>,
-<tt>first_min_last_max_element</tt>,
-<tt>last_min_first_max_element</tt>,
-and <tt>last_min_last_max_element</tt> can be described generically as
-follows (using <i><tt>which</tt></i> and
-<i><tt>what</tt></i> for <tt>first</tt>
-or <tt>last</tt>): <tt><i>which</i>_min_<i>what</i>_max_element</tt> finds
-the (first or last, according to <i><tt>which</tt></i>) smallest element
-and the (first or last, according to <i><tt>what</tt></i>) largest element
-in the range
-<tt>[first, last)</tt>. The first version is semantically
-equivalent to:
-<pre><tt>  std::make_pair(boost::<i>which</i>_min_element(first,last),
-                 boost::<i>what</i>_max_element(first,last))</tt>,</pre>
-and the second version to:
-<pre><tt>  std::make_pair(boost::<i>which</i>_min_element(first,last,comp),
-                 boost::<i>what</i>_max_element(first,last,comp))</tt>.</pre>
-
-<p><br><b><i>Note</i></b>: the <tt>first_min_last_max_element</tt> can also be described
-as finding the first and last elements in the range if it were stably sorted.
-</a>
-
-<a name="definition">
-<h3>
-Definition</h3>
-Defined in <a href="../../../boost/algorithm/minmax.hpp">minmax.hpp</a>
-and
-in <a href="../../../boost/algorithm/minmax_element.hpp">minmax_element.hpp</a>.
-</a>
-
-<a name="reqs">
-<h3>
-Requirements on types</h3>
-For minmax, <tt>T</tt> must be a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan
-Comparable</a>.
-<p>For all the other function templates, versions with two template parameters:
-<ul>
-<li>
-<tt>ForwardIterator</tt> is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward
-Iterator</a>.</li>
-
-<li>
-<tt>ForwardIterator</tt>'s value type is <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan
-Comparable</a>.</li>
-</ul>
-For the versions with three template parameters:
-<ul>
-<li>
-<tt>ForwardIterator</tt> is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward
-Iterator</a>.</li>
-
-<li>
-<tt>BinaryPredicate</tt> is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary
-Predicate</a>.</li>
-
-<li>
-<tt>ForwardIterator</tt>'s value type is convertible to <tt>BinaryPredicate</tt>'s
-first argument type and second argument type.</li>
-</ul>
-</a>
-
-<a name="precond">
-<h3>
-Preconditions</h3>
-
-<ul>
-<li>
-<tt>[first, last)</tt> is a valid range.</li>
-</ul>
-</a>
-
-<a name="postcond">
-<h3>
-Postconditions</h3>
-In addition to the semantic description above, for <tt>minmax_element</tt>
-and all the <tt><i>which</i>_min_<i>what</i>_max_element</tt>
-variants, the return value is
-<tt>last</tt> or <tt>std::make_pair(last,last)</tt>
-if and only if <tt>[first, last)</tt> is an empty range. Otherwise, the
-return value or both members of the resulting pair are iterators in the
-range
-<tt>[first, last)</tt>.
-</a>
-
-<a name="complexity">
-<h3>
-Complexity</h3>
-Minmax performs a single comparison and is otherwise of constant complexity.
-The use of <tt>boost::tuple&lt;T const&amp;></tt> avoids copying
-the arguments, since they are passed and returned by reference.
-<p>The complexity of all the other algorithms is linear. They all perform
-exactly n increment operations, and zero comparisons if <tt>[first,last)</tt>
-is empty, otherwise :
-<ul>
-<li>
-all the <tt>min_element</tt> and <tt>max_element</tt> variants perform
-exactly <tt>(n-1)</tt> comparisons,</li>
-
-<li>
-<tt>minmax_element</tt> , <tt>first_min_first_max_element</tt>, and <tt>last_min_last_max_element</tt>
-perform at most <tt>3(n/2)-1</tt> comparisons if <tt>n</tt> is even and
-non-zero, and at most <tt>3(n/2)+1</tt> if <tt>n</tt> is odd,
-<a href="#Note2">[2]</a></li>
-
-<li>
-<tt>first_min_last_max_element</tt>, and <tt>last_min_first_max_element</tt>
-perform exactly <tt>3n/2-2</tt> comparisons if n is even and non-zero,
-and at most <tt>3(n/2)</tt> if <tt>n</tt> is odd,
-<a href="#Note1">[3]</a></li>
-</ul>
-where <tt>n</tt> is the number of elements in <tt>[first,last)</tt>.
-</a>
-
-<a name="example">
-<h3>
-Example</h3>
-This example is included in the distribution in the examples section of
-the library under
-<a href="example/minmax_ex.cpp">minmax_ex.cpp</a>.
-
-<pre>int main()
-{
-  using namespace std;
-  boost::tuple&lt;int const&amp;, int const&amp;> result1 = boost::minmax(1, 0);
-
-  assert( result1.get<0>() == 0 );
-  assert( result1.get<1>() == 1 );
-
-  <a href="http://www.sgi.com/tech/stl/List.html">list</a>&lt;int> L;
-  <a href="http://www.sgi.com/tech/stl/generate_n.html">generate_n</a>(<a href="http://www.sgi.com/tech/stl/front_insert_iterator.html">front_inserter</a>(L), 1000, rand);
-
-  typedef list&lt;int>::const_iterator iterator;
-  pair&lt; iterator, iterator > result2 = boost::minmax_element(L.begin(), L.end());
-  cout &lt;&lt; "The smallest element is " &lt;&lt; *(result2.first) &lt;&lt; endl;
-  cout &lt;&lt; "The largest element is  " &lt;&lt; *(result2.second) &lt;&lt; endl;
-
-  assert( result2.first  == std::min_element(L.begin(), L.end()) );
-  assert( result2.second == std::max_element(L.begin(), L.end()) );
-}</pre>
-</a>
-
-<a name="notes">
-<h3>
-Notes</h3>
-<a NAME="Note1"></a><a href="#Note1">[1]</a> We do not support
-idioms such as <tt><a href="../../tuple/doc/tuple_users_guide.html#tiers">tie</a>(a,b)=minmax(a,b)</tt>
-to order two elements <tt>a</tt>, <tt>b</tt>, although this would have
-the desired effect if we returned a reference instead of a constant
-reference. The reason is that two unnecessary assignments are
-performed if <tt>a</tt> and <tt>b</tt> are already in order. It is better to stick to <tt>if (b&lt;a)
-swap(a,b)</tt> to achieve that effect.
-<p><a NAME="Note2"></a><a href="#Note2">[2]</a> These algorithms always
-perform at least <tt>3n/2-2</tt> comparisons, which is a lower bound on
-the number of comparisons in any case (Cormen, Leiserson, Rivest: "Introduction
-to Algorithms", section 9.1, Exercise 9.1-). The algorithms essentially compare
-the elements in pairs, performing 1 comparison for the first two elements,
-then 3 comparisons for each remaining pair of elements (one to order the
-pair, and one each to update the minimum and the maximum). When
-the number of elements is odd, the last one needs to be compared to the
-current minimum and also to the current maximum. In addition, for <tt>minmax</tt>,
-in cases where equality of the two members in the pair could occur, and
-the update stores the second, we save the first to check at the end whether
-the update should have stored the first instead (in case of equality). It is hard
-to predict if this last comparison is performed or not, hence the <i>at most</i>
-in both cases.
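-<p>For illustration only, here is a minimal sketch of the pairing scheme
-described above. The name <tt>pairwise_minmax</tt> is hypothetical and this is
-not the library's implementation; in particular it does not apply the extra
-care about equal elements needed for the <i>first</i>/<i>last</i> variants.
-<pre>#include &lt;utility>  // std::pair
-
-template &lt;class ForwardIterator>
-std::pair&lt;ForwardIterator, ForwardIterator>
-pairwise_minmax(ForwardIterator first, ForwardIterator last)
-{
-  std::pair&lt;ForwardIterator, ForwardIterator> result(first, first);
-  if (first == last) return result;
-  ForwardIterator it = first;
-  for (++it; it != last; ++it) {
-    ForwardIterator a = it;
-    if (++it == last) {                            // odd element left over:
-      if (*a &lt; *result.first)  result.first  = a;  //   compare to the minimum
-      if (*result.second &lt; *a) result.second = a;  //   and to the maximum
-      break;
-    }
-    ForwardIterator lo = a, hi = it;
-    if (*it &lt; *a) { lo = it; hi = a; }             // 1: order the pair
-    if (*lo &lt; *result.first)  result.first  = lo;  // 2: update the minimum
-    if (*result.second &lt; *hi) result.second = hi;  // 3: update the maximum
-  }
-  return result;
-}</pre>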
-<p><a NAME="Note3"></a><a href="#Note3">[3]</a> These algorithms always
-perform at least <tt>3n/2-2</tt> comparisons, which is a lower bound on
-the number of comparisons in any case. The method is the same as in note
-<a href="#Note2">[2]</a>
-above, and like above, when the number of elements is odd, the last one
-needs to be compared to the current minimum and also to the current maximum.
-We can avoid the latter comparison if the former is successful, hence the
-<i>at
-most</i> instead of <i>exactly</i> in the odd case.
-</a>
-
-<a name="rationale">
-<h3>
-<b>Rationale:</b></h3>
-
-<a name="two_headers">
-<h4><b>Why not a single header <tt>&lt;boost/algorithm/minmax.hpp></tt>?</b></h4>
-<p>This was the design originally proposed and approved in the formal
-review. As the need for Boost.tuple became clear (due to the limitations
-of <tt>std::pair</tt>), it also became annoying to require another
-library for <tt>minmax_element</tt>, which does not need it. Hence the
-separation into two header files.</p>
-
-<a name="no-fix">
-<h4><b>Your minmax suffers from the same problems as std::min and
-std::max.</b></h4>
-<p>I am aware of the problems with std::min and
-std::max, and all the debate that has been going on (please consult
-<a href="#Alexandrescu">Alexandrescu's paper</a> and the links therein). But I don't see the purpose of this
-library as fixing something that is part of the C++ standard. I humbly
-think it's beyond the scope of this library. Rather, I am
-following the way of the standard in simply providing one more function
-of the same family. If someone else wants to fix std::min, their fix
-would probably apply to boost::minmax as well.</p>
-</a>
-
-<h4><b>Why no <tt>min/max_element_if</tt>?</b></h4>
-<p>In a first version of the library, I proposed <tt>_if</tt> versions of
-all the algorithms (well, not all, because that would be too much).
-However, there is simply no reason to do so, and all the versions I had
-could be implemented just as efficiently using the excellent
-<tt>&lt;boost/iterator_adaptors.hpp></tt> library. Namely, a call to
-<tt>min_element_if(first, last, pred)</tt> would be just as well
-implemented by:
-<pre>
-     // equivalent to min_element_if(first, last, pred)
-     min_element(boost::make_filter_iterator(first, last, pred),
-                 boost::make_filter_iterator(last, last, pred));
-</pre>
-Arguably, the <tt>min_element_if</tt> version is somewhat shorter, but
-the overhead of iterator adaptors is not large, and they get rid of a
-lot of code (think of all the first/last combinations, each of which
-would have to be doubled with an <tt>_if</tt> variant!).</p>
-
-<h4><b>Discussion: about std::max_element</b></h4>
-<p>This rationale is somewhat historical, but explains why there are all
-these <tt>first/last_min/max_element</tt> functions.</p>
-<p>The C++ standard mandates that <tt>std::min_element</tt> and <tt>std::max_element</tt>
-return the first instance of the smallest and largest elements (as opposed
-to, say, the last). This arbitrary choice has some consistency: In the
-case of v of type vector&lt;int>, for instance, it is true that <tt>std::min_element(v.begin(),v.end(),std::less&lt;int>())
-== std::max_element(v.begin(),v.end(),std::greater&lt;int>())</tt>.
-<p>There is of course nothing wrong with this: it's simply a matter of
-choice. Yet another way to specify min_element and max_element is to define
-them as the first and the last elements if the range was stably sorted.
-(The <i>stable</i> sort is necessary to disambiguate between iterators
-that have the same value.) In that case, min should return the first instance
-and max should return the last. Then, both functions are related by
-<tt>reverse_iterator(std::first_min_element(v.begin(),v.end(),std::less&lt;int>()))
-==
-std::last_max_element(v.rbegin(),v.rend(),std::greater&lt;int>())</tt>.
-This definition is subtly different from the previous one.</p>
-<p>The definition problem surfaces when one tries to design a minmax_element,
-using the procedure proposed in (Cormen, Leiserson, Rivest: "Introduction
-to Algorithms", section 9.1). It <i>should</i> be possible to derive an
-algorithm using only <tt>3n/2</tt> comparisons if <tt>[first,last) </tt>has
-<tt>n</tt>
-elements, but if one tries to write a function called <tt>first_min_first_max_element()</tt>
-which returns both <tt>std::min_element</tt> and <tt>std::max_element</tt>
-in a pair, the trivial implementation does not work. The problem, rather
-subtly, is about equal elements: I had to think for a while to find a
-way to perform only three
-comparisons per pair and return the first min and first max elements.
-For a long time, it seemed any
-attempts at doing so would consume four comparisons per pair in the worst
-case. This implementation achieves three.</p>
-<p>It is not possible (or even desirable) to change the meaning of
-<tt>max_element</tt>,
-but it is still beneficial to provide a function called <tt>minmax_element</tt>,
-which returns a pair of <tt>min_element</tt> and <tt>max_element</tt>.
-Although it is easy enough to call <tt>min_element</tt> and <tt>max_element</tt>,
-this performs
-<tt>2(n-1)</tt> comparisons, and necessitates <b>two</b>
-passes over the input. In contrast,
-<tt>minmax_element</tt> performs
-fewer comparisons and makes only a <b>single</b> pass over the input.
-The savings can be significant when the iterator type is not a raw pointer,
-or even is just a model of the InputIterator concept (although in that
-case the interface would have to be
-changed, as the return type could not be copied, so one could e.g.
-return a value).</p>
-<p>In order to benefit from all the variants of the algorithm, I propose
-to introduce both <tt>first_min_element</tt> and <tt>last_min_element</tt>,
-and their counterparts <tt>first_max_element</tt> and <tt>last_max_element</tt>.
-Then I also propose all the variant algorithms: <tt>first_min_last_max_element</tt>
-and <tt>last_min_first_max_element</tt>, which perform at most <tt>3n/2</tt>
-comparisons in only a single pass over the input. In fact, it can be proven
-that computing minmax requires at least <tt>3(n/2)-2</tt> comparisons in
-any instance of the problem (Cormen, Leiserson, Rivest, 2nd edition, section
-9.1). The implementation I give does not perform unnecessary comparisons
-(whose result could have been computed by transitivity from previous
-comparisons).</p>
-<p>It appears that <tt>first_min_last_max_element</tt> may be just a tad
-slower than
-<tt>first_min_element</tt> alone, but still much faster than <tt>first_min_element</tt>
-and
-<tt>last_max_element</tt> called separately. <a href="#Note2">[2]</a>
-
-<h4><b>Why algorithms and not accumulators?</b></h4>
-<p>The minmax algorithms are useful in computing the extent of a range.
-In computer graphics, we need a bounding box of a set of objects.
-In that case the need for a single pass is even more stringent,
-as all three directions must be handled at once. Food for thought: there
-is material for a nice generic programming library with stackable <tt>update_min</tt>
-and <tt>update_max</tt> function objects which store a reference to the
-<tt>min_result</tt> and
-<tt>max_result</tt> variables, used in conjunction with the <tt>for_each</tt>
-algorithm.</p>
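-<p>As a rough sketch of that idea (the names <tt>update_min</tt> and
-<tt>update_max</tt> are taken from the remark above and are purely
-illustrative, not part of this library):
-<pre>template &lt;class T>
-struct update_min {
-  explicit update_min(T&amp; result) : m_result(result) {}
-  void operator()(T const&amp; x) const { if (x &lt; m_result) m_result = x; }
-  T&amp; m_result;   // reference to the min_result variable
-};
-
-template &lt;class T>
-struct update_max {
-  explicit update_max(T&amp; result) : m_result(result) {}
-  void operator()(T const&amp; x) const { if (m_result &lt; x) m_result = x; }
-  T&amp; m_result;   // reference to the max_result variable
-};
-
-// One pass over [first, last) updates both accumulators at once;
-// with a suitable composite functor this could be handed to std::for_each.
-template &lt;class InputIterator, class T>
-void update_extent(InputIterator first, InputIterator last, T&amp; lo, T&amp; hi)
-{
-  update_min&lt;T> umin(lo);
-  update_max&lt;T> umax(hi);
-  for (; first != last; ++first) { umin(*first); umax(*first); }
-}</pre>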
-<p>I believe many standard sequential algorithms could be reformulated
-with accumulators (and many others, such as in statistics, expectation /
-variance / etc.). It seems that there is room for another library, but I
-do not see it competing with minmax; rather, it would extend several algorithms
-(including minmax) to the accumulator framework. However, I felt it was
-beyond the scope of this library to provide such accumulators.</p>
-
-<a NAME="no-policy">
-<h4><b>This first/last is a perfect application for a policy-based
-design.</b></h4>
-<p>True, and I could have gone that way, with the default policy for
-<tt>min_element</tt> and <tt>max_element</tt> being to pick the first
-occurrence of the result. This would have reduced the number of
-combinations of the <tt>minmax_element</tt> variants. But it would also have
-meant changing the interface of <tt>boost::minmax_element</tt>.
-One of the goals of the <tt>minmax_element</tt> algorithm is its
-eventual addition to the C++ standard, in connection with
-<tt>std::min_element</tt> and <tt>std::max_element</tt>
-(and I feel that it would be quite natural
-given the shortness of the implementation, and the not quite trivial
-detail which is needed to get it right). So changing the interface by
-adding policies would unfortunately have meant departing from the
-standard and creating an obstacle towards that goal. Besides, the code
-remains rather readable and simple without policies. So I am quite happy
-to keep it like this.
-</p></a>
-</a>
-
-<a name="perf">
-<a href="doc/minmax_benchs.html"><h3><b>About performance</b></h3></a>
-</a>
-
-<a name="acks">
-<h3>
-Acknowledgements</h3>
-
-<a name="Alexandrescu">
-<a href="http://www.drdobbs.com/generic-min-and-max-redivivus/184403774">Generic: Min and Max Redivivus, by Andrei Alexandrescu</a>
-Dr. Dobbs, April 2001
-
-<p>My students in CS903 (Polytechnic Univ., <a href="http://photon.poly.edu/~hbr/cs903/">http://photon.poly.edu/~hbr/cs903/</a>)
-who had <tt>minmax_element</tt> as an assignment helped clarify the issues,
-and also came up with the optimum number of comparisons for <tt>first_min_last_max_element</tt>.
-The identification of the issue surrounding <tt>max_element</tt> is solely
-my own.
-<p>One <tt>minmax_element</tt> implementation, which performs <tt>3(n/2)+O(log
-n)</tt> comparisons on the average when the elements are <tt>random_shuffle</tt>d,
-was suggested by my student Marc Glisse. The current one, which performs
-<tt>3(n/2)+1</tt>
-comparisons in the worst case, was suggested by John Iacono.
-<p>Finally, Matthew Wilson and Jeremy Siek contributed pre-review
-comments, while Gennadiy Rozental, John Maddock, Craig Henderson, Gary
-Powell participated in the review of the library, managed by Thomas
-Witt. In particular, Gennadiy suggested a factorization of the code;
-while I haven't followed it all the way, his suggestions do make the
-code more readable and still work with older compilers. 
-Long after the review, as I finally found the time to add the library to a
-release, Eric Niebler noted the bad behavior of <tt>std::pair</tt> for
-<tt>minmax</tt> and suggested using Boost.tuple instead.
-My thanks to all for the excellent advice and reviews.
-<h3>
-See also</h3>
-<tt><a href="http://www.sgi.com/tech/stl/min.html">min</a></tt>, <tt><a href="http://www.sgi.com/tech/stl/max.html">max</a></tt>,
-<tt><a href="http://www.sgi.com/tech/stl/min_element.html">min_element</a></tt>,
-<tt><a href="http://www.sgi.com/tech/stl/max_element.html">max_element</a></tt>,
-<tt><a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan
-Comparable</a></tt>,
-<tt><a href="http://www.sgi.com/tech/stl/sort.html">sort</a></tt>,
-<tt><a href="http://www.sgi.com/tech/stl/nth_element.html">nth_element</a></tt>
-.
-<hr SIZE="6">
-<br>Last modified 2012-12-10
-<p><font face="Arial,Helvetica"><font size=-1>&copy; Copyright Herv&eacute;
-Br&ouml;nnimann, Polytechnic University, 2002--2004. 
-Use, modification, and distribution is subject to the Boost Software
-License, Version 1.0. (See accompanying file <a href="../../../LICENSE_1_0.txt">License_1_0.txt</a> or copy at
-<a href="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</a>)
-</font></font>
-</body>
-</html>
diff --git a/third_party/boostorg/algorithm/minmax/test/Jamfile.v2 b/third_party/boostorg/algorithm/minmax/test/Jamfile.v2
deleted file mode 100644
index fcfba8a..0000000
--- a/third_party/boostorg/algorithm/minmax/test/Jamfile.v2
+++ /dev/null
@@ -1,25 +0,0 @@
-#  Boost.Minmax Library test Jamfile
-#
-#  Copyright (C) 2002--2004, Herve Bronnimann
-#
-# Use, modification, and distribution is subject to the Boost Software
-# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-#
-
-import testing ;
-
-alias unit_test_framework
-    : # sources
-        /boost//unit_test_framework
-    ;        
-
-{
-  test-suite algorithm/minmax
-   : [ run minmax_element_test.cpp unit_test_framework
-       : : : : minmax_element ]
-     [ run minmax_test.cpp unit_test_framework
-       : : : : minmax ]      
-   ;
-}
-
diff --git a/third_party/boostorg/algorithm/minmax/test/minmax_element_test.cpp b/third_party/boostorg/algorithm/minmax/test/minmax_element_test.cpp
deleted file mode 100644
index 11cf2c4..0000000
--- a/third_party/boostorg/algorithm/minmax/test/minmax_element_test.cpp
+++ /dev/null
@@ -1,253 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <utility>
-#include <functional>
-#include <algorithm>
-#include <numeric>
-#include <iterator>
-#include <vector>
-#include <list>
-#include <set>
-#include <cstdlib>
-
-#include <boost/config.hpp> /* prevents some nasty warns in MSVC */
-#include <boost/algorithm/minmax_element.hpp>
-#include <boost/iterator/reverse_iterator.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#if (__cplusplus >= 201103L) || defined(BOOST_NO_CXX98_RANDOM_SHUFFLE)
-#include <random>
-
-std::default_random_engine gen;
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::shuffle(first, last, gen); }
-#else
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::random_shuffle(first, last); }
-#endif
-
-class custom {
-  int m_x;
-  friend bool operator<(custom const& x, custom const& y);
-public:
-  explicit custom(int x = 0) : m_x(x) {}
-  custom(custom const& y) : m_x(y.m_x) {}
-  custom operator+(custom const& y) const { return custom(m_x+y.m_x); }
-  custom& operator+=(custom const& y) { m_x += y.m_x; return *this; }
-};
-
-bool operator< (custom const& x, custom const& y)
-{
-  return x.m_x < y.m_x;
-}
-
-BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION(custom)
-
-namespace std {
-
-template <>
-struct iterator_traits<int*> {
-  typedef random_access_iterator_tag  iterator_category;
-  typedef int                          value_type;
-  typedef ptrdiff_t                    difference_type;
-  typedef value_type*                  pointer;
-  typedef value_type&                  reference;
-};
-
-template <>
-struct iterator_traits<custom*> {
-  typedef random_access_iterator_tag  iterator_category;
-  typedef custom                       value_type;
-  typedef ptrdiff_t                    difference_type;
-  typedef value_type*                  pointer;
-  typedef value_type&                  reference;
-};
-
-}
-
-template <class T1, class T2, class T3, class T4>
-void tie(std::pair<T1, T2> p, T3& first, T4& second)
-{
-  first = T3(p.first); second = T4(p.second);
-}
-
-template <class Value>
-struct less_count : std::less<Value> {
-    typedef std::less<Value> Base;
-  less_count(less_count<Value> const& lc) : m_counter(lc.m_counter) {}
-  less_count(int& counter) : m_counter(counter) {}
-  bool operator()(Value const& a, Value const& b) const {
-    ++m_counter;
-    return Base::operator()(a,b);
-  }
-  void reset() {
-    m_counter = 0;
-  }
-private:
-  int& m_counter;
-};
-
-inline int opt_min_count(int n) {
-  return (n==0) ? 0 : n-1;
-}
-inline int opt_minmax_count(int n) {
-  if (n < 2) return 0;
-  if (n == 2) return 2;
-  return (n%2 == 0) ? 3*(n/2)-1 : 3*(n/2)+1;
-}
-inline int opt_boost_minmax_count(int n) {
-  if (n < 2) return 0;
-  if (n == 2) return 1;
-  return (n%2 == 0) ? 3*(n/2)-2 : 3*(n/2);
-}
-
-#define CHECK_EQUAL_ITERATORS( left, right, first ) \
-BOOST_CHECK_EQUAL( std::distance( first, left ), std::distance( first, right ) )
-
-template <class CIterator>
-void test_minmax(CIterator first, CIterator last, int n)
-{
-  using namespace boost;
-
-  typedef typename std::iterator_traits<CIterator>::value_type Value;
-  typedef boost::reverse_iterator<CIterator> RCIterator;
-  // assume that CIterator is BidirectionalIter
-  CIterator min, max;
-  RCIterator rfirst(last), rlast(first), rmin, rmax;
-  int counter = 0;
-  less_count<Value> lc(counter);
-
-  // standard extensions
-  // first version, operator<
-  tie( boost::minmax_element(first, last), min, max );
-
-  CHECK_EQUAL_ITERATORS( min, std::min_element(first, last), first );
-  CHECK_EQUAL_ITERATORS( max, std::max_element(first, last), first );
-
-  // second version, comp function object (keeps a counter!)
-  lc.reset();
-  tie( boost::minmax_element(first, last, lc), min, max );
-  BOOST_CHECK( counter <= opt_minmax_count(n) );
-  CHECK_EQUAL_ITERATORS( min, std::min_element(first, last, lc), first );
-  CHECK_EQUAL_ITERATORS( max, std::max_element(first, last, lc), first );
-
-  // boost extensions
-  // first version, operator<
-  CHECK_EQUAL_ITERATORS( boost::first_min_element(first, last), std::min_element(first, last), first );
-  rmin = RCIterator(boost::last_min_element(first, last));
-  rmin = (rmin == rfirst) ? rlast : --rmin;
-  CHECK_EQUAL_ITERATORS( rmin, std::min_element(rfirst, rlast), rfirst );
-  CHECK_EQUAL_ITERATORS( boost::first_max_element(first, last), std::max_element(first, last), first );
-  rmax = RCIterator(boost::last_max_element(first, last));
-  rmax = (rmax == rfirst) ? rlast : --rmax;
-  CHECK_EQUAL_ITERATORS( rmax, std::max_element(rfirst, rlast), rfirst );
-  tie( boost::first_min_last_max_element(first, last), min, max );
-  CHECK_EQUAL_ITERATORS( min, boost::first_min_element(first, last), first );
-  CHECK_EQUAL_ITERATORS( max, boost::last_max_element(first, last), first );
-  tie( boost::last_min_first_max_element(first, last), min, max );
-  CHECK_EQUAL_ITERATORS( min, boost::last_min_element(first, last), first );
-  CHECK_EQUAL_ITERATORS( max, boost::first_max_element(first, last), first );
-  tie( boost::last_min_last_max_element(first, last), min, max );
-  CHECK_EQUAL_ITERATORS( min, boost::last_min_element(first, last), first );
-  CHECK_EQUAL_ITERATORS( max, boost::last_max_element(first, last), first );
-
-  // second version, comp function object (keeps a counter!)
-  lc.reset();
-  min = boost::first_min_element(first, last, lc);
-  BOOST_CHECK( counter <= opt_min_count(n) );
-  CHECK_EQUAL_ITERATORS( min, std::min_element(first, last, lc), first );
-  lc.reset();
-  rmin = RCIterator(boost::last_min_element(first, last, lc));
-  rmin = (rmin == rfirst) ? rlast : --rmin;
-  BOOST_CHECK( counter <= opt_min_count(n) );
-  CHECK_EQUAL_ITERATORS( rmin, std::min_element(rfirst, rlast, lc), rfirst );
-  lc.reset();
-  max =  boost::first_max_element(first, last, lc);
-  BOOST_CHECK( counter <= opt_min_count(n) );
-  CHECK_EQUAL_ITERATORS( max, std::max_element(first, last, lc), first );
-  lc.reset();
-  rmax = RCIterator(boost::last_max_element(first, last, lc));
-  rmax = (rmax == rfirst) ? rlast : --rmax;
-  BOOST_CHECK( counter <= opt_min_count(n) );
-  CHECK_EQUAL_ITERATORS( rmax, std::max_element(rfirst, rlast, lc), rfirst );
-  lc.reset();
-  tie( boost::first_min_last_max_element(first, last, lc), min, max );
-  BOOST_CHECK( counter <= opt_boost_minmax_count(n) );
-  CHECK_EQUAL_ITERATORS( min, boost::first_min_element(first, last, lc), first );
-  CHECK_EQUAL_ITERATORS( max, boost::last_max_element(first, last, lc), first );
-  lc.reset();
-  tie( boost::last_min_first_max_element(first, last, lc), min, max );
-  BOOST_CHECK( counter <= opt_boost_minmax_count(n) );
-  BOOST_CHECK( min == boost::last_min_element(first, last, lc) );
-  CHECK_EQUAL_ITERATORS( max, boost::first_max_element(first, last, lc), first );
-  lc.reset();
-  tie( boost::last_min_last_max_element(first, last, lc), min, max );
-  BOOST_CHECK( counter <= opt_minmax_count(n) );
-  CHECK_EQUAL_ITERATORS( min, boost::last_min_element(first, last, lc), first );
-  CHECK_EQUAL_ITERATORS( max, boost::last_max_element(first, last, lc), first );
-}
-
-template <class Container, class Iterator, class Value>
-void test_container(Iterator first, Iterator last, int n,
-                    Container* /* dummy */ = 0
-                    BOOST_APPEND_EXPLICIT_TEMPLATE_TYPE(Value) )
-{
-  Container c(first, last);
-  test_minmax(c.begin(), c.end(), n);
-}
-
-template <class Iterator>
-void test_range(Iterator first, Iterator last, int n)
-{
-  typedef typename std::iterator_traits<Iterator>::value_type Value;
-  // Test various containers with these values
-  test_container< std::vector<Value>, Iterator, Value >(first, last, n);
-  test_container< std::list<Value>,   Iterator, Value >(first, last, n);
-  test_container< std::set<Value>,    Iterator, Value >(first, last, n);
-}
-
-template <class Value>
-void test(int n BOOST_APPEND_EXPLICIT_TEMPLATE_TYPE(Value))
-{
-  // Populate test vector with identical values
-  std::vector<Value> test_vector(n, Value(1));
-  typename std::vector<Value>::iterator first( test_vector.begin() );
-  typename std::vector<Value>::iterator last( test_vector.end() );
-  test_range(first, last, n);
-
-  // Populate test vector with two values
-  typename std::vector<Value>::iterator middle( first + n/2 );
-  std::fill(middle, last, Value(2));
-  test_range(first, last, n);
-
-  // Populate test vector with increasing values
-  std::partial_sum(first, last, first);
-  test_range(first, last, n);
-
-  // Populate test vector with decreasing values
-  std::reverse(first, last);
-  test_range(first, last, n);
-
-  // Populate test vector with random values
-  do_shuffle(first, last);
-  test_range(first, last, n);
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-#ifndef BOOST_NO_STDC_NAMESPACE
-  using std::atoi;
-#endif
-
-  int n = 100;
-
-  test<int>(n);
-  test<custom>(n);
-}
diff --git a/third_party/boostorg/algorithm/minmax/test/minmax_test.cpp b/third_party/boostorg/algorithm/minmax/test/minmax_test.cpp
deleted file mode 100644
index 151b096..0000000
--- a/third_party/boostorg/algorithm/minmax/test/minmax_test.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//  (C) Copyright Herve Bronnimann 2004.
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <utility>
-#include <functional>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/minmax.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-class custom {
-  int m_x;
-  friend std::ostream& operator<<(std::ostream& str, custom const& x);
-public:
-  explicit custom(int x = 0) : m_x(x) {}
-  custom(custom const& y) : m_x(y.m_x) {}
-  bool operator==(custom const& y) const { return m_x == y.m_x; }
-  bool operator<(custom const& y) const { return m_x < y.m_x; }
-  custom operator+(custom const& y) const { return custom(m_x+y.m_x); }
-  custom& operator+=(custom const& y) { m_x += y.m_x; return *this; }
-};
-
-std::ostream&
-operator<<(std::ostream& str, custom const& x)
-{
-  return  str << x.m_x;
-}
-
-template <class Value>
-struct less_count : std::less<Value> {
-  typedef std::less<Value> Base;
-  less_count(less_count<Value> const& lc) : m_counter(lc.m_counter) {}
-  less_count(int& counter) : m_counter(counter) {}
-  bool operator()(Value const& a, Value const& b) const {
-    ++m_counter;
-    return Base::operator()(a,b);
-  }
-  void reset() {
-    m_counter = 0;
-  }
-private:
-  int& m_counter;
-};
-
-using namespace boost;
-
-template <class Value>
-void test(BOOST_EXPLICIT_TEMPLATE_TYPE(Value))
-{
-  Value zero(0), one(1);
-  int counter = 0;
-  less_count<Value> lc(counter);
-
-  // Test functionality
-  tuple<Value const&, Value const&> result1 = boost::minmax(zero, one);
-  BOOST_CHECK_EQUAL( get<0>(result1), zero );
-  BOOST_CHECK_EQUAL( get<1>(result1), one );
-
-  tuple<Value const&, Value const&> result2 = boost::minmax(one, zero);
-  BOOST_CHECK_EQUAL( get<0>(result2), zero );
-  BOOST_CHECK_EQUAL( get<1>(result2), one );
-  
-  // Test functionality and number of comparisons
-  lc.reset();
-  tuple<Value const&, Value const&> result3 = boost::minmax(zero, one, lc );
-  BOOST_CHECK_EQUAL( get<0>(result3), zero );
-  BOOST_CHECK_EQUAL( get<1>(result3), one );
-  BOOST_CHECK_EQUAL( counter, 1 );
-
-  lc.reset();
-  tuple<Value const&, Value const&> result4 = boost::minmax(one, zero, lc );
-  BOOST_CHECK_EQUAL( get<0>(result4), zero );
-  BOOST_CHECK_EQUAL( get<1>(result4), one );
-  BOOST_CHECK_EQUAL( counter, 1);
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test<int>(); // ("builtin");
-  test<custom>(); // ("custom ");
-}
diff --git a/third_party/boostorg/algorithm/string/doc/Jamfile.v2 b/third_party/boostorg/algorithm/string/doc/Jamfile.v2
deleted file mode 100644
index 9ddebb8..0000000
--- a/third_party/boostorg/algorithm/string/doc/Jamfile.v2
+++ /dev/null
@@ -1,59 +0,0 @@
-#  Boost string_algo library documentation Jamfile  ---------------------------------
-#
-#  Copyright Pavol Droba 2002-2003. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-#  See http://www.boost.org for updates, documentation, and revision history.
-
-import toolset ;
-toolset.using doxygen ;
-
-boostbook string_algo : string_algo.xml autodoc 
-	:
-        <xsl:param>boost.root=../../../../..
-        <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/doc/html
-	;
-
-doxygen autodoc
-    :   
-    [ glob ../../../../boost/algorithm/string.hpp ]
-    [ glob ../../../../boost/algorithm/string_regex.hpp ]
-
-    [ glob ../../../../boost/algorithm/string/classification.hpp ]
-    [ glob ../../../../boost/algorithm/string/iterator_range.hpp ]       
-    [ glob ../../../../boost/algorithm/string/sequence_traits.hpp ]
-    [ glob ../../../../boost/algorithm/string/std_containers_traits.hpp ]
-    [ glob ../../../../boost/algorithm/string/concept.hpp ]
-    [ glob ../../../../boost/algorithm/string/compare.hpp ]
-    [ glob ../../../../boost/algorithm/string/constants.hpp ]
-    [ glob ../../../../boost/algorithm/string/case_conv.hpp ]
-    [ glob ../../../../boost/algorithm/string/find.hpp ]
-    [ glob ../../../../boost/algorithm/string/finder.hpp ]
-    [ glob ../../../../boost/algorithm/string/find_iterator.hpp ]
-    [ glob ../../../../boost/algorithm/string/trim.hpp ]
-    [ glob ../../../../boost/algorithm/string/predicate.hpp ]
-    [ glob ../../../../boost/algorithm/string/split.hpp ]
-    [ glob ../../../../boost/algorithm/string/iter_find.hpp ]
-    [ glob ../../../../boost/algorithm/string/erase.hpp ]
-    [ glob ../../../../boost/algorithm/string/join.hpp ]
-    [ glob ../../../../boost/algorithm/string/replace.hpp ]
-    [ glob ../../../../boost/algorithm/string/find_format.hpp ]
-    [ glob ../../../../boost/algorithm/string/formatter.hpp ]
-    [ glob ../../../../boost/algorithm/string/regex.hpp ]
-    [ glob ../../../../boost/algorithm/string/regex_find_format.hpp ]
-    [ glob ../../../../boost/algorithm/string/trim_all.hpp ]
-    :
-    <doxygen:param>HIDE_UNDOC_MEMBERS=YES
-    <doxygen:param>EXTRACT_PRIVATE=NO
-    <doxygen:param>ENABLE_PREPROCESSING=YES
-    <doxygen:param>MACRO_EXPANSION=YES
-    <doxygen:param>EXPAND_ONLY_PREDEF=YES
-    <doxygen:param>SEARCH_INCLUDES=YES
-    <doxygen:param>PREDEFINED="BOOST_STRING_TYPENAME=typename \"BOOST_STATIC_CONSTANT(type,var)=static const type var;\""
-    ;
-        
-
-
-
diff --git a/third_party/boostorg/algorithm/string/doc/concept.xml b/third_party/boostorg/algorithm/string/doc/concept.xml
deleted file mode 100644
index 8e9c4a8..0000000
--- a/third_party/boostorg/algorithm/string/doc/concept.xml
+++ /dev/null
@@ -1,205 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.concept" last-revision="$Date$">
-    <title>Concepts</title>
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-
-    <section>   
-        <title>Definitions</title>
-        
-        <table>
-            <title>Notation</title>
-            <tgroup cols="2" align="left">
-                <tbody>
-                    <row>
-                        <entry><code>F</code></entry>
-                        <entry>A type that is a model of Finder</entry>
-                    </row>
-                    <row>
-                        <entry><code>Fmt</code></entry>
-                        <entry>A type that is a model of Formatter</entry>
-                    </row>
-                    <row>
-                        <entry><code>Iter</code></entry>
-                        <entry>
-                            Iterator Type
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>f</code></entry>
-                        <entry>Object of type <code>F</code></entry>
-                    </row>
-                    <row>
-                        <entry><code>fmt</code></entry>
-                        <entry>Object of type <code>Fmt</code></entry>
-                    </row>
-                    <row>
-                        <entry><code>i,j</code></entry>
-                        <entry>Objects of type <code>Iter</code></entry>
-                    </row>
-                    </tbody>
-            </tgroup>
-        </table>
-    </section>
-
-    <section id="string_algo.finder_concept">
-        <title>Finder Concept</title>
-
-        <para>
-            Finder is a functor which searches for an arbitrary part of a container. 
-            The result of the search is given as an <classname>iterator_range</classname> 
-            delimiting the selected part.
-        </para>
-
-        <table>             
-            <title>Valid Expressions</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>   
-                        <entry>Expression</entry>
-                        <entry>Return Type</entry>
-                        <entry>Effects</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry><code>f(i,j)</code></entry>
-                        <entry>Convertible to <code>iterator_range&lt;Iter&gt;</code></entry>
-                        <entry>Performs the search on the interval [i,j) and returns the result of the search</entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-
-        <para>
-            Various algorithms need to perform a search in a container and a Finder is a generalization of such
-            search operations that allows algorithms to abstract from searching. For instance, generic replace
-            algorithms can replace any part of the input, and the Finder is used to select the desired one.
-        </para>
-        <para>
-            Note that it is only required that the finder works with a particular iterator type. However,
-            a Finder operation can be defined as a template, allowing the Finder to work with any iterator.
-        </para>
-        <para>
-            <emphasis role="bold">Examples</emphasis>
-        </para>
-        <para> 
-            <itemizedlist>
-                <listitem>
-                    Finder implemented as a class. This Finder always returns the whole input as a match. <code>operator()</code>
-                    is templated, so that the finder can be used on any iterator type.
-                    
-                    <programlisting>
-struct simple_finder
-{
-    template&lt;typename ForwardIteratorT&gt;
-    boost::iterator_range&lt;ForwardIteratorT&gt; operator()(
-        ForwardIteratorT Begin,
-        ForwardIteratorT End )
-    {
-        return boost::make_iterator_range( Begin, End );
-    }
-};
-        </programlisting>
-                </listitem>
-                <listitem>
-                    Function Finder. Finder can be any function object. That is, any ordinary function with the
-                    required signature can be used as well. However, such a function can be used only for
-                    a specific iterator type. 
-                    
-                    <programlisting>
-boost::iterator_range&lt;std::string::const_iterator&gt; simple_finder(
-    std::string::const_iterator Begin,
-    std::string::const_iterator End )
-{
-    return boost::make_iterator_range( Begin, End );
-}
-        </programlisting>
-                </listitem>
-            </itemizedlist>
-        </para> 
-    </section>
-    <section id="string_algo.formatter_concept">
-        <title>Formatter concept</title>
-
-        <para>
-            Formatters are used by <link linkend="string_algo.replace">replace algorithms</link>.
-            They are used in close combination with finders.
-            A formatter is a functor which takes a result from a Finder operation and transforms it in a specific way. 
-            The operation of the formatter can use additional information provided by a specific finder,
-            for example <functionname>regex_formatter()</functionname> uses the match information from
-            <functionname>regex_finder()</functionname> to format the result of the formatter operation.
-        </para>
-    
-        <table>
-            <title>Valid Expressions</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>   
-                        <entry>Expression</entry>
-                        <entry>Return Type</entry>
-                        <entry>Effects</entry>
-                    </row>
-                </thead>
-                <tbody>
-                   <row>
-                        <entry><code>fmt(f(i,j))</code></entry>
-                        <entry>A container type, accessible using container traits</entry>
-                        <entry>Formats the result of the finder operation</entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-
-        <para>
-            Similarly to finders, formatters generalize format operations. When a finder is used to 
-            select a part of the input, the formatter takes this selection and performs some formatting
-            on it. Algorithms can abstract from formatting using a formatter.
-        </para>
-        <para>
-            <emphasis role="bold">Examples</emphasis>
-        </para>
-        <para> 
-            <itemizedlist>
-                <listitem>
-                    Formatter implemented as a class. This Formatter does not perform any formatting and 
-                    returns the match, repackaged. <code>operator()</code>
-                    is templated, so that the Formatter can be used on any Finder type.
-                    
-                    <programlisting>
-struct simple_formatter
-{
-    template&lt;typename FindResultT&gt;
-    std::string operator()( const FindResultT&amp; Match )
-    {
-        std::string Temp( Match.begin(), Match.end() );
-        return Temp;
-    }
-};
-                </programlisting>
-                </listitem>
-                <listitem>
-                    Function Formatter. Similarly to Finder, Formatter can be any function object. 
-                    However, as a function, it can be used only with a specific Finder type. 
-                  
-                    <programlisting>
-std::string simple_formatter( boost::iterator_range&lt;std::string::const_iterator&gt;&amp; Match )
-{
-    std::string Temp( Match.begin(), Match.end() );
-    return Temp;
-}
-                    </programlisting>
-                </listitem>
-            </itemizedlist>
-        </para> 
-     </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/credits.xml b/third_party/boostorg/algorithm/string/doc/credits.xml
deleted file mode 100644
index 6acdf74..0000000
--- a/third_party/boostorg/algorithm/string/doc/credits.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.credits" last-revision="$Date$">
-    <title>Credits</title>
-    <section id="string_algo.ack">
-        <title>Acknowledgments</title>
-        <para>
-            The author would like to thank everybody who gave suggestions and comments. Especially valuable
-            were the contributions of Thorsten Ottosen, Jeff Garland and the other boost members who participated
-            in the review process, namely David Abrahams, Daniel Frey, Beman Dawes, John Maddock, David B.Held, Pavel Vozenilek
-            and many others.
-        </para>
-        <para>
-            Additional thanks go to Stefan Slapeta and Toon Knapen, who have been very resourceful in solving various
-            portability issues.
-        </para>
-    </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/design.xml b/third_party/boostorg/algorithm/string/doc/design.xml
deleted file mode 100644
index e6db25d..0000000
--- a/third_party/boostorg/algorithm/string/doc/design.xml
+++ /dev/null
@@ -1,223 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.design" last-revision="$Date$">
-    <title>Design Topics</title>
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-
-    <section id="string_algo.string">
-        <title>String Representation</title>
-
-        <para>
-            As the name suggests, this library works mainly with strings. However, in the context of this library,
-            a string is not restricted to any particular implementation (like <code>std::basic_string</code>);
-            rather, it is a concept. This allows the algorithms in this library to be reused for any string type
-            that satisfies the given requirements.
-        </para>
-        <para>
-            <emphasis role="bold">Definition:</emphasis> A string is a 
-            <ulink url="../../libs/range/index.html">range</ulink> of characters accessible in a sequentially
-            ordered fashion. A character is any value type with "cheap" copying and assignment.
-        </para>
-        <para>
-            The first requirement of a string type is that it must be accessible using 
-            <ulink url="../../libs/range/index.html">Boost.Range</ulink>. This facility allows the elements
-            inside the string to be accessed in a uniform, iterator-based fashion. 
-            This is sufficient for our library.
-        </para>
-        <para>            
-            The second requirement defines the way in which the characters are stored in the string. Algorithms in 
-            this library work with the assumption that copying a character is cheaper than allocating extra 
-            storage to cache results. This is a natural assumption for common character types. Algorithms will 
-            work even if this requirement is not satisfied, although at the cost of performance degradation.
-        </para>
-        <para>
-            In addition, some algorithms have further requirements on the string type. In particular, it is required
-            that an algorithm can create a new string of the given type. In this case, it is required that
-            the type satisfies the sequence (Std &sect;23.1.1) requirements.
-        </para>
-        <para>
-            In the reference and also in the code, the requirement on the string type is designated by the name of
-            the template argument. <code>RangeT</code> means that the basic range requirements must hold.
-            <code>SequenceT</code> designates extended sequence requirements.
-        </para>
-    </section>
-    
-    <section id="string_algo.sequence_traits">
-        <title>Sequence Traits</title>
-
-        <para>
-            The major difference between <code>std::list</code> and <code>std::vector</code> is not in the interfaces
-            they provide, but rather in the inner details of the class and the way it performs 
-            various operations. The problem is that it is not possible to infer this difference from the 
-            class definitions without some special mechanism.
-            However, some algorithms can run significantly faster with knowledge of the properties
-            of a particular container.
-        </para>
-        <para>
-            Sequence traits allow one to specify additional properties of a sequence container (see Std.&sect;32.2).
-            These properties are then used by algorithms to select optimized handling for some operations.
-            The sequence traits are declared in the header 
-            <headername>boost/algorithm/string/sequence_traits.hpp</headername>.
-        </para>
-
-        <para>
-            In the table C denotes a container and c is an object of C.
-        </para>
-        <table>
-            <title>Sequence Traits</title>
-            <tgroup cols="2" align="left">
-                <thead>
-                    <row>   
-                        <entry>Trait</entry>
-                        <entry>Description</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry><classname>has_native_replace&lt;C&gt;</classname>::value</entry>
-                        <entry>Specifies that the sequence has an <code>std::string</code>-like replace method</entry>
-                    </row>
-                    <row>
-                        <entry><classname>has_stable_iterators&lt;C&gt;</classname>::value</entry>
-                        <entry>
-                            Specifies that the sequence has stable iterators. It means
-                            that operations like <code>insert</code>/<code>erase</code>/<code>replace</code> 
-                            do not invalidate iterators.
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><classname>has_const_time_insert&lt;C&gt;</classname>::value</entry>
-                        <entry>
-                            Specifies that the insert method of the sequence has 
-                            constant time complexity.
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><classname>has_const_time_erase&lt;C&gt;</classname>::value</entry>
-                        <entry>
-                            Specifies that the erase method of the sequence has constant time complexity.
-                        </entry>
-                    </row>
-                    </tbody>
-            </tgroup>
-        </table>
-        
-        <para>
-            The current implementation contains specializations for std::list&lt;T&gt; and
-            std::basic_string&lt;T&gt; from the standard library and SGI's std::rope&lt;T&gt; and std::slist&lt;T&gt;.
-        </para>
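-        <para>
-            For illustration only (a usage sketch, not part of the library documentation), the
-            <code>value</code> members of these traits can be checked at compile time:
-        </para>
-        <programlisting>
-#include &lt;list&gt;
-#include &lt;string&gt;
-#include &lt;boost/algorithm/string/sequence_traits.hpp&gt;
-#include &lt;boost/static_assert.hpp&gt;
-
-// std::list has stable iterators and constant-time insert/erase,
-// while std::basic_string provides a native replace() member
-BOOST_STATIC_ASSERT(( boost::algorithm::has_stable_iterators&lt; std::list&lt;char&gt; &gt;::value ));
-BOOST_STATIC_ASSERT(( boost::algorithm::has_native_replace&lt; std::string &gt;::value ));
-        </programlisting>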
-    </section>
-    <section id="string_algo.find">
-        <title>Find Algorithms</title>
-
-        <para>
-            Find algorithms have functionality similar to the <code>std::search()</code> algorithm. They provide a different 
-            interface which is more suitable for common string operations. 
-            Instead of returning just the start of the matching subsequence, they return a range, which is necessary 
-            when the length of the matching subsequence is not known beforehand. 
-            This feature also allows a partitioning of the input sequence into three 
-            parts: a prefix, a substring and a suffix. 
-        </para>
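-        <para>
-            For instance (an illustrative sketch, not taken from the library documentation),
-            <functionname>find_first()</functionname> returns an <classname>iterator_range</classname>
-            delimiting the match, from which the prefix and suffix follow directly:
-        </para>
-        <programlisting>
-#include &lt;string&gt;
-#include &lt;boost/algorithm/string/find.hpp&gt;
-
-void find_example()
-{
-    std::string str("hello world");
-    // range delimiting the first occurrence of "world" inside str
-    boost::iterator_range&lt;std::string::iterator&gt; match =
-        boost::algorithm::find_first( str, std::string("world") );
-    // [str.begin(), match.begin()) is the prefix, match is the matching
-    // substring, and [match.end(), str.end()) is the suffix
-}
-        </programlisting>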
-        <para>
-            Another difference is the addition of various searching methods besides find_first, including find_regex. 
-        </para>
-        <para>
-            In the library, find algorithms are implemented in terms of 
-            <link linkend="string_algo.finder_concept">Finders</link>. Finders are also used by other facilities 
-            (replace, split).
-            For convenience, there are also function wrappers for these finders to simplify find operations.
-        </para>
-        <para>
-            Currently the library contains only a naive implementation of the find algorithms, with complexity 
-            O(n * m) where n is the size of the input sequence and m is the size of the search sequence. 
-            There are algorithms with complexity O(n), but for smaller sequences their constant overhead is 
-            rather big. For m &lt;&lt; n (m smaller than n by orders of magnitude) the current implementation 
-            provides acceptable efficiency. 
-            Even the C++ standard defines the required complexity for the search algorithm as O(n * m). 
-            It is possible that a future version of the library will also contain algorithms with linear 
-            complexity as an option.
-        </para>
-    </section>
-    <section id="string_algo.replace">
-        <title>Replace Algorithms</title>
-
-        <para>
-            The implementation of replace algorithms follows the layered structure of the library. The 
-            lower layer implements generic substitution of a range in the input sequence. 
-            This layer takes a <link linkend="string_algo.finder_concept">Finder</link> object and a 
-            <link linkend="string_algo.formatter_concept">Formatter</link> object as input. These two 
-            functors define what to replace and what to replace it with. The upper layer functions 
-            are just wrappers around calls to the lower layer. Finders are shared with the find and split facilities. 
-        </para>
-        <para>
-            As usual, the implementation of the lower layer is designed to work with a generic sequence while
-            taking advantage of specific features if possible 
-            (by using <link linkend="string_algo.sequence_traits">Sequence traits</link>).
-        </para>         
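-        <para>
-            As a small usage sketch (illustrative only), the lower layer can be invoked directly through
-            <functionname>find_format()</functionname> with a finder and a formatter:
-        </para>
-        <programlisting>
-#include &lt;string&gt;
-#include &lt;boost/algorithm/string/find_format.hpp&gt;
-#include &lt;boost/algorithm/string/finder.hpp&gt;
-#include &lt;boost/algorithm/string/formatter.hpp&gt;
-
-void replace_example()
-{
-    std::string str("hello world");
-    std::string what("world");
-    std::string with("there");
-    // the finder selects what to replace, the formatter supplies the replacement
-    boost::algorithm::find_format(
-        str,
-        boost::algorithm::first_finder(what),
-        boost::algorithm::const_formatter(with) );
-    // str == "hello there"
-}
-        </programlisting>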
-    </section>
-    <section id="string_algo.split">
-        <title>Find Iterators &amp; Split Algorithms</title>
-
-        <para>
-            Find iterators are a logical extension of the <link linkend="string_algo.find">find facility</link>.
-            Instead of searching for one match, the whole input can be iteratively searched for multiple matches.
-            The result of the search is then used to partition the input. Which parts 
-            are returned as the result depends on the algorithm. They can be the matching parts (<classname>find_iterator</classname>) or the parts in
-            between (<classname>split_iterator</classname>). 
-        </para>
-        <para>
-            In addition, split algorithms like <functionname>find_all()</functionname> and <functionname>split()</functionname>
-            can simplify common operations. They use a find iterator to search the whole input and copy the 
-            matches they find into the supplied container.
-        </para>
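-        <para>
-            A brief usage sketch (illustrative only):
-        </para>
-        <programlisting>
-#include &lt;string&gt;
-#include &lt;vector&gt;
-#include &lt;boost/algorithm/string/split.hpp&gt;
-#include &lt;boost/algorithm/string/classification.hpp&gt;
-
-void split_example()
-{
-    std::string str("a,b,c");
-    std::vector&lt;std::string&gt; parts;
-    // copy the parts between the commas into the supplied container
-    boost::algorithm::split( parts, str, boost::algorithm::is_any_of(",") );
-    // parts now holds "a", "b", "c"
-}
-        </programlisting>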
-    </section>
-    <section id="string_algo.exception">
-        <title>Exception Safety</title>
-
-        <para>
-            The library requires that all operations on types used as template
-            or function arguments provide the <emphasis>basic exception-safety guarantee</emphasis>. 
-            In turn, all functions and algorithms in this library, except where stated
-            otherwise, will provide the <emphasis>basic exception-safety guarantee</emphasis>.
-            In other words:
-            The library maintains its invariants and does not leak resources in
-            the face of exceptions.  Some library operations give stronger
-            guarantees, which are documented on an individual basis.
-        </para>
-        
-        <para>
-            Some functions can provide the <emphasis>strong exception-safety guarantee</emphasis>.
-            That means that the following statements are true:
-            <itemizedlist>
-                <listitem>
-                    If an exception is thrown, there are no effects other than those
-                    of the function 
-                </listitem>
-                <listitem>
-                    If an exception is thrown other than by the function, there are no effects
-                </listitem>
-            </itemizedlist>
-            This guarantee can be provided under the condition that the operations 
-            on the types used as arguments to these functions either
-            provide the strong exception guarantee or do not alter the global state.
-         </para>
-        <para>
-            In the reference, under the term <emphasis>strong exception-safety guarantee</emphasis>, we mean the
-            guarantee as defined above.            
-        </para>
-        <para>
-            For more information about exception-safety topics, follow this 
-            <ulink url="http://www.boost.org/more/generic_exception_safety.html">link</ulink>
-        </para>        
-    </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/environment.xml b/third_party/boostorg/algorithm/string/doc/environment.xml
deleted file mode 100644
index 4fef5d9..0000000
--- a/third_party/boostorg/algorithm/string/doc/environment.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.env" last-revision="$Date$">
-    <title>Environment</title>
-    <section>
-        <title>Build</title>
-        <para>
-            The whole library is provided in headers. Regex variants of some algorithms, 
-            however, are dependent on the <libraryname>Boost.Regex</libraryname> library. All such algorithms are
-            separated in <headername>boost/algorithm/string_regex.hpp</headername>. 
-            If this header is used, the application must be linked with the <libraryname>Boost.Regex</libraryname> 
-            library. 
-        </para>
-    </section>
-
-    <section>
-        <title>Examples</title>
-        <para>
-            Examples showing the basic usage of the library can be found in the libs/algorithm/string/example
-            directory. There is a separate file for each part of the library. Please follow the Boost
-            build guidelines to build the examples using bjam. To successfully build the regex examples, 
-            the <libraryname>Boost.Regex</libraryname> library is required. 
-        </para>
-    </section>
-
-    <section>
-        <title>Tests</title>
-        <para>
-            A full set of test cases for the library is located in the libs/algorithm/string/test directory. 
-            The test cases can be executed using the boost build system. For the tests of regular 
-            expression variants of algorithms, the <libraryname>Boost.Regex</libraryname> library is required. 
-        </para>
-    </section>
-
-    <section>
-        <title>Portability</title>
-        <para>
-            The library has been successfully compiled and tested with the following compilers:
-            
-            <itemizedlist>
-                <listitem>Microsoft Visual C++ 7.0</listitem>
-                <listitem>Microsoft Visual C++ 7.1</listitem>
-                <listitem>GCC 3.2</listitem>
-                <listitem>GCC 3.3.1</listitem>
-            </itemizedlist>
-
-            See <ulink url="http://boost.sourceforge.net/regression-logs/">Boost regression tables</ulink>
-            for additional info for a particular compiler.
-        </para>
-        <para>
-            There are known limitations on platforms that do not support partial template specialization. 
-            The library depends on a correctly implemented <code>std::iterator_traits</code> class. 
-            If the standard library provided with the compiler is broken, the String Algorithm Library 
-            cannot function properly. Usually this implies that plain pointer iterators do not 
-            work with the library functions. 
-        </para>
-    </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/external_concepts.html b/third_party/boostorg/algorithm/string/doc/external_concepts.html
deleted file mode 100644
index af403be..0000000
--- a/third_party/boostorg/algorithm/string/doc/external_concepts.html
+++ /dev/null
@@ -1,40 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"><html><head><title> Concepts and External Concepts </title><meta http-equiv="Content-Type"content="text/html; charset=iso-8859-1"></head> <body><table  ><tr  ><td  ><img src="../../../../boost.png" width="100%" border="0"></td><td  ><h1  >Concepts and External Concepts</h1></td></tr></table><p  >Generic programming in C++ is characterized by the use of function and class templates where
-                the template parameter(s) must satisfy certain requirements. Often these
-                requirements are so important that we give them a name: we call
-                such a set of type requirements a <b>concept</b>. We say that a type <i>
-                conforms to a concept</i> or that it <i>is a model of a concept</i> if it
-                satisfies all of those requirements. The concept can be specified as a set
-                of member functions with well-defined semantics
-                and a set of nested typedefs with well-defined properties.</p><p  >Often it is much more flexible to provide free-standing functions and typedefs
-            which provide the exact same semantics (but a different syntax) as
-            specified
-            by the concept. This allows generic code to treat different types <i> as if
-            </i> they fulfilled the concept. In this case we say that the concept has
-            been <b> externalized </b> or that the new requirements constitute an <b>external
-            concept </b>. We say that a type <i> conforms to an external concept </i>
-            or that it <i> is a model of an external concept </i>. A concept may exist
-            without a corresponding external concept and conversely.</p><p  >Whenever a concept specifies a member function, the corresponding external
-            concept
-            must specify a free-standing function of the same name, same return type and
-            the same argument list, except that there is an extra first argument which must
-            be of the type (or a reference to that type) that is to fulfill the external
-            concept. If the corresponding member function has any cv-qualifiers, the
-            first argument must have the same cv-qualifiers. Whenever a concept
-            specifies a nested typedef, the corresponding external concept
-            specifies a <b>type-generator</b>, that is, a type with a nested typedef
-            named <code>type</code>. The type-generator has the same name as the nested typedef with
-            <code>_of</code> appended.
-            The converse relationship of an external concept and its corresponding concept
-            also holds.</p><p  ><b  ><i  >Example:</i></b></p><p  >A type <code>T</code> fulfills the FooConcept if it
-            has the following public members:</p><code> void T::foo( int ) const; <br>
-                 int T::bar(); <br> 
-               typedef <i>implementation defined </i> foo_type;</code><p  >The corresponding external concept is the ExternalFooConcept.</p><p  >A type <code>T</code> fulfills the ExternalFooConcept if these
-            free-standing functions and type-generators exist:</p><code>void foo( const T&, int ); <br>
-                int bar( T& ); <br>
-             foo_type_of< T >::type;</code> <br> <br><hr size="1" ><h3  >Literature</h3><ul  ><li  > <a href="http://www.boost.org/more/generic_programming.html#type_generator" target="_self" >Type Generators</a> </li><li  > <a href="http://www.boost.org/more/generic_programming.html#concept" target="_self" >Concepts</a> </li><li  > <a href="http://www.sgi.com/tech/stl/stl_introduction.html" target="_self" >Concepts and SGI STL</a> </li></ul><hr size="1" ><p  >&copy; Thorsten Ottosen 2003-2004 (nesotto_AT_cs.auc.dk).
-<br>Use, modification and distribution is subject to the Boost
- Software License, Version 1.0. (See accompanying file
- <code class="filename">LICENSE_1_0.txt</code> or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
-</br>
-</p>
- <!-- Copyright Dezide Aps 2003-2004 -->
diff --git a/third_party/boostorg/algorithm/string/doc/intro.xml b/third_party/boostorg/algorithm/string/doc/intro.xml
deleted file mode 100644
index 47a7e18..0000000
--- a/third_party/boostorg/algorithm/string/doc/intro.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.intro" last-revision="$Date$">
-    <title>Introduction</title>
-
-    <para>
-        The String Algorithm Library provides a generic implementation of
-        string-related algorithms which are missing in the STL. It is an extension
-        of the STL algorithms library and it includes trimming, case conversion, 
-        predicates and find/replace functions. All of them come in different variants, 
-        so it is easier to choose the best fit for a particular need.
-    </para>
-    <para>
-        The implementation is not restricted to working with a particular container 
-        (like <code>std::basic_string</code>); rather, it is as generic as
-        possible. This generality does not compromise performance, since the
-        algorithms use container-specific features whenever they bring a performance
-        gain.
-    </para>
-    <para>
-        <emphasis role="bold">
-            Important note: In this documentation we use the term <emphasis>string</emphasis> to 
-            designate a sequence of <emphasis>characters</emphasis> stored in an arbitrary container.
-            A <emphasis>string</emphasis> is not restricted to <code>std::basic_string</code> and 
-            <emphasis>character</emphasis> does not have to be <code>char</code> or <code>wchar_t</code>,
-            although these are the most common candidates.
-        </emphasis>
-        Consult the <link linkend="string_algo.design">design chapter</link> for the precise specification of
-        supported string types.
-    </para>
-    <para>      
-        The library interface functions and classes are defined in namespace <code>boost::algorithm</code>, and
-        they are lifted into namespace <code>boost</code> via using declarations.
-    </para>
-    <para>
-        The documentation is divided into several sections. For a quick start read the 
-        <link linkend="string_algo.usage">Usage</link> section followed by 
-        <link linkend="string_algo.quickref">Quick Reference</link>. 
-        <link linkend="string_algo.design">The Design Topics</link>,
-        <link linkend="string_algo.concept">Concepts</link> and <link linkend="string_algo.rationale">Rationale</link>
-        provide some explanation of the library design and structure and explain how it should be used.
-        See the <link linkend="string_algo.reference">Reference</link> for the complete list of provided utilities
-        and algorithms. Functions and classes in the reference are organized by the headers in which they are defined.
-        The reference contains links to the detailed description for every entity in the library.
-    </para>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/quickref.xml b/third_party/boostorg/algorithm/string/doc/quickref.xml
deleted file mode 100644
index fe267e7..0000000
--- a/third_party/boostorg/algorithm/string/doc/quickref.xml
+++ /dev/null
@@ -1,758 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.quickref" last-revision="$Date$">
-    <title>Quick Reference</title>
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-
-    <section>   
-        <title>Algorithms</title>
-        
-        <table>
-            <title>Case Conversion</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry><code>to_upper</code></entry>
-                        <entry>Convert a string to upper case</entry>
-                        <entry>
-                            <functionname>to_upper_copy()</functionname>
-                            <sbr/>
-                            <functionname>to_upper()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>to_lower</code></entry>
-                        <entry>Convert a string to lower case</entry>
-                        <entry>
-                            <functionname>to_lower_copy()</functionname>
-                            <sbr/>
-                            <functionname>to_lower()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Trimming</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry><code>trim_left</code></entry>
-                        <entry>Remove leading spaces from a string</entry>
-                        <entry>
-                            <functionname>trim_left_copy_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_left_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_left_copy()</functionname>
-                            <sbr/>
-                            <functionname>trim_left()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>trim_right</code></entry>
-                        <entry>Remove trailing spaces from a string</entry>
-                        <entry>
-                            <functionname>trim_right_copy_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_right_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_right_copy()</functionname>
-                            <sbr/>
-                            <functionname>trim_right()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>trim</code></entry>
-                        <entry>Remove leading and trailing spaces from a string</entry>
-                        <entry>
-                            <functionname>trim_copy_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_if()</functionname>
-                            <sbr/>
-                            <functionname>trim_copy()</functionname>
-                            <sbr/>
-                            <functionname>trim()</functionname>
-                        </entry>
-                    </row>
- 
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Predicates</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry><code>starts_with</code></entry>
-                        <entry>Check if a string is a prefix of another one</entry>
-                        <entry>
-                            <functionname>starts_with()</functionname>
-                            <sbr/>
-                            <functionname>istarts_with()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>ends_with</code></entry>
-                        <entry>Check if a string is a suffix of another one</entry>
-                        <entry>
-                            <functionname>ends_with()</functionname>
-                            <sbr/>
-                            <functionname>iends_with()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>contains</code></entry>
-                        <entry>Check if a string is contained in another one</entry>
-                        <entry>
-                            <functionname>contains()</functionname>
-                            <sbr/>
-                            <functionname>icontains()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>equals</code></entry>
-                        <entry>Check if two strings are equal</entry>
-                        <entry>
-                            <functionname>equals()</functionname>
-                            <sbr/>
-                            <functionname>iequals()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry><code>lexicographical_compare</code></entry>
-                        <entry>Check if a string is lexicographically less than another one</entry>
-                        <entry>
-                            <functionname>lexicographical_compare()</functionname>
-                            <sbr/>
-                            <functionname>ilexicographical_compare()</functionname>
-                        </entry>
-                    </row>
-
-                    <row>
-                        <entry><code>all</code></entry>
-                        <entry>Check if all elements of a string satisfy the given predicate</entry>
-                        <entry>
-                            <functionname>all()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Find algorithms</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>find_first</entry>
-                        <entry>Find the first occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>find_first()</functionname>
-                            <sbr/>
-                            <functionname>ifind_first()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>find_last</entry>
-                        <entry>Find the last occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>find_last()</functionname>
-                            <sbr/>
-                            <functionname>ifind_last()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>find_nth</entry>
-                        <entry>Find the nth (zero-indexed) occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>find_nth()</functionname>
-                            <sbr/>
-                            <functionname>ifind_nth()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>find_head</entry>
-                        <entry>Retrieve the head of a string</entry>
-                        <entry>
-                            <functionname>find_head()</functionname>
-                        </entry>
-                    </row>                  
-                    <row>
-                        <entry>find_tail</entry>
-                        <entry>Retrieve the tail of a string</entry>
-                        <entry>
-                            <functionname>find_tail()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>find_token</entry>
-                        <entry>Find first matching token in the string</entry>
-                        <entry>
-                            <functionname>find_token()</functionname>
-                        </entry>
-                    </row>                      
-                    <row>
-                        <entry>find_regex</entry>
-                        <entry>Use the regular expression to search the string</entry>
-                        <entry>
-                            <functionname>find_regex()</functionname>
-                        </entry>
-                    </row>                      
-                    <row>
-                        <entry>find</entry>
-                        <entry>Generic find algorithm</entry>
-                        <entry>
-                            <functionname>find()</functionname>
-                        </entry>
-                    </row>                      
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Erase/Replace</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>replace/erase_first</entry>
-                        <entry>Replace/Erase the first occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>replace_first()</functionname>
-                            <sbr/>
-                            <functionname>replace_first_copy()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_first()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_first_copy()</functionname>
-                            <sbr/>                      
-                            <functionname>erase_first()</functionname>
-                            <sbr/>
-                            <functionname>erase_first_copy()</functionname>
-                            <sbr/>
-                            <functionname>ierase_first()</functionname>
-                            <sbr/>
-                            <functionname>ierase_first_copy()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_last</entry>
-                        <entry>Replace/Erase the last occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>replace_last()</functionname>
-                            <sbr/>
-                            <functionname>replace_last_copy()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_last()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_last_copy()</functionname>
-                            <sbr/>                      
-                            <functionname>erase_last()</functionname>
-                            <sbr/>
-                            <functionname>erase_last_copy()</functionname>
-                            <sbr/>
-                            <functionname>ierase_last()</functionname>
-                            <sbr/>
-                            <functionname>ierase_last_copy()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_nth</entry>
-                        <entry>Replace/Erase the nth (zero-indexed) occurrence of a string in the input</entry>
-                        <entry>
-                            <functionname>replace_nth()</functionname>
-                            <sbr/>
-                            <functionname>replace_nth_copy()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_nth()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_nth_copy()</functionname>
-                            <sbr/>                      
-                            <functionname>erase_nth()</functionname>
-                            <sbr/>
-                            <functionname>erase_nth_copy()</functionname>
-                            <sbr/>
-                            <functionname>ierase_nth()</functionname>
-                            <sbr/>
-                            <functionname>ierase_nth_copy()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_all</entry>
-                        <entry>Replace/Erase all occurrences of a string in the input</entry>
-                        <entry>
-                            <functionname>replace_all()</functionname>
-                            <sbr/>
-                            <functionname>replace_all_copy()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_all()</functionname>
-                            <sbr/>
-                            <functionname>ireplace_all_copy()</functionname>
-                            <sbr/>                      
-                            <functionname>erase_all()</functionname>
-                            <sbr/>
-                            <functionname>erase_all_copy()</functionname>
-                            <sbr/>
-                            <functionname>ierase_all()</functionname>
-                            <sbr/>
-                            <functionname>ierase_all_copy()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_head</entry>
-                        <entry>Replace/Erase the head of the input</entry>
-                        <entry>
-                            <functionname>replace_head()</functionname>
-                            <sbr/>
-                            <functionname>replace_head_copy()</functionname>
-                            <sbr/>
-                            <functionname>erase_head()</functionname>
-                            <sbr/>
-                            <functionname>erase_head_copy()</functionname>
-                            <sbr/>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_tail</entry>
-                        <entry>Replace/Erase the tail of the input</entry>
-                        <entry>
-                            <functionname>replace_tail()</functionname>
-                            <sbr/>
-                            <functionname>replace_tail_copy()</functionname>
-                            <sbr/>
-                            <functionname>erase_tail()</functionname>
-                            <sbr/>
-                            <functionname>erase_tail_copy()</functionname>
-                            <sbr/>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_regex</entry>
-                        <entry>Replace/Erase a substring matching the given regular expression</entry>
-                        <entry>
-                            <functionname>replace_regex()</functionname>
-                            <sbr/>
-                            <functionname>replace_regex_copy()</functionname>
-                            <sbr/>
-                            <functionname>erase_regex()</functionname>
-                            <sbr/>
-                            <functionname>erase_regex_copy()</functionname>
-                            <sbr/>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>replace/erase_regex_all</entry>
-                        <entry>Replace/Erase all substrings matching the given regular expression</entry>
-                        <entry>
-                            <functionname>replace_all_regex()</functionname>
-                            <sbr/>
-                            <functionname>replace_all_regex_copy()</functionname>
-                            <sbr/>
-                            <functionname>erase_all_regex()</functionname>
-                            <sbr/>
-                            <functionname>erase_all_regex_copy()</functionname>
-                            <sbr/>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>find_format</entry>
-                        <entry>Generic replace algorithm</entry>
-                        <entry>
-                            <functionname>find_format()</functionname>
-                            <sbr/>
-                            <functionname>find_format_copy()</functionname>
-                            <sbr/>
-                            <functionname>find_format_all()</functionname>
-                            <sbr/>
-                            <functionname>find_format_all_copy()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Split</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>find_all</entry>
-                        <entry>Find/Extract all matching substrings in the input</entry>
-                        <entry>
-                            <functionname>find_all()</functionname>
-                            <sbr/>
-                            <functionname>ifind_all()</functionname>
-                            <sbr/>
-                            <functionname>find_all_regex()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>split</entry>
-                        <entry>Split input into parts</entry>
-                        <entry>
-                            <functionname>split()</functionname>
-                            <sbr/>
-                            <functionname>split_regex()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>iter_find</entry>
-                        <entry>Iteratively apply the finder to the input to find all matching substrings</entry>
-                        <entry>
-                            <functionname>iter_find()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>iter_split</entry>
-                        <entry>Use the finder to find matching substrings in the input and use them as separators to split the input into parts</entry>
-                        <entry>
-                            <functionname>iter_split()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-        <table>
-            <title>Join</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Algorithm name</entry>
-                        <entry>Description</entry>
-                        <entry>Functions</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>join</entry>
-                        <entry>Join all elements in a container into a single string</entry>
-                        <entry>
-                            <functionname>join()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>join_if</entry>
-                        <entry>Join all elements in a container that satisfy the condition into a single string</entry>
-                        <entry>
-                            <functionname>join_if()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-    </section>
-    <section>
-        <title>Finders and Formatters</title>
-        
-        <table>
-            <title>Finders</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Finder</entry>
-                        <entry>Description</entry>
-                        <entry>Generators</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>first_finder</entry>
-                        <entry>Search for the first match of the string in an input</entry>
-                        <entry>
-                            <functionname>first_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>last_finder</entry>
-                        <entry>Search for the last match of the string in an input</entry>
-                        <entry>
-                            <functionname>last_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>nth_finder</entry>
-                        <entry>Search for the nth (zero-indexed) match of the string in an input</entry>
-                        <entry>
-                            <functionname>nth_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>head_finder</entry>
-                        <entry>Retrieve the head of an input</entry>
-                        <entry>
-                            <functionname>head_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>tail_finder</entry>
-                        <entry>Retrieve the tail of an input</entry>
-                        <entry>
-                            <functionname>tail_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>token_finder</entry>
-                        <entry>Search for a matching token in an input</entry>
-                        <entry>
-                            <functionname>token_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>range_finder</entry>
-                        <entry>Do not search; always return the given range</entry>
-                        <entry>
-                            <functionname>range_finder()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>regex_finder</entry>
-                        <entry>Search for a substring matching the given regex</entry>
-                        <entry>
-                            <functionname>regex_finder()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-
-        <table>
-            <title>Formatters</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Formatter</entry>
-                        <entry>Description</entry>
-                        <entry>Generators</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>const_formatter</entry>
-                        <entry>Constant formatter. Always return the specified string</entry>
-                        <entry>
-                            <functionname>const_formatter()</functionname>
-                        </entry>                                                
-                    </row>
-                    <row>
-                        <entry>identity_formatter</entry>
-                        <entry>Identity formatter. Return the unmodified input</entry>
-                        <entry>
-                            <functionname>identity_formatter()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>empty_formatter</entry>
-                        <entry>Null formatter. Always return an empty string</entry>
-                        <entry>
-                            <functionname>empty_formatter()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>regex_formatter</entry>
-                        <entry>Regex formatter. Format a regex match using the specification in the format string</entry>
-                        <entry>
-                            <functionname>regex_formatter()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-    </section>
-    <section>
-        <title>Iterators</title>
-        
-        <table>
-            <title>Find Iterators</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Iterator name</entry>
-                        <entry>Description</entry>
-                        <entry>Iterator class</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>find_iterator</entry>
-                        <entry>Iterates through matching substrings in the input</entry>
-                        <entry>
-                            <classname>find_iterator</classname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>split_iterator</entry>
-                        <entry>Iterates through gaps between matching substrings in the input</entry>
-                        <entry>
-                            <classname>split_iterator</classname>
-                        </entry>
-                    </row>              
-                </tbody>
-            </tgroup>
-        </table>
-    </section>
-    
-    <section>
-        <title>Classification</title>
-        
-        <table>
-            <title>Predicates</title>
-            <tgroup cols="3" align="left">
-                <thead>
-                    <row>
-                        <entry>Predicate name</entry>
-                        <entry>Description</entry>
-                        <entry>Generator</entry>
-                    </row>
-                </thead>
-                <tbody>
-                    <row>
-                        <entry>is_classified</entry>
-                        <entry>Generic <code>ctype</code> mask based classification</entry>
-                        <entry>
-                            <functionname>is_classified()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_space</entry>
-                        <entry>Recognize spaces</entry>
-                        <entry>
-                            <functionname>is_space()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_alnum</entry>
-                        <entry>Recognize alphanumeric characters</entry>
-                        <entry>
-                            <functionname>is_alnum()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_alpha</entry>
-                        <entry>Recognize letters</entry>
-                        <entry>
-                            <functionname>is_alpha()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_cntrl</entry>
-                        <entry>Recognize control characters</entry>
-                        <entry>
-                            <functionname>is_cntrl()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_digit</entry>
-                        <entry>Recognize decimal digits</entry>
-                        <entry>
-                            <functionname>is_digit()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_graph</entry>
-                        <entry>Recognize graphical characters</entry>
-                        <entry>
-                            <functionname>is_graph()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_lower</entry>
-                        <entry>Recognize lower case characters</entry>
-                        <entry>
-                            <functionname>is_lower()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_print</entry>
-                        <entry>Recognize printable characters</entry>
-                        <entry>
-                            <functionname>is_print()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_punct</entry>
-                        <entry>Recognize punctuation characters</entry>
-                        <entry>
-                            <functionname>is_punct()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_upper</entry>
-                        <entry>Recognize uppercase characters</entry>
-                        <entry>
-                            <functionname>is_upper()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_xdigit</entry>
-                        <entry>Recognize hexadecimal digits</entry>
-                        <entry>
-                            <functionname>is_xdigit()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_any_of</entry>
-                        <entry>Recognize any of a sequence of characters</entry>
-                        <entry>
-                            <functionname>is_any_of()</functionname>
-                        </entry>
-                    </row>
-                    <row>
-                        <entry>is_from_range</entry>
-                        <entry>Recognize characters inside a min..max range</entry>
-                        <entry>
-                            <functionname>is_from_range()</functionname>
-                        </entry>
-                    </row>
-                </tbody>
-            </tgroup>
-        </table>
-    </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/rationale.xml b/third_party/boostorg/algorithm/string/doc/rationale.xml
deleted file mode 100644
index e2d1ab1..0000000
--- a/third_party/boostorg/algorithm/string/doc/rationale.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.rationale" last-revision="$Date$">
-    <title>Rationale</title>
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-    
-    <section id="string_algo.locale">
-        <title>Locales</title>
-
-        <para>
-            Locales have a very close relation to string processing. They contain information about
-            the character sets and are used, for example, to change the case of characters and 
-            to classify the characters. 
-        </para>
-        <para>
-            C++ allows working with multiple different locale instances at once. If an algorithm
-            manipulates some data in a way that requires the use of locales, there must be a way
-            to specify them. However, one locale instance is sufficient for most applications,
-            and it would be very tedious for a user to specify which locale to use at every place 
-            where one is needed. 
-        </para> 
-        <para>
-            Fortunately, the C++ standard allows specifying the <emphasis>global</emphasis> locale (using the static member
-            function <code>std::locale::global()</code>). When an
-            <code>std::locale</code> object is instantiated without explicit information, the instance will 
-            be initialized with the <emphasis>global</emphasis> locale. This implies that if an algorithm needs a locale,
-            it should have an <code>std::locale</code> parameter defaulting to <code>std::locale()</code>.
-            If a user needs to specify a locale explicitly, she can do so. Otherwise the <emphasis>global</emphasis>
-            locale is used.
-        </para>
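-        <para>
-            For illustration, a minimal sketch of both styles, assuming the library's case conversion
-            algorithms (<code>to_upper()</code>/<code>to_lower()</code>), which take such a defaulted
-            <code>std::locale</code> parameter:
-        </para>
-        <programlisting>
-    #include &lt;boost/algorithm/string/case_conv.hpp&gt;
-    #include &lt;locale&gt;
-    #include &lt;string&gt;
-
-    std::string str1("hello");
-    boost::algorithm::to_upper(str1);                          // uses the global locale
-    boost::algorithm::to_upper(str1, std::locale::classic());  // uses an explicitly supplied locale
-        </programlisting>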
-    </section>
-    <section id="string_algo.regex">
-        <title>Regular Expressions</title>
-
-        <para>
-            Regular expressions are an essential part of text processing. For this reason, the library 
-            also provides regex variants of some algorithms. The library does not attempt to replace
-            <libraryname>Boost.Regex</libraryname>; it merely wraps its functionality in a new interface.
-            As part of this library, the regex algorithms integrate smoothly with the other components, which 
-            brings additional value.
-        </para>
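-        <para>
-            As a brief sketch (an added illustration, not an excerpt from the reference), a regex variant
-            such as <functionname>replace_all_regex()</functionname> is called just like the plain
-            algorithms; linking against <libraryname>Boost.Regex</libraryname> is required:
-        </para>
-        <programlisting>
-    #include &lt;boost/algorithm/string_regex.hpp&gt;
-    #include &lt;string&gt;
-
-    // ...
-
-    std::string str1("file1.txt file22.txt");
-    // replace each run of digits with "#"
-    boost::algorithm::replace_all_regex(str1, boost::regex("\\d+"), std::string("#"));
-    // str1 == "file#.txt file#.txt"
-        </programlisting>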
-    </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/release_notes.xml b/third_party/boostorg/algorithm/string/doc/release_notes.xml
deleted file mode 100644
index dd412d5..0000000
--- a/third_party/boostorg/algorithm/string/doc/release_notes.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<section id="string_algo.release_notes" last-revision="$Date$">
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-
-    <title>Release Notes</title>
-
-    <itemizedlist>
-        <listitem>
-            <para><emphasis role="bold">1.32</emphasis></para>
-            <para>Initial release in Boost</para>
-        </listitem>
-        <listitem>
-            <para><emphasis role="bold">1.33</emphasis></para>
-            <para>Internal version of collection traits removed, library adapted to Boost.Range</para>
-        </listitem>
-        <listitem>
-            <para><emphasis role="bold">1.34</emphasis></para>
-            <itemizedlist>
-                <listitem>
-                    <functionname>lexicographical_compare()</functionname>
-                </listitem>
-                <listitem>
-                    <functionname>join()</functionname> and <functionname>join_if()</functionname> 
-                </listitem>
-                <listitem>
-                    New comparison predicates <code>is_less</code>, <code>is_not_greater</code>
-                </listitem>
-                <listitem>
-                     Support for negative indexes (as in Perl) in various algorithms
-                     (<code>*_head/tail</code>, <code>*_nth</code>).
-                </listitem>                                      
-            </itemizedlist>
-        </listitem>
-    </itemizedlist>
-</section>
diff --git a/third_party/boostorg/algorithm/string/doc/string_algo.xml b/third_party/boostorg/algorithm/string/doc/string_algo.xml
deleted file mode 100644
index bbf1fe0..0000000
--- a/third_party/boostorg/algorithm/string/doc/string_algo.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-<library name="String Algorithms" dirname="algorithm/string" xmlns:xi="http://www.w3.org/2001/XInclude" 
-    id="string_algo" last-revision="$Date$">
-    <libraryinfo>
-        <author>
-            <firstname>Pavol</firstname>
-            <surname>Droba</surname>
-        </author>
-
-        <copyright>
-            <year>2002</year>
-            <year>2003</year>
-            <year>2004</year>
-            <holder>Pavol Droba</holder>
-        </copyright>
-
-        <legalnotice>
-            <para>Use, modification and distribution is subject to the Boost
-                Software License, Version 1.0. (See accompanying file
-                <filename>LICENSE_1_0.txt</filename> or copy at <ulink
-                    url="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</ulink>)
-            </para>
-        </legalnotice>
-
-        <librarypurpose>
-            A set of generic string-related algorithms and utilities
-        </librarypurpose> 
-        <librarycategory name="category:algorithms"/>
-        <librarycategory name="category:string-text"/>
-    </libraryinfo>
-
-    <title>Boost String Algorithms Library</title>  
-    <xi:include href="intro.xml"/>
-    <xi:include href="release_notes.xml"/>
-    <xi:include href="usage.xml"/>
-    <xi:include href="quickref.xml"/>
-    <xi:include href="design.xml"/>
-    <xi:include href="concept.xml"/>
-    <xi:include href="autodoc.xml"/>
-    <xi:include href="rationale.xml"/>
-    <xi:include href="environment.xml"/>
-    <xi:include href="credits.xml"/>
-</library>
-
diff --git a/third_party/boostorg/algorithm/string/doc/usage.xml b/third_party/boostorg/algorithm/string/doc/usage.xml
deleted file mode 100644
index 56fd3ac..0000000
--- a/third_party/boostorg/algorithm/string/doc/usage.xml
+++ /dev/null
@@ -1,366 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-"http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-
-
-<!-- Copyright (c) 2002-2006 Pavol Droba.
-     Subject to the Boost Software License, Version 1.0. 
-     (See accompanying file LICENSE_1_0.txt or  http://www.boost.org/LICENSE_1_0.txt)
--->
-
-
-<section id="string_algo.usage" last-revision="$Date$">
-    <title>Usage</title>
-
-    <using-namespace name="boost"/>
-    <using-namespace name="boost::algorithm"/>
-
-
-    <section>
-        <title>First Example</title>
-        
-        <para>
-            Using the algorithms is straightforward. Let us have a look at the first example:
-        </para>
-        <programlisting>
-    #include &lt;boost/algorithm/string.hpp&gt;
-    using namespace std;
-    using namespace boost;
-    
-    // ...
-
-    string str1(" hello world! ");
-    to_upper(str1);  // str1 == " HELLO WORLD! "
-    trim(str1);      // str1 == "HELLO WORLD!"
-
-    string str2=
-       to_lower_copy(
-          ireplace_first_copy(
-             str1,"hello","goodbye")); // str2 == "goodbye world!"
-        </programlisting>
-        <para>
-            This example converts str1 to upper case and trims spaces from the start and the end
-            of the string. str2 is then created as a lower-cased copy of str1 with "hello" (matched
-            case-insensitively) replaced by "goodbye". The example demonstrates several important
-            concepts used in the library:
-        </para>
-        <itemizedlist>
-            <listitem>
-                <para><emphasis role="bold">Container parameters:</emphasis>
-                    Unlike in the STL algorithms, parameters are not specified only in the form
-                    of iterators. The STL convention allows for great flexibility,
-                    but it has several limitations. It is not possible to <emphasis>stack</emphasis> algorithms together, 
-                    because a container is passed as two separate parameters. Therefore it is not possible to use 
-                    a return value from another algorithm. It is considerably easier to write
-                    <code>to_lower(str1)</code>, than <code>to_lower(str1.begin(), str1.end())</code>.
-                </para>
-                <para>
-                    The magic of <ulink url="../../libs/range/index.html">Boost.Range</ulink> 
-                    provides a uniform way of handling different string types. 
-                    If there is a need to pass a pair of iterators, 
-                    <ulink url="../../libs/range/doc/html/range/reference/utilities/iterator_range.html"><code>boost::iterator_range</code></ulink>
-                    can be used to package iterators into a structure with a compatible interface.
-                </para>
-            </listitem>
-            <listitem>
-                <para><emphasis role="bold">Copy vs. Mutable:</emphasis>
-                    Many algorithms in the library are performing a transformation of the input. 
-                    The transformation can be done in-place, mutating the input sequence, or a copy 
-                    of the transformed input can be created, leaving the input intact. None of 
-                    these possibilities is superior to the other one and both have different 
-                    advantages and disadvantages. For this reason, both are provided with the library. 
-                </para>
-            </listitem>
-            <listitem>
-                <para><emphasis role="bold">Algorithm stacking:</emphasis>
-                    Copy versions return the transformed input as a result, thus allowing simple chaining of
-                    transformations within one expression (i.e. one can write <code>trim_copy(to_upper_copy(s))</code>). 
-                    Mutable versions return <code>void</code>, to avoid misuse.
-                </para>
-            </listitem>
-            <listitem>
-                <para><emphasis role="bold">Naming:</emphasis>
-                    Naming follows the conventions from the Standard C++ Library. If there is a 
-                    copy and a mutable version of the same algorithm, the mutable version has no suffix 
-                    and the copy version has the suffix <emphasis>_copy</emphasis>. 
-                    Some algorithms have the prefix <emphasis>i</emphasis> 
-                    (e.g. <functionname>ifind_first()</functionname>).
-                    This prefix identifies that the algorithm works in a case-insensitive manner.
-                </para>
-            </listitem>
-        </itemizedlist>
-        <para>
-            To use the library, include the <headername>boost/algorithm/string.hpp</headername> header. 
-            If the regex related functions are needed, include the 
-            <headername>boost/algorithm/string_regex.hpp</headername> header.
-        </para>
-    </section>
-    <section>
-        <title>Case conversion</title>
-        
-        <para>
-            The STL has a nice way of converting character case. Unfortunately, it works only
-            for a single character, whereas we want to convert a whole string:
-        </para>
-        <programlisting>
-    string str1("HeLlO WoRld!");
-    to_upper(str1); // str1=="HELLO WORLD!"
-        </programlisting>
-        <para>
-            <functionname>to_upper()</functionname> and <functionname>to_lower()</functionname> convert the case of 
-            characters in a string using a specified locale.
-        </para>
-        <para>
-            For more information see the reference for <headername>boost/algorithm/string/case_conv.hpp</headername>.
-        </para>
-    </section>
-    <section>
-        <title>Predicates and Classification</title>
-        <para>
-            A part of the library deals with string related predicates. Consider this example:
-        </para>
-        <programlisting>
-    bool is_executable( string&amp; filename )
-    {
-        return 
-            iends_with(filename, ".exe") ||
-            iends_with(filename, ".com");
-    }
-
-    // ...
-    string str1("command.com");
-    cout 
-        &lt;&lt; str1
-        &lt;&lt; (is_executable(str1)? " is": " is not") 
-        &lt;&lt; " an executable" 
-        &lt;&lt; endl; // prints "command.com is an executable"
-    
-    //..
-    char text1[]="hello";
-    cout 
-        &lt;&lt; text1 
-        &lt;&lt; (all( text1, is_lower() )? " is": " is not")
-        &lt;&lt; " written in the lower case" 
-        &lt;&lt; endl; // prints "hello is written in the lower case"
-        </programlisting>
-        <para>
-            The predicates determine whether a substring is contained in the input string
-            under various conditions. The conditions are: a string starts with the substring, 
-            ends with the substring, 
-            simply contains the substring, or both strings are equal. See the reference for 
-            <headername>boost/algorithm/string/predicate.hpp</headername> for more details. 
-        </para>
-        <para>  
-            Note that if we had used "hello world" as the input to the test, it would have
-            output "hello world is not written in the lower case" because the space in the
-            input string is not a lower case letter.
-        </para>
-        <para>  
-            In addition, the algorithm <functionname>all()</functionname> checks whether
-            all elements of a container satisfy a condition specified by a predicate. 
-            This predicate can be any unary predicate, but the library provides a number of 
-            useful string-related predicates and combinators ready for use.
-            These are located in the <headername>boost/algorithm/string/classification.hpp</headername> header.
-            Classification predicates can be combined using logical combinators to form
-            more complex expressions, for example <code>is_from_range('a','z') || is_digit()</code>.
-        </para>
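-        <para>
-            As a small sketch (added for illustration, and assuming the using-directives from the first
-            example), such a combined predicate can be passed directly to <functionname>all()</functionname>:
-        </para>
-        <programlisting>
-    // accept only lower case letters and digits
-    string id1("user42");
-    bool valid=all( id1, is_from_range('a','z') || is_digit() );  // valid == true
-        </programlisting>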
-    </section>
-    <section>
-        <title>Trimming</title>
-        
-        <para>
-            When parsing the input from a user, strings often have unwanted leading or trailing 
-            characters. To get rid of them, we need trim functions:
-        </para>
-        <programlisting>
-    string str1="     hello world!     ";
-    string str2=trim_left_copy(str1);   // str2 == "hello world!     "
-    string str3=trim_right_copy(str1);  // str3 == "     hello world!"
-    trim(str1);                         // str1 == "hello world!"
-
-    string phone="00423333444";
-    // remove leading 0 from the phone number
-    trim_left_if(phone,is_any_of("0")); // phone == "423333444"
-        </programlisting>
-        <para>
-            It is possible to trim the spaces on the right, on the left, or on both sides of a string.
-            For those cases where something other than blank space needs to be removed, there
-            are <emphasis>_if</emphasis> variants. Using these, a user can specify a functor which will 
-            select the <emphasis>space</emphasis> to be removed. It is possible to use classification 
-            predicates like <functionname>is_digit()</functionname>, mentioned in the previous paragraph.
-            See the reference for <headername>boost/algorithm/string/trim.hpp</headername>.
-        </para>
-    </section>
-    <section>
-        <title>Find algorithms</title>
-        
-        <para>
-            The library contains a set of find algorithms. Here is an example:
-        </para>
-        <programlisting>
-    char text[]="hello dolly!";
-    iterator_range&lt;char*&gt; result=find_last(text,"ll");
-
-    transform( result.begin(), result.end(), result.begin(), bind2nd(plus&lt;char&gt;(), 1) );
-    // text = "hello dommy!"            
-
-    to_upper(result); // text == "hello doMMy!"
-
-    // iterator_range is convertible to bool
-    if(find_first(text, "dolly"))
-    {
-        cout &lt;&lt; "Dolly is there" &lt;&lt; endl;
-    }
-        </programlisting>
-        <para>
-            We have used <functionname>find_last()</functionname> to search <code>text</code> for "ll".
-            The result is given as a <ulink url="../../libs/range/doc/html/range/reference/utilities/iterator_range.html"><code>boost::iterator_range</code></ulink>.
-            This range delimits the
-            part of the input which satisfies the find criteria. In our example it is the last occurrence of "ll".
-
-            As we can see, the input of the <functionname>find_last()</functionname> algorithm can also be a
-            char[], because this type is supported by
-            <ulink url="../../libs/range/index.html">Boost.Range</ulink>.
-
-            The following lines transform the result. Notice that
-            <ulink url="../../libs/range/doc/html/range/reference/utilities/iterator_range.html"><code>boost::iterator_range</code></ulink> has the familiar
-            <code>begin()</code> and <code>end()</code> methods, so it can be used like any other STL container.
-            Also, it is convertible to bool, so it is easy to use find algorithms for simple containment checking.
-        </para>
-        <para>
-            Find algorithms are located in <headername>boost/algorithm/string/find.hpp</headername>.
-        </para>
-    </section>
-    <section>
-        <title>Replace Algorithms</title>
-        <para>
-            Find algorithms can be used to search for a specific part of a string. Replace goes one step
-            further. After a matching part is found, it is substituted with something else. The substitution is computed
-            from the original, using some transformation.
-        </para>
-        <programlisting>
-    string str1="Hello  Dolly,   Hello World!"
-    replace_first(str1, "Dolly", "Jane");      // str1 == "Hello  Jane,   Hello World!"
-    replace_last(str1, "Hello", "Goodbye");    // str1 == "Hello  Jane,   Goodbye World!"
-    erase_all(str1, " ");                      // str1 == "HelloJane,GoodbyeWorld!"
-    erase_head(str1, 5);                       // str1 == "Jane,GoodbyeWorld!"
-        </programlisting>
-        <para>
-            For the complete list of replace and erase functions see the 
-            <link linkend="string_algo.reference">reference</link>.
-            There are many predefined functions for common usage; however, the library also allows you to
-            define a custom <code>replace()</code> that suits a specific need. There is a generic <functionname>find_format()</functionname>
-            function which takes two parameters.
-            The first one is a <link linkend="string_algo.finder_concept">Finder</link> object, the second one is
-            a <link linkend="string_algo.formatter_concept">Formatter</link> object.
-            The Finder object is a functor which performs the search for the part to be replaced. The Formatter object
-            takes the result of the Finder (usually a reference to the found substring) and creates a
-            substitute for it. The replace algorithm puts these two together and makes the desired substitution.
-        </para>
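-        <para>
-            As a minimal sketch of this Finder/Formatter pairing (the input string is made up
-            for illustration), <functionname>find_format_all()</functionname> together with
-            <functionname>first_finder()</functionname> and <functionname>const_formatter()</functionname>
-            behaves like a simple replace-all:
-        </para>
-        <programlisting>
-    string str2="one--two--three";
-    find_format_all(
-        str2,
-        first_finder("--"),        // Finder: locates the next "--"
-        const_formatter("+") );    // Formatter: always produces "+"
-    // str2 == "one+two+three"
-        </programlisting>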
-        <para>
-            Check <headername>boost/algorithm/string/replace.hpp</headername>, <headername>boost/algorithm/string/erase.hpp</headername> and
-            <headername>boost/algorithm/string/find_format.hpp</headername> for reference.
-        </para>
-    </section>
-    <section>
-        <title>Find Iterator</title>
-
-        <para>
-            An extension to the find algorithms is the Find Iterator. Instead of searching for just one part of a string,
-            the find iterator allows us to iterate over the substrings matching the specified criteria.
-            This facility uses a <link linkend="string_algo.finder_concept">Finder</link> to incrementally
-            search the string.
-            Dereferencing a find iterator yields a <ulink url="../../libs/range/doc/html/range/reference/utilities/iterator_range.html"><code>boost::iterator_range</code></ulink>
-            object that delimits the current match.
-        </para>
-        <para>
-            Two iterators are provided: <classname>find_iterator</classname> and
-            <classname>split_iterator</classname>. The former iterates over substrings that are found using the specified
-            Finder. The latter iterates over the gaps between these substrings.
-        </para>
-        <programlisting>
-    string str1("abc-*-ABC-*-aBc");
-    // Find all 'abc' substrings (ignoring the case)
-    // Create a find_iterator
-    typedef find_iterator&lt;string::iterator&gt; string_find_iterator;
-    for(string_find_iterator It=
-            make_find_iterator(str1, first_finder("abc", is_iequal()));
-        It!=string_find_iterator();
-        ++It)
-    {
-        cout &lt;&lt; copy_range&lt;std::string&gt;(*It) &lt;&lt; endl;
-    }
-
-    // Output will be:
-    // abc
-    // ABC
-    // aBc
-    
-    typedef split_iterator&lt;string::iterator&gt; string_split_iterator;
-    for(string_split_iterator It=
-        make_split_iterator(str1, first_finder("-*-", is_iequal()));
-        It!=string_split_iterator();
-        ++It)
-    {
-        cout &lt;&lt; copy_range&lt;std::string&gt;(*It) &lt;&lt; endl;
-    }
-
-    // Output will be:
-    // abc
-    // ABC
-    // aBc
-        </programlisting>
-        <para>
-            Note that the find iterators have only one template parameter: the base iterator type.
-            The Finder is specified at runtime. This allows us to typedef a find iterator for
-            common string types and reuse it. Additionally, the make_*_iterator functions help
-            to construct a find iterator for a particular range.
-        </para>
-        <para>
-            See the reference in <headername>boost/algorithm/string/find_iterator.hpp</headername>.
-        </para>
-    </section>
-    <section>
-        <title>Split</title>
-
-        <para>
-            Split algorithms are an extension to the find iterator for one common usage scenario.
-            These algorithms use a find iterator and store all matches into the provided
-            container. This container must be able to hold copies (e.g. <code>std::string</code>) or 
-            references (e.g. <code>iterator_range</code>) of the extracted substrings.
-        </para>
-        <para>
-            Two algorithms are provided. <functionname>find_all()</functionname> finds all occurrences
-            of a string in the input. <functionname>split()</functionname> splits the input into parts.
-        </para>
-
-        <programlisting>
-    string str1("hello abc-*-ABC-*-aBc goodbye");
-
-    typedef vector&lt; iterator_range&lt;string::iterator&gt; &gt; find_vector_type;
-    
-    find_vector_type FindVec; // #1: Search for separators
-    ifind_all( FindVec, str1, "abc" ); // FindVec == { [abc],[ABC],[aBc] }
-
-    typedef vector&lt; string &gt; split_vector_type;
-    
-    split_vector_type SplitVec; // #2: Search for tokens
-    split( SplitVec, str1, is_any_of("-*"), token_compress_on ); // SplitVec == { "hello abc","ABC","aBc goodbye" }
-        </programlisting>
-        <para>
-            <code>[abc]</code> designates an <code>iterator_range</code> delimiting the corresponding substring.
-        </para>
-        <para>
-            The first example shows how to construct a container to hold references to all extracted
-            substrings. The algorithm <functionname>ifind_all()</functionname> puts into FindVec references
-            to all substrings that are equal to "abc" in a case-insensitive manner.
-        </para>
-        <para>
-            The second example uses <functionname>split()</functionname> to split the string str1 into parts
-            separated by the characters '-' or '*'. These parts are then put into the SplitVec.
-            It is possible to specify whether adjacent separators are compressed into one or not, as shown in the sketch below.
-        </para>
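-        <para>
-            As a minimal sketch (the input is made up for illustration), the difference between
-            the two compression modes looks like this:
-        </para>
-        <programlisting>
-    string str2("a--b-c");
-    split_vector_type V1, V2;
-
-    split( V1, str2, is_any_of("-"), token_compress_on );  // V1 == { "a","b","c" }
-    split( V2, str2, is_any_of("-"), token_compress_off ); // V2 == { "a","","b","c" }
-        </programlisting>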
-        <para>
-            More information can be found in the reference: <headername>boost/algorithm/string/split.hpp</headername>.
-        </para>
-   </section>
-</section>
diff --git a/third_party/boostorg/algorithm/string/example/Jamfile b/third_party/boostorg/algorithm/string/example/Jamfile
deleted file mode 100644
index 74c923f..0000000
--- a/third_party/boostorg/algorithm/string/example/Jamfile
+++ /dev/null
@@ -1,18 +0,0 @@
-#  Boost string_algo library examples Jamfile  ---------------------------------
-#
-#  Copyright Pavol Droba 2002-2003. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-#  See http://www.boost.org for updates, documentation, and revision history.
-
-
-exe conv_example : conv_example.cpp ;
-exe predicate_example : predicate_example.cpp ;
-exe find_example : find_example.cpp ;
-exe replace_example : replace_example.cpp ;
-exe rle_example : rle_example.cpp ;
-exe trim_example : trim_example.cpp ;
-exe regex_example : regex_example.cpp /boost/regex//boost_regex ;
-exe split_example : split_example.cpp ;
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/string/example/conv_example.cpp b/third_party/boostorg/algorithm/string/example/conv_example.cpp
deleted file mode 100644
index b6a08f9..0000000
--- a/third_party/boostorg/algorithm/string/example/conv_example.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <vector>
-#include <iostream>
-#include <iterator>
-#include <boost/algorithm/string/case_conv.hpp>
-
-using namespace std;
-using namespace boost;
-
-int main()
-{  
-    cout << "* Case Conversion Example *" << endl << endl;
-
-    string str1("AbCdEfG");
-    vector<char> vec1( str1.begin(), str1.end() );
-    
-    // Convert vector of chars to lower case
-    cout << "lower-cased copy of vec1: ";
-    to_lower_copy( ostream_iterator<char>(cout), vec1 );
-    cout << endl;
-
-    // Convert string str1 to upper case ( copy the input )
-    cout << "upper-cased copy of str1: " << to_upper_copy( str1 ) << endl;
-
-    // Inplace conversion
-    to_lower( str1 );
-    cout << "lower-cased str1: " << str1 << endl;
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/find_example.cpp b/third_party/boostorg/algorithm/string/example/find_example.cpp
deleted file mode 100644
index 7fd7e60..0000000
--- a/third_party/boostorg/algorithm/string/example/find_example.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <iostream>
-#include <algorithm>
-#include <functional>
-#include <boost/algorithm/string/case_conv.hpp>
-#include <boost/algorithm/string/find.hpp>
-
-using namespace std;
-using namespace boost;
-
-int main()
-{  
-    cout << "* Find Example *" << endl << endl;
-
-    string str1("abc___cde___efg");
-    string str2("abc");
-
-    // find "cde" substring
-    iterator_range<string::iterator> range=find_first( str1, string("cde") );
-
-    // convert a substring to upper case 
-    // note that iterator range can be directly passed to the algorithm
-    to_upper( range );
-
-    cout << "str1 with upper-cased part matching cde: " << str1 << endl;
-
-    // get a head of the string
-    iterator_range<string::iterator> head=find_head( str1, 3 );
-    cout << "head(3) of the str1: " << string( head.begin(), head.end() ) << endl;
-
-    // get the tail
-    head=find_tail( str2, 5 );
-    cout << "tail(5) of the str2: " << string( head.begin(), head.end() ) << endl;
-
-    // char processing
-    char text[]="hello dolly!";
-    iterator_range<char*> crange=find_last(text,"ll");
-
-    // transform the range ( add 1 )
-    transform( crange.begin(), crange.end(), crange.begin(), bind2nd( plus<char>(), 1 ) );
-    // uppercase the range
-    to_upper( crange );
-
-    cout << text << endl;
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/predicate_example.cpp b/third_party/boostorg/algorithm/string/example/predicate_example.cpp
deleted file mode 100644
index 473ab8b..0000000
--- a/third_party/boostorg/algorithm/string/example/predicate_example.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <iostream>
-#include <functional>
-#include <boost/algorithm/string/predicate.hpp>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/bind.hpp>
-
-
-using namespace std;
-using namespace boost;
-
-int main()
-{
-    cout << "* Predicate Example *" << endl << endl;
-
-    string str1("123xxx321");
-    string str2("abc");
-
-    // Check if str1 starts with '123'
-    cout << "str1 starts with \"123\": " << 
-        (starts_with( str1, string("123") )?"true":"false") << endl; 
-    
-    // Check if str1 ends with '123'
-    cout << "str1 ends with \"123\": " << 
-        (ends_with( str1, string("123") )?"true":"false") << endl; 
-
-    // Check if str1 contains 'xxx'
-    cout << "str1 contains \"xxx\": " << 
-        (contains( str1, string("xxx") )?"true":"false") << endl; 
-
-
-    // Check if str2 equals to 'abc'
-    cout << "str2 equals \"abc\": " << 
-        (equals( str2, string("abc") )?"true":"false") << endl; 
-
-
-    // Classification functors and all predicate
-    if ( all(";.,", is_punct() ) )
-    {
-        cout << "\";.,\" are all punctuation characters" << endl;  
-    }
-
-    // Classification predicates can be combined 
-    if ( all("abcxxx", is_any_of("xabc") && !is_space() ) )
-    {
-        cout << "true" << endl;
-    }
-
-    cout << endl;
-
-    return 0;
-}   
diff --git a/third_party/boostorg/algorithm/string/example/regex_example.cpp b/third_party/boostorg/algorithm/string/example/regex_example.cpp
deleted file mode 100644
index 9eba1c7..0000000
--- a/third_party/boostorg/algorithm/string/example/regex_example.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <boost/regex.hpp>
-#include <boost/algorithm/string/regex.hpp>
-
-using namespace std;
-using namespace boost;
-
-int main()
-{  
-    cout << "* Regex Example *" << endl << endl;
-
-    string str1("abc__(456)__123__(123)__cde");
-
-    // Replace all substrings matching (digit+) 
-    cout << 
-        "replace all (digit+) in str1 with #digit+# :" <<
-        replace_all_regex_copy( str1, regex("\\(([0-9]+)\\)"), string("#$1#") ) << endl;
-    
-    // Erase all sequences of letters
-    cout << 
-        "remove all sequences of letters from str1 :" <<
-        erase_all_regex_copy( str1, regex("[[:alpha:]]+") ) << endl;
-
-    // in-place regex transformation
-    replace_all_regex( str1, regex("_(\\([^\\)]*\\))_"), string("-$1-") );
-    cout << "transformad str1: " << str1 << endl;
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/replace_example.cpp b/third_party/boostorg/algorithm/string/example/replace_example.cpp
deleted file mode 100644
index 12089fa..0000000
--- a/third_party/boostorg/algorithm/string/example/replace_example.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <iostream>
-#include <iterator>
-//#include <boost/algorithm/string/replace.hpp>
-//#include <boost/algorithm/string/erase.hpp>
-//#include <boost/algorithm/string/case_conv.hpp>
-#include <boost/algorithm/string.hpp>
-
-//Following two includes contain second-layer function.
-//They are already included by first-layer header
-
-//#include <boost/algorithm/string/replace2.hpp>
-//#include <boost/algorithm/string/find2.hpp>
-
-using namespace std;
-using namespace boost;
-
-// uppercase formatter
-/*
-    Convert an input to upper case. 
-    Note, that this formatter can be used only on std::string inputs.
-*/
-inline string upcase_formatter( 
-    const iterator_range<string::const_iterator>& Replace )
-{
-    string Temp(Replace.begin(), Replace.end());
-    to_upper(Temp);
-    return Temp;
-}
-
-int main()
-{  
-    cout << "* Replace Example *" << endl << endl;
-
-    string str1("abc___cde___efg");
-
-    // Erase 6-9th characters from the string
-    cout << "str1 without 6th to 9th character:" <<
-        erase_range_copy( str1, make_iterator_range(str1.begin()+6, str1.begin()+9) ) << endl;
-
-    // Replace 6-9th character with '+++'
-    cout << "str1 with 6th to 9th character replaced with '+++': " << 
-        replace_range_copy( 
-            str1, make_iterator_range(str1.begin()+6, str1.begin()+9), "+++" ) << endl;
-
-    cout << "str1 with 'cde' replaced with 'XYZ': ";
-    
-    // Replace the first 'cde' with 'XYZ', writing the result to cout ( the input is not modified )
-    replace_first_copy( ostream_iterator<char>(cout), str1, "cde", "XYZ" );
-    cout << endl;
-    
-    // Replace all '___'
-    cout << "str1 with all '___' replaced with '---': " << 
-        replace_all_copy( str1, "___", "---" ) << endl;
-
-    // Erase all '___'
-    cout << "str1 without all '___': " << 
-        erase_all_copy( str1, "___" ) << endl;
-
-    // replace third and 5th occurrence of _ in str1
-    // note that nth argument is 0-based
-    replace_nth( str1, "_", 4, "+" );
-    replace_nth( str1, "_", 2, "+" );
-
-    cout << "str1 with third and 5th occurrence of _ replace: " << str1 << endl;
-
-    // Custom formatter examples
-    string str2("abC-xxxx-AbC-xxxx-abc");
-
-    // Find string 'abc' ignoring the case and convert it to upper case
-    cout << "Upcase all 'abc'(s) in the str2: " <<
-        find_format_all_copy( 
-            str2,
-            first_finder("abc", is_iequal()), 
-            upcase_formatter );
-    
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/rle_example.cpp b/third_party/boostorg/algorithm/string/example/rle_example.cpp
deleted file mode 100644
index 9e52b96..0000000
--- a/third_party/boostorg/algorithm/string/example/rle_example.cpp
+++ /dev/null
@@ -1,248 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-/*
-    RLE compression using replace framework. Goal is to compress a sequence of
-    repeating characters into 3 bytes ( repeat mark, character and repetition count ).
-    For simplification, it works only on numeric-value sequences.
-*/
-
-#include <string>
-#include <iostream>
-#include <limits>
-#include <boost/detail/iterator.hpp>
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/finder.hpp>
-
-using namespace std;
-using namespace boost;
-
-// replace mark specification, specialize for a specific element type
-template< typename T > T repeat_mark() { return (std::numeric_limits<T>::max)(); };
-
-// Compression  -----------------------------------------------------------------------
-
-
-// compress finder -rle
-/*
-    Find a sequence which can be compressed. It has to be at least 3-character long
-    sequence of repetitive characters 
-*/
-struct find_compressF 
-{
-    // Construction
-    find_compressF() {}
-
-    // Operation
-    template<typename ForwardIteratorT>
-    iterator_range<ForwardIteratorT> operator()( 
-        ForwardIteratorT Begin, 
-        ForwardIteratorT End ) const
-    {
-        typedef ForwardIteratorT input_iterator_type;
-        typedef typename boost::detail::iterator_traits<input_iterator_type>::value_type value_type;
-        typedef iterator_range<input_iterator_type> result_type;
-
-        // begin of the matching segment
-        input_iterator_type MStart=End;
-        // Repetition counter
-        value_type Cnt=0;
-
-        // Search for a sequence of repetitive characters
-        for(input_iterator_type It=Begin; It!=End;)
-        {
-            input_iterator_type It2=It++;
-
-            if ( It==End || Cnt>=(std::numeric_limits<value_type>::max)() )
-            {
-                return result_type( MStart, It );
-            }
-
-            if ( *It==*It2 )
-            {
-                if ( MStart==End )
-                {
-                    // Mark the start
-                    MStart=It2;
-                }
-
-                // Increment the repetition counter
-                Cnt++;
-            }
-            else
-            {
-                if ( MStart!=End )
-                {
-                    if ( Cnt>2 )
-                        return result_type( MStart, It );
-                    else
-                    {
-                        MStart=End;
-                        Cnt=0;
-                    }
-                }
-            }
-        }
-
-        return result_type( End, End );
-    }
-};
-
-// rle compress format
-/*
-    Transform a sequence into repeat mark, character and count 
-*/
-template<typename SeqT>
-struct format_compressF
-{
-private:
-    typedef SeqT result_type;
-    typedef typename SeqT::value_type value_type;
-
-public:
-    // Construction
-    format_compressF() {};
-
-    // Operation
-    template< typename ReplaceT >
-    result_type operator()( const ReplaceT& Replace ) const
-    {
-        SeqT r;
-        if(!Replace.empty())
-        {
-            r.push_back( repeat_mark<value_type>() );
-            r.push_back( *(Replace.begin()) );
-            r.push_back( value_type( Replace.size() ) );
-        }
-
-        return r;
-    }
-};
-
-// Decompression  -----------------------------------------------------------------------
-
-
-// find decompress-rle functor
-/*
-    find a repetition block
-*/
-struct find_decompressF
-{
-    // Construction
-    find_decompressF() {}
-
-    // Operation
-    template<typename ForwardIteratorT>
-    iterator_range<ForwardIteratorT> operator()( 
-        ForwardIteratorT Begin, 
-        ForwardIteratorT End ) const
-    {
-        typedef ForwardIteratorT input_iterator_type;
-        typedef typename boost::detail::iterator_traits<input_iterator_type>::value_type value_type;
-        typedef iterator_range<input_iterator_type> result_type;
-
-        for(input_iterator_type It=Begin; It!=End; It++)
-        {
-            if( *It==repeat_mark<value_type>() )
-            {
-                // Repeat mark found, extract body
-                input_iterator_type It2=It++; 
-                
-                if ( It==End ) break;
-                It++;
-                if ( It==End ) break;
-                It++;
-                
-                return result_type( It2, It );
-            }
-        }
-
-        return result_type( End, End );
-    }
-};
-
-// rle decompress format
-/*
-    transform a repetition block into a sequence of characters
-*/
-template< typename SeqT >
-struct format_decompressF
-{
-private:
-    typedef SeqT result_type;
-    typedef typename SeqT::value_type value_type;
-
-public:
-    // Construction
-    format_decompressF() {};
-
-    // Operation
-    template< typename ReplaceT >
-    result_type operator()( const ReplaceT& Replace ) const
-    {
-        SeqT r;
-
-        if(!Replace.empty())
-        {
-            // extract info
-            typename ReplaceT::const_iterator It=Replace.begin();
-
-            value_type Value=*(++It);
-            value_type Repeat=*(++It);
-
-            for( value_type Index=0; Index<Repeat; Index++ ) r.push_back( Value );
-        }
-
-        return r;
-    }
-};
-
-
-int main()
-{
-    cout << "* RLE Compression Example *" << endl << endl;
-
-    string original("123_AA_*ZZZZZZZZZZZZZZ*34");
-
-    // copy compression
-    string compress=find_format_all_copy( 
-        original, 
-        find_compressF(), 
-        format_compressF<string>() );
-
-    cout << "Compressed string: " << compress << endl;
-
-    // Copy decompression
-    string decompress=find_format_all_copy( 
-        compress, 
-        find_decompressF(), 
-        format_decompressF<string>() );
-
-    cout << "Decompressed string: " << decompress << endl;
-
-    // in-place compression
-    find_format_all( 
-        original, 
-        find_compressF(), 
-        format_compressF<string>() );
-    
-    cout << "Compressed string: " << original << endl;
-
-    // in-place decompression
-    find_format_all( 
-        original, 
-        find_decompressF(), 
-        format_decompressF<string>() );
-
-    cout << "Decompressed string: " << original << endl;
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/split_example.cpp b/third_party/boostorg/algorithm/string/example/split_example.cpp
deleted file mode 100644
index 27e261c..0000000
--- a/third_party/boostorg/algorithm/string/example/split_example.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <vector>
-#include <iostream>
-#include <iterator>
-#include <functional>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/split.hpp>
-#include <boost/algorithm/string/find_iterator.hpp>
-
-using namespace std;
-using namespace boost;
-
-int main()
-{  
-    cout << "* Split Example *" << endl << endl;
-
-    string str1("abc-*-ABC-*-aBc");
-
-    cout << "Before: " << str1 << endl;
-
-    // Find all 'abc' substrings (ignoring the case)
-    // Create a find_iterator
-    typedef find_iterator<string::iterator> string_find_iterator;
-    for(string_find_iterator It=
-            make_find_iterator(str1, first_finder("abc", is_iequal()));
-        It!=string_find_iterator();
-        ++It)
-    {
-        cout << copy_range<std::string>(*It) << endl;
-        // shift all chars in the match by one
-        transform( 
-            It->begin(), It->end(), 
-            It->begin(), 
-            bind2nd( plus<char>(), 1 ) );
-    }
-
-    // Print the string now
-    cout << "After: " << str1 << endl;
-    
-    // Split the string into tokens ( use '-' and '*' as delimiters )
-    // We need copies of the input only, and adjacent tokens are compressed
-    vector<std::string> ResultCopy;
-    split(ResultCopy, str1, is_any_of("-*"), token_compress_on);
-
-    for(unsigned int nIndex=0; nIndex<ResultCopy.size(); nIndex++)
-    {
-        cout << nIndex << ":" << ResultCopy[nIndex] << endl;
-    };
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/example/trim_example.cpp b/third_party/boostorg/algorithm/string/example/trim_example.cpp
deleted file mode 100644
index 7b1a0bd..0000000
--- a/third_party/boostorg/algorithm/string/example/trim_example.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-//  Boost string_algo library example file  ---------------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <string>
-#include <iostream>
-#include <boost/algorithm/string/trim.hpp>
-#include <boost/algorithm/string/classification.hpp>
-
-using namespace std;
-using namespace boost;
-
-int main()
-{
-    cout << "* Trim Example *" << endl << endl;
-
-    string str1("     1x x x x1     ");
-    string str2("<>trim<>");
-    string str3("123abs343");
-
-    // Simple left trim
-    cout << "trim_left copy of str1: " << "\"" << trim_left_copy( str1 ) << "\"" << endl;
-
-    // Inplace right trim
-    trim_right( str1 );
-    cout << "trim_right on str1: " << "\"" << str1 << "\"" << endl;
-
-    // Parametric trim. 'Space' is defined using is_any_of predicate
-    cout 
-        << "trimmed copy of str4 ( space='<>' ): " 
-        << "\""<< trim_copy_if( str2, is_any_of("<>") ) << "\"" << endl;
-    
-
-    // Parametric trim. 'Space' is defined using is_digit predicate
-    cout 
-        << "trimmed copy of str5 ( space=digit ): " 
-        << "\"" << trim_copy_if( str3, is_digit() ) << "\"" << endl;
-
-    cout << endl;
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/string/index.html b/third_party/boostorg/algorithm/string/index.html
deleted file mode 100644
index dc0e1b9..0000000
--- a/third_party/boostorg/algorithm/string/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<html>
-<head>
-<meta http-equiv="refresh" content="0; URL=../../../doc/html/string_algo.html">
-</head>
-<body>
-Automatic redirection failed, please go to
-<a href="../../../doc/html/string_algo.html">../../../doc/html/string_algo.html</a> 
-&nbsp;<hr>
-<p>© Copyright Beman Dawes, 2001</p>
-<p>Distributed under the Boost Software License, Version 1.0. (See accompanying 
-file <a href="../../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy 
-at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
-</body>
-</html>
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/string/test/Jamfile.v2 b/third_party/boostorg/algorithm/string/test/Jamfile.v2
deleted file mode 100644
index 7f60df7..0000000
--- a/third_party/boostorg/algorithm/string/test/Jamfile.v2
+++ /dev/null
@@ -1,74 +0,0 @@
-#  Boost string_algo library test suite Jamfile  ----------------------------
-#
-#  Copyright Pavol Droba 2002-2003. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-#  See http://www.boost.org for updates, documentation, and revision history.
-
-import testing ;
-
-alias unit_test_framework
-    : # sources
-        /boost//unit_test_framework
-    ;        
-
-test-suite algorithm/string
-    : [ run 
-            trim_test.cpp unit_test_framework
-          : :
-            :
-            : trim
-        ]
-      [ run 
-            conv_test.cpp unit_test_framework
-          : :
-            :
-            : conv
-        ]
-      [ run 
-            predicate_test.cpp unit_test_framework
-          : :
-            :
-            : predicate
-        ]
-      [ run 
-            find_test.cpp unit_test_framework
-          : :
-            :
-            : find
-        ]
-      [ run
-            split_test.cpp unit_test_framework
-          : :
-            :
-            : split
-        ]
-      [ run
-            join_test.cpp unit_test_framework
-          : :
-            :
-            : join
-        ]
-      [ run 
-            replace_test.cpp unit_test_framework
-          : :
-            :
-            : replace
-        ]
-      [ run 
-            regex_test.cpp unit_test_framework
-            ../../../regex/build//boost_regex
-          : :
-            :   
-            : regex
-        ]
-      [ run 
-            find_format_test.cpp unit_test_framework
-          : :
-            :   
-            : find_format
-        ]
-    ;
-
diff --git a/third_party/boostorg/algorithm/string/test/conv_test.cpp b/third_party/boostorg/algorithm/string/test/conv_test.cpp
deleted file mode 100644
index c583d3e..0000000
--- a/third_party/boostorg/algorithm/string/test/conv_test.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-//  Boost string_algo library conv_test.cpp file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/case_conv.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-#include <algorithm>
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-void conv_test()
-{
-    string str1("AbCdEfG 123 xxxYYYzZzZ");
-    string str2("AbCdEfG 123 xxxYYYzZzZ");
-    string str3("");
-    const char pch[]="AbCdEfG 123 xxxYYYzZzZ";    
-    unsigned int pchlen=sizeof(pch);
-
-    char* pch1=new char[pchlen];
-    std::copy(pch, pch+pchlen, pch1);
-    char* pch2=new char[pchlen];
-    std::copy(pch, pch+pchlen, pch2);
-
-    // *** iterator tests *** //
-
-    string strout;
-    to_lower_copy( back_inserter(strout), str1 );
-    BOOST_CHECK( strout=="abcdefg 123 xxxyyyzzzz" );
-    strout.clear();
-    to_upper_copy( back_inserter(strout), str1 );
-    BOOST_CHECK( strout=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    strout.clear();
-    to_lower_copy( back_inserter(strout), "AbCdEfG 123 xxxYYYzZzZ" );
-    BOOST_CHECK( strout=="abcdefg 123 xxxyyyzzzz" );
-    strout.clear();
-    to_upper_copy( back_inserter(strout), "AbCdEfG 123 xxxYYYzZzZ" );
-    BOOST_CHECK( strout=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    strout.clear();
-    to_lower_copy( back_inserter(strout), pch1 );
-    BOOST_CHECK( strout=="abcdefg 123 xxxyyyzzzz" );
-    strout.clear();
-    to_upper_copy( back_inserter(strout), pch1 );
-    BOOST_CHECK( strout=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    // *** value passing tests *** //
-
-    BOOST_CHECK( to_lower_copy( str1 )=="abcdefg 123 xxxyyyzzzz" );
-    BOOST_CHECK( to_upper_copy( str1 )=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    BOOST_CHECK( to_lower_copy( str3 )=="" );
-    BOOST_CHECK( to_upper_copy( str3 )=="" );
-
-    // *** inplace tests *** //
-
-    to_lower( str1 );
-    BOOST_CHECK( str1=="abcdefg 123 xxxyyyzzzz" );
-    to_upper( str2 );
-    BOOST_CHECK( str2=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    // c-string modification
-    to_lower( pch1 );
-    BOOST_CHECK( string(pch1)=="abcdefg 123 xxxyyyzzzz" );
-    to_upper( pch2 );
-    BOOST_CHECK( string(pch2)=="ABCDEFG 123 XXXYYYZZZZ" );
-
-    to_lower( str3 );
-    BOOST_CHECK( str3=="" );
-    to_upper( str3 );
-    BOOST_CHECK( str3=="" );
-
-    delete[] pch1;
-    delete[] pch2;
-}
-
-// test main 
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    conv_test();
-}
diff --git a/third_party/boostorg/algorithm/string/test/find_format_test.cpp b/third_party/boostorg/algorithm/string/test/find_format_test.cpp
deleted file mode 100644
index 645cdba..0000000
--- a/third_party/boostorg/algorithm/string/test/find_format_test.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//  Boost string_algo library find_format_test.cpp file  ------------------//
-
-//  Copyright (c) 2009 Steven Watanabe
-//  Distributed under the Boost Software License, Version 1.0. (See
-//  accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/find_format.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <boost/test/test_tools.hpp>
-
-// We're only using const_formatter.
-template<class Formatter>
-struct formatter_result {
-    typedef boost::iterator_range<const char*> type;
-};
-
-template<class Formatter>
-struct checked_formatter {
-public:
-    checked_formatter(const Formatter& formatter) : formatter_(formatter) {}
-    template< typename T >
-    typename formatter_result<Formatter>::type operator()( const T & s ) const {
-        BOOST_CHECK( !s.empty() );
-        return formatter_(s);
-    }
-private:
-    Formatter formatter_;
-};
-
-template<class Formatter>
-checked_formatter<Formatter>
-make_checked_formatter(const Formatter& formatter) {
-    return checked_formatter<Formatter>(formatter);
-}
-
-void find_format_test()
-{
-    const std::string source = "$replace $replace";
-    std::string expected = "ok $replace";
-    std::string output(80, '\0');
-
-    std::string::iterator pos =
-        boost::find_format_copy(
-            output.begin(),
-            source,
-            boost::first_finder("$replace"),
-            make_checked_formatter(boost::const_formatter("ok")));
-    BOOST_CHECK(pos == output.begin() + expected.size());
-    output.erase(std::remove(output.begin(), output.end(), '\0'), output.end());
-    BOOST_CHECK_EQUAL(output, expected);
-
-    output =
-        boost::find_format_copy(
-            source,
-            boost::first_finder("$replace"),
-            make_checked_formatter(boost::const_formatter("ok")));
-    BOOST_CHECK_EQUAL(output, expected);
-
-    // now try finding a string that doesn't exist
-    output.resize(80);
-    pos =
-        boost::find_format_copy(
-            output.begin(),
-            source,
-            boost::first_finder("$noreplace"),
-            make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK(pos == output.begin() + source.size());
-    output.erase(std::remove(output.begin(), output.end(), '\0'), output.end());
-    BOOST_CHECK_EQUAL(output, source);
-
-    output =
-        boost::find_format_copy(
-            source,
-            boost::first_finder("$noreplace"),
-            make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK_EQUAL(output, source);
-
-    // in place version
-    output = source;
-    boost::find_format(
-        output,
-        boost::first_finder("$replace"),
-        make_checked_formatter(boost::const_formatter("ok")));
-    BOOST_CHECK_EQUAL(output, expected);
-    output = source;
-    boost::find_format(
-        output,
-        boost::first_finder("$noreplace"),
-        make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK_EQUAL(output, source);
-}
-
-void find_format_all_test()
-{
-    const std::string source = "$replace $replace";
-    std::string expected = "ok ok";
-    std::string output(80, '\0');
-
-    std::string::iterator pos =
-        boost::find_format_all_copy(output.begin(),
-                                source,
-                                boost::first_finder("$replace"),
-                                boost::const_formatter("ok"));
-    BOOST_CHECK(pos == output.begin() + expected.size());
-    output.erase(std::remove(output.begin(), output.end(), '\0'), output.end());
-    BOOST_CHECK_EQUAL(output, expected);
-
-    output =
-        boost::find_format_all_copy(
-            source,
-            boost::first_finder("$replace"),
-            make_checked_formatter(boost::const_formatter("ok")));
-    BOOST_CHECK_EQUAL(output, expected);
-
-    // now try finding a string that doesn't exist
-    output.resize(80);
-    pos =
-        boost::find_format_all_copy(
-            output.begin(),
-            source,
-            boost::first_finder("$noreplace"),
-            make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK(pos == output.begin() + source.size());
-    output.erase(std::remove(output.begin(), output.end(), '\0'), output.end());
-    BOOST_CHECK_EQUAL(output, source);
-
-    output =
-        boost::find_format_all_copy(
-            source,
-            boost::first_finder("$noreplace"),
-            make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK_EQUAL(output, source);
-
-    // in place version
-    output = source;
-    boost::find_format_all(
-        output,
-        boost::first_finder("$replace"),
-        make_checked_formatter(boost::const_formatter("ok")));
-    BOOST_CHECK_EQUAL(output, expected);
-    output = source;
-    boost::find_format_all(
-        output,
-        boost::first_finder("$noreplace"),
-        make_checked_formatter(boost::const_formatter("bad")));
-    BOOST_CHECK_EQUAL(output, source);
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    find_format_test();
-    find_format_all_test();
-}
diff --git a/third_party/boostorg/algorithm/string/test/find_test.cpp b/third_party/boostorg/algorithm/string/test/find_test.cpp
deleted file mode 100644
index 8439b3c..0000000
--- a/third_party/boostorg/algorithm/string/test/find_test.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-//  Boost string_algo library substr_test.cpp file  ------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/find.hpp>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/split.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <iostream>
-#include <iterator>
-#include <sstream>
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-void find_test()
-{
-    string str1("123abcxXxabcXxXabc321");
-    string str2("abc");
-    string str3("");
-    const char* pch1="123abcxxxabcXXXabc321";
-    vector<int> vec1( str1.begin(), str1.end() );
-
-    // find results ------------------------------------------------------------//
-    iterator_range<string::iterator> nc_result;
-    iterator_range<string::const_iterator> cv_result;
-    
-    iterator_range<vector<int>::iterator> nc_vresult;
-    iterator_range<vector<int>::const_iterator> cv_vresult;
-
-    iterator_range<const char*> ch_result;
-
-    // basic tests ------------------------------------------------------------//
-
-
-    // find_first
-    BOOST_TEST_CHECKPOINT( "find_first" );
-
-    nc_result=find_first( str1, string("abc") );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    cv_result=find_first( const_cast<const string&>(str1), str2 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    cv_result=ifind_first( const_cast<const string&>(str1), "xXX" );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 6) &&
-        ( (cv_result.end()-str1.begin()) == 9) );
-
-    ch_result=find_first( pch1, "abc" );
-    BOOST_CHECK(( (ch_result.begin() - pch1 ) == 3) && ( (ch_result.end() - pch1 ) == 6 ) );
-
-    // find_last
-    BOOST_TEST_CHECKPOINT( "find_last" );
-    
-    nc_result=find_last( str1, string("abc") );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 15) &&
-        ( (nc_result.end()-str1.begin()) == 18) );
-
-    cv_result=find_last( const_cast<const string&>(str1), str2 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 15) &&
-        ( (cv_result.end()-str1.begin()) == 18) );
-
-    cv_result=ifind_last( const_cast<const string&>(str1), "XXx" );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 12) &&
-        ( (cv_result.end()-str1.begin()) == 15) );
-
-    ch_result=find_last( pch1, "abc" );
-    BOOST_CHECK(( (ch_result.begin() - pch1 ) == 15) && ( (ch_result.end() - pch1 ) == 18 ) );
-
-    // find_nth
-    BOOST_TEST_CHECKPOINT( "find_nth" );
-
-    nc_result=find_nth( str1, string("abc"), 1 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 9) &&
-        ( (nc_result.end()-str1.begin()) == 12) );
-
-    nc_result=find_nth( str1, string("abc"), -1 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 15) &&
-        ( (nc_result.end()-str1.begin()) == 18) );
-
-
-    cv_result=find_nth( const_cast<const string&>(str1), str2, 1 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 9) &&
-        ( (cv_result.end()-str1.begin()) == 12) );
-
-    cv_result=find_nth( const_cast<const string&>(str1), str2, -1 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 15) &&
-        ( (cv_result.end()-str1.begin()) == 18) );
-        
-    cv_result=ifind_nth( const_cast<const string&>(str1), "xxx", 1 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 12) &&
-        ( (cv_result.end()-str1.begin()) == 15) );
-
-
-    ch_result=find_nth( pch1, "abc", 1 );
-    BOOST_CHECK(( (ch_result.begin() - pch1 ) == 9) && ( (ch_result.end() - pch1 ) == 12 ) );
-
-    // find_head
-    BOOST_TEST_CHECKPOINT( "find_head" );
-
-    nc_result=find_head( str1, 6 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 0) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    nc_result=find_head( str1, -6 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 0) &&
-        ( (str1.end()-nc_result.end()) == 6 ) );
-
-    cv_result=find_head( const_cast<const string&>(str1), 6 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 0) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    ch_result=find_head( pch1, 6 );
-    BOOST_CHECK( ( (ch_result.begin() - pch1 ) == 0 ) && ( (ch_result.end() - pch1 ) == 6 ) );
-
-    // find_tail
-    BOOST_TEST_CHECKPOINT( "find_tail" );
-
-    nc_result=find_tail( str1, 6 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 15) &&
-        ( (nc_result.end()-str1.begin()) == 21) );
-
-    nc_result=find_tail( str1, -6 );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 6) &&
-        ( (nc_result.end()-str1.begin()) == 21) );
-
-
-    cv_result=find_tail( const_cast<const string&>(str1), 6 );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 15) &&
-        ( (cv_result.end()-str1.begin()) == 21) );
-
-    ch_result=find_tail( pch1, 6 );
-    BOOST_CHECK( ( (ch_result.begin() - pch1 ) == 15 ) && ( (ch_result.end() - pch1 ) == 21 ) );
-
-    // find_token
-    BOOST_TEST_CHECKPOINT( "find_token" );
-
-    nc_result=find_token( str1, is_any_of("abc"), token_compress_on );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    cv_result=find_token( const_cast<const string&>(str1), is_any_of("abc"), token_compress_on );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    string s1("abc def ghi jkl");
-    find_iterator<string::iterator> fEnd;
-
-    find_iterator<string::iterator> fxIt = make_find_iterator(s1,
-            token_finder(is_alnum(), token_compress_on));
-    BOOST_CHECK((fxIt != fEnd) && (*fxIt == string("abc")));
-    ++fxIt;
-    BOOST_CHECK((fxIt != fEnd) && (*fxIt == string("def")));
-    ++fxIt;
-    BOOST_CHECK((fxIt != fEnd) && (*fxIt == string("ghi")));
-    ++fxIt;
-    BOOST_CHECK((fxIt != fEnd) && (*fxIt == string("jkl")));
-    ++fxIt;
-    BOOST_CHECK(fxIt == fEnd);
-
-    nc_result=find_token( str1, is_any_of("abc"), token_compress_off );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 4) );
-
-    cv_result=find_token( const_cast<const string&>(str1), is_any_of("abc"), token_compress_off );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 4) );
-
-    ch_result=find_token( pch1, is_any_of("abc"), token_compress_off );
-    BOOST_CHECK( ( (ch_result.begin() - pch1 ) == 3 ) && ( (ch_result.end() - pch1 ) == 4 ) );
-
-    // generic find
-    BOOST_TEST_CHECKPOINT( "generic find" );
-
-    nc_result=find(str1, first_finder(string("abc")));
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    cv_result=find(const_cast<const string&>(str1), first_finder(str2) );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    // multi-type comparison test 
-    BOOST_TEST_CHECKPOINT( "multi-type" );
-
-    nc_vresult=find_first( vec1, string("abc") );
-    BOOST_CHECK( 
-        ( (nc_vresult.begin()-vec1.begin()) == 3) &&
-        ( (nc_vresult.end()-vec1.begin()) == 6) );
-
-    cv_vresult=find_first( const_cast<const vector<int>&>(vec1), str2 );
-    BOOST_CHECK( 
-        ( (cv_vresult.begin()-vec1.begin()) == 3) &&
-        ( (cv_vresult.end()-vec1.begin()) == 6) );
-
-    // overflow test
-    BOOST_TEST_CHECKPOINT( "overflow" );
-    
-    nc_result=find_first( str2, string("abcd") );
-    BOOST_CHECK( nc_result.begin()==nc_result.end() );
-    cv_result=find_first( const_cast<const string&>(str2), string("abcd") );
-    BOOST_CHECK( cv_result.begin()==cv_result.end() );
-
-    cv_result=find_head( const_cast<const string&>(str2), 4 );
-    BOOST_CHECK( string( cv_result.begin(), cv_result.end() )== string("abc") );
-    cv_result=find_tail( const_cast<const string&>(str2), 4 );
-    BOOST_CHECK( string( cv_result.begin(), cv_result.end() )== string("abc") );
-
-    // Empty string test
-    BOOST_TEST_CHECKPOINT( "empty" );
-    
-    nc_result=find_first( str3, string("abcd") );
-    BOOST_CHECK( nc_result.begin()==nc_result.end() );
-    nc_result=find_first( str1, string("") );
-    BOOST_CHECK( nc_result.begin()==nc_result.end() );
-
-    cv_result=find_first( const_cast<const string&>(str3), string("abcd") );
-    BOOST_CHECK( cv_result.begin()==cv_result.end() );
-    cv_result=find_first( const_cast<const string&>(str1), string("") );
-    BOOST_CHECK( cv_result.begin()==cv_result.end() ); 
-
-    // iterator_range specific tests
-    ostringstream osstr;
-    osstr << find_first( str1, "abc" );
-    BOOST_CHECK( osstr.str()=="abc" );
-
-}
-
-// test main 
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    find_test();
-}
diff --git a/third_party/boostorg/algorithm/string/test/join_test.cpp b/third_party/boostorg/algorithm/string/test/join_test.cpp
deleted file mode 100644
index 9c5150b..0000000
--- a/third_party/boostorg/algorithm/string/test/join_test.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-//  Boost string_algo library iterator_test.cpp file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/join.hpp>
-#include <boost/algorithm/string/classification.hpp>
-// equals predicate is used for result comparison
-#include <boost/algorithm/string/predicate.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <iostream>
-
-#include <boost/test/test_tools.hpp>
-
-
-using namespace std;
-using namespace boost;
-
-bool is_not_empty(const std::string& str)
-{
-    return !str.empty();
-}
-
-void join_test()
-{
-    // Prepare inputs
-    vector<string> tokens1;
-    tokens1.push_back("xx");
-    tokens1.push_back("abc");
-    tokens1.push_back("xx");
-
-    vector<string> tokens2;
-    tokens2.push_back("");
-    tokens2.push_back("xx");
-    tokens2.push_back("abc");
-    tokens2.push_back("");
-    tokens2.push_back("abc");
-    tokens2.push_back("xx");
-    tokens2.push_back("");
-
-    vector<string> tokens3;
-    tokens3.push_back("");
-    tokens3.push_back("");
-    tokens3.push_back("");
-
-    vector<string> empty_tokens;
-
-    vector<vector<int> > vtokens;
-    for(unsigned int n=0; n<tokens2.size(); ++n)
-    {
-        vtokens.push_back(vector<int>(tokens2[n].begin(), tokens2[n].end()));
-    }
-
-    BOOST_CHECK( equals(join(tokens1, "-"), "xx-abc-xx") );
-    BOOST_CHECK( equals(join(tokens2, "-"), "-xx-abc--abc-xx-") );
-    BOOST_CHECK( equals(join(vtokens, "-"), "-xx-abc--abc-xx-") );
-    BOOST_CHECK( equals(join(empty_tokens, "-"), "") );
-
-    BOOST_CHECK( equals(join_if(tokens2, "-", is_not_empty), "xx-abc-abc-xx") );
-    BOOST_CHECK( equals(join_if(empty_tokens, "-", is_not_empty), "") );
-    BOOST_CHECK( equals(join_if(tokens3, "-", is_not_empty), "") );
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    join_test();
-}
diff --git a/third_party/boostorg/algorithm/string/test/predicate_test.cpp b/third_party/boostorg/algorithm/string/test/predicate_test.cpp
deleted file mode 100644
index ba1564e..0000000
--- a/third_party/boostorg/algorithm/string/test/predicate_test.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-//  Boost string_algo library predicate_test.cpp file  ------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/predicate.hpp>
-#include <boost/algorithm/string/classification.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <iostream>
-#include <functional>
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-void predicate_test()
-{
-    string str1("123xxx321");
-    string str1_prefix("123");
-    string str2("abc");
-    string str3("");
-    string str4("abc");
-    vector<int> vec1( str1.begin(), str1.end() );
-
-    // Basic tests
-    BOOST_CHECK( starts_with( str1, string("123") ) );
-    BOOST_CHECK( !starts_with( str1, string("1234") ) );
-
-    BOOST_CHECK( istarts_with( "aBCxxx", "abc" ) );
-    BOOST_CHECK( !istarts_with( "aBCxxx", "abcd" ) );
-
-    BOOST_CHECK( ends_with( str1, string("321") ) );
-    BOOST_CHECK( !ends_with( str1, string("123") ) );
-
-    BOOST_CHECK( iends_with( "aBCxXx", "XXX" ) );
-    BOOST_CHECK( !iends_with( "aBCxxX", "xXXX" ) );
-
-    BOOST_CHECK( contains( str1, string("xxx") ) );
-    BOOST_CHECK( !contains( str1, string("yyy") ) );
-
-    BOOST_CHECK( icontains( "123XxX321", "xxx" ) );
-    BOOST_CHECK( !icontains( "123xXx321", "yyy" ) );
-
-    BOOST_CHECK( equals( str2, string("abc") ) );
-    BOOST_CHECK( !equals( str1, string("yyy") ) );
-
-    BOOST_CHECK( iequals( "AbC", "abc" ) );
-    BOOST_CHECK( !iequals( "aBc", "yyy" ) );
-
-    BOOST_CHECK( lexicographical_compare("abc", "abd") );
-    BOOST_CHECK( !lexicographical_compare("abc", "abc") );
-    BOOST_CHECK( lexicographical_compare("abc", "abd", is_less()) );
-
-    BOOST_CHECK( !ilexicographical_compare("aBD", "AbC") );
-    BOOST_CHECK( ilexicographical_compare("aBc", "AbD") );
-    BOOST_CHECK( lexicographical_compare("abC", "aBd", is_iless()) );
-
-    // multi-type comparison test
-    BOOST_CHECK( starts_with( vec1, string("123") ) );
-    BOOST_CHECK( ends_with( vec1, string("321") ) );
-    BOOST_CHECK( contains( vec1, string("xxx") ) );
-    BOOST_CHECK( equals( vec1, str1 ) );
-
-    // overflow test
-    BOOST_CHECK( !starts_with( str2, string("abcd") ) );
-    BOOST_CHECK( !ends_with( str2, string("abcd") ) );
-    BOOST_CHECK( !contains( str2, string("abcd") ) );
-    BOOST_CHECK( !equals( str2, string("abcd") ) );
-
-    // equal test
-    BOOST_CHECK( starts_with( str2, string("abc") ) );
-    BOOST_CHECK( ends_with( str2, string("abc") ) );
-    BOOST_CHECK( contains( str2, string("abc") ) );
-    BOOST_CHECK( equals( str2, string("abc") ) );
-
-    //! Empty string test
-    BOOST_CHECK( starts_with( str2, string("") ) );
-    BOOST_CHECK( ends_with( str2, string("") ) );
-    BOOST_CHECK( contains( str2, string("") ) );
-    BOOST_CHECK( equals( str3, string("") ) );
-
-    //! Container compatibility test
-    BOOST_CHECK( starts_with( "123xxx321", "123" ) );
-    BOOST_CHECK( ends_with( "123xxx321", "321" ) );
-    BOOST_CHECK( contains( "123xxx321", "xxx" ) );
-    BOOST_CHECK( equals( "123xxx321", "123xxx321" ) );
-
-}
-
-template<typename Pred, typename Input>
-void test_pred(const Pred& pred, const Input& input, bool bYes)
-{
-    // test assignment operator
-    Pred pred1=pred;
-    pred1=pred;
-    pred1=pred1;
-    if(bYes)
-    {
-        BOOST_CHECK( all( input, pred ) );
-        BOOST_CHECK( all( input, pred1 ) );
-    }
-    else
-    {
-        BOOST_CHECK( !all( input, pred ) );
-        BOOST_CHECK( !all( input, pred1 ) );
-    }
-}
-
-#define TEST_CLASS( Pred, YesInput, NoInput )\
-{\
-    test_pred(Pred, YesInput, true); \
-    test_pred(Pred, NoInput, false); \
-}
-
-void classification_test()
-{
-    TEST_CLASS( is_space(), "\n\r\t ", "..." );
-    TEST_CLASS( is_alnum(), "ab129ABc", "_ab129ABc" );
-    TEST_CLASS( is_alpha(), "abc", "abc1" );
-    TEST_CLASS( is_cntrl(), "\n\t\r", "..." );
-    TEST_CLASS( is_digit(), "1234567890", "abc" );
-    TEST_CLASS( is_graph(), "123abc.,", "  \t" );
-    TEST_CLASS( is_lower(), "abc", "Aasdf" );
-    TEST_CLASS( is_print(), "abs", "\003\004asdf" );
-    TEST_CLASS( is_punct(), ".,;\"", "abc" );
-    TEST_CLASS( is_upper(), "ABC", "aBc" );
-    TEST_CLASS( is_xdigit(), "ABC123", "XFD" );
-    TEST_CLASS( is_any_of( string("abc") ), "aaabbcc", "aaxb" );
-    TEST_CLASS( is_any_of( "abc" ), "aaabbcc", "aaxb" );
-    TEST_CLASS( is_from_range( 'a', 'c' ), "aaabbcc", "aaxb" );
-
-    TEST_CLASS( !is_classified(std::ctype_base::space), "...", "..\n\r\t " );
-    TEST_CLASS( ( !is_any_of("abc") && is_from_range('a','e') ) || is_space(), "d e", "abcde" );
-
-    // is_any_of test
-//  TEST_CLASS( !is_any_of(""), "", "aaa" )
-    TEST_CLASS( is_any_of("a"), "a", "ab" )
-    TEST_CLASS( is_any_of("ba"), "ab", "abc" )
-    TEST_CLASS( is_any_of("cba"), "abc", "abcd" )
-    TEST_CLASS( is_any_of("hgfedcba"), "abcdefgh", "abcdefghi" )
-    TEST_CLASS( is_any_of("qponmlkjihgfedcba"), "abcdefghijklmnopq", "zzz" )
-}
-
-#undef TEST_CLASS
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    predicate_test();
-    classification_test();
-}
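
A minimal sketch of the string predicates and character classification that the removed predicate_test.cpp covered; the sample strings are illustrative, the calls mirror the deleted test:

    #include <boost/algorithm/string/classification.hpp>
    #include <boost/algorithm/string/predicate.hpp>
    #include <cassert>
    #include <string>

    int main() {
        std::string s("123xxx321");
        assert(boost::algorithm::starts_with(s, "123"));
        assert(boost::algorithm::ends_with(s, "321"));
        assert(boost::algorithm::contains(s, "xxx"));
        assert(boost::algorithm::iequals("AbC", "abc"));   // case-insensitive compare
        // all() applies a classification predicate to every character.
        assert(boost::algorithm::all("12345", boost::algorithm::is_digit()));
        assert(!boost::algorithm::all("12a45", boost::algorithm::is_digit()));
        return 0;
    }
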
diff --git a/third_party/boostorg/algorithm/string/test/regex_test.cpp b/third_party/boostorg/algorithm/string/test/regex_test.cpp
deleted file mode 100644
index f93c1d9..0000000
--- a/third_party/boostorg/algorithm/string/test/regex_test.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-//  Boost string_algo library regex_test.cpp file  ------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/regex.hpp>
-#include <boost/algorithm/string/join.hpp>
-#include <boost/algorithm/string/sequence_traits.hpp>
-// equals predicate is used for result comparison
-#include <boost/algorithm/string/predicate.hpp>
-
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <iostream>
-#include <boost/regex.hpp>
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-static void find_test()
-{
-    string str1("123a1cxxxa23cXXXa456c321");
-    const char* pch1="123a1cxxxa23cXXXa456c321";
-    regex rx("a[0-9]+c");
-    vector<int> vec1( str1.begin(), str1.end() );
-    vector<string> tokens;
-
-    // find results
-    iterator_range<string::iterator> nc_result;
-    iterator_range<string::const_iterator> cv_result;
-    
-    iterator_range<vector<int>::iterator> nc_vresult;
-    iterator_range<vector<int>::const_iterator> cv_vresult;
-
-    iterator_range<const char*> ch_result;
-
-    // basic tests
-    nc_result=find_regex( str1, rx );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    cv_result=find_regex( str1, rx );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    ch_result=find_regex( pch1, rx );
-    BOOST_CHECK(( (ch_result.begin() - pch1 ) == 3) && ( (ch_result.end() - pch1 ) == 6 ) );
-
-    // multi-type comparison test
-    nc_vresult=find_regex( vec1, rx );
-    BOOST_CHECK( 
-        ( (nc_result.begin()-str1.begin()) == 3) &&
-        ( (nc_result.end()-str1.begin()) == 6) );
-
-    cv_vresult=find_regex( vec1, rx );
-    BOOST_CHECK( 
-        ( (cv_result.begin()-str1.begin()) == 3) &&
-        ( (cv_result.end()-str1.begin()) == 6) );
-
-    // find_all_regex test
-    find_all_regex( tokens, str1, rx );
-
-    BOOST_REQUIRE( tokens.size()==3 );
-    BOOST_CHECK( tokens[0]==string("a1c") );
-    BOOST_CHECK( tokens[1]==string("a23c") );
-    BOOST_CHECK( tokens[2]==string("a456c") );
-
-    // split_regex test
-    split_regex(    tokens, str1, rx );
-
-    BOOST_REQUIRE( tokens.size()==4 );
-    BOOST_CHECK( tokens[0]==string("123") );
-    BOOST_CHECK( tokens[1]==string("xxx") );
-    BOOST_CHECK( tokens[2]==string("XXX") );
-    BOOST_CHECK( tokens[3]==string("321") );
-
-}
-
-static void join_test()
-{
-    // Prepare inputs
-    vector<string> tokens1;
-    tokens1.push_back("xx");
-    tokens1.push_back("abc");
-    tokens1.push_back("xx");
-
-#ifndef BOOST_NO_FUNCTION_TEMPLATE_ORDERING
-    BOOST_CHECK( equals(join_if(tokens1, "-", regex("x+")), "xx-xx") );
-    BOOST_CHECK( equals(join_if(tokens1, "-", regex("[abc]+")), "abc") );
-#else 
-    BOOST_CHECK( equals(join_if_regex(tokens1, "-", regex("x+")), "xx-xx") );
-    BOOST_CHECK( equals(join_if_regex(tokens1, "-", regex("[abc]+")), "abc") );
-#endif 
-}
-
-static void replace_test()
-{
-    string str1("123a1cxxxa23cXXXa456c321");
-    regex rx1("a([0-9]+)c");
-    regex rx2("([xX]+)");
-    regex rx3("_[^_]*_");
-    string fmt1("_A$1C_");
-    string fmt2("_xXx_");
-    vector<int> vec1( str1.begin(), str1.end() );
-
-    // immutable tests
-    
-    // basic tests
-    BOOST_CHECK( replace_regex_copy( str1, rx1, fmt1 )==string("123_A1C_xxxa23cXXXa456c321") );
-    BOOST_CHECK( replace_all_regex_copy( str1, rx1, fmt1 )==string("123_A1C_xxx_A23C_XXX_A456C_321") );
-    BOOST_CHECK( erase_regex_copy( str1, rx1 )==string("123xxxa23cXXXa456c321") );
-    BOOST_CHECK( erase_all_regex_copy( str1, rx1 )==string(string("123xxxXXX321")) );
-
-    // output iterator variants test
-    string strout;
-    replace_regex_copy( back_inserter(strout), str1, rx1, fmt1 );
-    BOOST_CHECK( strout==string("123_A1C_xxxa23cXXXa456c321") );
-    strout.clear();
-    replace_all_regex_copy( back_inserter(strout), str1, rx1, fmt1 );
-    BOOST_CHECK( strout==string("123_A1C_xxx_A23C_XXX_A456C_321") );
-    strout.clear();
-    erase_regex_copy( back_inserter(strout), str1, rx1 );
-    BOOST_CHECK( strout==string("123xxxa23cXXXa456c321") );
-    strout.clear();
-    erase_all_regex_copy( back_inserter(strout), str1, rx1 );
-    BOOST_CHECK( strout==string("123xxxXXX321") );
-    strout.clear();
-
-    // in-place test
-    replace_regex( str1, rx1, fmt2 );
-    BOOST_CHECK( str1==string("123_xXx_xxxa23cXXXa456c321") );
-
-    replace_all_regex( str1, rx2, fmt1 );
-    BOOST_CHECK( str1==string("123__AxXxC___AxxxC_a23c_AXXXC_a456c321") );
-    erase_regex( str1, rx3 );
-    BOOST_CHECK( str1==string("123AxXxC___AxxxC_a23c_AXXXC_a456c321") );
-    erase_all_regex( str1, rx3 );
-    BOOST_CHECK( str1==string("123AxXxCa23ca456c321") );
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    find_test();
-    join_test();
-    replace_test();
-}
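
A minimal sketch of the regex-based variants used in the removed regex_test.cpp (requires linking Boost.Regex); the input string is illustrative:

    #include <boost/algorithm/string/regex.hpp>
    #include <boost/regex.hpp>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
        std::string str("123a1cxxxa23c321");
        boost::regex rx("a[0-9]+c");
        // Replace every match of the expression with the format string.
        assert(boost::algorithm::replace_all_regex_copy(str, rx, std::string("_"))
               == "123_xxx_321");
        // Split the input on matches of the expression.
        std::vector<std::string> parts;
        boost::algorithm::split_regex(parts, str, rx);
        assert(parts.size() == 3 && parts[1] == "xxx");
        return 0;
    }
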
diff --git a/third_party/boostorg/algorithm/string/test/replace_test.cpp b/third_party/boostorg/algorithm/string/test/replace_test.cpp
deleted file mode 100644
index 789d8e4..0000000
--- a/third_party/boostorg/algorithm/string/test/replace_test.cpp
+++ /dev/null
@@ -1,321 +0,0 @@
-//  Boost string_algo library replace_test.cpp file  ------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/replace.hpp>
-#include <boost/algorithm/string/erase.hpp>
-#include <boost/algorithm/string/std/list_traits.hpp>
-#include <boost/algorithm/string/std/string_traits.hpp>
-#include <boost/algorithm/string/finder.hpp>
-#include <boost/algorithm/string/formatter.hpp>
-#include <boost/algorithm/string/classification.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-#include <iostream>
-
-// equals predicate is used for result comparison
-#include <boost/algorithm/string/predicate.hpp>
-
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-void sequence_traits_test()
-{
-    // basic_string traits
-    BOOST_CHECK( boost::algorithm::has_native_replace<string>::value );
-    BOOST_CHECK( !boost::algorithm::has_stable_iterators<string>::value );
-    BOOST_CHECK( !boost::algorithm::has_const_time_insert<string>::value );    
-    BOOST_CHECK( !boost::algorithm::has_const_time_erase<string>::value ); 
-
-    // vector traits
-    BOOST_CHECK( !boost::algorithm::has_native_replace< vector<char> >::value );
-    BOOST_CHECK( !boost::algorithm::has_stable_iterators< vector<char> >::value );
-    BOOST_CHECK( !boost::algorithm::has_const_time_insert< vector<char> >::value );    
-    BOOST_CHECK( !boost::algorithm::has_const_time_erase< vector<char> >::value ); 
-
-    // list traits
-    BOOST_CHECK( !boost::algorithm::has_native_replace< list<char> >::value );
-    BOOST_CHECK( boost::algorithm::has_stable_iterators< list<char> >::value );
-    BOOST_CHECK( boost::algorithm::has_const_time_insert< list<char> >::value );   
-    BOOST_CHECK( boost::algorithm::has_const_time_erase< list<char> >::value );    
-}
-
-// Combine tests for all variants of the algorithm
-#define C_ ,
-#define TEST_ALGO( Algo, Input, Params, Output ) \
-{\
-    BOOST_TEST_CHECKPOINT( #Algo " - Copy" );\
-\
-    string str1(Input);\
-\
-    /* Copy test */ \
-    BOOST_CHECK( Algo##_copy( str1, Params )==Output );\
-\
-    BOOST_TEST_CHECKPOINT( #Algo " - Iterator" );\
-    /* Iterator test */\
-    string strout;\
-    Algo##_copy( back_inserter(strout), str1, Params );\
-    BOOST_CHECK( strout==Output ); \
-\
-    /* In-place test */\
-    vector<char> vec1( str1.begin(), str1.end() );\
-    list<char> list1( str1.begin(), str1.end() );\
-\
-    BOOST_TEST_CHECKPOINT( #Algo " - Inplace(string)" );\
-    Algo( str1, Params ); \
-    BOOST_CHECK( equals( str1, Output ) ); \
-\
-    BOOST_TEST_CHECKPOINT( #Algo " - Inplace(vector)" );\
-    Algo( vec1, Params ); \
-    BOOST_CHECK( equals( vec1, Output ) );\
-\
-    BOOST_TEST_CHECKPOINT( #Algo " - Inplace(list)" );\
-    Algo( list1, Params ); \
-    BOOST_CHECK( equals( list1, Output ) );\
-}
-
-void replace_first_test()
-{
-    // replace first
-    TEST_ALGO( replace_first, "1abc3abc2", string("abc") C_ string("YYY"), string("1YYY3abc2") );
-    TEST_ALGO( ireplace_first, "1AbC3abc2", "aBc" C_ "YYY", string("1YYY3abc2") );
-    TEST_ALGO( replace_first, "1abc3abc2", string("abc") C_ string("Z"), string("1Z3abc2") );
-    TEST_ALGO( replace_first, "1abc3abc2", string("abc") C_ string("XXXX"), string("1XXXX3abc2") );
-    TEST_ALGO( replace_first, "1abc3abc2", string("") C_ string("XXXX"), string("1abc3abc2") );
-    TEST_ALGO( replace_first, "1abc3abc2", "" C_ "XXXX", string("1abc3abc2") );
-    TEST_ALGO( replace_first, "", string("") C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_first, "1abc3abc2", string("abc"), string("13abc2") );
-    TEST_ALGO( ierase_first, "1aBc3abc2", "abC", "13abc2" );
-    TEST_ALGO( erase_first, "1abc3abc2", "abc", "13abc2" );
-    TEST_ALGO( erase_first, "1abc3abc2", string(""), string("1abc3abc2") );
-    TEST_ALGO( erase_first, "", string("abc"), string("") );
-}
-
-void replace_last_test()
-{
-    // replace last
-    TEST_ALGO( replace_last, "1abc3abc2", string("abc") C_ string("YYY"), string("1abc3YYY2") );
-    TEST_ALGO( ireplace_last, "1abc3AbC2", "aBc" C_ "YYY", string("1abc3YYY2") );
-    TEST_ALGO( replace_last, "1abc3abc2", string("abc") C_ string("Z"), string("1abc3Z2") );
-    TEST_ALGO( replace_last, "1abc3abc2", string("abc") C_ string("XXXX"), string("1abc3XXXX2") );
-    TEST_ALGO( replace_last, "1abc3abc2", "abc" C_ "XXXX", string("1abc3XXXX2") );
-    TEST_ALGO( replace_last, "", string("") C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_last, "1abc3abc2", string("abc"), string("1abc32") );
-    TEST_ALGO( ierase_last, "1aBc3aBc2", "ABC", string("1aBc32") );
-    TEST_ALGO( erase_last, "1abc3abc2", "abc", string("1abc32") );
-    TEST_ALGO( erase_last, "1abc3abc2", string(""), string("1abc3abc2") );
-    TEST_ALGO( erase_last, "", string("abc"), string("") );
-}
-
-void replace_all_test()
-{
-    // replace all
-    TEST_ALGO( replace_all, "1abc3abc2", string("abc") C_ string("YYY"), string("1YYY3YYY2") );
-    TEST_ALGO( replace_all, string("1abc3abc2"), "/" C_ "\\", string("1abc3abc2") );
-    TEST_ALGO( ireplace_all, "1aBc3AbC2", "abC" C_ "YYY", string("1YYY3YYY2") );
-    TEST_ALGO( replace_all, "1abc3abc2", string("abc") C_ string("Z"), string("1Z3Z2") );
-    TEST_ALGO( replace_all, "1abc3abc2", string("abc") C_ string("XXXX"), string("1XXXX3XXXX2") );
-    TEST_ALGO( replace_all, "1abc3abc2", "abc" C_ "XXXX", string("1XXXX3XXXX2") );
-    TEST_ALGO( replace_all, "", string("") C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_all, "1abc3abc2", string("abc"), string("132") );
-    TEST_ALGO( ierase_all, "1aBc3aBc2", "aBC", string("132") );
-    TEST_ALGO( erase_all, "1abc3abc2", "abc", string("132") );
-    TEST_ALGO( erase_all, "1abc3abc2", string(""), string("1abc3abc2") );
-    TEST_ALGO( erase_all, "", string("abc"), string("") );
-}
-
-void replace_nth_test()
-{
-    // replace nth
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ 0 C_ string("YYY"), string("1YYY3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ -1 C_ string("YYY"), string("1abc3YYY2") );
-    TEST_ALGO( ireplace_nth, "1AbC3abc2", "aBc" C_ 0 C_ "YYY", string("1YYY3abc2") );
-    TEST_ALGO( ireplace_nth, "1AbC3abc2", "aBc" C_ -1 C_ "YYY", string("1AbC3YYY2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ 0 C_ string("Z"), string("1Z3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ 0 C_ string("XXXX"), string("1XXXX3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", "abc" C_ 0 C_ "XXXX", string("1XXXX3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", "abc" C_ 3 C_ "XXXX", string("1abc3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", "abc" C_ -3 C_ "XXXX", string("1abc3abc2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("") C_ 0 C_ string("XXXX"), string("1abc3abc2") );
-    TEST_ALGO( replace_nth, "", string("") C_ 0 C_ string("XXXX"), string("") );
-    TEST_ALGO( replace_nth, "", string("") C_ -1 C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_nth, "1abc3abc2", string("abc") C_ 0, string("13abc2") );
-    TEST_ALGO( erase_nth, "1abc3abc2", string("abc") C_ -1, string("1abc32") );
-    TEST_ALGO( erase_nth, "1abc3abc2", string("abc") C_ -3, string("1abc3abc2") );
-    TEST_ALGO( ierase_nth, "1aBc3aBc2", "ABC" C_ 0, string("13aBc2") );
-    TEST_ALGO( ierase_nth, "1aBc3aBc2", "ABC" C_ -1, string("1aBc32") );
-    TEST_ALGO( ierase_nth, "1aBc3aBc2", "ABC" C_ -3, string("1aBc3aBc2") );
-    TEST_ALGO( erase_nth, "1abc3abc2", "abc" C_ 0, string("13abc2") );
-    TEST_ALGO( erase_nth, "1abc3abc2", string("") C_ 0, string("1abc3abc2") );
-    TEST_ALGO( erase_nth, "", string("abc") C_ 0, string("") );
-    TEST_ALGO( erase_nth, "", string("abc") C_ -1, string("") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ 1 C_ string("YYY"), string("1abc3YYY2") );
-    TEST_ALGO( replace_nth, "1abc3abc2", string("abc") C_ 2 C_ string("YYY"), string("1abc3abc2") );
-}
-
-void replace_head_test()
-{
-    // replace head
-    TEST_ALGO( replace_head, "abc3abc2", 3 C_ string("YYY"), string("YYY3abc2") );
-    TEST_ALGO( replace_head, "abc3abc2", -3 C_ string("YYY"), string("YYYbc2") );
-    TEST_ALGO( replace_head, "abc3abc2", 3 C_ "YYY", string("YYY3abc2") );
-    TEST_ALGO( replace_head, "abc", 3 C_ string("Z"), string("Z") );
-    TEST_ALGO( replace_head, "abc", 6 C_ string("XXXX"), string("XXXX") );
-    TEST_ALGO( replace_head, "abc", -6 C_ string("XXXX"), string("abc") );
-    TEST_ALGO( replace_head, "abc3abc2", 0 C_ string("XXXX"), string("abc3abc2") );
-    TEST_ALGO( replace_head, "", 4 C_ string("XXXX"), string("") );
-    TEST_ALGO( replace_head, "", -4 C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_head, "abc3abc2", 3, string("3abc2") );
-    TEST_ALGO( erase_head, "abc3abc2", -3, string("bc2") );
-    TEST_ALGO( erase_head, "abc3abc2", 0, string("abc3abc2") );
-    TEST_ALGO( erase_head, "", 4, string("") );
-    TEST_ALGO( erase_head, "", -4, string("") );
-}
-
-void replace_tail_test()
-{
-    // replace tail
-    TEST_ALGO( replace_tail, "abc3abc", 3 C_ string("YYY"), string("abc3YYY") );
-    TEST_ALGO( replace_tail, "abc3abc", -3 C_ "YYY", string("abcYYY") );
-    TEST_ALGO( replace_tail, "abc", 3 C_ string("Z"), string("Z") );
-    TEST_ALGO( replace_tail, "abc", 6 C_ string("XXXX"), string("XXXX") );
-    TEST_ALGO( replace_tail, "abc", -6 C_ string("XXXX"), string("abc") );
-    TEST_ALGO( replace_tail, "abc3abc", 0 C_ string("XXXX"), string("abc3abc") );
-    TEST_ALGO( replace_tail, "", 4 C_ string("XXXX"), string("") );
-    TEST_ALGO( replace_tail, "", -4 C_ string("XXXX"), string("") );
-    TEST_ALGO( erase_tail, "abc3abc", 3, string("abc3") );
-    TEST_ALGO( erase_tail, "abc3abc", -3, string("abc") );
-    TEST_ALGO( erase_tail, "abc3abc", 0, string("abc3abc") );
-    TEST_ALGO( erase_tail, "", 4, string("") );
-    TEST_ALGO( erase_tail, "", -4, string("") );
-}
-
-void replace_range_test()
-{
-    // replace_range
-    {
-        BOOST_TEST_CHECKPOINT( "replace_range" );
-
-        string str1("1abc3abc2");
-        BOOST_CHECK( 
-            replace_range_copy( 
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4),
-                string("XXX") )==string("1XXX3abc2") );
-
-        string strout;
-        replace_range_copy( 
-                back_inserter( strout ),
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4),
-                string("XXX") );
-        BOOST_CHECK( strout==string("1XXX3abc2") );
-
-        replace_range( 
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4),
-                string("XXX") );
-        BOOST_CHECK( str1==string("1XXX3abc2") );
-    }
-    // erase_range
-    {
-        BOOST_TEST_CHECKPOINT( "erase_range" );
-
-        string str1("1abc3abc2");
-        BOOST_CHECK( 
-        erase_range_copy( 
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4))==string("13abc2") );
-
-        string strout;
-        erase_range_copy( 
-                back_inserter( strout ),
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4));
-        BOOST_CHECK( strout==string("13abc2") );
-
-        erase_range( 
-                str1, 
-                make_iterator_range(str1.begin()+1, str1.begin()+4));
-        BOOST_CHECK( str1==string("13abc2") );
-    }
-}
-
-void collection_comp_test()
-{
-    // container traits compatibility tests
-    {
-        string strout;
-        replace_first_copy( back_inserter(strout), "1abc3abc2", "abc", "YYY" );
-        BOOST_CHECK( strout==string("1YYY3abc2") ); 
-    }
-    {
-        string strout;
-        replace_last_copy( back_inserter(strout), "1abc3abc2", "abc", "YYY" );
-        BOOST_CHECK( strout==string("1abc3YYY2") ); 
-    }   
-    {
-        string strout;
-        replace_all_copy( back_inserter(strout), "1abc3abc2", "abc", "YYY" );
-        BOOST_CHECK( strout==string("1YYY3YYY2") ); 
-    }   
-    {
-        string strout;
-        replace_nth_copy( back_inserter(strout), "1abc3abc2", "abc", 1, "YYY" );
-        BOOST_CHECK( strout==string("1abc3YYY2") ); 
-    }   
-    {
-        string strout;
-        replace_head_copy( back_inserter(strout), "abc3abc2", 3 , "YYY" );
-        BOOST_CHECK( strout==string("YYY3abc2") ); 
-    }   
-    {
-        string strout;
-        replace_tail_copy( back_inserter(strout), "abc3abc", 3 , "YYY" );
-        BOOST_CHECK( strout==string("abc3YYY") ); 
-    }   
-}
-
-void dissect_format_test()
-{
-    BOOST_CHECK(
-        find_format_all_copy(
-            string("aBc123Abc"), 
-            first_finder("abc", is_iequal()), 
-            dissect_formatter(token_finder(is_upper())))=="B123A");
-
-
-    BOOST_CHECK(
-        find_format_all_copy(
-            string("abc   123   abc"),
-            token_finder(is_space(), token_compress_on),
-            dissect_formatter(head_finder(1)))=="abc 123 abc");
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    sequence_traits_test(); 
-    replace_first_test();
-    replace_last_test();
-    replace_all_test();
-    replace_nth_test();
-    replace_head_test();
-    replace_tail_test();
-    replace_range_test();
-    collection_comp_test();
-    dissect_format_test();
-}
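
A minimal sketch of the replace/erase family covered by the removed replace_test.cpp; sample values are illustrative:

    #include <boost/algorithm/string/erase.hpp>
    #include <boost/algorithm/string/replace.hpp>
    #include <cassert>
    #include <string>

    int main() {
        std::string s("1abc3abc2");
        // *_copy variants return a new sequence; the plain forms modify in place.
        assert(boost::algorithm::replace_first_copy(s, std::string("abc"),
                                                    std::string("YYY")) == "1YYY3abc2");
        assert(boost::algorithm::replace_all_copy(s, std::string("abc"),
                                                  std::string("Z")) == "1Z3Z2");
        assert(boost::algorithm::erase_all_copy(s, std::string("abc")) == "132");
        boost::algorithm::ireplace_all(s, "ABC", "xyz");   // case-insensitive, in place
        assert(s == "1xyz3xyz2");
        return 0;
    }
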
diff --git a/third_party/boostorg/algorithm/string/test/split_test.cpp b/third_party/boostorg/algorithm/string/test/split_test.cpp
deleted file mode 100644
index 582472b..0000000
--- a/third_party/boostorg/algorithm/string/test/split_test.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-//  Boost string_algo library split_test.cpp file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/split.hpp>
-#include <boost/algorithm/string/classification.hpp>
-// equals predicate is used for result comparison
-#include <boost/algorithm/string/predicate.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-#include <iostream>
-
-#include <boost/test/test_tools.hpp>
-
-
-using namespace std;
-using namespace boost;
-
-template< typename T1, typename T2 >
-void deep_compare( const T1& X, const T2& Y )
-{
-    BOOST_REQUIRE( X.size() == Y.size() );
-    for( unsigned int nIndex=0; nIndex<X.size(); ++nIndex )
-    {
-        BOOST_CHECK( equals( X[nIndex], Y[nIndex] ) );
-    }
-}
-
-void iterator_test()
-{
-    string str1("xx-abc--xx-abb");
-    string str2("Xx-abc--xX-abb-xx");
-    string str3("xx");
-    string strempty("");
-    const char* pch1="xx-abc--xx-abb";
-    vector<string> tokens;
-    vector< vector<int> > vtokens;
-    
-    // find_all tests
-    find_all(
-        tokens,
-        pch1,
-        "xx" );
-
-    BOOST_REQUIRE( tokens.size()==2 );
-    BOOST_CHECK( tokens[0]==string("xx") );
-    BOOST_CHECK( tokens[1]==string("xx") );
-
-    ifind_all(
-        tokens,
-        str2,
-        "xx" );
-
-    BOOST_REQUIRE( tokens.size()==3 );
-    BOOST_CHECK( tokens[0]==string("Xx") );
-    BOOST_CHECK( tokens[1]==string("xX") );
-    BOOST_CHECK( tokens[2]==string("xx") );
-
-    find_all(
-        tokens,
-        str1,
-        "xx" );
-
-    BOOST_REQUIRE( tokens.size()==2 );
-    BOOST_CHECK( tokens[0]==string("xx") );
-    BOOST_CHECK( tokens[1]==string("xx") );
-
-    find_all(
-        vtokens,
-        str1,
-        string("xx") );
-    deep_compare( tokens, vtokens );
-
-    // split tests
-    split(
-        tokens,
-        str2,
-        is_any_of("xX"),
-        token_compress_on );
-
-    BOOST_REQUIRE( tokens.size()==4 );
-    BOOST_CHECK( tokens[0]==string("") );
-    BOOST_CHECK( tokens[1]==string("-abc--") );
-    BOOST_CHECK( tokens[2]==string("-abb-") );
-    BOOST_CHECK( tokens[3]==string("") );
-
-    split(
-        tokens,
-        pch1,
-        is_any_of("x"),
-        token_compress_on );
-
-    BOOST_REQUIRE( tokens.size()==3 );
-    BOOST_CHECK( tokens[0]==string("") );
-    BOOST_CHECK( tokens[1]==string("-abc--") );
-    BOOST_CHECK( tokens[2]==string("-abb") );
-
-    split(
-        vtokens,
-        str1,
-        is_any_of("x"),
-        token_compress_on );
-    deep_compare( tokens, vtokens );
-
-    split(
-        tokens,
-        str1,
-        is_punct(),
-        token_compress_off );
-
-    BOOST_REQUIRE( tokens.size()==5 );
-    BOOST_CHECK( tokens[0]==string("xx") );
-    BOOST_CHECK( tokens[1]==string("abc") );
-    BOOST_CHECK( tokens[2]==string("") );
-    BOOST_CHECK( tokens[3]==string("xx") );
-    BOOST_CHECK( tokens[4]==string("abb") );
-
-    split(
-        tokens,
-        str3,
-        is_any_of(","),
-        token_compress_off);
-
-    BOOST_REQUIRE( tokens.size()==1 );
-    BOOST_CHECK( tokens[0]==string("xx") );
-
-    split(
-        tokens,
-        strempty,
-        is_punct(),
-        token_compress_off);
-
-    BOOST_REQUIRE( tokens.size()==1 );
-    BOOST_CHECK( tokens[0]==string("") );
-
-
-    find_iterator<string::iterator> fiter=make_find_iterator(str1, first_finder("xx"));
-    find_iterator<string::iterator> fiter2;
-    
-    BOOST_CHECK(equals(*fiter, "xx"));
-    ++fiter;
-    
-    fiter2 = fiter;
-    BOOST_CHECK(equals(*fiter,  "xx"));
-    BOOST_CHECK(equals(*fiter2, "xx"));
-
-    ++fiter;
-    BOOST_CHECK(fiter==find_iterator<string::iterator>());
-    BOOST_CHECK(equals(*fiter2, "xx"));
-
-    ++fiter2;
-    BOOST_CHECK(fiter2==find_iterator<string::iterator>());
-
-    split_iterator<string::iterator> siter=make_split_iterator(str1, token_finder(is_any_of("-"), token_compress_on));
-    split_iterator<string::iterator> siter2;
-    BOOST_CHECK(equals(*siter, "xx"));
-    ++siter;
-
-    siter2 = siter;
-    BOOST_CHECK(equals(*siter,  "abc"));
-    BOOST_CHECK(equals(*siter2, "abc"));
-    
-    ++siter;
-    BOOST_CHECK(equals(*siter,  "xx"));
-    BOOST_CHECK(equals(*siter2, "abc"));
-    
-    ++siter;
-    BOOST_CHECK(equals(*siter, "abb"));
-    ++siter;
-    BOOST_CHECK(siter==split_iterator<string::iterator>(siter));
-    BOOST_CHECK(siter==split_iterator<string::iterator>());
-
-//  Make sure we work with forward iterators
-//  See bug #7989
-    list<char> l1;
-    find_iterator<list<char>::iterator> liter=make_find_iterator(l1, first_finder("xx"));
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    iterator_test();
-}
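
A minimal sketch of split()/find_all() as exercised by the removed split_test.cpp; the input string is illustrative:

    #include <boost/algorithm/string/classification.hpp>
    #include <boost/algorithm/string/split.hpp>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
        std::string str("xx-abc--xx-abb");
        std::vector<std::string> tokens;

        // Split on '-' and merge adjacent separators into one.
        boost::algorithm::split(tokens, str, boost::algorithm::is_any_of("-"),
                                boost::algorithm::token_compress_on);
        assert(tokens.size() == 4 && tokens[1] == "abc");

        // Collect every occurrence of a literal substring.
        boost::algorithm::find_all(tokens, str, "xx");
        assert(tokens.size() == 2);
        return 0;
    }
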
diff --git a/third_party/boostorg/algorithm/string/test/trim_test.cpp b/third_party/boostorg/algorithm/string/test/trim_test.cpp
deleted file mode 100644
index b254caa..0000000
--- a/third_party/boostorg/algorithm/string/test/trim_test.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-//  Boost string_algo library trim_test.cpp file  ---------------------------//
-
-//  Copyright Pavol Droba 2002-2003. Use, modification and
-//  distribution is subject to the Boost Software License, Version
-//  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-
-//  See http://www.boost.org for updates, documentation, and revision history.
-
-#include <boost/algorithm/string/trim.hpp>
-#include <boost/algorithm/string/trim_all.hpp>
-
-// Include unit test framework
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-#include <boost/test/test_tools.hpp>
-
-using namespace std;
-using namespace boost;
-
-void trim_test()
-{
-    string str1("     1x x x x1     ");
-    string str2("     2x x x x2     ");
-    string str3("    ");
-
-    // *** value passing tests *** //
-
-    // general string test
-    BOOST_CHECK( trim_left_copy( str1 )=="1x x x x1     " ) ;
-    BOOST_CHECK( trim_right_copy( str1 )=="     1x x x x1" ) ;
-    BOOST_CHECK( trim_copy( str1 )=="1x x x x1" ) ;
-
-    // spaces-only string test
-    BOOST_CHECK( trim_left_copy( str3 )=="" );
-    BOOST_CHECK( trim_right_copy( str3 )=="" );
-    BOOST_CHECK( trim_copy( str3 )=="" );
-
-    // empty string check 
-    BOOST_CHECK( trim_left_copy( string("") )=="" );
-    BOOST_CHECK( trim_right_copy( string("") )=="" );
-    BOOST_CHECK( trim_copy( string("") )=="" );
-
-    // iterator tests
-    string str;
-    trim_left_copy_if( std::back_inserter(str), str1, is_space() );
-    BOOST_CHECK( str=="1x x x x1     " );
-
-    str.clear();
-    trim_right_copy_if( std::back_inserter(str), str1, is_space() );
-    BOOST_CHECK( str=="     1x x x x1" );
-
-    str.clear();
-    trim_copy_if( std::back_inserter(str), str1, is_space() );
-    BOOST_CHECK( str=="1x x x x1" );
-
-    str.clear();
-    trim_left_copy_if( 
-        std::back_inserter(str), 
-        "     1x x x x1     ", 
-        is_space() );
-    BOOST_CHECK( str=="1x x x x1     " );
-
-    str.clear();
-    trim_right_copy_if( 
-        std::back_inserter(str), 
-        "     1x x x x1     ", 
-        is_space() );
-    BOOST_CHECK( str=="     1x x x x1" );
-
-    str.clear();
-    trim_copy_if( 
-        std::back_inserter(str), 
-        "     1x x x x1     ", 
-        is_space() );
-    BOOST_CHECK( str=="1x x x x1" );
-    // *** inplace tests *** //
-
-    // general string test
-    trim_left( str1 );
-    BOOST_CHECK( str1=="1x x x x1     " );
-    trim_right( str1 );
-    BOOST_CHECK( str1=="1x x x x1" );
-    trim( str2 );
-    BOOST_CHECK( str2=="2x x x x2" );
-    
-    // spaces-only string test
-    str3 = "    "; trim_left( str3 );
-    BOOST_CHECK( str3=="" );
-    str3 = "    "; trim_right( str3 );
-    BOOST_CHECK( str3=="" );
-    str3 = "    "; trim( str3 );
-    BOOST_CHECK( str3=="" );
-
-    // empty string check 
-    str3 = ""; trim_left( str3 );
-    BOOST_CHECK( str3=="" );
-    str3 = ""; trim_right( str3 );
-    BOOST_CHECK( str3=="" );
-    str3 = ""; trim( str3 );
-    BOOST_CHECK( str3=="" );
-
-    // *** non-standard predicate tests *** //
-    BOOST_CHECK( 
-        trim_copy_if( 
-            string("123abc456"), 
-            is_classified(std::ctype_base::digit) )=="abc" );
-    BOOST_CHECK( trim_copy_if( string("<>abc<>"), is_any_of( "<<>>" ) )=="abc" );
-}
-
-void trim_all_test()
-{
-    string str1("     1x   x   x   x1     ");
-    string str2("+---...2x+--x--+x-+-x2...---+");
-    string str3("    ");
-
-    // *** value passing tests *** //
-
-    // general string test
-    BOOST_CHECK( trim_all_copy( str1 )=="1x x x x1" ) ;
-    BOOST_CHECK( trim_all_copy_if( str2, is_punct() )=="2x+x-x-x2" ) ;
-
-    // spaces-only string test
-    BOOST_CHECK( trim_all_copy( str3 )=="" );
-
-    // empty string check 
-    BOOST_CHECK( trim_all_copy( string("") )=="" );
-
-    // general string test
-    trim_all( str1 );
-    BOOST_CHECK( str1=="1x x x x1" ) ;
-    trim_all_if( str2, is_punct() );
-    BOOST_CHECK( str2=="2x+x-x-x2" ) ;
-    
-    // spaces-only string test
-    str3 = "    "; trim_all( str3 );
-    BOOST_CHECK( str3=="" );
-
-    // empty string check 
-    str3 = ""; trim_all( str3 );
-    BOOST_CHECK( str3=="" );
-    BOOST_CHECK( str3=="" );
-
-    // *** non-standard predicate tests *** //
-    BOOST_CHECK( 
-        trim_all_copy_if( 
-            string("123abc127deb456"), 
-            is_classified(std::ctype_base::digit) )=="abc1deb" );
-    BOOST_CHECK( trim_all_copy_if( string("<>abc<>def<>"), is_any_of( "<<>>" ) )=="abc<def" );
-}
-
-void trim_fill_test()
-{
-    string str1("     1x   x   x   x1     ");
-    string str2("+---...2x+--x--+x-+-x2...---+");
-    string str3("    ");
-
-    // *** value passing tests *** //
-
-    // general string test
-    BOOST_CHECK( trim_fill_copy( str1, "-" )=="1x-x-x-x1" ) ;
-    BOOST_CHECK( trim_fill_copy_if( str2, " ", is_punct() )=="2x x x x2" ) ;
-
-    // spaces-only string test
-    BOOST_CHECK( trim_fill_copy( str3, " " )=="" );
-
-    // empty string check 
-    BOOST_CHECK( trim_fill_copy( string(""), " " )=="" );
-
-    // general string test
-    trim_fill( str1, "-" );
-    BOOST_CHECK( str1=="1x-x-x-x1" ) ;
-    trim_fill_if( str2, "", is_punct() );
-    BOOST_CHECK( str2=="2xxxx2" ) ;
-
-    // spaces-only string test
-    str3 = "    "; trim_fill( str3, "" );
-    BOOST_CHECK( str3=="" );
-
-    // empty string check 
-    str3 = ""; trim_fill( str3, "" );
-    BOOST_CHECK( str3=="" );
-    BOOST_CHECK( str3=="" );
-
-    // *** non-standard predicate tests *** //
-    BOOST_CHECK( 
-        trim_fill_copy_if( 
-        string("123abc127deb456"), 
-        "+",
-        is_classified(std::ctype_base::digit) )=="abc+deb" );
-    BOOST_CHECK( trim_fill_copy_if( string("<>abc<>def<>"), "-", is_any_of( "<<>>" ) )=="abc-def" );
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    trim_test();
-    trim_all_test();
-    trim_fill_test();
-}
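
A minimal sketch of the trim family covered by the removed trim_test.cpp; sample strings are illustrative:

    #include <boost/algorithm/string/classification.hpp>
    #include <boost/algorithm/string/trim.hpp>
    #include <cassert>
    #include <string>

    int main() {
        std::string s("   1x x x x1   ");
        assert(boost::algorithm::trim_left_copy(s)  == "1x x x x1   ");
        assert(boost::algorithm::trim_right_copy(s) == "   1x x x x1");
        assert(boost::algorithm::trim_copy(s)       == "1x x x x1");

        // Custom predicate: strip any of the listed characters from both ends.
        assert(boost::algorithm::trim_copy_if(std::string("<>abc<>"),
                                              boost::algorithm::is_any_of("<>")) == "abc");

        boost::algorithm::trim(s);   // in-place variant
        assert(s == "1x x x x1");
        return 0;
    }
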
diff --git a/third_party/boostorg/algorithm/sublibs b/third_party/boostorg/algorithm/sublibs
deleted file mode 100644
index 721d7c4..0000000
--- a/third_party/boostorg/algorithm/sublibs
+++ /dev/null
@@ -1 +0,0 @@
-The existence of this file tells the regression reporting programs that the directory contains sub-directories which are libraries.
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/test/Jamfile.v2 b/third_party/boostorg/algorithm/test/Jamfile.v2
deleted file mode 100644
index 30cb786..0000000
--- a/third_party/boostorg/algorithm/test/Jamfile.v2
+++ /dev/null
@@ -1,94 +0,0 @@
-#  Boost algorithm library test suite Jamfile  ----------------------------
-#
-#  Copyright Marshall Clow 2010-2012. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-#  See http://www.boost.org for updates, documentation, and revision history.
-
-import testing ;
-
-alias unit_test_framework
-    : # sources
-        /boost//unit_test_framework
-    ;
-
-
-{
-  test-suite algorithm:
-# Search tests
-   : [ run empty_search_test.cpp unit_test_framework      : : : : empty_search_test ]
-     [ run search_test1.cpp unit_test_framework           : : : : search_test1 ]
-     [ run search_test2.cpp unit_test_framework           : : : : search_test2 ]
-     [ run search_test3.cpp unit_test_framework           : : : : search_test3 ]
-     [ run search_test4.cpp unit_test_framework           : : : : search_test4 ]
-     [ compile-fail search_fail1.cpp  : : : : ]
-     [ compile-fail search_fail2.cpp  : : : : ]
-     [ compile-fail search_fail3.cpp  : : : : ]
-
-# Misc tests
-     [ run clamp_test.cpp unit_test_framework         : : : : clamp_test ]
-     [ run power_test.cpp unit_test_framework         : : : : power_test ]
-     [ compile-fail power_fail1.cpp  : : : : ]
-
-# Cxx11 tests
-     [ run all_of_test.cpp unit_test_framework         : : : : all_of_test ]
-     [ run any_of_test.cpp unit_test_framework         : : : : any_of_test ]
-     [ run none_of_test.cpp unit_test_framework        : : : : none_of_test ]
-     [ run one_of_test.cpp unit_test_framework         : : : : one_of_test ]
-
-     [ run ordered_test.cpp unit_test_framework        : : : : ordered_test ]
-     [ run find_if_not_test1.cpp unit_test_framework   : : : : find_if_not_test1 ]
-     [ run copy_if_test1.cpp unit_test_framework        : : : : copy_if_test1 ]
-     [ run copy_n_test1.cpp unit_test_framework       : : : : copy_n_test1 ]
-     [ run iota_test1.cpp unit_test_framework          : : : : iota_test1 ]
-
-     [ run is_permutation_test1.cpp unit_test_framework         : : : : is_permutation_test1 ]
-     [ run partition_point_test1.cpp unit_test_framework        : : : : partition_point_test1 ]
-     [ run is_partitioned_test1.cpp unit_test_framework         : : : : is_partitioned_test1 ]
-     [ run partition_copy_test1.cpp unit_test_framework         : : : : partition_copy_test1 ]
-
-# Cxx14 tests
-     [ run equal_test.cpp unit_test_framework         : : : : equal_test ]
-     [ run mismatch_test.cpp unit_test_framework      : : : : mismatch_test ]
-
-# Cxx17 tests
-     [ run for_each_n_test.cpp unit_test_framework         : : : : for_each_n_test ]
-     [ run reduce_test.cpp unit_test_framework             : : : : reduce_test ]
-     [ run transform_reduce_test.cpp unit_test_framework   : : : : transform_reduce_test ]
-     [ run inclusive_scan_test.cpp unit_test_framework             : : : : inclusive_scan_test ]
-     [ run exclusive_scan_test.cpp unit_test_framework             : : : : exclusive_scan_test ]
-     [ run transform_inclusive_scan_test.cpp unit_test_framework   : : : : transform_inclusive_scan_test ]
-     [ run transform_exclusive_scan_test.cpp unit_test_framework   : : : : transform_exclusive_scan_test ]
-# Maybe GCD and LCM as well
-
-# Hex tests
-     [ run hex_test1.cpp unit_test_framework         : : : : hex_test1 ]
-     [ run hex_test2.cpp unit_test_framework         : : : : hex_test2 ]
-     [ run hex_test3.cpp unit_test_framework         : : : : hex_test3 ]
-     [ run hex_test4.cpp unit_test_framework         : : : : hex_test4 ]
-     [ compile-fail hex_fail1.cpp ]
-
-# Gather tests
-     [ run gather_test1.cpp unit_test_framework        : : : : gather_test1 ]
-     [ compile-fail gather_fail1.cpp ]
-
-# SortSubrange tests
-     [ run sort_subrange_test.cpp unit_test_framework       : : : : sort_subrange_test ]
-     [ run partition_subrange_test.cpp unit_test_framework  : : : : partition_subrange_test ]
-
-# Is_palindrome tests
-     [ run is_palindrome_test.cpp unit_test_framework    : : : : is_palindrome_test ]
-
-# Is_partitioned_until tests
-     [ run is_partitioned_until_test.cpp unit_test_framework    : : : : is_partitioned_until_test ]
-   
-# Apply_permutation tests
-     [ run apply_permutation_test.cpp unit_test_framework    : : : : apply_permutation_test ]
-# Find tests
-     [ run find_not_test.cpp unit_test_framework   : : : : find_not_test ]
-     [ run find_backward_test.cpp unit_test_framework   : : : : find_backward_test ]
-   ;
-}
-
diff --git a/third_party/boostorg/algorithm/test/all_of_test.cpp b/third_party/boostorg/algorithm/test/all_of_test.cpp
deleted file mode 100644
index a6fdfd6..0000000
--- a/third_party/boostorg/algorithm/test/all_of_test.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/all_of.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <functional>
-#include <vector>
-#include <list>
-
-template<typename T>
-struct is_ {
-    BOOST_CXX14_CONSTEXPR is_ ( T v ) : val_ ( v ) {}
-    BOOST_CXX14_CONSTEXPR bool operator () ( T comp ) const { return val_ == comp; }
-private:
-    is_ (); // need a value
-
-    T val_;
-    };
-
-namespace ba = boost::algorithm;
-
-void test_all ()
-{
-//  Note: The literal values here are tested against directly, careful if you change them:
-    BOOST_CXX14_CONSTEXPR int some_numbers[] = { 1, 1, 1, 18, 10 };
-    std::vector<int> vi(some_numbers, some_numbers + 5);
-    std::list<int>   li(vi.begin(), vi.end ());
-    
-    int some_letters[] = { 'a', 'q', 'n', 'y', 'n' };
-    std::vector<char> vc(some_letters, some_letters + 5);
-    
-    
-    BOOST_CHECK (!ba::all_of_equal ( vi,                                  1 ));
-    BOOST_CHECK (!ba::all_of       ( vi,                       is_<int> ( 1 )));
-    BOOST_CHECK (!ba::all_of_equal ( vi.begin(),     vi.end(),            1 ));
-    BOOST_CHECK (!ba::all_of       ( vi.begin(),     vi.end(), is_<int> ( 1 )));
-    
-    BOOST_CHECK (!ba::all_of_equal ( vi,                                  0 ));
-    BOOST_CHECK (!ba::all_of       ( vi,                       is_<int> ( 0 )));
-    BOOST_CHECK (!ba::all_of_equal ( vi.begin(),     vi.end(),            0 ));
-    BOOST_CHECK (!ba::all_of       ( vi.begin(),     vi.end(), is_<int> ( 0 )));
-
-    BOOST_CHECK ( ba::all_of_equal ( vi.end(),       vi.end(),            0 ));
-    BOOST_CHECK ( ba::all_of       ( vi.end(),       vi.end(), is_<int> ( 0 )));
-
-    BOOST_CHECK ( ba::all_of_equal ( vi.begin(), vi.begin () + 3,            1 ));
-    BOOST_CHECK ( ba::all_of       ( vi.begin(), vi.begin () + 3, is_<int> ( 1 )));
-    
-    BOOST_CHECK ( ba::all_of_equal ( vc.begin() + 1, vc.begin() + 2,             'q' ));
-    BOOST_CHECK ( ba::all_of       ( vc.begin() + 1, vc.begin() + 2, is_<char> ( 'q' )));
-
-    BOOST_CHECK (!ba::all_of_equal ( vc,             '!' ));
-    BOOST_CHECK (!ba::all_of       ( vc, is_<char> ( '!' )));
-
-    BOOST_CHECK ( ba::all_of_equal ( vi.begin(), vi.begin(),   1 ));
-    BOOST_CHECK ( ba::all_of_equal ( vc.begin(), vc.begin(), 'a' ));
-    BOOST_CHECK ( ba::all_of       ( vi.begin(), vi.begin(), is_<int>  (   1 )));
-    BOOST_CHECK ( ba::all_of       ( vc.begin(), vc.begin(), is_<char> ( 'a' )));
-
-    BOOST_CHECK (!ba::all_of_equal ( li,                                  1 ));
-    BOOST_CHECK (!ba::all_of       ( li,                       is_<int> ( 1 )));
-    BOOST_CHECK (!ba::all_of_equal ( li.begin(),     li.end(),            1 ));
-    BOOST_CHECK (!ba::all_of       ( li.begin(),     li.end(), is_<int> ( 1 )));
-    
-    std::list<int>::iterator l_iter = li.begin ();
-    l_iter++; l_iter++; l_iter++;
-    BOOST_CHECK ( ba::all_of_equal ( li.begin(), l_iter,            1 ));
-    BOOST_CHECK ( ba::all_of       ( li.begin(), l_iter, is_<int> ( 1 )));
-
-    BOOST_CXX14_CONSTEXPR bool constexpr_res =
-        !ba::all_of_equal ( some_numbers, 1 )                               &&
-        !ba::all_of       ( some_numbers, is_<int> ( 1 ))                   &&
-         ba::all_of_equal ( some_numbers, some_numbers + 3,            1 )  &&
-         ba::all_of       ( some_numbers, some_numbers + 3, is_<int> ( 1 )) &&
-        true;
-
-    BOOST_CHECK ( constexpr_res );
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_all ();
-}
diff --git a/third_party/boostorg/algorithm/test/any_of_test.cpp b/third_party/boostorg/algorithm/test/any_of_test.cpp
deleted file mode 100644
index 288c0cc..0000000
--- a/third_party/boostorg/algorithm/test/any_of_test.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/any_of.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <functional>
-#include <vector>
-#include <list>
-
-template<typename T>
-struct is_ {
-    BOOST_CXX14_CONSTEXPR is_ ( T v ) : val_ ( v ) {}
-    BOOST_CXX14_CONSTEXPR bool operator () ( T comp ) const { return val_ == comp; }
-private:
-    is_ (); // need a value
-
-    T val_;
-    };
-
-namespace ba = boost::algorithm;
-
-void test_any ()
-{
-//  Note: The literal values here are tested against directly, careful if you change them:
-    BOOST_CXX14_CONSTEXPR int some_numbers[] = { 1, 5, 0, 18, 10 };
-    std::vector<int> vi(some_numbers, some_numbers + 5);
-    std::list<int>   li(vi.begin(), vi.end ());
-
-    int some_letters[] = { 'a', 'q', 'n', 'y', 'n' };
-    std::vector<char> vc(some_letters, some_letters + 5);
-    
-    BOOST_CHECK ( ba::any_of_equal ( vi,                                   1 ));
-    BOOST_CHECK ( ba::any_of       ( vi,                       is_<int> (  1 )));
-    BOOST_CHECK ( ba::any_of_equal ( vi.begin(),     vi.end(),             1 ));
-    BOOST_CHECK ( ba::any_of       ( vi.begin(),     vi.end(), is_<int> (  1 )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vi,                                   9 ));
-    BOOST_CHECK (!ba::any_of       ( vi,                       is_<int> (  9 )));
-    BOOST_CHECK (!ba::any_of_equal ( vi.begin(),     vi.end(),             9 ));
-    BOOST_CHECK (!ba::any_of       ( vi.begin(),     vi.end(), is_<int> (  9 )));
-
-    BOOST_CHECK ( ba::any_of_equal ( vi,                                  10 ));
-    BOOST_CHECK ( ba::any_of       ( vi,                       is_<int> ( 10 )));
-    BOOST_CHECK (!ba::any_of_equal ( vi,                                   4 ));
-    BOOST_CHECK (!ba::any_of       ( vi,                       is_<int> (  4 )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vi.end(),       vi.end(),            0 ));
-    BOOST_CHECK (!ba::any_of       ( vi.end(),       vi.end(), is_<int> ( 0 )));
-
-//   5 is not in { 0, 18, 10 }, but 10 is
-    BOOST_CHECK ( ba::any_of_equal ( vi.begin() + 2, vi.end(),            10 ));
-    BOOST_CHECK ( ba::any_of       ( vi.begin() + 2, vi.end(), is_<int> ( 10 )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vi.begin() + 2, vi.end(),             5 ));
-    BOOST_CHECK (!ba::any_of       ( vi.begin() + 2, vi.end(), is_<int> (  5 )));
-
-//  18 is not in { 1, 5, 0 }, but 5 is
-    BOOST_CHECK ( ba::any_of_equal ( vi.begin(), vi.begin() + 3,             5 ));
-    BOOST_CHECK ( ba::any_of       ( vi.begin(), vi.begin() + 3, is_<int> (  5 )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vi.begin(), vi.begin() + 3,            18 ));
-    BOOST_CHECK (!ba::any_of       ( vi.begin(), vi.begin() + 3, is_<int> ( 18 )));
-
-    BOOST_CHECK ( ba::any_of_equal ( vc,             'q' ));
-    BOOST_CHECK ( ba::any_of       ( vc, is_<char> ( 'q' )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vc,             '!' ));
-    BOOST_CHECK (!ba::any_of       ( vc, is_<char> ( '!' )));
-
-    BOOST_CHECK ( ba::any_of_equal ( vc,             'n' ));
-    BOOST_CHECK ( ba::any_of       ( vc, is_<char> ( 'n' )));
-
-    BOOST_CHECK (!ba::any_of_equal ( vi.begin(), vi.begin(),   1 ));
-    BOOST_CHECK (!ba::any_of_equal ( vc.begin(), vc.begin(), 'a' ));
-    BOOST_CHECK (!ba::any_of       ( vi.begin(), vi.begin(), is_<int>  (   1 )));
-    BOOST_CHECK (!ba::any_of       ( vc.begin(), vc.begin(), is_<char> ( 'a' )));
-
-    BOOST_CHECK ( ba::any_of_equal ( li,                                   1 ));
-    BOOST_CHECK ( ba::any_of       ( li,                       is_<int> (  1 )));
-    BOOST_CHECK ( ba::any_of_equal ( li.begin(),     li.end(),             1 ));
-    BOOST_CHECK ( ba::any_of       ( li.begin(),     li.end(), is_<int> (  1 )));
-    
-    std::list<int>::iterator l_iter = li.begin ();
-    l_iter++; l_iter++; l_iter++;
-    BOOST_CHECK ( ba::any_of_equal ( li.begin(), l_iter,             5 ));
-    BOOST_CHECK ( ba::any_of       ( li.begin(), l_iter, is_<int> (  5 )));
-    BOOST_CHECK (!ba::any_of_equal ( li.begin(), l_iter,            18 ));
-    BOOST_CHECK (!ba::any_of       ( li.begin(), l_iter, is_<int> ( 18 )));
-    
-    BOOST_CXX14_CONSTEXPR bool constexpr_res =
-         ba::any_of_equal ( some_numbers, 1 )                                 &&
-         ba::any_of       ( some_numbers, is_<int> ( 1 ))                     &&
-        !ba::any_of_equal ( some_numbers, some_numbers + 3,            777 )  &&
-        !ba::any_of       ( some_numbers, some_numbers + 3, is_<int> ( 777 )) &&
-        true;
-
-    BOOST_CHECK ( constexpr_res );
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_any ();
-}
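
A minimal sketch of the C++11-style range checks from the removed all_of_test.cpp and any_of_test.cpp; the vector contents are illustrative:

    #include <boost/algorithm/cxx11/all_of.hpp>
    #include <boost/algorithm/cxx11/any_of.hpp>
    #include <cassert>
    #include <vector>

    int main() {
        namespace ba = boost::algorithm;
        std::vector<int> v{1, 5, 0, 18, 10};

        assert( ba::any_of_equal(v, 18));                     // some element == 18
        assert(!ba::all_of_equal(v, 1));                      // not every element == 1
        assert( ba::any_of(v, [](int x) { return x > 9; }));  // predicate form
        assert( ba::all_of(v, [](int x) { return x < 20; }));
        return 0;
    }
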
diff --git a/third_party/boostorg/algorithm/test/apply_permutation_test.cpp b/third_party/boostorg/algorithm/test/apply_permutation_test.cpp
deleted file mode 100644
index e9ab970..0000000
--- a/third_party/boostorg/algorithm/test/apply_permutation_test.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
-  Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.com>, 2017
-
-  Distributed under the Boost Software License, Version 1.0. (See
-  accompanying file LICENSE_1_0.txt or copy at
-  http://www.boost.org/LICENSE_1_0.txt)
-
-  See http://www.boost.org/ for latest version.
-*/
-
-#include <vector>
-
-#include <boost/algorithm/apply_permutation.hpp>
-
-#define BOOST_TEST_DYN_LINK
-#define BOOST_TEST_MAIN
-
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-
-void test_apply_permutation()
-{
-    //Empty
-    {
-        std::vector<int> vec, order, result;
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //1 element
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1);
-        order.push_back(0);
-        result = vec;
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //2 elements, no changes
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2);
-        order.push_back(0); order.push_back(1);
-        result = vec;
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //2 elements, changed
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2);
-        order.push_back(1); order.push_back(0);
-        result.push_back(2); result.push_back(1);
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Multiple elements, no changes
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(0); order.push_back(1); order.push_back(2); order.push_back(3); order.push_back(4);
-        result = vec;
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Multiple elements, changed
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(4); order.push_back(3); order.push_back(2); order.push_back(1); order.push_back(0);
-        result.push_back(5); result.push_back(4); result.push_back(3); result.push_back(2); result.push_back(1);
-        
-        ba::apply_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Just test range interface
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(0); order.push_back(1); order.push_back(2); order.push_back(3); order.push_back(4);
-        result = vec;
-        
-        ba::apply_permutation(vec, order);
-        BOOST_CHECK(vec == result);
-    }
-}
-
-void test_apply_reverse_permutation()
-{
-    //Empty
-    {
-        std::vector<int> vec, order, result;
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //1 element
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1);
-        order.push_back(0);
-        result = vec;
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //2 elements, no changes
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2);
-        order.push_back(0); order.push_back(1);
-        result = vec;
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //2 elements, changed
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2);
-        order.push_back(1); order.push_back(0);
-        result.push_back(2); result.push_back(1);
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Multiple elements, no changes
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(0); order.push_back(1); order.push_back(2); order.push_back(3); order.push_back(4);
-        result = vec;
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Multiple elements, changed
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(4); order.push_back(3); order.push_back(2); order.push_back(1); order.push_back(0);
-        result.push_back(5); result.push_back(4); result.push_back(3); result.push_back(2); result.push_back(1);
-        
-        ba::apply_reverse_permutation(vec.begin(), vec.end(), order.begin(), order.end());
-        BOOST_CHECK(vec == result);
-    }
-    //Just test range interface
-    {
-        std::vector<int> vec, order, result;
-        vec.push_back(1); vec.push_back(2); vec.push_back(3); vec.push_back(4); vec.push_back(5);
-        order.push_back(0); order.push_back(1); order.push_back(2); order.push_back(3); order.push_back(4);
-        result = vec;
-        
-        ba::apply_reverse_permutation(vec, order);
-        BOOST_CHECK(vec == result);
-    }
-}
-
-BOOST_AUTO_TEST_CASE(test_main)
-{
-    test_apply_permutation();
-    test_apply_reverse_permutation();
-}
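
A minimal sketch of apply_permutation() from the removed apply_permutation_test.cpp; the item values are illustrative and the order sequence may be modified by the call:

    #include <boost/algorithm/apply_permutation.hpp>
    #include <cassert>
    #include <vector>

    int main() {
        std::vector<int> items{10, 20, 30, 40, 50};
        std::vector<int> order{4, 3, 2, 1, 0};

        // After the call, items[i] holds the element that was at position order[i].
        boost::algorithm::apply_permutation(items, order);
        std::vector<int> expected{50, 40, 30, 20, 10};
        assert(items == expected);
        return 0;
    }
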
diff --git a/third_party/boostorg/algorithm/test/clamp_test.cpp b/third_party/boostorg/algorithm/test/clamp_test.cpp
deleted file mode 100644
index a6e73b5..0000000
--- a/third_party/boostorg/algorithm/test/clamp_test.cpp
+++ /dev/null
@@ -1,328 +0,0 @@
-//  (C) Copyright Jesse Williamson 2009
-//  Use, modification and distribution are subject to the
-//  Boost Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-#include <iostream>
-#include <vector>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/clamp.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-BOOST_CONSTEXPR bool intGreater    ( int lhs, int rhs )       { return lhs > rhs; }
-BOOST_CONSTEXPR bool doubleGreater ( double lhs, double rhs ) { return lhs > rhs; }
-
-class custom {
-public:
-    custom ( int x )             : v(x)     {}
-    custom ( const custom &rhs ) : v(rhs.v) {}
-    ~custom () {}
-    custom & operator = ( const custom &rhs ) { v = rhs.v; return *this; }
-    
-    bool operator <  ( const custom &rhs ) const { return v < rhs.v; }
-    bool operator == ( const custom &rhs ) const { return v == rhs.v; }     // need this for the test
-    
-    std::ostream & print ( std::ostream &os ) const { return os << v; }
-    
-    int v;
-    };
-
-std::ostream & operator << ( std::ostream & os, const custom &x ) { return x.print ( os ); }
-
-bool customLess ( const custom &lhs, const custom &rhs ) { return lhs.v < rhs.v; }
-
-void test_ints()
-{
-
-//  Inside the range, equal to the endpoints, and outside the endpoints.
-    BOOST_CHECK_EQUAL (  3, ba::clamp (  3, 1, 10 ));
-    BOOST_CHECK_EQUAL (  1, ba::clamp (  1, 1, 10 ));
-    BOOST_CHECK_EQUAL (  1, ba::clamp (  0, 1, 10 ));
-    BOOST_CHECK_EQUAL ( 10, ba::clamp ( 10, 1, 10 ));
-    BOOST_CHECK_EQUAL ( 10, ba::clamp ( 11, 1, 10 ));
-    BOOST_CXX14_CONSTEXPR bool constexpr_res = (
-        ba::clamp (  3, 1, 10 ) == 3
-    );
-    BOOST_CHECK( constexpr_res );
-    
-    BOOST_CHECK_EQUAL (  3, ba::clamp (  3, 10, 1, intGreater ));
-    BOOST_CHECK_EQUAL (  1, ba::clamp (  1, 10, 1, intGreater ));
-    BOOST_CHECK_EQUAL (  1, ba::clamp (  0, 10, 1, intGreater ));
-    BOOST_CHECK_EQUAL ( 10, ba::clamp ( 10, 10, 1, intGreater ));
-    BOOST_CHECK_EQUAL ( 10, ba::clamp ( 11, 10, 1, intGreater ));
-
-//  Negative numbers
-    BOOST_CHECK_EQUAL (  -3, ba::clamp (  -3, -10, -1 ));
-    BOOST_CHECK_EQUAL (  -1, ba::clamp (  -1, -10, -1 ));
-    BOOST_CHECK_EQUAL (  -1, ba::clamp (   0, -10, -1 ));
-    BOOST_CHECK_EQUAL ( -10, ba::clamp ( -10, -10, -1 ));
-    BOOST_CHECK_EQUAL ( -10, ba::clamp ( -11, -10, -1 ));
-
-//  Mixed positive and negative numbers
-    BOOST_CHECK_EQUAL (   5, ba::clamp (   5, -10, 10 ));
-    BOOST_CHECK_EQUAL ( -10, ba::clamp ( -10, -10, 10 ));
-    BOOST_CHECK_EQUAL ( -10, ba::clamp ( -15, -10, 10 ));
-    BOOST_CHECK_EQUAL (  10, ba::clamp (  10, -10, 10 ));
-    BOOST_CHECK_EQUAL (  10, ba::clamp (  15, -10, 10 ));
-
-//  Unsigned 
-    BOOST_CHECK_EQUAL (  5U, ba::clamp (  5U, 1U, 10U ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  1U, 1U, 10U ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  0U, 1U, 10U ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 10U, 1U, 10U ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 15U, 1U, 10U ));
-    
-//  Mixed (1)
-    BOOST_CHECK_EQUAL (  5U, ba::clamp (  5U, 1,  10 ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  1U, 1,  10 ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  0U, 1,  10 ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 10U, 1,  10 ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 15U, 1,  10 ));
-    
-//  Mixed (3)
-    BOOST_CHECK_EQUAL (  5U, ba::clamp (  5U, 1,  10. ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  1U, 1,  10. ));
-    BOOST_CHECK_EQUAL (  1U, ba::clamp (  0U, 1,  10. ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 10U, 1,  10. ));
-    BOOST_CHECK_EQUAL ( 10U, ba::clamp ( 15U, 1,  10. ));
-    
-    short foo = 50;
-    BOOST_CHECK_EQUAL ( 56,     ba::clamp ( foo, 56.9, 129 ));
-    BOOST_CHECK_EQUAL ( 24910,  ba::clamp ( foo, 12345678, 123456999 ));
-    }
-
-
-void test_floats()
-{
-
-//  Inside the range, equal to the endpoints, and outside the endpoints.
-    BOOST_CHECK_EQUAL (  3.0, ba::clamp (  3.0, 1.0, 10.0 ));
-    BOOST_CHECK_EQUAL (  1.0, ba::clamp (  1.0, 1.0, 10.0 ));
-    BOOST_CHECK_EQUAL (  1.0, ba::clamp (  0.0, 1.0, 10.0 ));
-    BOOST_CHECK_EQUAL ( 10.0, ba::clamp ( 10.0, 1.0, 10.0 ));
-    BOOST_CHECK_EQUAL ( 10.0, ba::clamp ( 11.0, 1.0, 10.0 ));
-    
-    BOOST_CHECK_EQUAL (  3.0, ba::clamp (  3.0, 10.0, 1.0, doubleGreater ));
-    BOOST_CHECK_EQUAL (  1.0, ba::clamp (  1.0, 10.0, 1.0, doubleGreater ));
-    BOOST_CHECK_EQUAL (  1.0, ba::clamp (  0.0, 10.0, 1.0, doubleGreater ));
-    BOOST_CHECK_EQUAL ( 10.0, ba::clamp ( 10.0, 10.0, 1.0, doubleGreater ));
-    BOOST_CHECK_EQUAL ( 10.0, ba::clamp ( 11.0, 10.0, 1.0, doubleGreater ));
-
-//  Negative numbers
-    BOOST_CHECK_EQUAL (  -3.f, ba::clamp (  -3.f, -10.f, -1.f ));
-    BOOST_CHECK_EQUAL (  -1.f, ba::clamp (  -1.f, -10.f, -1.f ));
-    BOOST_CHECK_EQUAL (  -1.f, ba::clamp (   0.f, -10.f, -1.f ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -10.f, -10.f, -1.f ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -11.f, -10.f, -1.f ));
-
-//  Mixed positive and negative numbers
-    BOOST_CHECK_EQUAL (   5.f, ba::clamp (   5.f, -10.f, 10.f ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -10.f, -10.f, 10.f ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -15.f, -10.f, 10.f ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  10.f, -10.f, 10.f ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  15.f, -10.f, 10.f ));
-
-//  Mixed (1)
-    BOOST_CHECK_EQUAL (   5.f, ba::clamp (   5.f, -10., 10. ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -10.f, -10., 10. ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -15.f, -10., 10. ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  10.f, -10., 10. ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  15.f, -10., 10. ));
-
-//  Mixed (2)
-    BOOST_CHECK_EQUAL (   5.f, ba::clamp (   5.f, -10, 10 ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -10.f, -10, 10 ));
-    BOOST_CHECK_EQUAL ( -10.f, ba::clamp ( -15.f, -10, 10 ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  10.f, -10, 10 ));
-    BOOST_CHECK_EQUAL (  10.f, ba::clamp (  15.f, -10, 10 ));
-}
-
-void test_custom()
-{
-
-//  Inside the range, equal to the endpoints, and outside the endpoints.
-    BOOST_CHECK_EQUAL ( custom( 3), ba::clamp ( custom( 3), custom(1), custom(10)));
-    BOOST_CHECK_EQUAL ( custom( 1), ba::clamp ( custom( 1), custom(1), custom(10)));
-    BOOST_CHECK_EQUAL ( custom( 1), ba::clamp ( custom( 0), custom(1), custom(10)));
-    BOOST_CHECK_EQUAL ( custom(10), ba::clamp ( custom(10), custom(1), custom(10)));
-    BOOST_CHECK_EQUAL ( custom(10), ba::clamp ( custom(11), custom(1), custom(10)));
-
-    BOOST_CHECK_EQUAL ( custom( 3), ba::clamp ( custom( 3), custom(1), custom(10), customLess ));
-    BOOST_CHECK_EQUAL ( custom( 1), ba::clamp ( custom( 1), custom(1), custom(10), customLess ));
-    BOOST_CHECK_EQUAL ( custom( 1), ba::clamp ( custom( 0), custom(1), custom(10), customLess ));
-    BOOST_CHECK_EQUAL ( custom(10), ba::clamp ( custom(10), custom(1), custom(10), customLess ));
-    BOOST_CHECK_EQUAL ( custom(10), ba::clamp ( custom(11), custom(1), custom(10), customLess ));
-
-//  Fail!!
-//  BOOST_CHECK_EQUAL ( custom(1), ba::clamp ( custom(11), custom(1), custom(10)));
-}
-
-#define elementsof(v)   (sizeof (v) / sizeof (v[0]))
-#define a_begin(v)      (&v[0])
-#define a_end(v)        (v + elementsof (v))
-#define a_range(v)      v
-#define b_e(v)          a_begin(v),a_end(v)
-
-void test_int_range ()
-{
-    int inputs []  = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 19, 99, 999, -1, -3, -99, 234234 };
-    int outputs [] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10,  10, -1, -1, -1,  10 };
-    std::vector<int> results;
-    std::vector<int> in_v;
-    
-    std::copy ( a_begin(inputs), a_end(inputs), std::back_inserter ( in_v ));
-    
-    ba::clamp_range ( a_begin(inputs), a_end(inputs), std::back_inserter ( results ), -1, 10 );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-    ba::clamp_range ( in_v.begin (), in_v.end (), std::back_inserter ( results ), -1, 10 );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-
-    ba::clamp_range ( a_begin(inputs), a_end(inputs), std::back_inserter ( results ), 10, -1, intGreater );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-    ba::clamp_range ( in_v.begin (), in_v.end (), std::back_inserter ( results ), 10, -1, intGreater );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-
-    ba::clamp_range ( a_range(inputs), std::back_inserter ( results ), -1, 10 );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-    ba::clamp_range ( in_v, std::back_inserter ( results ), -1, 10 );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-        
-    ba::clamp_range ( a_range(inputs), std::back_inserter ( results ), 10, -1, intGreater );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-    ba::clamp_range ( in_v, std::back_inserter ( results ), 10, -1, intGreater );
-    BOOST_CHECK ( std::equal ( results.begin(), results.end (), outputs ));
-    results.clear ();
-    
-    int junk[elementsof(inputs)];
-    ba::clamp_range ( inputs, junk, 10, -1, intGreater );
-    BOOST_CHECK ( std::equal ( b_e(junk), outputs ));
-}
-
-void test_constexpr()
-{
-
-//  Inside the range, equal to the endpoints, and outside the endpoints.
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (3  == ba::clamp (  3, 1, 10 ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (1  == ba::clamp (  1, 1, 10 ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (1  == ba::clamp (  0, 1, 10 ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10 == ba::clamp ( 10, 1, 10 ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10 == ba::clamp ( 11, 1, 10 ));
-        BOOST_CHECK(check_max_out);
-    }
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (3  == ba::clamp (  3, 10, 1, intGreater ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (1  == ba::clamp (  1, 10, 1, intGreater ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (1  == ba::clamp (  0, 10, 1, intGreater ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10 == ba::clamp ( 10, 10, 1, intGreater ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10 == ba::clamp ( 11, 10, 1, intGreater ));
-        BOOST_CHECK(check_max_out);
-    }
-
-//  Negative numbers
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (-3  == ba::clamp  (  -3, -10, -1 ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (-1  == ba::clamp  (  -1, -10, -1 ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (-1  == ba::clamp  (   0, -10, -1 ));
-        BOOST_CHECK(check_max_out);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (-10 == ba::clamp ( -10, -10, -1 ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (-10 == ba::clamp ( -11, -10, -1 ));
-        BOOST_CHECK(check_min_out);
-    }
-
-//  Mixed positive and negative numbers
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (5   == ba::clamp (   5, -10, 10 ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (-10 == ba::clamp ( -10, -10, 10 ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (-10 == ba::clamp ( -15, -10, 10 ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10  == ba::clamp (  10, -10, 10 ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10  == ba::clamp (  15, -10, 10 ));
-        BOOST_CHECK(check_max_out);
-    }
-//  Unsigned
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (5U  == ba::clamp (  5U, 1U, 10U ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (1U  == ba::clamp (  1U, 1U, 10U ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (1U  == ba::clamp (  0U, 1U, 10U ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10U == ba::clamp ( 10U, 1U, 10U ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10U == ba::clamp ( 15U, 1U, 10U ));
-        BOOST_CHECK(check_max_out);
-    }
-//  Mixed (1)
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (5U  == ba::clamp (  5U, 1,  10 ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (1U  == ba::clamp (  1U, 1,  10 ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (1U  == ba::clamp (  0U, 1,  10 ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10U == ba::clamp ( 10U, 1,  10 ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10U == ba::clamp ( 15U, 1,  10 ));
-        BOOST_CHECK(check_max_out);
-    }  
-//  Mixed (3)
-    {
-        BOOST_CXX14_CONSTEXPR bool check_inside  = (5U  == ba::clamp (  5U, 1,  10. ));
-        BOOST_CHECK(check_inside);
-        BOOST_CXX14_CONSTEXPR bool check_min     = (1U  == ba::clamp (  1U, 1,  10. ));
-        BOOST_CHECK(check_min);
-        BOOST_CXX14_CONSTEXPR bool check_min_out = (1U  == ba::clamp (  0U, 1,  10. ));
-        BOOST_CHECK(check_min_out);
-        BOOST_CXX14_CONSTEXPR bool check_max     = (10U == ba::clamp ( 10U, 1,  10. ));
-        BOOST_CHECK(check_max);
-        BOOST_CXX14_CONSTEXPR bool check_max_out = (10U == ba::clamp ( 15U, 1,  10. ));
-        BOOST_CHECK(check_max_out);
-    }
-    {
-        BOOST_CXX14_CONSTEXPR short foo = 50;
-        BOOST_CXX14_CONSTEXPR bool check_float   = ( 56    == ba::clamp ( foo, 56.9, 129 ));
-        BOOST_CHECK(check_float);
-        BOOST_CXX14_CONSTEXPR bool check_over    = ( 24910 == ba::clamp ( foo, 12345678, 123456999 ));
-        BOOST_CHECK(check_over);
-    }
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    test_ints ();
-    test_floats ();
-    test_custom ();
-    
-    test_int_range ();
-
-    test_constexpr ();
-//    test_float_range ();
-//    test_custom_range ();
-}
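A minimal standalone sketch of the clamp interfaces the deleted test exercised, assuming <boost/algorithm/clamp.hpp> from the removed tree is still reachable; the input values here are illustrative.

#include <boost/algorithm/clamp.hpp>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    namespace ba = boost::algorithm;

    // clamp(value, lo, hi): the value is pinned into [lo, hi].
    assert(ba::clamp( 3, 1, 10) ==  3);
    assert(ba::clamp( 0, 1, 10) ==  1);
    assert(ba::clamp(11, 1, 10) == 10);

    // With a custom ordering the bounds are expressed in that ordering,
    // which is why the test above passes (hi, lo, greater-than) triples.
    assert(ba::clamp(11, 10, 1, [](int a, int b) { return a > b; }) == 10);

    // clamp_range clamps every element of a sequence into [lo, hi].
    const std::vector<int> in = {0, 5, 19, -3, 99};
    std::vector<int> out;
    ba::clamp_range(in.begin(), in.end(), std::back_inserter(out), -1, 10);
    assert(out == (std::vector<int>{0, 5, 10, -1, 10}));
    return 0;
}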
diff --git a/third_party/boostorg/algorithm/test/copy_if_test1.cpp b/third_party/boostorg/algorithm/test/copy_if_test1.cpp
deleted file mode 100644
index b275f5f..0000000
--- a/third_party/boostorg/algorithm/test/copy_if_test1.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/copy_if.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <algorithm>
-#include <string>
-#include <iostream>
-#include <vector>
-#include <list>
-
-#include <boost/algorithm/cxx11/all_of.hpp>
-#include <boost/algorithm/cxx14/equal.hpp>
-#include <boost/algorithm/cxx11/none_of.hpp>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-BOOST_CXX14_CONSTEXPR bool is_true  ( int v ) { return true; }
-BOOST_CXX14_CONSTEXPR bool is_false ( int v ) { return false; }
-BOOST_CXX14_CONSTEXPR bool is_even  ( int v ) { return v % 2 == 0; }
-BOOST_CXX14_CONSTEXPR bool is_odd   ( int v ) { return v % 2 == 1; }
-BOOST_CXX14_CONSTEXPR bool is_zero  ( int v ) { return v == 0; }
-
-
-template <typename Container>
-void test_copy_if ( Container const &c ) {
-
-    typedef typename Container::value_type value_type;
-    std::vector<value_type> v;
-    
-//  None of the elements
-    v.clear ();
-    ba::copy_if ( c.begin (), c.end (), back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == 0 );
-
-    v.clear ();
-    ba::copy_if ( c, back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == 0 );
-
-//  All the elements
-    v.clear ();
-    ba::copy_if ( c.begin (), c.end (), back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    v.clear ();
-    ba::copy_if ( c, back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-//  Some of the elements
-    v.clear ();
-    ba::copy_if ( c.begin (), c.end (), back_inserter ( v ), is_even );
-    BOOST_CHECK ( v.size () == (size_t) std::count_if ( c.begin (), c.end (), is_even ));
-    BOOST_CHECK ( ba::all_of ( v.begin (), v.end (), is_even ));
-
-    v.clear ();
-    ba::copy_if ( c, back_inserter ( v ), is_even );
-    BOOST_CHECK ( v.size () == (size_t) std::count_if ( c.begin (), c.end (), is_even ));
-    BOOST_CHECK ( ba::all_of ( v.begin (), v.end (), is_even ));
-    }
-
-
-template <typename Container>
-void test_copy_while ( Container const &c ) {
-
-    typedef typename Container::value_type value_type;
-    typename Container::const_iterator it;
-    std::vector<value_type> v;
-    
-//  None of the elements
-    v.clear ();
-    ba::copy_while ( c.begin (), c.end (), back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == 0 );
-    
-    v.clear ();
-    ba::copy_while ( c, back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == 0 );
-
-//  All the elements
-    v.clear ();
-    ba::copy_while ( c.begin (), c.end (), back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    v.clear ();
-    ba::copy_while ( c, back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-//  Some of the elements
-    v.clear ();
-    it = ba::copy_while ( c.begin (), c.end (), back_inserter ( v ), is_even ).first;
-    BOOST_CHECK ( v.size () == (size_t) std::distance ( c.begin (), it ));
-    BOOST_CHECK ( it == c.end () || !is_even ( *it ));
-    BOOST_CHECK ( ba::all_of ( v.begin (), v.end (), is_even ));
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    v.clear ();
-    it = ba::copy_while ( c, back_inserter ( v ), is_even ).first;
-    BOOST_CHECK ( v.size () == (size_t) std::distance ( c.begin (), it ));
-    BOOST_CHECK ( it == c.end () || !is_even ( *it ));
-    BOOST_CHECK ( ba::all_of ( v.begin (), v.end (), is_even ));
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-    }
-
-template <typename Container>
-void test_copy_until ( Container const &c ) {
-
-    typedef typename Container::value_type value_type;
-    typename Container::const_iterator it;
-    std::vector<value_type> v;
-    
-//  None of the elements
-    v.clear ();
-    ba::copy_until ( c.begin (), c.end (), back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == 0 );
-
-    v.clear ();
-    ba::copy_until ( c, back_inserter ( v ), is_true);
-    BOOST_CHECK ( v.size () == 0 );
-
-//  All the elements
-    v.clear ();
-    ba::copy_until ( c.begin (), c.end (), back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    v.clear ();
-    ba::copy_until ( c, back_inserter ( v ), is_false);
-    BOOST_CHECK ( v.size () == c.size ());
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-//  Some of the elements
-    v.clear ();
-    it = ba::copy_until ( c.begin (), c.end (), back_inserter ( v ), is_even ).first;
-    BOOST_CHECK ( v.size () == (size_t) std::distance ( c.begin (), it ));
-    BOOST_CHECK ( it == c.end () || is_even ( *it ));
-    BOOST_CHECK ( ba::none_of ( v.begin (), v.end (), is_even ));
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    v.clear ();
-    it = ba::copy_until ( c, back_inserter ( v ), is_even ).first;
-    BOOST_CHECK ( v.size () == (size_t) std::distance ( c.begin (), it ));
-    BOOST_CHECK ( it == c.end () || is_even ( *it ));
-    BOOST_CHECK ( ba::none_of ( v.begin (), v.end (), is_even ));
-    BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-    }
-
-
-BOOST_CXX14_CONSTEXPR inline bool constexpr_test_copy_if() {
-    const int sz = 64;
-    int in_data[sz] = {0};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + sz;
-    
-    int out_data[sz] = {0};
-    int* out = out_data;
-    out = ba::copy_if ( from, to, out, is_false ); // copy none
-    res = (res && out == out_data);
-    
-    out = ba::copy_if ( from, to, out, is_true ); // copy all
-    res = (res && out == out_data + sz
-           && ba::equal( input_iterator<const int *>(out_data),  input_iterator<const int *>(out_data + sz), 
-                         input_iterator<const int *>(from), input_iterator<const int *>(to)));
-    
-    return res;
-    }
-
-BOOST_CXX14_CONSTEXPR inline bool constexpr_test_copy_while() {
-    const int sz = 64;
-    int in_data[sz] = {0};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + sz;
-    
-    int out_data[sz] = {0};
-    int* out = out_data;
-    out = ba::copy_while ( from, to, out, is_false ).second; // copy none
-    res = (res && out == out_data && ba::all_of(out, out + sz, is_zero));
-    
-    out = ba::copy_while ( from, to, out, is_true ).second; // copy all
-    res = (res && out == out_data + sz
-           && ba::equal( input_iterator<const int *>(out_data),  input_iterator<const int *>(out_data + sz), 
-                         input_iterator<const int *>(from), input_iterator<const int *>(to)));
-    
-    return res;
-    }
-
-BOOST_CXX14_CONSTEXPR inline bool constexpr_test_copy_until() {
-    const int sz = 64;
-    int in_data[sz] = {0};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + sz;
-    
-    int out_data[sz] = {0};
-    int* out = out_data;
-    out = ba::copy_until ( from, to, out, is_true ).second; // copy none
-    res = (res && out == out_data && ba::all_of(out, out + sz, is_zero));
-    
-    out = ba::copy_until ( from, to, out, is_false ).second; // copy all
-    res = (res && out == out_data + sz
-           && ba::equal( input_iterator<const int *>(out_data),  input_iterator<const int *>(out_data + sz), 
-                         input_iterator<const int *>(from), input_iterator<const int *>(to)));
-    
-    return res;
-    }
-    
-    
-void test_sequence1 () {
-    std::vector<int> v;
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_copy_if ( v );
-    test_copy_while ( v );
-    test_copy_until ( v );
-    
-    BOOST_CXX14_CONSTEXPR bool constexpr_res_if = constexpr_test_copy_if();
-    BOOST_CHECK ( constexpr_res_if );
-    BOOST_CXX14_CONSTEXPR bool constexpr_res_while = constexpr_test_copy_while();
-    BOOST_CHECK ( constexpr_res_while );
-    BOOST_CXX14_CONSTEXPR bool constexpr_res_until = constexpr_test_copy_until();
-    BOOST_CHECK ( constexpr_res_until );
-    
-    std::list<int> l;
-    for ( int i = 25; i > 15; --i )
-        l.push_back ( i );
-    test_copy_if ( l );
-    test_copy_while ( l );
-    test_copy_until ( l );
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
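The deleted test above covers the three cxx11 copy variants; the following sketch shows their basic contracts with made-up data, assuming the <boost/algorithm/cxx11/copy_if.hpp> header it included is still available.

#include <boost/algorithm/cxx11/copy_if.hpp>
#include <cassert>
#include <iterator>
#include <vector>

bool is_even(int v) { return v % 2 == 0; }

int main() {
    namespace ba = boost::algorithm;
    const std::vector<int> src = {2, 4, 6, 7, 8};
    std::vector<int> dst;

    // copy_if copies exactly the elements that satisfy the predicate.
    ba::copy_if(src.begin(), src.end(), std::back_inserter(dst), is_even);
    assert(dst == (std::vector<int>{2, 4, 6, 8}));

    // copy_while stops at the first element that fails the predicate and
    // returns an (input, output) iterator pair -- the test reads .first.
    dst.clear();
    std::vector<int>::const_iterator it =
        ba::copy_while(src, std::back_inserter(dst), is_even).first;
    assert(dst == (std::vector<int>{2, 4, 6}) && *it == 7);

    // copy_until is the complement: copy until the predicate becomes true.
    dst.clear();
    ba::copy_until(src, std::back_inserter(dst), [](int v) { return v > 5; });
    assert(dst == (std::vector<int>{2, 4}));
    return 0;
}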
diff --git a/third_party/boostorg/algorithm/test/copy_n_test1.cpp b/third_party/boostorg/algorithm/test/copy_n_test1.cpp
deleted file mode 100644
index 68284b3..0000000
--- a/third_party/boostorg/algorithm/test/copy_n_test1.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/copy_n.hpp>
-#include <boost/algorithm/cxx14/equal.hpp>
-#include <boost/algorithm/cxx11/all_of.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-BOOST_CXX14_CONSTEXPR bool is_zero( int v ) { return v == 0; }
-
-template <typename Container>
-void test_sequence ( Container const &c ) {
-
-    typedef typename Container::value_type value_type;
-    std::vector<value_type> v;
-    
-//  Copy zero elements
-    v.clear ();
-    ba::copy_n ( c.begin (), 0, back_inserter ( v ));
-    BOOST_CHECK ( v.size () == 0 );
-    ba::copy_n ( c.begin (), 0U, back_inserter ( v ));
-    BOOST_CHECK ( v.size () == 0 );
-
-    if ( c.size () > 0 ) {  
-    //  Just one element
-        v.clear ();
-        ba::copy_n ( c.begin (), 1, back_inserter ( v ));
-        BOOST_CHECK ( v.size () == 1 );
-        BOOST_CHECK ( v[0] == *c.begin ());
-        
-        v.clear ();
-        ba::copy_n ( c.begin (), 1U, back_inserter ( v ));
-        BOOST_CHECK ( v.size () == 1 );
-        BOOST_CHECK ( v[0] == *c.begin ());
-
-    //  Half the elements
-        v.clear ();
-        ba::copy_n ( c.begin (), c.size () / 2, back_inserter ( v ));
-        BOOST_CHECK ( v.size () == c.size () / 2);
-        BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-
-    //  Half the elements + 1
-        v.clear ();
-        ba::copy_n ( c.begin (), c.size () / 2 + 1, back_inserter ( v ));
-        BOOST_CHECK ( v.size () == c.size () / 2 + 1 );
-        BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-    
-    //  All the elements
-        v.clear ();
-        ba::copy_n ( c.begin (), c.size (), back_inserter ( v ));
-        BOOST_CHECK ( v.size () == c.size ());
-        BOOST_CHECK ( std::equal ( v.begin (), v.end (), c.begin ()));
-        }
-    }
-
-
-BOOST_CXX14_CONSTEXPR inline bool test_constexpr() {
-    const size_t sz = 64;
-    int in_data[sz] = {0};
-    bool res = true;
-    
-    const int* from = in_data;
-    const int* to = in_data + sz;
-    
-    int out_data[sz] = {0};
-    int* out = out_data;
-    
-    out = ba::copy_n ( from, 0, out ); // Copy none
-    res = (res && out == out_data && ba::all_of(out, out + sz, is_zero));
-
-    out = ba::copy_n ( from, sz, out ); // Copy all
-    res = (res && out == out_data + sz
-           && ba::equal( input_iterator<const int *>(out_data),  input_iterator<const int *>(out_data + sz), 
-                         input_iterator<const int *>(from), input_iterator<const int *>(to)));
-    
-    return res;
-    }
-    
-    
-void test_sequence1 () {
-    std::vector<int> v;
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_sequence  ( v );
-    
-    BOOST_CXX14_CONSTEXPR bool constexpr_res = test_constexpr();
-    BOOST_CHECK(constexpr_res);
-    
-    std::list<int> l;
-    for ( int i = 25; i > 15; --i )
-        l.push_back ( i );
-    test_sequence  ( l );   
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
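A short sketch of boost::algorithm::copy_n as used in the deleted test, assuming its <boost/algorithm/cxx11/copy_n.hpp> header remains available; the data is illustrative.

#include <boost/algorithm/cxx11/copy_n.hpp>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    const std::vector<int> src = {5, 6, 7, 8, 9};
    std::vector<int> dst;

    // Copies exactly n elements starting at the given iterator and returns
    // the advanced output iterator; n == 0 copies nothing, as tested above.
    boost::algorithm::copy_n(src.begin(), 3, std::back_inserter(dst));
    assert(dst == (std::vector<int>{5, 6, 7}));
    return 0;
}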
diff --git a/third_party/boostorg/algorithm/test/empty_search_test.cpp b/third_party/boostorg/algorithm/test/empty_search_test.cpp
deleted file mode 100644
index cb37678..0000000
--- a/third_party/boostorg/algorithm/test/empty_search_test.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <string>
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    const std::string cs;
-    std::string estr;
-    std::string str ( "abc" );
-    
-//  empty corpus, empty pattern
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_search (
-            cs.begin (), cs.end (), estr.begin (), estr.end ())
-        == std::make_pair(cs.begin(), cs.begin())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_horspool_search (
-            cs.begin (), cs.end (), estr.begin (), estr.end ())
-        == std::make_pair(cs.begin(), cs.begin())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::knuth_morris_pratt_search (
-            cs.begin (), cs.end (), estr.begin (), estr.end ())
-        == std::make_pair(cs.begin(), cs.begin())
-        );
-
-//  empty corpus, non-empty pattern
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_search (
-            estr.begin (), estr.end (), str.begin (), str.end ())
-        == std::make_pair(estr.end(), estr.end())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_horspool_search (
-            estr.begin (), estr.end (), str.begin (), str.end ())
-        == std::make_pair(estr.end(), estr.end())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::knuth_morris_pratt_search (
-            estr.begin (), estr.end (), str.begin (), str.end ())
-        == std::make_pair(estr.end(), estr.end())
-        );
-
-//  non-empty corpus, empty pattern
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_search (
-            str.begin (), str.end (), estr.begin (), estr.end ())
-        == std::make_pair(str.begin(), str.begin())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::boyer_moore_horspool_search (
-            str.begin (), str.end (), estr.begin (), estr.end ())
-        == std::make_pair(str.begin(), str.begin())
-        );
-
-    BOOST_CHECK ( 
-        boost::algorithm::knuth_morris_pratt_search (
-            str.begin (), str.end (), estr.begin (), estr.end ())
-        == std::make_pair(str.begin(), str.begin())
-        );
-}
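The deleted test pins down the empty-corpus/empty-pattern conventions of the three searchers; the sketch below shows an ordinary (non-empty) match under the same pair-returning interface the test compares against. Headers are the ones the test itself included; the corpus and pattern here are invented.

#include <boost/algorithm/searching/boyer_moore.hpp>
#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
#include <cassert>
#include <string>
#include <utility>

int main() {
    namespace ba = boost::algorithm;
    typedef std::pair<std::string::const_iterator,
                      std::string::const_iterator> hit_t;

    const std::string corpus  = "abracadabra";
    const std::string pattern = "cad";

    hit_t bm  = ba::boyer_moore_search(corpus.begin(), corpus.end(),
                                       pattern.begin(), pattern.end());
    hit_t bmh = ba::boyer_moore_horspool_search(corpus.begin(), corpus.end(),
                                                pattern.begin(), pattern.end());
    hit_t kmp = ba::knuth_morris_pratt_search(corpus.begin(), corpus.end(),
                                              pattern.begin(), pattern.end());

    // All three agree: "cad" starts at offset 4 and ends before offset 7.
    assert(bm == bmh && bm == kmp);
    assert(bm.first == corpus.begin() + 4 && bm.second == corpus.begin() + 7);
    return 0;
}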
diff --git a/third_party/boostorg/algorithm/test/equal_test.cpp b/third_party/boostorg/algorithm/test/equal_test.cpp
deleted file mode 100644
index 3932098..0000000
--- a/third_party/boostorg/algorithm/test/equal_test.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx14/equal.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-template <typename T>
-BOOST_CXX14_CONSTEXPR bool eq ( const T& a, const T& b ) { return a == b; }
-
-template <typename T>
-bool never_eq ( const T&, const T& ) { return false; }
-
-int comparison_count = 0;
-template <typename T>
-bool counting_equals ( const T &a, const T &b ) {
-    ++comparison_count;
-    return a == b;
-    }
-
-namespace ba = boost::algorithm;
-
-void test_equal ()
-{
-//  Note: The literal values here are tested against directly, careful if you change them:
-    int num[] = { 1, 1, 2, 3, 5 };
-    const int sz = sizeof (num)/sizeof(num[0]);
-    
-    
-//  Empty sequences are equal to each other, but not to non-empty sequences
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num), 
-                              input_iterator<int *>(num),     input_iterator<int *>(num)));
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num), 
-                              input_iterator<int *>(num),     input_iterator<int *>(num),
-                              never_eq<int> ));
-    BOOST_CHECK ( ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num), 
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num),
-                              never_eq<int> ));
-                              
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num), 
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1)));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num + 1), input_iterator<int *>(num + 2), 
-                              input_iterator<int *>(num),     input_iterator<int *>(num)));
-    BOOST_CHECK (!ba::equal ( random_access_iterator<int *>(num + 1), random_access_iterator<int *>(num + 2), 
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num)));
-
-//  Single element sequences are equal if they contain the same value
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1)));
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              eq<int> ));
-    BOOST_CHECK ( ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num + 1),
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num + 1),
-                              eq<int> ));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              never_eq<int> ));
-    BOOST_CHECK (!ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num + 1),
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num + 1),
-                              never_eq<int> ));
-
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              input_iterator<int *>(num + 1), input_iterator<int *>(num + 2)));
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              input_iterator<int *>(num + 1), input_iterator<int *>(num + 2),
-                              eq<int> ));
-
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num + 2), input_iterator<int *>(num + 3), 
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1)));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num + 2), input_iterator<int *>(num + 3),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + 1),
-                              eq<int> ));
-                              
-//  Identical long sequences are equal. 
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz)));
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              eq<int> ));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              never_eq<int> ));
-    BOOST_CHECK ( ba::equal ( input_iterator<int *>(num),             input_iterator<int *>(num + sz),
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz),
-                              eq<int> ));
-
-//  different sequences are different
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num + 1), input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz)));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num + 1), input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              eq<int> ));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz - 1)));
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz - 1),
-                              eq<int> ));
-
-//  When there's a cheap check, bail early
-    comparison_count = 0;
-    BOOST_CHECK (!ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz),
-                              random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz - 1),
-                              counting_equals<int> ));
-    BOOST_CHECK ( comparison_count == 0 );
-//  And when there's not, we can't
-    comparison_count = 0;
-    BOOST_CHECK (!ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                              input_iterator<int *>(num),     input_iterator<int *>(num + sz - 1),
-                              counting_equals<int> ));
-    BOOST_CHECK ( comparison_count > 0 );
-    
-}
-
-
-BOOST_CXX14_CONSTEXPR bool test_constexpr_equal() {
-    int num[] = { 1, 1, 2, 3, 5};
-    const int sz = sizeof (num)/sizeof(num[0]);
-    bool res = true;
-//  Empty sequences are equal to each other
-    res = (   ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num), 
-                          input_iterator<int *>(num),     input_iterator<int *>(num))
-//  Identical long sequences are equal                         
-           && ba::equal ( input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                          input_iterator<int *>(num),     input_iterator<int *>(num + sz),
-                          eq<int> )
-//  Different sequences are different
-           && !ba::equal ( input_iterator<int *>(num + 1), input_iterator<int *>(num + sz),
-                           input_iterator<int *>(num),     input_iterator<int *>(num + sz))
-          );
-#ifdef __cpp_lib_array_constexpr // or cpp17 compiler
-//  Turn on tests for random_access_iterator, because std functions used in equal are marked constexpr
-    res = ( res 
-//  Empty sequences are equal to each other
-           && ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num), 
-                          random_access_iterator<int *>(num),     random_access_iterator<int *>(num))
-//  Identical long sequences are equal                         
-           && ba::equal ( random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz),
-                          random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz),
-                          eq<int> )
-//  Different sequences are different
-           && !ba::equal ( random_access_iterator<int *>(num + 1), random_access_iterator<int *>(num + sz),
-                           random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz))
-          );
-#endif
-    return res;
-  }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_equal ();
-  BOOST_CXX14_CONSTEXPR bool constexpr_res = test_constexpr_equal ();
-  BOOST_CHECK (constexpr_res);
-}
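A minimal sketch of the four-iterator boost::algorithm::equal checked above (the C++14-style overload from <boost/algorithm/cxx14/equal.hpp>); the example sequences and the sign-insensitive predicate are illustrative.

#include <boost/algorithm/cxx14/equal.hpp>
#include <cassert>
#include <cstdlib>
#include <vector>

int main() {
    namespace ba = boost::algorithm;
    const std::vector<int> a = {1, 1, 2, 3, 5};
    const std::vector<int> b = {1, 1, 2, 3};

    // Unlike the classic three-argument std::equal, both ranges carry their
    // own end iterator, so sequences of different length compare unequal.
    assert( ba::equal(a.begin(), a.end(), a.begin(), a.end()));
    assert(!ba::equal(a.begin(), a.end(), b.begin(), b.end()));

    // An optional binary predicate replaces operator==; here: equal up to sign.
    const std::vector<int> c = {-1, 1, -2, 3, -5};
    assert(ba::equal(a.begin(), a.end(), c.begin(), c.end(),
                     [](int x, int y) { return std::abs(x) == std::abs(y); }));
    return 0;
}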
diff --git a/third_party/boostorg/algorithm/test/exclusive_scan_test.cpp b/third_party/boostorg/algorithm/test/exclusive_scan_test.cpp
deleted file mode 100644
index 2df3769..0000000
--- a/third_party/boostorg/algorithm/test/exclusive_scan_test.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-#include <numeric>
-#include <algorithm>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/iota.hpp>
-#include <boost/algorithm/cxx17/exclusive_scan.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-int triangle(int n) { return n*(n+1)/2; }
-
-void basic_tests_init()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::exclusive_scan(v.begin(), v.end(), v.begin(), 50);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 50 + (int) i * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::exclusive_scan(v.begin(), v.end(), v.begin(), 30);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 30 + triangle(i-1));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::exclusive_scan(v.begin(), v.end(), v.begin(), 40);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 40 + triangle(i));
-    }
-
-}
-
-void test_exclusive_scan_init()
-{
-	basic_tests_init();
-}
-
-void test_exclusive_scan_init_op()
-{
-	BOOST_CHECK(true);
-}
-
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_exclusive_scan_init();
-  test_exclusive_scan_init_op();
-}
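A small sketch of the exclusive_scan-with-initial-value form the deleted test checks against its triangle-number formula, assuming <boost/algorithm/cxx17/exclusive_scan.hpp> is still available; values are illustrative.

#include <boost/algorithm/cxx17/exclusive_scan.hpp>
#include <cassert>
#include <vector>

int main() {
    namespace ba = boost::algorithm;
    std::vector<int> v = {1, 2, 3, 4};

    // Each output element is the init value plus the sum of all *preceding*
    // inputs, so the first output is the init value itself.  Writing back
    // into the input range in place is fine, as the test above does.
    ba::exclusive_scan(v.begin(), v.end(), v.begin(), 40);
    assert(v == (std::vector<int>{40, 41, 43, 46}));
    return 0;
}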
diff --git a/third_party/boostorg/algorithm/test/find_backward_test.cpp b/third_party/boostorg/algorithm/test/find_backward_test.cpp
deleted file mode 100644
index f78fadb..0000000
--- a/third_party/boostorg/algorithm/test/find_backward_test.cpp
+++ /dev/null
@@ -1,420 +0,0 @@
-/* 
-   Copyright (c) T. Zachary Laine 2018.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-#include <iostream>
-
-#include <boost/algorithm/find_backward.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <vector>
-#include <list>
-
-
-namespace ba = boost::algorithm;
-
-template <typename Container>
-struct dist_t
-{
-    dist_t(Container & cont) : cont_(cont) {}
-    template<typename Iter>
-    std::ptrdiff_t operator()(Iter it) const
-    {
-        return std::distance(cont_.begin(), it);
-    }
-
-    Container & cont_;
-};
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr_backward()
-{
-    int in_data[] = {1, 2, 3, 4, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-
-    const int* start = ba::find_backward(from, to, 1); // stops on first
-    res = (res && start == from);
-
-    start = ba::find_backward(in_data, 1); // stops on first
-    res = (res && start == from);
-
-    const int* end = ba::find_backward(from, to, 6); // stops on the end
-    res = (res && end == to);
-
-    end = ba::find_backward(in_data, 6); // stops on the end
-    res = (res && end == to);
-
-    const int* three = ba::find_backward(from, to, 3); // stops on third element
-    res = (res && three == in_data + 2);
-
-    three = ba::find_backward(in_data, 3); // stops on third element
-    res = (res && three == in_data + 2);
-
-    return res;
-}
-
-void test_find_backward()
-{
-    {
-        std::vector<int> v1;
-        const dist_t<std::vector<int> > dist(v1);
-
-        for (int i = 5; i < 15; ++i)
-            v1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(v1.begin(), v1.end(), 0)), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(v1.begin(), v1.end(), 100)), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(v1.begin(), v1.end(), v1.back())),
-            v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(v1.begin(), v1.end(), v1.front())), 0);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(v1, 0)), v1.size());
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(v1, 100)), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(v1, v1.back())), v1.size() - 1);
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(v1, v1.front())), 0);
-    }
-
-    //  With bidirectional iterators.
-    {
-        std::list<int> l1;
-        const dist_t<std::list<int> > dist(l1);
-
-        for (int i = 5; i < 15; ++i)
-            l1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(l1.begin(), l1.end(), 0)), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(l1.begin(), l1.end(), 100)), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(l1.begin(), l1.end(), l1.back())),
-            l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(l1.begin(), l1.end(), l1.front())), 0);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(l1, 0)), l1.size());
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(l1, 100)), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_backward(l1, l1.back())), l1.size() - 1);
-        BOOST_CHECK_EQUAL(dist(ba::find_backward(l1, l1.front())), 0);
-    }
-
-    BOOST_CXX14_CONSTEXPR bool ce_result = check_constexpr_backward();
-    BOOST_CHECK(ce_result);
-}
-
-struct equals
-{
-    BOOST_CXX14_CONSTEXPR equals(int n) : n_(n) {}
-    BOOST_CXX14_CONSTEXPR bool operator()(int i) { return i == n_; }
-    int n_;
-};
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr_if_backward()
-{
-    int in_data[] = {1, 2, 3, 4, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-
-    const int* start = ba::find_if_backward(from, to, equals(1)); // stops on first
-    res = (res && start == from);
-
-    start = ba::find_if_backward(in_data, equals(1)); // stops on first
-    res = (res && start == from);
-
-    const int* end = ba::find_if_backward(from, to, equals(6)); // stops on the end
-    res = (res && end == to);
-
-    end = ba::find_if_backward(in_data, equals(6)); // stops on the end
-    res = (res && end == to);
-
-    const int* three = ba::find_if_backward(from, to, equals(3)); // stops on third element
-    res = (res && three == in_data + 2);
-
-    three = ba::find_if_backward(in_data, equals(3)); // stops on third element
-    res = (res && three == in_data + 2);
-
-    return res;
-}
-
-void test_find_if_backward()
-{
-    {
-        std::vector<int> v1;
-        const dist_t<std::vector<int> > dist(v1);
-
-        for (int i = 5; i < 15; ++i)
-            v1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1.begin(), v1.end(), equals(0))),
-            v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1.begin(), v1.end(), equals(100))),
-            v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1.begin(), v1.end(), equals(v1.back()))),
-            v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(
-                ba::find_if_backward(v1.begin(), v1.end(), equals(v1.front()))),
-            0);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_if_backward(v1, equals(0))), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1, equals(100))), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1, equals(v1.back()))), v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(v1, equals(v1.front()))), 0);
-    }
-
-    //  With bidirectional iterators.
-    {
-        std::list<int> l1;
-        const dist_t<std::list<int> > dist(l1);
-
-        for (int i = 5; i < 15; ++i)
-            l1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1.begin(), l1.end(), equals(0))),
-            l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1.begin(), l1.end(), equals(100))),
-            l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1.begin(), l1.end(), equals(l1.back()))),
-            l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(
-                ba::find_if_backward(l1.begin(), l1.end(), equals(l1.front()))),
-            0);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_if_backward(l1, equals(0))), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1, equals(100))), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1, equals(l1.back()))), l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_backward(l1, equals(l1.front()))), 0);
-    }
-
-    BOOST_CXX14_CONSTEXPR bool ce_result = check_constexpr_if_backward();
-    BOOST_CHECK(ce_result);
-}
-
-struct not_equals
-{
-    BOOST_CXX14_CONSTEXPR not_equals(int n) : n_(n) {}
-    BOOST_CXX14_CONSTEXPR bool operator()(int i) { return i != n_; }
-    int n_;
-};
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr_if_not_backward()
-{
-    int in_data[] = {1, 2, 3, 4, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-
-    const int* start = ba::find_if_not_backward(from, to, not_equals(1)); // stops on first
-    res = (res && start == from);
-
-    start = ba::find_if_not_backward(in_data, not_equals(1)); // stops on first
-    res = (res && start == from);
-
-    const int* end = ba::find_if_not_backward(from, to, not_equals(6)); // stops on the end
-    res = (res && end == to);
-
-    end = ba::find_if_not_backward(in_data, not_equals(6)); // stops on the end
-    res = (res && end == to);
-
-    const int* three = ba::find_if_not_backward(from, to, not_equals(3)); // stops on third element
-    res = (res && three == in_data + 2);
-
-    three = ba::find_if_not_backward(in_data, not_equals(3)); // stops on third element
-    res = (res && three == in_data + 2);
-
-    return res;
-}
-
-void test_find_if_not_backward()
-{
-    {
-        std::vector<int> v1;
-        const dist_t<std::vector<int> > dist(v1);
-
-        for (int i = 5; i < 15; ++i)
-            v1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(v1.begin(), v1.end(), not_equals(0))),
-            v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                v1.begin(), v1.end(), not_equals(100))),
-            v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                v1.begin(), v1.end(), not_equals(v1.back()))),
-            v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                v1.begin(), v1.end(), not_equals(v1.front()))),
-            0);
-
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(v1, not_equals(0))), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(v1, not_equals(100))), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(v1, not_equals(v1.back()))),
-            v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(v1, not_equals(v1.front()))), 0);
-    }
-
-    //  With bidirectional iterators.
-    {
-        std::list<int> l1;
-        const dist_t<std::list<int> > dist(l1);
-
-        for (int i = 5; i < 15; ++i)
-            l1.push_back(i);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(l1.begin(), l1.end(), not_equals(0))),
-            l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                l1.begin(), l1.end(), not_equals(100))),
-            l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                l1.begin(), l1.end(), not_equals(l1.back()))),
-            l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(
-                l1.begin(), l1.end(), not_equals(l1.front()))),
-            0);
-
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(l1, not_equals(0))), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(l1, not_equals(100))), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(l1, not_equals(l1.back()))),
-            l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_if_not_backward(l1, not_equals(l1.front()))), 0);
-    }
-
-    BOOST_CXX14_CONSTEXPR bool ce_result = check_constexpr_if_not_backward();
-    BOOST_CHECK(ce_result);
-}
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr_not_backward()
-{
-    int in_data[] = {1, 5, 5, 5, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-
-    const int* start = ba::find_not_backward(from, to, 5); // stops on first
-    res = (res && start == from);
-
-    start = ba::find_not_backward(in_data, 5); // stops on first
-    res = (res && start == from);
-
-    const int in_data_2[] = {6, 6, 6, 6, 6};
-    const int* end = ba::find_not_backward(in_data_2, in_data_2 + 5, 6); // stops on the end
-    res = (res && end == in_data_2 + 5);
-
-    end = ba::find_not_backward(in_data_2, 6); // stops on the end
-    res = (res && end == in_data_2 + 5);
-
-    return res;
-}
-
-void test_find_not_backward()
-{
-    {
-        std::vector<int> v1;
-        const dist_t<std::vector<int> > dist(v1);
-
-        for (int i = 0; i < 5; ++i)
-            v1.push_back(0);
-        for (int i = 0; i < 5; ++i)
-            v1.push_back(1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(v1.begin(), v1.end(), 1)), 4);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(v1.begin(), v1.end(), 0)),
-            v1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(v1.begin(), v1.end(), 2)),
-            v1.size() - 1);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(v1, 1)), 4);
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(v1, 0)), v1.size() - 1);
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(v1, 2)), v1.size() - 1);
-
-        v1.resize(5);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(v1.begin(), v1.end(), 0)), v1.size());
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(v1, 0)), v1.size());
-    }
-
-    //  With bidirectional iterators.
-    {
-        std::list<int> l1;
-        const dist_t<std::list<int> > dist(l1);
-
-        for (int i = 0; i < 5; ++i)
-            l1.push_back(0);
-        for (int i = 0; i < 5; ++i)
-            l1.push_back(1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(l1.begin(), l1.end(), 1)), 4);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(l1.begin(), l1.end(), 0)),
-            l1.size() - 1);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(l1.begin(), l1.end(), 2)),
-            l1.size() - 1);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(l1, 1)), 4);
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(l1, 0)), l1.size() - 1);
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(l1, 2)), l1.size() - 1);
-
-        l1.resize(5);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not_backward(l1.begin(), l1.end(), 0)), l1.size());
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not_backward(l1, 0)), l1.size());
-    }
-
-    BOOST_CXX14_CONSTEXPR bool ce_result = check_constexpr_not_backward();
-    BOOST_CHECK(ce_result);
-}
-
-BOOST_AUTO_TEST_CASE(test_main)
-{
-    test_find_backward();
-    test_find_if_backward();
-    test_find_if_not_backward();
-    test_find_not_backward();
-}
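A sketch of the find_backward family the deleted test covers, assuming <boost/algorithm/find_backward.hpp> from the removed tree; the duplicate-heavy sample sequence is invented to make the "search from the back" behaviour visible.

#include <boost/algorithm/find_backward.hpp>
#include <cassert>
#include <vector>

int main() {
    namespace ba = boost::algorithm;
    const std::vector<int> v = {1, 2, 3, 2, 1};

    // find_backward searches from the back and returns an iterator to the
    // last matching element, or the end iterator when nothing matches.
    assert(ba::find_backward(v.begin(), v.end(), 2) == v.begin() + 3);
    assert(ba::find_backward(v.begin(), v.end(), 9) == v.end());

    // find_if_backward takes a predicate; find_not_backward finds the last
    // element *not* equal to the value.  Range overloads exist for both.
    assert(ba::find_if_backward(v, [](int i) { return i < 2; }) == v.begin() + 4);
    assert(ba::find_not_backward(v, 1) == v.begin() + 3);
    return 0;
}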
diff --git a/third_party/boostorg/algorithm/test/find_if_not_test1.cpp b/third_party/boostorg/algorithm/test/find_if_not_test1.cpp
deleted file mode 100644
index 2d79555..0000000
--- a/third_party/boostorg/algorithm/test/find_if_not_test1.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/find_if_not.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-BOOST_CXX14_CONSTEXPR bool is_true  ( int v ) { return true; }
-BOOST_CXX14_CONSTEXPR bool is_false ( int v ) { return false; }
-BOOST_CXX14_CONSTEXPR bool is_not_three ( int v ) { return v != 3; }
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr() {
-    int in_data[] = {1, 2, 3, 4, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-    
-    const int* start = ba::find_if_not (from, to, is_false); // stops on first
-    res = (res && start == from);
-    
-    const int* end = ba::find_if_not(from, to, is_true); // stops on the end
-    res = (res && end == to);
-    
-    const int* three = ba::find_if_not(from, to, is_not_three); // stops on third element
-    res = (res && three == in_data + 2);
-    
-    return res;
-}
-
-template <typename Container>
-typename Container::iterator offset_to_iter ( Container &v, int offset ) {
-    typename Container::iterator retval;
-    
-    if ( offset >= 0 ) {
-        retval = v.begin ();
-        std::advance ( retval, offset );
-        }
-    else {
-        retval = v.end ();
-        std::advance ( retval, offset + 1 );
-        }
-    return retval;
-    }
-
-template <typename Container, typename Predicate>
-void test_sequence ( Container &v, Predicate comp, int expected ) {
-    typename Container::iterator res, exp;
-    
-    res = ba::find_if_not ( v.begin (), v.end (), comp );
-    exp = offset_to_iter ( v, expected );
-    std::cout << "Expected(1): " << std::distance ( v.begin (), exp ) 
-              <<       ", got: " << std::distance ( v.begin (), res ) << std::endl;
-    BOOST_CHECK ( exp == res );
-    }
-
-template <typename T>
-struct less_than {
-public:
-    less_than ( T foo ) : val ( foo ) {}
-    less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-    };
-
-
-void test_sequence1 () {
-    std::vector<int> v;
-    
-    v.clear ();
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_sequence ( v, less_than<int>(3),  0 ); // no elements
-    test_sequence ( v, less_than<int>(6),  1 );    // only the first element
-    test_sequence ( v, less_than<int>(10), 5 );
-    test_sequence ( v, less_than<int>(99), -1 );   // all elements satisfy 
-
-//  With bidirectional iterators.
-    std::list<int> l;
-    for ( int i = 5; i < 15; ++i )
-        l.push_back ( i );
-    test_sequence ( l, less_than<int>(3),  0 ); // no elements
-    test_sequence ( l, less_than<int>(6),  1 );    // only the first element
-    test_sequence ( l, less_than<int>(10), 5 );
-    test_sequence ( l, less_than<int>(99), -1 );   // all elements satisfy 
-
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
diff --git a/third_party/boostorg/algorithm/test/find_not_test.cpp b/third_party/boostorg/algorithm/test/find_not_test.cpp
deleted file mode 100644
index ef7529f..0000000
--- a/third_party/boostorg/algorithm/test/find_not_test.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-/* 
-   Copyright (c) T. Zachary Laine 2018.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-#include <iostream>
-
-#include <boost/algorithm/find_not.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <vector>
-#include <list>
-
-
-namespace ba = boost::algorithm;
-
-template <typename Container>
-struct dist_t
-{
-    dist_t(Container & cont) : cont_(cont) {}
-    template<typename Iter>
-    std::ptrdiff_t operator()(Iter it) const
-    {
-        return std::distance(cont_.begin(), it);
-    }
-
-    Container & cont_;
-};
-
-BOOST_CXX14_CONSTEXPR bool check_constexpr()
-{
-    int in_data[] = {2, 2, 3, 4, 5};
-    bool res = true;
-
-    const int* from = in_data;
-    const int* to = in_data + 5;
-
-    const int* start = ba::find_not(from, to, 1); // stops on first
-    res = (res && start == from);
-
-    start = ba::find_not(in_data, 1); // stops on first
-    res = (res && start == from);
-
-    int in_data_2[] = {6, 6, 6, 6, 6};
-    const int* end = ba::find_not(in_data_2, in_data_2 + 5, 6); // stops on the end
-    res = (res && end == in_data_2 + 5);
-
-    end = ba::find_not(in_data_2, 6); // stops on the end
-    res = (res && end == in_data_2 + 5);
-
-    const int* three = ba::find_not(from, to, 2); // stops on third element
-    res = (res && three == in_data + 2);
-
-    three = ba::find_not(in_data, 2); // stops on third element
-    res = (res && three == in_data + 2);
-
-    return res;
-}
-
-void test_sequence()
-{
-    {
-        std::vector<int> v1;
-        const dist_t<std::vector<int> > dist(v1);
-
-        for (int i = 5; i < 15; ++i)
-            v1.push_back(i);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1.begin(), v1.end(), 0)), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(v1.begin(), v1.end(), v1.back())), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(v1.begin(), v1.end(), v1.front())), 1);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, 0)), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, v1.back())), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, v1.front())), 1);
-
-        v1 = std::vector<int>(10, 2);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1.begin(), v1.end(), 0)), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(v1.begin(), v1.end(), v1.back())), v1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(v1.begin(), v1.end(), v1.front())), v1.size());
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, 0)), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, v1.back())), v1.size());
-        BOOST_CHECK_EQUAL(dist(ba::find_not(v1, v1.front())), v1.size());
-    }
-
-    //  With bidirectional iterators.
-    {
-        std::list<int> l1;
-        const dist_t<std::list<int> > dist(l1);
-
-        for (int i = 5; i < 15; ++i)
-            l1.push_back(i);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1.begin(), l1.end(), 0)), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(l1.begin(), l1.end(), l1.back())), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(l1.begin(), l1.end(), l1.front())), 1);
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, 0)), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, l1.back())), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, l1.front())), 1);
-
-        l1.clear();
-        for (int i = 0; i < 10; ++i)
-            l1.push_back(2);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1.begin(), l1.end(), 0)), 0);
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(l1.begin(), l1.end(), l1.back())), l1.size());
-        BOOST_CHECK_EQUAL(
-            dist(ba::find_not(l1.begin(), l1.end(), l1.front())), l1.size());
-
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, 0)), 0);
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, l1.back())), l1.size());
-        BOOST_CHECK_EQUAL(dist(ba::find_not(l1, l1.front())), l1.size());
-    }
-
-    BOOST_CXX14_CONSTEXPR bool ce_result = check_constexpr();
-    BOOST_CHECK(ce_result);
-}
-
-
-BOOST_AUTO_TEST_CASE(test_main)
-{
-    test_sequence();
-}
diff --git a/third_party/boostorg/algorithm/test/for_each_n_test.cpp b/third_party/boostorg/algorithm/test/for_each_n_test.cpp
deleted file mode 100644
index 8d55ba4..0000000
--- a/third_party/boostorg/algorithm/test/for_each_n_test.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx17/for_each_n.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-struct for_each_test
-{
-    for_each_test() {}
-    static int count;
-    void operator()(int& i) {++i; ++count;}
-};
-
-int for_each_test::count = 0;
-
-void test_for_each_n ()
-{
-    typedef input_iterator<int*> Iter;
-    int ia[] = {0, 1, 2, 3, 4, 5};
-    const unsigned s = sizeof(ia)/sizeof(ia[0]);
-
-    {
-	for_each_test::count = 0;
-    Iter it = ba::for_each_n(Iter(ia), 0, for_each_test());
-    BOOST_CHECK(it == Iter(ia));
-    BOOST_CHECK(for_each_test::count == 0);
-    }
-
-    {
-	for_each_test::count = 0;
-    Iter it = ba::for_each_n(Iter(ia), s, for_each_test());
-
-    BOOST_CHECK(it == Iter(ia+s));
-    BOOST_CHECK(for_each_test::count == s);
-    for (unsigned i = 0; i < s; ++i)
-        BOOST_CHECK(ia[i] == static_cast<int>(i+1));
-    }
-
-    {
-	for_each_test::count = 0;
-    Iter it = ba::for_each_n(Iter(ia), 1, for_each_test());
-
-    BOOST_CHECK(it == Iter(ia+1));
-    BOOST_CHECK(for_each_test::count == 1);
-    for (unsigned i = 0; i < 1; ++i)
-        BOOST_CHECK(ia[i] == static_cast<int>(i+2));
-    }
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_for_each_n ();
-}
diff --git a/third_party/boostorg/algorithm/test/gather_fail1.cpp b/third_party/boostorg/algorithm/test/gather_fail1.cpp
deleted file mode 100644
index 7fe5b7f..0000000
--- a/third_party/boostorg/algorithm/test/gather_fail1.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/gather.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-#include "iterator_test.hpp"
-
-namespace ba = boost::algorithm;
-
-bool is_ten  ( int i ) { return i == 10; }
-
-void test_sequence1 () {
-    std::vector<int> v;
-    typedef input_iterator<std::vector<int>::iterator> II;    
-
-//  This should fail to compile, since gather doesn't work with input iterators
-    (void) ba::gather ( II( v.begin ()), II( v.end ()), II( v.begin ()), is_ten );
-    }
-
-
-int main ()
-{
-  test_sequence1 ();
-  return 0;
-}
diff --git a/third_party/boostorg/algorithm/test/gather_test1.cpp b/third_party/boostorg/algorithm/test/gather_test1.cpp
deleted file mode 100644
index 28f63b5..0000000
--- a/third_party/boostorg/algorithm/test/gather_test1.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/gather.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-#include "iterator_test.hpp"
-
-namespace ba = boost::algorithm;
-
-template <typename Container>
-void print ( const char *prompt, const Container &c ) {
-    std::cout << prompt << " { ";
-    std::copy ( c.begin (), c.end (), std::ostream_iterator<typename Container::value_type>(std::cout, " "));
-    std::cout << std::endl;
-    }
-
-template <typename Iterator, typename Predicate>
-void test_iterators ( Iterator first, Iterator last, Predicate comp, std::size_t offset ) {
-//  Create the pivot point
-    Iterator off = first;
-    std::advance(off, offset);
-        
-//  Gather the elements
-    std::pair<Iterator, Iterator> res = ba::gather ( first, last, off, comp );
-
-//  We should now have three sequences, any of which may be empty:
-//      * [begin .. result.first)         - items that do not satisfy the predicate
-//      * [result.first .. result.second) - items that do satisfy the predicate
-//      * [result.second .. end)          - items that do not satisfy the predicate
-    Iterator iter = first;
-    for ( ; iter != res.first; ++iter )
-        BOOST_CHECK ( !comp ( *iter ));
-    for ( ; iter != res.second; ++iter)
-        BOOST_CHECK ( comp ( *iter ));
-    for ( ; iter != last; ++iter )
-        BOOST_CHECK ( !comp ( *iter ));
-    }
-
-template <typename Container, typename Predicate>
-void test_iterator_types ( const Container &c, Predicate comp, std::size_t offset ) {
-    typedef std::vector<typename Container::value_type> vec;
-
-    typedef bidirectional_iterator<typename vec::iterator> BDI;
-    typedef random_access_iterator<typename vec::iterator> RAI;
-    
-    vec v;
-    v.assign ( c.begin (), c.end ());
-    test_iterators ( BDI ( v.begin ()), BDI ( v.end ()), comp, offset );
-    v.assign ( c.begin (), c.end ());
-    test_iterators ( RAI ( v.begin ()), RAI ( v.end ()), comp, offset );
-    }
-
-
-template <typename T>
-struct less_than {
-public:
-//    typedef T argument_type;
-//    typedef bool result_type;
-
-    less_than ( T foo ) : val ( foo ) {}
-    less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-    };
-
-bool is_even ( int i ) { return i % 2 == 0; }
-bool is_ten  ( int i ) { return i == 10; }
-
-void test_sequence1 () {
-    std::vector<int> v;
-    
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_iterator_types ( v, less_than<int>(10),  0 );                  // at beginning
-    test_iterator_types ( v, less_than<int>(10),  5 );
-    test_iterator_types ( v, less_than<int>(10), v.size () - 1 );       // at end
-
-    test_iterator_types ( v, is_even, 0 );
-    test_iterator_types ( v, is_even, 5 );
-    test_iterator_types ( v, is_even, v.size () - 1 );
-
-//  Exactly one element in the sequence matches
-    test_iterator_types ( v, is_ten, 0 );
-    test_iterator_types ( v, is_ten, 5 );
-    test_iterator_types ( v, is_ten, v.size () - 1 );
-
-//  Everything in the sequence matches
-    test_iterator_types ( v, less_than<int>(99),  0 );
-    test_iterator_types ( v, less_than<int>(99),  5 );
-    test_iterator_types ( v, less_than<int>(99), v.size () - 1 );
-    
-//  Nothing in the sequence matches
-    test_iterator_types ( v, less_than<int>(0),  0 );
-    test_iterator_types ( v, less_than<int>(0),  5 );
-    test_iterator_types ( v, less_than<int>(0), v.size () - 1 );
-    
-//  All the elements in the sequence are the same
-    v.clear ();
-    for ( int i = 0; i < 11; ++i )
-        v.push_back ( 10 );
-    
-//  Everything in the sequence matches
-    test_iterator_types ( v, is_ten, 0 );
-    test_iterator_types ( v, is_ten, 5 );
-    test_iterator_types ( v, is_ten, v.size () - 1 );
-
-//  Nothing in the sequence matches
-    test_iterator_types ( v, less_than<int>(5),  0 );
-    test_iterator_types ( v, less_than<int>(5),  5 );
-    test_iterator_types ( v, less_than<int>(5), v.size () - 1 );
-
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
diff --git a/third_party/boostorg/algorithm/test/hex_fail1.cpp b/third_party/boostorg/algorithm/test/hex_fail1.cpp
deleted file mode 100644
index bd5ca46..0000000
--- a/third_party/boostorg/algorithm/test/hex_fail1.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/hex.hpp>
-
-#include <string>
-#include <iostream>
-#include <vector>
-
-//  should not compile: vector is not an integral type
-int main( int , char* [] )
-{
-  std::vector<float> v;
-  std::string out;
-  boost::algorithm::unhex ( out, std::back_inserter(v));
-
-  return 0;
-}
diff --git a/third_party/boostorg/algorithm/test/hex_test1.cpp b/third_party/boostorg/algorithm/test/hex_test1.cpp
deleted file mode 100644
index 31e3f9d..0000000
--- a/third_party/boostorg/algorithm/test/hex_test1.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/hex.hpp>
-#include <boost/algorithm/string/case_conv.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-
-
-template<typename String>
-void test_to_hex ( const typename String::value_type ** tests ) {
-    for ( const typename String::value_type **p = tests; *p; p++ ) {
-        String arg, argh, one, two, three, four;
-        arg.assign ( *p );
-        boost::algorithm::hex ( *p, std::back_inserter ( one ));
-        boost::algorithm::hex ( arg, std::back_inserter ( two ));
-        boost::algorithm::hex ( arg.begin (), arg.end (), std::back_inserter ( three ));
-        four = boost::algorithm::hex ( arg );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        argh = one;
-        one.clear (); two.clear (); three.clear (); four.clear ();
-        boost::algorithm::unhex ( argh.c_str (), std::back_inserter ( one ));
-        boost::algorithm::unhex ( argh, std::back_inserter ( two ));
-        boost::algorithm::unhex ( argh.begin (), argh.end (), std::back_inserter ( three ));
-        four = boost::algorithm::unhex ( argh );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        BOOST_CHECK ( one == arg );
-        }
-    }
-
-template<typename String>
-void test_to_hex_lower ( const typename String::value_type ** tests ) {
-    for ( const typename String::value_type **p = tests; *p; p++ ) {
-        String arg, argh, one, two, three, four;
-        arg.assign ( *p );
-        boost::algorithm::hex_lower ( *p, std::back_inserter ( one ));
-        boost::algorithm::hex_lower ( arg, std::back_inserter ( two ));
-        boost::algorithm::hex_lower ( arg.begin (), arg.end (), std::back_inserter ( three ));
-        four = boost::algorithm::hex_lower ( arg );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        argh = one;
-        one.clear (); two.clear (); three.clear (); four.clear ();
-        boost::algorithm::unhex ( argh.c_str (), std::back_inserter ( one ));
-        boost::algorithm::unhex ( argh, std::back_inserter ( two ));
-        boost::algorithm::unhex ( argh.begin (), argh.end (), std::back_inserter ( three ));
-        four = boost::algorithm::unhex ( argh );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        BOOST_CHECK ( one == arg );
-        }
-    }
-
-
-template<typename String>
-void test_from_hex_success ( const typename String::value_type ** tests ) {
-    for ( const typename String::value_type **p = tests; *p; p++ ) {
-        String arg, argh, one, two, three, four;
-        arg.assign ( *p );
-        boost::algorithm::unhex ( *p, std::back_inserter ( one ));
-        boost::algorithm::unhex ( arg, std::back_inserter ( two ));
-        boost::algorithm::unhex ( arg.begin (), arg.end (), std::back_inserter ( three ));
-        four = boost::algorithm::unhex ( arg );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        argh = one;
-        one.clear (); two.clear (); three.clear (); four.clear ();
-        boost::algorithm::hex ( argh.c_str (), std::back_inserter ( one ));
-        boost::algorithm::hex ( argh, std::back_inserter ( two ));
-        boost::algorithm::hex ( argh.begin (), argh.end (), std::back_inserter ( three ));
-        four = boost::algorithm::hex ( argh );
-        boost::algorithm::to_lower( arg );
-        boost::algorithm::to_lower( one );
-        boost::algorithm::to_lower( two );
-        boost::algorithm::to_lower( three );
-        boost::algorithm::to_lower( four );
-        BOOST_CHECK ( one == two );
-        BOOST_CHECK ( one == three );
-        BOOST_CHECK ( one == four );
-        BOOST_CHECK ( one == arg );
-        }
-    }
-
-template<typename String>
-void test_from_hex_failure ( const typename String::value_type ** tests ) {
-    int num_catches;
-    for ( const typename String::value_type **p = tests; *p; p++ ) {
-        String arg, one;
-        arg.assign ( *p );
-        num_catches = 0;
-
-        try { boost::algorithm::unhex ( *p, std::back_inserter ( one )); }
-        catch ( const boost::algorithm::hex_decode_error & /*ex*/ ) { num_catches++; }
-        try { boost::algorithm::unhex ( arg, std::back_inserter ( one )); }
-        catch ( const boost::algorithm::hex_decode_error & /*ex*/ ) { num_catches++; }
-        try { boost::algorithm::unhex ( arg.begin (), arg.end (), std::back_inserter ( one )); }
-        catch ( const boost::algorithm::hex_decode_error & /*ex*/ ) { num_catches++; }
-        BOOST_CHECK ( num_catches == 3 );
-        }
-    }
-
-
-
-const char *tohex [] = {
-    "",
-    "a",
-    "\001",
-    "12",
-    "asdfadsfsad",
-    "01234567890ABCDEF",
-    NULL        // End of the list
-    };
-
-
-const wchar_t *tohex_w [] = {
-    L"",
-    L"a",
-    L"\001",
-    L"12",
-    L"asdfadsfsad",
-    L"01234567890ABCDEF",
-    NULL        // End of the list
-    };
-
-
-const char *fromhex [] = {
-    "20",
-    "2122234556FF",
-    "2122234556ff",
-    NULL        // End of the list
-    };
-
-
-const wchar_t *fromhex_w [] = {
-    L"00101020",
-    L"2122234556FF3456",
-    L"2122234556ff3456",
-    NULL        // End of the list
-    };
-
-
-const char *fromhex_fail [] = {
-    "2",
-    "H",
-    "234",
-    "21222G4556FF",
-    "h",
-    "21222g4556ff",
-    NULL        // End of the list
-    };
-
-
-const wchar_t *fromhex_fail_w [] = {
-    L"2",
-    L"12",
-    L"H",
-    L"234",
-    L"21222G4556FF",
-    L"h",
-    L"21222g4556ff",
-    NULL        // End of the list
-    };
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_to_hex<std::string> ( tohex );
-  test_to_hex_lower<std::string> ( tohex );
-  test_from_hex_success<std::string> ( fromhex );
-  test_from_hex_failure<std::string> ( fromhex_fail );
-
-  test_to_hex<std::wstring> ( tohex_w );
-  test_to_hex_lower<std::wstring> ( tohex_w );
-  test_from_hex_success<std::wstring> ( fromhex_w );
-  test_from_hex_failure<std::wstring> ( fromhex_fail_w );
-}
diff --git a/third_party/boostorg/algorithm/test/hex_test2.cpp b/third_party/boostorg/algorithm/test/hex_test2.cpp
deleted file mode 100644
index af5579a..0000000
--- a/third_party/boostorg/algorithm/test/hex_test2.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-
-Test non-string cases; vector and list
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/hex.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-#include <deque>
-#include <list>
-
-
-const char *tohex [] = {
-    "",
-    "a",
-    "\001",
-    "12",
-    "asdfadsfsad",
-    "01234567890ABCDEF",
-    NULL        // End of the list
-    };
-
-void test_to_hex () {
-    for ( const char **p = tohex; *p; p++ ) {
-        std::deque<char> arg, argh;
-        std::list<char> one, two, three;
-        arg.assign ( *p, *p + strlen (*p));
-        boost::algorithm::hex ( *p,                       std::back_inserter ( one ));
-        boost::algorithm::hex ( arg,                      std::back_inserter ( two ));
-        boost::algorithm::hex ( arg.begin (), arg.end (), std::back_inserter ( three ));
-        BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-
-        std::copy ( one.begin (), one.end (), std::back_inserter ( argh ));
-        one.clear (); two.clear (); three.clear ();
-
-//      boost::algorithm::unhex ( argh.c_str (),              std::back_inserter ( one ));
-        boost::algorithm::unhex ( argh,                       std::back_inserter ( two ));
-        boost::algorithm::unhex ( argh.begin (), argh.end (), std::back_inserter ( three ));
-//      BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), arg.begin ()));
-        }
-
-//  Again, with a front_inserter
-    for ( const char **p = tohex; *p; p++ ) {
-        std::deque<char> arg, argh;
-        std::list<char> one, two, three;
-        arg.assign ( *p, *p + strlen (*p));
-        boost::algorithm::hex ( *p,                       std::front_inserter ( one ));
-        boost::algorithm::hex ( arg,                      std::front_inserter ( two ));
-        boost::algorithm::hex ( arg.begin (), arg.end (), std::front_inserter ( three ));
-        BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-
-    //  Copy, reversing
-        std::copy ( one.begin (), one.end (), std::front_inserter ( argh ));
-        one.clear (); two.clear (); three.clear ();
-
-//      boost::algorithm::unhex ( argh.c_str (),              std::front_inserter ( one ));
-        boost::algorithm::unhex ( argh,                       std::front_inserter ( two ));
-        boost::algorithm::unhex ( argh.begin (), argh.end (), std::front_inserter ( three ));
-//      BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), arg.rbegin ()));   // reverse
-        }
-    }
-
-const char *fromhex [] = {
-    "20",
-    "2122234556FF",
-    NULL        // End of the list
-    };
-
-
-void test_from_hex_success () {
-    for ( const char **p = fromhex; *p; p++ ) {
-        std::deque<char> arg, argh;
-        std::list<char> one, two, three;
-        arg.assign ( *p, *p + strlen (*p));
-        boost::algorithm::unhex ( *p,                       std::back_inserter ( one ));
-        boost::algorithm::unhex ( arg,                      std::back_inserter ( two ));
-        boost::algorithm::unhex ( arg.begin (), arg.end (), std::back_inserter ( three ));
-        BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-
-        std::copy ( one.begin (), one.end (), std::back_inserter ( argh ));
-        one.clear (); two.clear (); three.clear ();
-
-//      boost::algorithm::hex ( argh.c_str (),              std::back_inserter ( one ));
-        boost::algorithm::hex ( argh,                       std::back_inserter ( two ));
-        boost::algorithm::hex ( argh.begin (), argh.end (), std::back_inserter ( three ));
-//      BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), arg.begin ()));
-        }
-
-//  Again, with a front_inserter
-    for ( const char **p = fromhex; *p; p++ ) {
-        std::deque<char> arg, argh;
-        std::list<char> one, two, three;
-        arg.assign ( *p, *p + strlen (*p));
-        boost::algorithm::unhex ( *p,                       std::front_inserter ( one ));
-        boost::algorithm::unhex ( arg,                      std::front_inserter ( two ));
-        boost::algorithm::unhex ( arg.begin (), arg.end (), std::front_inserter ( three ));
-        BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-
-    //  Copy, reversing
-        std::copy ( one.begin (), one.end (), std::front_inserter ( argh ));
-        one.clear (); two.clear (); three.clear ();
-
-//      boost::algorithm::hex ( argh.c_str (),              std::front_inserter ( one ));
-        boost::algorithm::hex ( argh,                       std::front_inserter ( two ));
-        boost::algorithm::hex ( argh.begin (), argh.end (), std::front_inserter ( three ));
-//      BOOST_CHECK ( std::equal ( one.begin (), one.end (), two.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), three.begin ()));
-        BOOST_CHECK ( std::equal ( two.begin (), two.end (), arg.rbegin ()));   // reversed
-        }
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_to_hex ();
-  test_from_hex_success ();
-}
diff --git a/third_party/boostorg/algorithm/test/hex_test3.cpp b/third_party/boostorg/algorithm/test/hex_test3.cpp
deleted file mode 100644
index 64f0ff4..0000000
--- a/third_party/boostorg/algorithm/test/hex_test3.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-
-Try ostream_iterators
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/hex.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-#include <deque>
-#include <list>
-
-
-template <typename char_type>
-void test_to_hex ( const char_type ** tests ) {
-    typedef std::basic_string<char_type> String;
-    typedef std::basic_ostringstream<char_type> Stream;
-    typedef std::ostream_iterator<char_type, char_type> Iter;
-
-    for ( const char_type **p = tests; *p; p++ ) {
-        String arg, argh;
-        Stream one, two, three;
-        arg.assign ( *p );
-        boost::algorithm::hex ( *p, Iter ( one ));
-        boost::algorithm::hex ( arg, Iter ( two ));
-        boost::algorithm::hex ( arg.begin (), arg.end (), Iter ( three ));
-        boost::algorithm::hex ( arg );
-        BOOST_CHECK ( one.str () == two.str ());
-        BOOST_CHECK ( one.str () == three.str ());
-        argh = one.str ();
-        one.str (String()); two.str (String()); three.str (String());
-        boost::algorithm::unhex ( argh.c_str (), Iter ( one ));
-        boost::algorithm::unhex ( argh, Iter ( two ));
-        boost::algorithm::unhex ( argh.begin (), argh.end (), Iter ( three ));
-        BOOST_CHECK ( one.str () == two.str ());
-        BOOST_CHECK ( one.str () == three.str ());
-        BOOST_CHECK ( one.str () == arg );
-        }
-    }
-
-
-template <typename char_type>
-void test_from_hex_success ( const char_type ** tests ) {
-    typedef std::basic_string<char_type> String;
-    typedef std::basic_ostringstream<char_type> Stream;
-    typedef std::ostream_iterator<char_type, char_type> Iter;
-
-    for ( const char_type **p = tests; *p; p++ ) {
-        String arg, argh;
-        Stream one, two, three;
-        arg.assign ( *p );
-        boost::algorithm::unhex ( *p,                       Iter ( one ));
-        boost::algorithm::unhex ( arg,                      Iter ( two ));
-        boost::algorithm::unhex ( arg.begin (), arg.end (), Iter ( three ));
-        
-        BOOST_CHECK ( one.str () == two.str ());
-        BOOST_CHECK ( one.str () == three.str ());
-
-        argh = one.str ();
-        one.str (String()); two.str (String()); three.str (String());
-
-        boost::algorithm::hex ( argh.c_str (),              Iter ( one ));
-        boost::algorithm::hex ( argh,                       Iter ( two ));
-        boost::algorithm::hex ( argh.begin (), argh.end (), Iter ( three ));
-
-        BOOST_CHECK ( one.str () == two.str ());
-        BOOST_CHECK ( one.str () == three.str ());
-        BOOST_CHECK ( one.str () == arg );
-        }
-
-    }
-
-const char *tohex [] = {
-    "",
-    "a",
-    "\001",
-    "12",
-    "asdfadsfsad",
-    "01234567890ABCDEF",
-    NULL        // End of the list
-    };
-
-const wchar_t *tohex_w [] = {
-    L"",
-    L"a",
-    L"\001",
-    L"12",
-    L"asdfadsfsad",
-    L"01234567890ABCDEF",
-    NULL        // End of the list
-    };
-
-
-const char *fromhex [] = {
-    "20",
-    "2122234556FF",
-    NULL        // End of the list
-    };
-
-const wchar_t *fromhex_w [] = {
-    L"11223320",
-    L"21222345010256FF",
-    NULL        // End of the list
-    };
-
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_to_hex ( tohex );
-  test_to_hex ( tohex_w );
-  test_from_hex_success ( fromhex );
-  test_from_hex_success ( fromhex_w );
-}
diff --git a/third_party/boostorg/algorithm/test/hex_test4.cpp b/third_party/boostorg/algorithm/test/hex_test4.cpp
deleted file mode 100644
index ba1ee34..0000000
--- a/third_party/boostorg/algorithm/test/hex_test4.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-
-Try ostream_iterators
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/hex.hpp>
-#include <boost/exception/get_error_info.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <iostream>
-
-namespace ba = boost::algorithm;
-
-void test_short_input1 () {
-    std::string s;
-    
-    try { ba::unhex ( std::string ( "A" ), std::back_inserter(s)); }
-    catch ( const std::exception &ex ) { return; }
-    BOOST_TEST_MESSAGE ( "Failed to catch std::exception in test_short_input1" );
-    BOOST_CHECK ( false );
-    }
-
-void test_short_input2 () {
-    std::string s;
-    
-    try { ba::unhex ( std::string ( "A" ), std::back_inserter(s)); }
-    catch ( const ba::hex_decode_error &ex ) { return; }
-    BOOST_TEST_MESSAGE ( "Failed to catch ba::hex_decode_error in test_short_input2" );
-    BOOST_CHECK ( false );
-    }
-    
-void test_short_input3 () {
-    std::string s;
-    
-    try { ba::unhex ( std::string ( "A" ), std::back_inserter(s)); }
-    catch ( const ba::not_enough_input &ex ) { return; }
-    BOOST_TEST_MESSAGE ( "Failed to catch ba::not_enough_input in test_short_input3" );
-    BOOST_CHECK ( false );
-    }
-    
-//  Make sure that the right thing is thrown
-void test_short_input4 () {
-    std::string s;
-    
-    try { ba::unhex ( std::string ( "A" ), std::back_inserter(s)); }
-    catch ( const ba::non_hex_input &ex ) { BOOST_CHECK ( false ); }
-    catch ( const ba::not_enough_input &ex ) { return; }
-    catch ( ... ) { BOOST_CHECK ( false ); }
-    BOOST_CHECK ( false );
-    }
-
-//  Make sure that the right thing is thrown
-void test_short_input5 () {
-    std::string s;
-    
-    try { ba::unhex ( "A", std::back_inserter(s)); }
-    catch ( const ba::non_hex_input &ex ) { BOOST_CHECK ( false ); }
-    catch ( const ba::not_enough_input &ex ) { return; }
-    catch ( ... ) { BOOST_CHECK ( false ); }
-    BOOST_CHECK ( false );
-    }
-
-
-void test_short_input () {
-//  BOOST_TEST_MESSAGE ( "Short input tests for boost::algorithm::unhex" );
-    test_short_input1 ();
-    test_short_input2 ();
-    test_short_input3 ();
-    test_short_input4 ();
-    test_short_input5 ();
-    }
-
-
-void test_nonhex_input1 () {
-    std::string s;
-    
-    try { ba::unhex ( "01234FG1234", std::back_inserter(s)); }
-    catch ( const std::exception &ex ) {
-        BOOST_CHECK ( 'G' == *boost::get_error_info<ba::bad_char>(ex));
-        return;
-        }
-    catch ( ... ) {}
-    BOOST_TEST_MESSAGE ( "Failed to catch std::exception in test_nonhex_input1" );
-    BOOST_CHECK ( false );
-    }
-
-void test_nonhex_input2 () {
-    std::string s;
-    
-    try { ba::unhex ( "012Z4FA1234", std::back_inserter(s)); }
-    catch ( const ba::hex_decode_error &ex ) {
-        BOOST_CHECK ( 'Z' == *boost::get_error_info<ba::bad_char>(ex));
-        return;
-        }
-    catch ( ... ) {}
-    BOOST_TEST_MESSAGE ( "Failed to catch ba::hex_decode_error in test_nonhex_input2" );
-    BOOST_CHECK ( false );
-    }
-    
-void test_nonhex_input3 () {
-    std::string s;
-    
-    try { ba::unhex ( "01234FA12Q4", std::back_inserter(s)); }
-    catch ( const ba::non_hex_input &ex ) {
-        BOOST_CHECK ( 'Q' == *boost::get_error_info<ba::bad_char>(ex));
-        return;
-        }
-    catch ( ... ) {}
-    BOOST_TEST_MESSAGE ( "Failed to catch ba::non_hex_input in test_nonhex_input3" );
-    BOOST_CHECK ( false );
-    }
-    
-//  Make sure that the right thing is thrown
-void test_nonhex_input4 () {
-    std::string s;
-    
-    try { ba::unhex ( "P1234FA1234", std::back_inserter(s)); }
-    catch ( const ba::not_enough_input &ex ) { BOOST_CHECK ( false ); }
-    catch ( const ba::non_hex_input &ex ) { return; }
-    catch ( ... ) { BOOST_CHECK ( false ); }
-    BOOST_CHECK ( false );
-    }
-
-void test_nonhex_input () {
-//  BOOST_TEST_MESSAGE ( "Non hex input tests for boost::algorithm::unhex" );
-    test_nonhex_input1 ();
-    test_nonhex_input2 ();
-    test_nonhex_input3 ();
-    test_nonhex_input4 ();
-    }
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    test_short_input ();
-    test_nonhex_input ();
-}
diff --git a/third_party/boostorg/algorithm/test/inclusive_scan_test.cpp b/third_party/boostorg/algorithm/test/inclusive_scan_test.cpp
deleted file mode 100644
index 66dc998..0000000
--- a/third_party/boostorg/algorithm/test/inclusive_scan_test.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-#include <numeric>
-#include <algorithm>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/iota.hpp>
-#include <boost/algorithm/cxx17/inclusive_scan.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-int triangle(int n) { return n*(n+1)/2; }
-
-void basic_tests_op()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == (int)(i+1) * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == triangle(i));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == triangle(i + 1));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<int>());
-    assert(res.empty());
-    }
-}
-
-void test_inclusive_scan_op()
-{
-	basic_tests_op();
-	BOOST_CHECK(true);
-}
-
-void basic_tests_init()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 50);
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == 50 + (int)(i+1) * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 40);
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == 40 + triangle(i));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 30);
-    for (size_t i = 0; i < v.size(); ++i)
-        assert(v[i] == 30 + triangle(i + 1));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<int>(), 40);
-    assert(res.empty());
-    }
-}
-
-
-void test_inclusive_scan_init()
-{
-	basic_tests_init();
-	BOOST_CHECK(true);
-}
-
-void basic_tests_op_init()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 50);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 50 + (int)(i+1) * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 40);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 40 + triangle(i));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), 30);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 30 + triangle(i + 1));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<int>(), 40);
-    BOOST_CHECK(res.empty());
-    }
-}
-
-void test_inclusive_scan_op_init()
-{
-    basic_tests_op_init();
-	BOOST_CHECK(true);
-}
-
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_inclusive_scan_op();
-  test_inclusive_scan_init();
-  test_inclusive_scan_op_init();
-}
diff --git a/third_party/boostorg/algorithm/test/iota_test1.cpp b/third_party/boostorg/algorithm/test/iota_test1.cpp
deleted file mode 100644
index dfc047d..0000000
--- a/third_party/boostorg/algorithm/test/iota_test1.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/iota.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <iostream>
-#include <string>
-#include <vector>
-#include <list>
-
-//  Test to make sure a sequence is "correctly formed"; i.e, ascending by one
-template <typename Iterator, typename T>
-BOOST_CXX14_CONSTEXPR bool test_iota_results ( Iterator first, Iterator last, T initial_value ) {
-    if ( first == last ) return true;
-    if ( initial_value != *first ) return false;
-    Iterator prev = first;
-    while ( ++first != last ) {
-        if (( *first - *prev ) != 1 )
-            return false;
-        prev = first;
-        }
-    return true;
-    }
-
-    
-template <typename Range, typename T>
-BOOST_CXX14_CONSTEXPR bool test_iota_results ( const Range &r, T initial_value ) {
-    return test_iota_results (boost::begin (r), boost::end (r), initial_value );
-}
-
-    
-void test_ints () {
-    std::vector<int> v;
-    std::list<int> l;
-
-    v.clear (); v.resize ( 10 );
-    boost::algorithm::iota ( v.begin (), v.end (), 23 );
-    BOOST_CHECK ( test_iota_results ( v.begin (), v.end (), 23 ));
-    
-    v.clear (); v.resize ( 19 );
-    boost::algorithm::iota ( v, 18 );
-    BOOST_CHECK ( test_iota_results ( v, 18 ));
-    
-    v.clear ();
-    boost::algorithm::iota_n ( std::back_inserter(v), 99, 20 );
-    BOOST_CHECK ( test_iota_results ( v, 99 ));
-    
-    v.clear ();
-    boost::algorithm::iota_n ( std::back_inserter(v), 99, 0 );
-    BOOST_CHECK ( v.size() == 0 );
-
-/*
-    l.clear (); l.reserve ( 5 );
-    boost::algorithm::iota ( l.begin (), l.end (), 123 );
-    BOOST_CHECK ( test_iota_results ( l.begin (), l.end (), 123 ));
-    
-    l.clear (); l.reserve ( 9 );
-    boost::algorithm::iota ( l.begin (), l.end (), 87 );
-    BOOST_CHECK ( test_iota_results ( l.begin (), l.end (), 87 ));
-*/
-
-    l.clear ();
-    boost::algorithm::iota_n ( std::back_inserter(l), 99, 20 );
-    BOOST_CHECK ( test_iota_results ( l, 99 ));
-    
-    l.clear ();
-    boost::algorithm::iota_n ( std::front_inserter(l), 123, 20 );
-    BOOST_CHECK ( test_iota_results ( l.rbegin (), l.rend (), 123 ));
-    }
-    
-BOOST_CXX14_CONSTEXPR inline bool test_constexpr_iota() {
-    bool res = true;
-    int data[] = {0, 0, 0};
-    boost::algorithm::iota(data, data, 1); // fill none
-    res = (res && data[0] == 0);
-    
-    boost::algorithm::iota(data, data + 3, 1); // fill all
-    res = (res && test_iota_results(data, data + 3, 1));
-    
-    return res;
-    }
-    
-    
-BOOST_CXX14_CONSTEXPR inline bool test_constexpr_iota_n() {
-    bool res = true;
-    int data[] = {0, 0, 0};
-    boost::algorithm::iota_n(data, 1, 0); // fill none
-    res = (res && data[0] == 0);
-    
-    boost::algorithm::iota_n(data, 1, 3); // fill all
-    res = (res && test_iota_results(data, 1));
-    
-    return res;
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_ints ();
-  BOOST_CXX14_CONSTEXPR bool constexpr_iota_res = test_constexpr_iota ();
-  BOOST_CHECK(constexpr_iota_res);
-  BOOST_CXX14_CONSTEXPR bool constexpr_iota_n_res = test_constexpr_iota_n ();
-  BOOST_CHECK(constexpr_iota_n_res);
-}
diff --git a/third_party/boostorg/algorithm/test/is_palindrome_test.cpp b/third_party/boostorg/algorithm/test/is_palindrome_test.cpp
deleted file mode 100644
index ea2bb26..0000000
--- a/third_party/boostorg/algorithm/test/is_palindrome_test.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-  Copyright (c) Alexander Zaitsev <zamazan4ik@gmail.com>, 2016
-
-  Distributed under the Boost Software License, Version 1.0. (See
-  accompanying file LICENSE_1_0.txt or copy at
-  http://www.boost.org/LICENSE_1_0.txt)
-
-  See http://www.boost.org/ for latest version.
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/is_palindrome.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <algorithm>
-#include <iostream>
-#include <list>
-#include <vector>
-
-
-namespace ba = boost::algorithm;
-
-
-template <typename T>
-bool funcComparator(const T& v1, const T& v2)
-{
-    return v1 == v2;
-}
-
-struct functorComparator
-{
-    template <typename T>
-    bool operator()(const T& v1, const T& v2) const
-    {
-        return v1 == v2;
-    }
-};
-
-void test_is_palindrome()
-{
-    const std::list<int> empty;
-    const std::vector<char> singleElement(1, 'z');
-    int oddNonPalindrome[] = {3,2,2};
-    const int oddPalindrome[] = {1,2,3,2,1};
-    const int evenPalindrome[] = {1,2,2,1};
-    int evenNonPalindrome[] = {1,4,8,8};
-    const char* stringNullPtr = NULL;
-
-    // Test a default operator==
-    BOOST_CHECK ( ba::is_palindrome(empty));
-    BOOST_CHECK ( ba::is_palindrome(singleElement));
-    BOOST_CHECK (!ba::is_palindrome(boost::begin(oddNonPalindrome),  boost::end(oddNonPalindrome)));
-    BOOST_CHECK ( ba::is_palindrome(boost::begin(oddPalindrome),     boost::end(oddPalindrome)));
-    BOOST_CHECK ( ba::is_palindrome(boost::begin(evenPalindrome),    boost::end(evenPalindrome)));
-    BOOST_CHECK (!ba::is_palindrome(boost::begin(evenNonPalindrome), boost::end(evenNonPalindrome)));
-
-    //Test the custom comparators
-    BOOST_CHECK ( ba::is_palindrome(empty.begin(), empty.end(), functorComparator()));
-    BOOST_CHECK (!ba::is_palindrome(boost::begin(oddNonPalindrome), boost::end(oddNonPalindrome), funcComparator<int>));
-    BOOST_CHECK ( ba::is_palindrome(evenPalindrome, std::equal_to<int>()));
-    
-    //Test C-strings like cases
-    BOOST_CHECK ( ba::is_palindrome(stringNullPtr));
-    BOOST_CHECK ( ba::is_palindrome(""));
-    BOOST_CHECK ( ba::is_palindrome("a"));
-    BOOST_CHECK ( ba::is_palindrome("abacaba", std::equal_to<char>()));
-    BOOST_CHECK ( ba::is_palindrome("abba"));
-    BOOST_CHECK (!ba::is_palindrome("acab"));
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_is_palindrome ();
-}
diff --git a/third_party/boostorg/algorithm/test/is_partitioned_test1.cpp b/third_party/boostorg/algorithm/test/is_partitioned_test1.cpp
deleted file mode 100644
index d538a06..0000000
--- a/third_party/boostorg/algorithm/test/is_partitioned_test1.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/is_partitioned.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-template <typename T>
-struct less_than {
-public:
-    BOOST_CXX14_CONSTEXPR less_than ( T foo ) : val ( foo ) {}
-    BOOST_CXX14_CONSTEXPR less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    BOOST_CXX14_CONSTEXPR bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-    };
-
-    
-BOOST_CXX14_CONSTEXPR bool test_constexpr() {
-    int v[] = { 4, 5, 6, 7, 8, 9, 10 };
-    bool res = true;
-    res = ( res && ba::is_partitioned ( v, less_than<int>(3)));  // no elements
-    res = ( res && ba::is_partitioned ( v, less_than<int>(5)));  // only the first element
-    res = ( res && ba::is_partitioned ( v, less_than<int>(8)));  // in the middle somewhere
-    res = ( res && ba::is_partitioned ( v, less_than<int>(99))); // all elements
-    return res;
-    }
-    
-
-void test_sequence1 () {
-    std::vector<int> v;
-    
-    v.clear ();
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    BOOST_CHECK ( ba::is_partitioned ( v, less_than<int>(3)));      // no elements
-    BOOST_CHECK ( ba::is_partitioned ( v, less_than<int>(6)));      // only the first element
-    BOOST_CHECK ( ba::is_partitioned ( v, less_than<int>(10))); // in the middle somewhere
-    BOOST_CHECK ( ba::is_partitioned ( v, less_than<int>(99))); // all elements satisfy 
-
-//  With bidirectional iterators.
-    std::list<int> l;
-    for ( int i = 5; i < 15; ++i )
-        l.push_back ( i );
-    BOOST_CHECK ( ba::is_partitioned ( l.begin (), l.end (), less_than<int>(3)));       // no elements
-    BOOST_CHECK ( ba::is_partitioned ( l.begin (), l.end (), less_than<int>(6)));       // only the first element
-    BOOST_CHECK ( ba::is_partitioned ( l.begin (), l.end (), less_than<int>(10)));  // in the middle somewhere
-    BOOST_CHECK ( ba::is_partitioned ( l.begin (), l.end (), less_than<int>(99)));      // all elements satisfy 
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-  BOOST_CXX14_CONSTEXPR bool constexpr_res = test_constexpr ();
-  BOOST_CHECK ( constexpr_res );
-}
diff --git a/third_party/boostorg/algorithm/test/is_partitioned_until_test.cpp b/third_party/boostorg/algorithm/test/is_partitioned_until_test.cpp
deleted file mode 100644
index 379c06e..0000000
--- a/third_party/boostorg/algorithm/test/is_partitioned_until_test.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2011-2012, Alexander Zaitsev <zamazan4ik@gmail.com>, 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/is_partitioned_until.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-template <typename T>
-struct less_than {
-public:
-    less_than ( T foo ) : val ( foo ) {}
-    less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-};
-
-
-void test_sequence1 () {
-    std::vector<int> v;
-
-    v.clear ();
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    BOOST_CHECK ( ba::is_partitioned_until ( v, less_than<int>(3)) == v.end());      // no elements
-    BOOST_CHECK ( ba::is_partitioned_until ( v, less_than<int>(6)) == v.end());      // only the first element
-    BOOST_CHECK ( ba::is_partitioned_until ( v, less_than<int>(10)) == v.end()); // in the middle somewhere
-    BOOST_CHECK ( ba::is_partitioned_until ( v, less_than<int>(99)) == v.end()); // all elements satisfy
-//  With bidirectional iterators.
-    std::list<int> l;
-    for ( int i = 5; i < 15; ++i )
-        l.push_back ( i );
-    BOOST_CHECK ( ba::is_partitioned_until ( l.begin (), l.end (), less_than<int>(3)) == l.end());       // no elements
-    BOOST_CHECK ( ba::is_partitioned_until ( l.begin (), l.end (), less_than<int>(6)) == l.end());       // only the first element
-    BOOST_CHECK ( ba::is_partitioned_until ( l.begin (), l.end (), less_than<int>(10)) == l.end());  // in the middle somewhere
-    BOOST_CHECK ( ba::is_partitioned_until ( l.begin (), l.end (), less_than<int>(99)) == l.end());      // all elements satisfy
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    test_sequence1 ();
-}
diff --git a/third_party/boostorg/algorithm/test/is_permutation_test1.cpp b/third_party/boostorg/algorithm/test/is_permutation_test1.cpp
deleted file mode 100644
index 2e1a12f..0000000
--- a/third_party/boostorg/algorithm/test/is_permutation_test1.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/is_permutation.hpp>
-#include <boost/algorithm/cxx14/is_permutation.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-#include "iterator_test.hpp"
-
-template <typename T>
-bool eq ( const T& a, const T& b ) { return a == b; }
-
-template <typename T>
-bool never_eq ( const T&, const T& ) { return false; }
-
-namespace ba = boost::algorithm;
-
-void test_sequence1 () {
-    int num[] = { 1, 1, 2, 3, 5 };
-    const int sz = sizeof (num)/sizeof(num[0]);
-
-//  Empty sequences
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num), 
-            forward_iterator<int *>(num)));
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num), 
-            forward_iterator<int *>(num),     forward_iterator<int *>(num)));
-    BOOST_CHECK (
-        ba::is_permutation (
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num), 
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num)));
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num), 
-            forward_iterator<int *>(num), 
-            never_eq<int> ));       // Since the sequences are empty, the pred is never called
-            
-//  Empty vs. non-empty
-    BOOST_CHECK ( !
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num), 
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + 1)));
-
-    BOOST_CHECK ( !
-        ba::is_permutation ( 
-            forward_iterator<int *>(num + 1), forward_iterator<int *>(num + 2), 
-            forward_iterator<int *>(num),     forward_iterator<int *>(num)));
-                    
-    BOOST_CHECK ( !
-        ba::is_permutation (
-            random_access_iterator<int *>(num + 1), random_access_iterator<int *>(num + 2), 
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num)));
-
-    BOOST_CHECK ( !
-        ba::is_permutation (
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num), 
-            random_access_iterator<int *>(num + 1), random_access_iterator<int *>(num + 2)));
-
-//  Something should be a permutation of itself
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz), 
-            forward_iterator<int *>(num)));
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz), 
-            forward_iterator<int *>(num), eq<int> ));
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz), 
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz )));
-    BOOST_CHECK (
-        ba::is_permutation (
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz), 
-            forward_iterator<int *>(num),     forward_iterator<int *>(num + sz ),
-            eq<int> ));
-            
-    BOOST_CHECK (
-        ba::is_permutation (
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz), 
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz)));
-    BOOST_CHECK (
-        ba::is_permutation (
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz), 
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz),
-            eq<int> ));
-    BOOST_CHECK (
-        ba::is_permutation (
-            random_access_iterator<int *>(num),     random_access_iterator<int *>(num + sz), 
-            forward_iterator<int *>(num),           forward_iterator<int *>(num + sz),
-            eq<int> ));
-    
-
-    std::vector<int> v, v1;
-    
-    v.clear ();
-    for ( std::size_t i = 5; i < 15; ++i )
-        v.push_back ( i );
-    v1 = v;
-    BOOST_CHECK ( ba::is_permutation ( v.begin (), v.end (), v.begin ()));  // better be a permutation of itself!
-    BOOST_CHECK ( ba::is_permutation ( v.begin (), v.end (), v1.begin ()));    
-
-//  With bidirectional iterators.
-    std::list<int> l;
-    std::copy ( v.begin (), v.end (), std::back_inserter ( l ));
-    BOOST_CHECK ( ba::is_permutation ( l.begin (), l.end (), l.begin ()));  // better be a permutation of itself!
-    BOOST_CHECK ( ba::is_permutation ( l.begin (), l.end (), v1.begin ()));
-    for ( std::size_t i = 0; i < l.size (); ++i ) {
-        l.push_back ( *l.begin ()); l.pop_front (); // rotation
-        BOOST_CHECK ( ba::is_permutation ( l.begin (), l.end (), v1.begin ()));
-        }   
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
diff --git a/third_party/boostorg/algorithm/test/iterator_test.hpp b/third_party/boostorg/algorithm/test/iterator_test.hpp
deleted file mode 100644
index da54456..0000000
--- a/third_party/boostorg/algorithm/test/iterator_test.hpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#ifndef ITERATOR_TEST_H
-#define ITERATOR_TEST_H
-
-/* 
-    A set of iterator adapters for constructing test cases
-    From an iterator (or a pointer), you can make any class of iterator,
-    assuming you want to degrade its capabilities.
-    
-    Modeled closely on work that Howard Hinnant did for libc++.
-*/
-
-#include <iterator>
-
-// == Input Iterator ==
-template <typename It>
-class input_iterator {
-public:
-    typedef          std::input_iterator_tag                   iterator_category;
-    typedef typename std::iterator_traits<It>::value_type      value_type;
-    typedef typename std::iterator_traits<It>::difference_type difference_type;
-    typedef It                                                 pointer;
-    typedef typename std::iterator_traits<It>::reference       reference;
-
-    BOOST_CXX14_CONSTEXPR It base() const {return it_;}
-
-    BOOST_CXX14_CONSTEXPR input_iterator() : it_() {}
-    BOOST_CXX14_CONSTEXPR explicit input_iterator(It it) : it_(it) {}
-
-    template <typename U>
-    BOOST_CXX14_CONSTEXPR input_iterator(const input_iterator<U>& u) :it_(u.it_) {}
-
-    BOOST_CXX14_CONSTEXPR reference operator*() const {return *it_;}
-    BOOST_CXX14_CONSTEXPR pointer  operator->() const {return  it_;}
-
-    BOOST_CXX14_CONSTEXPR input_iterator& operator++()    {++it_; return *this;}
-    BOOST_CXX14_CONSTEXPR input_iterator  operator++(int) {input_iterator tmp(*this); ++(*this); return tmp;}
-
-    BOOST_CXX14_CONSTEXPR friend bool operator==(const input_iterator& x, const input_iterator& y)
-        {return x.it_ == y.it_;}
-    BOOST_CXX14_CONSTEXPR friend bool operator!=(const input_iterator& x, const input_iterator& y)
-        {return !(x == y);}
-
-private:
-    It it_;
-    template <typename U> friend class input_iterator;
-};
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator==(const input_iterator<T>& x, const input_iterator<U>& y)
-{
-    return x.base() == y.base();
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator!=(const input_iterator<T>& x, const input_iterator<U>& y)
-{
-    return !(x == y);
-}
-
-
-// == Forward Iterator ==
-template <typename It>
-class forward_iterator {
-public:
-    typedef          std::forward_iterator_tag                 iterator_category;
-    typedef typename std::iterator_traits<It>::value_type      value_type;
-    typedef typename std::iterator_traits<It>::difference_type difference_type;
-    typedef It                                                 pointer;
-    typedef typename std::iterator_traits<It>::reference       reference;
-
-    BOOST_CXX14_CONSTEXPR It base() const {return it_;}
-
-    BOOST_CXX14_CONSTEXPR forward_iterator() : it_() {}
-    BOOST_CXX14_CONSTEXPR explicit forward_iterator(It it) : it_(it) {}
-    template <typename U>
-    BOOST_CXX14_CONSTEXPR forward_iterator(const forward_iterator<U>& u) :it_(u.it_) {}
-
-    BOOST_CXX14_CONSTEXPR reference operator*() const {return *it_;}
-    BOOST_CXX14_CONSTEXPR pointer  operator->() const {return  it_;}
-
-    BOOST_CXX14_CONSTEXPR forward_iterator& operator++()    {++it_; return *this;}
-    BOOST_CXX14_CONSTEXPR forward_iterator  operator++(int) {forward_iterator tmp(*this); ++(*this); return tmp;}
-
-    BOOST_CXX14_CONSTEXPR friend bool operator==(const forward_iterator& x, const forward_iterator& y)
-        {return x.it_ == y.it_;}
-    BOOST_CXX14_CONSTEXPR friend bool operator!=(const forward_iterator& x, const forward_iterator& y)
-        {return !(x == y);}
-private:
-    It it_;
-
-    template <typename U> friend class forward_iterator;
-};
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator==(const forward_iterator<T>& x, const forward_iterator<U>& y)
-{
-    return x.base() == y.base();
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator!=(const forward_iterator<T>& x, const forward_iterator<U>& y)
-{
-    return !(x == y);
-}
-
-// == Bidirectional Iterator ==
-template <typename It>
-class bidirectional_iterator
-{
-public:
-    typedef          std::bidirectional_iterator_tag           iterator_category;
-    typedef typename std::iterator_traits<It>::value_type      value_type;
-    typedef typename std::iterator_traits<It>::difference_type difference_type;
-    typedef It                                                 pointer;
-    typedef typename std::iterator_traits<It>::reference       reference;
-
-    BOOST_CXX14_CONSTEXPR It base() const {return it_;}
-
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator() : it_() {}
-    BOOST_CXX14_CONSTEXPR explicit bidirectional_iterator(It it) : it_(it) {}
-    template <typename U>
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator(const bidirectional_iterator<U>& u) :it_(u.it_) {}
-
-    BOOST_CXX14_CONSTEXPR reference operator*() const {return *it_;}
-    BOOST_CXX14_CONSTEXPR pointer  operator->() const {return  it_;}
-
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator& operator++()    {++it_; return *this;}
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator  operator++(int) {bidirectional_iterator tmp(*this); ++(*this); return tmp;}
-
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator& operator--()    {--it_; return *this;}
-    BOOST_CXX14_CONSTEXPR bidirectional_iterator  operator--(int) {bidirectional_iterator tmp(*this); --(*this); return tmp;}
-private:
-    It it_;
-    template <typename U> friend class bidirectional_iterator;
-};
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator==(const bidirectional_iterator<T>& x, const bidirectional_iterator<U>& y)
-{
-    return x.base() == y.base();
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator!=(const bidirectional_iterator<T>& x, const bidirectional_iterator<U>& y)
-{
-    return !(x == y);
-}
-
-
-// == Random Access Iterator ==
-template <typename It>
-class random_access_iterator {
-public:
-    typedef          std::random_access_iterator_tag           iterator_category;
-    typedef typename std::iterator_traits<It>::value_type      value_type;
-    typedef typename std::iterator_traits<It>::difference_type difference_type;
-    typedef It                                                 pointer;
-    typedef typename std::iterator_traits<It>::reference       reference;
-
-    BOOST_CXX14_CONSTEXPR It base() const {return it_;}
-
-    BOOST_CXX14_CONSTEXPR random_access_iterator() : it_() {}
-    BOOST_CXX14_CONSTEXPR explicit random_access_iterator(It it) : it_(it) {}
-    template <typename U>
-    BOOST_CXX14_CONSTEXPR random_access_iterator(const random_access_iterator<U>& u) :it_(u.it_) {}
-
-    BOOST_CXX14_CONSTEXPR reference operator*() const {return *it_;}
-    BOOST_CXX14_CONSTEXPR pointer  operator->() const {return  it_;}
-
-    BOOST_CXX14_CONSTEXPR random_access_iterator& operator++()    {++it_; return *this;}
-    BOOST_CXX14_CONSTEXPR random_access_iterator  operator++(int) {random_access_iterator tmp(*this); ++(*this); return tmp;}
-
-    BOOST_CXX14_CONSTEXPR random_access_iterator& operator--()    {--it_; return *this;}
-    BOOST_CXX14_CONSTEXPR random_access_iterator  operator--(int) {random_access_iterator tmp(*this); --(*this); return tmp;}
-
-    BOOST_CXX14_CONSTEXPR random_access_iterator& operator+=(difference_type n)       {it_ += n; return *this;}
-    BOOST_CXX14_CONSTEXPR random_access_iterator  operator+ (difference_type n) const {random_access_iterator tmp(*this); tmp += n; return tmp;}
-    BOOST_CXX14_CONSTEXPR friend random_access_iterator operator+(difference_type n, random_access_iterator x) {x += n; return x;}
-
-    BOOST_CXX14_CONSTEXPR random_access_iterator& operator-=(difference_type n)       {return *this += -n;}
-    BOOST_CXX14_CONSTEXPR random_access_iterator  operator- (difference_type n) const {random_access_iterator tmp(*this); tmp -= n; return tmp;}
-
-    BOOST_CXX14_CONSTEXPR reference operator[](difference_type n) const {return it_[n];}
-private:
-    It it_;
-
-    template <typename U> friend class random_access_iterator;
-};
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator==(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return x.base() == y.base();
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator!=(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return !(x == y);
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator<(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return x.base() < y.base();
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator<=(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return !(y < x);
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator>(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return y < x;
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline bool
-operator>=(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return !(x < y);
-}
-
-template <typename T, typename U>
-BOOST_CXX14_CONSTEXPR inline typename std::iterator_traits<T>::difference_type
-operator-(const random_access_iterator<T>& x, const random_access_iterator<U>& y)
-{
-    return x.base() - y.base();
-}
-
-
-// == Output Iterator ==
-template <typename It>
-class output_iterator {
-public:
-    typedef          std::output_iterator_tag                  iterator_category;
-    typedef void                                               value_type;
-    typedef typename std::iterator_traits<It>::difference_type difference_type;
-    typedef It                                                 pointer;
-    typedef typename std::iterator_traits<It>::reference       reference;
-
-    BOOST_CXX14_CONSTEXPR It base() const {return it_;}
-
-    BOOST_CXX14_CONSTEXPR output_iterator () {}
-    BOOST_CXX14_CONSTEXPR explicit output_iterator(It it) : it_(it) {}
-
-    template <typename U>
-    BOOST_CXX14_CONSTEXPR output_iterator(const output_iterator<U>& u) :it_(u.it_) {}
-
-    BOOST_CXX14_CONSTEXPR reference operator*() const {return *it_;}
-
-    BOOST_CXX14_CONSTEXPR output_iterator& operator++()    {++it_; return *this;}
-    BOOST_CXX14_CONSTEXPR output_iterator  operator++(int) {output_iterator tmp(*this); ++(*this); return tmp;}
-
-private:
-    It it_;
-    template <typename U> friend class output_iterator;
-    };
-    
-//  No comparison operators for output iterators
-    
-
-// == Get the base of an iterator; used for comparisons ==
-template <typename Iter>
-BOOST_CXX14_CONSTEXPR inline Iter base(output_iterator<Iter> i) { return i.base(); }
-
-template <typename Iter>
-BOOST_CXX14_CONSTEXPR inline Iter base(input_iterator<Iter> i) { return i.base(); }
-
-template <typename Iter>
-BOOST_CXX14_CONSTEXPR inline Iter base(forward_iterator<Iter> i) { return i.base(); }
-
-template <typename Iter>
-BOOST_CXX14_CONSTEXPR inline Iter base(bidirectional_iterator<Iter> i) { return i.base(); }
-
-template <typename Iter>
-BOOST_CXX14_CONSTEXPR inline Iter base(random_access_iterator<Iter> i) { return i.base(); }
-
-template <typename Iter>    // everything else
-BOOST_CXX14_CONSTEXPR inline Iter base(Iter i) { return i; }
-
-#endif  // ITERATOR_TEST_H
diff --git a/third_party/boostorg/algorithm/test/mismatch_test.cpp b/third_party/boostorg/algorithm/test/mismatch_test.cpp
deleted file mode 100644
index 1c056c7..0000000
--- a/third_party/boostorg/algorithm/test/mismatch_test.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx14/mismatch.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-template <typename T>
-BOOST_CXX14_CONSTEXPR bool eq ( const T& a, const T& b ) { return a == b; }
-
-template <typename T>
-BOOST_CXX14_CONSTEXPR bool never_eq ( const T&, const T& ) { return false; }
-
-namespace ba = boost::algorithm;
-
-template <typename Iter1, typename Iter2>
-BOOST_CXX14_CONSTEXPR bool iter_eq ( std::pair<Iter1, Iter2> pr, Iter1 first, Iter2 second ) {
-    return pr.first == first && pr.second == second;
-    }
-
-void test_mismatch ()
-{
-//  Note: The literal values here are tested against directly; be careful if you change them:
-    BOOST_CXX14_CONSTEXPR int num[] = { 1, 1, 2, 3, 5 };
-    const int sz = sizeof (num)/sizeof(num[0]);
-    
-    
-//  No mismatch for empty sequences
-    BOOST_CHECK ( iter_eq ( 
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num), 
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num)),
-                input_iterator<const int *>(num), input_iterator<const int *>(num)));
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num), 
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num),
-                       never_eq<int> ),
-                input_iterator<const int *>(num), input_iterator<const int *>(num)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num), 
-                       random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num),
-                       never_eq<int> ),
-                random_access_iterator<const int *>(num), random_access_iterator<const int *>(num)));
-                              
-//  Empty vs. non-empty mismatch immediately
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num), 
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1)),
-                input_iterator<const int *>(num),     input_iterator<const int *>(num)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 2), 
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num)),
-                input_iterator<const int *>(num + 1), input_iterator<const int *>(num)));
-                    
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( random_access_iterator<const int *>(num + 1), random_access_iterator<const int *>(num + 2), 
-                       random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num)),
-                random_access_iterator<const int *>(num + 1), random_access_iterator<const int *>(num)));
-
-//  Single element sequences are equal if they contain the same value
-    BOOST_CHECK ( iter_eq ( 
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1)),
-                input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 1)));
-                       
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       eq<int> ),
-                input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 1)));
-                    
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num + 1),
-                       random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num + 1),
-                       eq<int> ),
-                random_access_iterator<const int *>(num + 1), random_access_iterator<const int *>(num + 1)));
-
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       never_eq<int> ),
-               input_iterator<const int *>(num),     input_iterator<const int *>(num)));
-               
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num + 1),
-                       random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num + 1),
-                       never_eq<int> ),
-               random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 2)),
-               input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 2)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 2),
-                       eq<int> ),
-               input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 2)));
-
-    BOOST_CHECK ( iter_eq (
-            ba::mismatch ( input_iterator<const int *>(num + 2), input_iterator<const int *>(num + 3), 
-                           input_iterator<const int *>(num),     input_iterator<const int *>(num + 1)),
-                           input_iterator<const int *>(num + 2), input_iterator<const int *>(num)));
-                           
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num + 2), input_iterator<const int *>(num + 3),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       eq<int> ),
-               input_iterator<const int *>(num + 2), input_iterator<const int *>(num)));
-                       
-                              
-                              
-//  Identical long sequences are equal. 
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz)),
-            input_iterator<const int *>(num + sz), input_iterator<const int *>(num + sz)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       eq<int> ),
-            input_iterator<const int *>(num + sz), input_iterator<const int *>(num + sz)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       never_eq<int> ),
-            input_iterator<const int *>(num),     input_iterator<const int *>(num)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),             input_iterator<const int *>(num + sz),
-                       random_access_iterator<const int *>(num),     random_access_iterator<const int *>(num + sz),
-                       never_eq<int> ),
-            input_iterator<const int *>(num),     random_access_iterator<const int *>(num)));
-
-//  Different sequences are different
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num + 1), input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz)),
-            input_iterator<const int *>(num + 2), input_iterator<const int *>(num + 1)));
-
-    BOOST_CHECK ( iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num + 1), input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       eq<int> ),
-            input_iterator<const int *>(num + 2), input_iterator<const int *>(num + 1)));
-            
-//  Checks constexpr
-    BOOST_CXX14_CONSTEXPR bool res = (
-//  No mismatch for empty
-        iter_eq ( 
-            ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num), 
-                           input_iterator<const int *>(num),     input_iterator<const int *>(num)),
-                input_iterator<const int *>(num), input_iterator<const int *>(num))
-//  Empty vs. non-empty mismatch immediately  
-        && iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num), 
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1)),
-                input_iterator<const int *>(num),     input_iterator<const int *>(num))
-//  Single element sequences are equal if they contain the same value                
-        && iter_eq ( 
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + 1),
-                       eq<int>),
-                input_iterator<const int *>(num + 1), input_iterator<const int *>(num + 1))
-//  Identical long sequences are equal.
-        && iter_eq (
-        ba::mismatch ( input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       input_iterator<const int *>(num),     input_iterator<const int *>(num + sz),
-                       eq<int> ),
-                input_iterator<const int *>(num + sz), input_iterator<const int *>(num + sz))
-        );
-    
-    BOOST_CHECK ( res );
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_mismatch ();
-}
diff --git a/third_party/boostorg/algorithm/test/none_of_test.cpp b/third_party/boostorg/algorithm/test/none_of_test.cpp
deleted file mode 100644
index 0a20ff4..0000000
--- a/third_party/boostorg/algorithm/test/none_of_test.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/none_of.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <functional>
-#include <vector>
-#include <list>
-
-template<typename T>
-struct is_ {
-    BOOST_CXX14_CONSTEXPR is_ ( T v ) : val_ ( v ) {}
-    BOOST_CXX14_CONSTEXPR bool operator () ( T comp ) const { return val_ == comp; }
-private:
-    is_ (); // need a value
-
-    T val_;
-    };
-
-namespace ba = boost::algorithm;
-
-void test_none()
-{
-//  Note: The literal values here are tested against directly; be careful if you change them:
-    BOOST_CXX14_CONSTEXPR int some_numbers[] = { 1, 5, 0, 18, 1 };
-    std::vector<int> vi(some_numbers, some_numbers + 5);
-    std::list<int>   li(vi.begin(), vi.end ());
-    
-    int some_letters[] = { 'a', 'q', 'n', 'y', 'n' };
-    std::vector<char> vc(some_letters, some_letters + 5);
-    
-    BOOST_CHECK ( ba::none_of_equal ( vi,                                  100 ));
-    BOOST_CHECK ( ba::none_of       ( vi,                       is_<int> ( 100 )));
-    BOOST_CHECK ( ba::none_of_equal ( vi.begin(),     vi.end(),            100 ));
-    BOOST_CHECK ( ba::none_of       ( vi.begin(),     vi.end(), is_<int> ( 100 )));
-
-    BOOST_CHECK (!ba::none_of_equal ( vi,                                    1 ));
-    BOOST_CHECK (!ba::none_of       ( vi,                       is_<int> (   1 )));
-    BOOST_CHECK (!ba::none_of_equal ( vi.begin(),     vi.end(),              1 ));
-    BOOST_CHECK (!ba::none_of       ( vi.begin(),     vi.end(), is_<int> (   1 )));
-
-    BOOST_CHECK ( ba::none_of_equal ( vi.end(),       vi.end(),              0 ));
-    BOOST_CHECK ( ba::none_of       ( vi.end(),       vi.end(), is_<int> (   0 )));
-
-//   5 is not in { 0, 18, 1 }, but 1 is
-    BOOST_CHECK ( ba::none_of_equal ( vi.begin() + 2, vi.end(),              5 ));
-    BOOST_CHECK ( ba::none_of       ( vi.begin() + 2, vi.end(), is_<int> (   5 )));
-    BOOST_CHECK (!ba::none_of_equal ( vi.begin() + 2, vi.end(),              1 ));
-    BOOST_CHECK (!ba::none_of       ( vi.begin() + 2, vi.end(), is_<int> (   1 )));
-
-//  18 is not in { 1, 5, 0 }, but 5 is
-    BOOST_CHECK ( ba::none_of_equal ( vi.begin(),     vi.begin() + 3,            18 ));
-    BOOST_CHECK ( ba::none_of       ( vi.begin(),     vi.begin() + 3, is_<int> ( 18 )));
-    BOOST_CHECK (!ba::none_of_equal ( vi.begin(),     vi.begin() + 3,             5 ));
-    BOOST_CHECK (!ba::none_of       ( vi.begin(),     vi.begin() + 3, is_<int> (  5 )));
-    
-    BOOST_CHECK ( ba::none_of_equal ( vc,             'z' ));
-    BOOST_CHECK ( ba::none_of       ( vc, is_<char> ( 'z' )));
-
-    BOOST_CHECK (!ba::none_of_equal ( vc,             'a' ));
-    BOOST_CHECK (!ba::none_of       ( vc, is_<char> ( 'a' )));
-
-    BOOST_CHECK (!ba::none_of_equal ( vc,             'n' ));
-    BOOST_CHECK (!ba::none_of       ( vc, is_<char> ( 'n' )));
-
-    BOOST_CHECK ( ba::none_of_equal ( vi.begin(), vi.begin(),   1 ));
-    BOOST_CHECK ( ba::none_of_equal ( vc.begin(), vc.begin(), 'a' ));
-    BOOST_CHECK ( ba::none_of       ( vi.begin(), vi.begin(), is_<int>  (   1 )));
-    BOOST_CHECK ( ba::none_of       ( vc.begin(), vc.begin(), is_<char> ( 'a' )));
-
-    BOOST_CHECK ( ba::none_of_equal ( li,                                  100 ));
-    BOOST_CHECK ( ba::none_of       ( li,                       is_<int> ( 100 )));
-    BOOST_CHECK ( ba::none_of_equal ( li.begin(),     li.end(),            100 ));
-    BOOST_CHECK ( ba::none_of       ( li.begin(),     li.end(), is_<int> ( 100 )));
-    
-    std::list<int>::iterator l_iter = li.begin ();
-    l_iter++; l_iter++; l_iter++;
-    BOOST_CHECK ( ba::none_of_equal ( li.begin(), l_iter,            18 ));
-    BOOST_CHECK ( ba::none_of       ( li.begin(), l_iter, is_<int> ( 18 )));
-    BOOST_CHECK (!ba::none_of       ( li.begin(), l_iter, is_<int> (  5 )));
-
-    BOOST_CXX14_CONSTEXPR bool constexpr_res =
-        !ba::none_of_equal ( some_numbers, 1 )                                 &&
-        !ba::none_of       ( some_numbers, is_<int> ( 1 ))                     &&
-         ba::none_of_equal ( some_numbers, some_numbers + 3,            100 )  &&
-         ba::none_of       ( some_numbers, some_numbers + 3, is_<int> ( 100 )) &&
-        true;
-
-    BOOST_CHECK ( constexpr_res );
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_none();
-}
diff --git a/third_party/boostorg/algorithm/test/one_of_test.cpp b/third_party/boostorg/algorithm/test/one_of_test.cpp
deleted file mode 100644
index 8403a8e..0000000
--- a/third_party/boostorg/algorithm/test/one_of_test.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2008-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/one_of.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <functional>
-#include <vector>
-#include <list>
-
-template<typename T>
-struct is_ {
-    BOOST_CXX14_CONSTEXPR is_ ( T v ) : val_ ( v ) {}
-    BOOST_CXX14_CONSTEXPR bool operator () ( T comp ) const { return val_ == comp; }
-private:
-    is_ (); // need a value
-
-    T val_;
-    };
-
-namespace ba = boost::algorithm;
-
-void test_one ()
-{
-//  Note: The literal values here are tested against directly; be careful if you change them:
-    BOOST_CXX14_CONSTEXPR int some_numbers[] = { 1, 1, 2, 3, 5 };
-    std::vector<int> vi(some_numbers, some_numbers + 5);
-    std::list<int>   li(vi.begin(), vi.end ());
-    
-    int some_letters[] = { 'a', 'q', 'n', 'y', 'n' };
-    std::vector<char> vc(some_letters, some_letters + 5);
-    
-    BOOST_CHECK (!ba::one_of_equal ( vi,                                  1 ));
-    BOOST_CHECK (!ba::one_of       ( vi,                       is_<int> ( 1 )));
-    BOOST_CHECK (!ba::one_of_equal ( vi.begin(),     vi.end(),            1 ));
-    BOOST_CHECK (!ba::one_of       ( vi.begin(),     vi.end(), is_<int> ( 1 )));
-
-    BOOST_CHECK (!ba::one_of_equal ( vi,                                  0 ));
-    BOOST_CHECK (!ba::one_of       ( vi,                       is_<int> ( 0 )));
-    BOOST_CHECK (!ba::one_of_equal ( vi.begin(),     vi.end(),            0 ));
-    BOOST_CHECK (!ba::one_of       ( vi.begin(),     vi.end(), is_<int> ( 0 )));
-    
-    BOOST_CHECK ( ba::one_of_equal ( vi,                                  2 ));
-    BOOST_CHECK ( ba::one_of       ( vi,                       is_<int> ( 2 )));
-    BOOST_CHECK ( ba::one_of_equal ( vi.begin(),     vi.end(),            2 ));
-    BOOST_CHECK ( ba::one_of       ( vi.begin(),     vi.end(), is_<int> ( 2 )));
-
-//  Check for a match at the end
-    BOOST_CHECK ( ba::one_of_equal ( vi,                                  5 ));
-    BOOST_CHECK ( ba::one_of       ( vi,                       is_<int> ( 5 )));
-    BOOST_CHECK ( ba::one_of_equal ( vi.begin(),     vi.end(),            5 ));
-    BOOST_CHECK ( ba::one_of       ( vi.begin(),     vi.end(), is_<int> ( 5 )));
-
-    BOOST_CHECK ( ba::one_of_equal ( vi.begin() + 1, vi.end(),            1 ));
-    BOOST_CHECK ( ba::one_of       ( vi.begin() + 1, vi.end(), is_<int> ( 1 )));
-    
-    BOOST_CHECK ( ba::one_of_equal ( vc.begin() + 1, vc.begin() + 2,             'q' ));
-    BOOST_CHECK ( ba::one_of       ( vc.begin() + 1, vc.begin() + 2, is_<char> ( 'q' )));
-
-    BOOST_CHECK (!ba::one_of_equal ( vc, '!' ));
-    BOOST_CHECK (!ba::one_of       ( vc, is_<char> ( '!' )));
-    
-    BOOST_CHECK (!ba::one_of_equal ( vc, 'n' ));
-    BOOST_CHECK (!ba::one_of       ( vc, is_<char> ( 'n' )));
-
-//  Empty range check
-    BOOST_CHECK (!ba::one_of_equal ( vi.begin(), vi.begin(),   1 ));
-    BOOST_CHECK (!ba::one_of_equal ( vc.begin(), vc.begin(), 'a' ));
-    BOOST_CHECK (!ba::one_of       ( vi.begin(), vi.begin(), is_<int>  (   1 )));
-    BOOST_CHECK (!ba::one_of       ( vc.begin(), vc.begin(), is_<char> ( 'a' )));
-    
-    BOOST_CHECK (!ba::one_of_equal ( li,                                  1 ));
-    BOOST_CHECK (!ba::one_of       ( li,                       is_<int> ( 1 )));
-    BOOST_CHECK (!ba::one_of_equal ( li.begin(),     li.end(),            1 ));
-    BOOST_CHECK (!ba::one_of       ( li.begin(),     li.end(), is_<int> ( 1 )));
-    
-    std::list<int>::iterator l_iter = li.begin ();
-    l_iter++; l_iter++; l_iter++;
-    BOOST_CHECK (!ba::one_of_equal ( li.begin(), l_iter,            1 ));
-    BOOST_CHECK (!ba::one_of       ( li.begin(), l_iter, is_<int> ( 1 )));
-    BOOST_CHECK ( ba::one_of_equal ( li.begin(), l_iter,            2 ));
-    BOOST_CHECK ( ba::one_of       ( li.begin(), l_iter, is_<int> ( 2 )));
-    BOOST_CHECK (!ba::one_of_equal ( li.begin(), l_iter,            3 ));
-    BOOST_CHECK (!ba::one_of       ( li.begin(), l_iter, is_<int> ( 3 )));
-    
-    BOOST_CXX14_CONSTEXPR bool constexpr_res =
-       !ba::one_of ( some_numbers, is_<int> ( 6 ))                       &&
-        ba::one_of ( some_numbers + 1, some_numbers + 3, is_<int> ( 1 )) &&
-       true;
-
-    BOOST_CHECK ( constexpr_res );
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_one ();
-}
diff --git a/third_party/boostorg/algorithm/test/ordered_test.cpp b/third_party/boostorg/algorithm/test/ordered_test.cpp
deleted file mode 100644
index f2cbdd7..0000000
--- a/third_party/boostorg/algorithm/test/ordered_test.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-//  Copyright (c) 2010 Nuovation System Designs, LLC
-//    Grant Erickson <gerickson@nuovations.com>
-//
-//  Reworked by Marshall Clow; August 2010
-//
-//  Distributed under the Boost Software License, Version 1.0. (See
-//  accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt)
-//
-//  See http://www.boost.org/ for latest version.
-
-#include <algorithm>
-#include <iostream>
-
-#include <boost/algorithm/cxx11/is_sorted.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-using namespace boost;
-
-/* Preprocessor Defines */
-
-#define elementsof(v)   (sizeof (v) / sizeof (v[0]))
-#define a_begin(v)      (&v[0])
-#define a_end(v)        (v + elementsof (v))
-#define a_range(v)      v
-#define b_e(v)          a_begin(v),a_end(v)
-
-namespace ba = boost::algorithm;
-
-BOOST_CXX14_CONSTEXPR bool less( int x, int y ) { return x < y; }
-
-static void
-test_ordered(void)
-{
-    BOOST_CXX14_CONSTEXPR const int strictlyIncreasingValues[] = { 1, 2, 3, 4, 5 };
-    BOOST_CXX14_CONSTEXPR const int randomValues[] = { 3, 6, 1, 2, 7 };
-    const int constantValues[] = { 1, 2, 2, 2, 5 };
-          int nonConstantArray[] = { 1, 2, 2, 2, 5 };
-    const int inOrderUntilTheEnd [] = { 0, 1, 2, 3, 4, 5, 6, 7, 6 };
-
-//  Begin/end checks
-    BOOST_CHECK (  ba::is_sorted (b_e(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_sorted (b_e(randomValues)));
-    BOOST_CHECK (  ba::is_sorted (b_e(strictlyIncreasingValues), std::less<int>()));
-    BOOST_CHECK ( !ba::is_sorted (b_e(strictlyIncreasingValues), std::greater<int>()));
-
-//  Range checks
-    BOOST_CHECK (  ba::is_sorted (a_range(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_sorted (a_range(randomValues)));
-    BOOST_CHECK (  ba::is_sorted (a_range(strictlyIncreasingValues), std::less<int>()));
-    BOOST_CHECK ( !ba::is_sorted (a_range(strictlyIncreasingValues), std::greater<int>()));
-
-    BOOST_CHECK (  ba::is_sorted_until ( b_e(strictlyIncreasingValues))                       ==      a_end(strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_sorted_until ( b_e(strictlyIncreasingValues),     std::less<int>()) ==      a_end(strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_sorted_until ( a_range(strictlyIncreasingValues))                   == boost::end(strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_sorted_until ( a_range(strictlyIncreasingValues), std::less<int>()) == boost::end(strictlyIncreasingValues));
-
-//  Check for const and non-const arrays
-    BOOST_CHECK ( ba::is_sorted_until ( b_e(constantValues),       std::less<int>()) ==      a_end(constantValues));
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(constantValues),   std::less<int>()) == boost::end(constantValues));
-    BOOST_CHECK ( ba::is_sorted_until ( b_e(nonConstantArray),     std::less<int>()) ==      a_end(nonConstantArray));
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(nonConstantArray), std::less<int>()) == boost::end(nonConstantArray));
-
-    BOOST_CHECK ( ba::is_sorted_until ( b_e(randomValues),     std::less<int>()) == &randomValues[2] );
-    BOOST_CHECK ( ba::is_sorted_until ( b_e(randomValues))                       == &randomValues[2] );
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(randomValues), std::less<int>()) == &randomValues[2] );
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(randomValues))                   == &randomValues[2] );
-
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(inOrderUntilTheEnd), std::less<int>()) == &inOrderUntilTheEnd[8] );
-    BOOST_CHECK ( ba::is_sorted_until ( a_range(inOrderUntilTheEnd))                   == &inOrderUntilTheEnd[8] );
-
-//  For zero- and one-element collections, the comparison predicate should never be called
-    BOOST_CHECK ( ba::is_sorted_until ( a_begin(randomValues), a_begin(randomValues),     std::equal_to<int>()) == a_begin(randomValues));
-    BOOST_CHECK ( ba::is_sorted_until ( a_begin(randomValues), a_begin(randomValues))                           == a_begin(randomValues));
-    BOOST_CHECK ( ba::is_sorted_until ( a_begin(randomValues), a_begin(randomValues) + 1, std::equal_to<int>()) == a_begin(randomValues) + 1);
-    BOOST_CHECK ( ba::is_sorted_until ( a_begin(randomValues), a_begin(randomValues) + 1 )                      == a_begin(randomValues) + 1);
-
-    BOOST_CXX14_CONSTEXPR bool constexpr_res = (
-        ba::is_sorted ( boost::begin(strictlyIncreasingValues), boost::end(strictlyIncreasingValues) )
-        && !ba::is_sorted (a_range(randomValues))
-        && ba::is_sorted_until ( boost::begin(strictlyIncreasingValues), boost::end(strictlyIncreasingValues), less) == a_end(strictlyIncreasingValues)
-        && ba::is_sorted_until ( randomValues, less)                                                                 == &randomValues[2]
-    );
-    BOOST_CHECK ( constexpr_res );
-}
-
-
-static void
-test_increasing_decreasing(void)
-{
-    BOOST_CXX14_CONSTEXPR const int strictlyIncreasingValues[] = { 1, 2, 3, 4, 5 };
-    BOOST_CXX14_CONSTEXPR const int strictlyDecreasingValues[] = { 9, 8, 7, 6, 5 };
-    BOOST_CXX14_CONSTEXPR const int increasingValues[] = { 1, 2, 2, 2, 5 };
-    BOOST_CXX14_CONSTEXPR const int decreasingValues[] = { 9, 7, 7, 7, 5 };
-    BOOST_CXX14_CONSTEXPR const int randomValues[] = { 3, 6, 1, 2, 7 };
-    BOOST_CXX14_CONSTEXPR const int constantValues[] = { 7, 7, 7, 7, 7 };
-
-    // Test a strictly increasing sequence
-    BOOST_CHECK (  ba::is_strictly_increasing (b_e(strictlyIncreasingValues)));
-    BOOST_CHECK (  ba::is_increasing          (b_e(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (b_e(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_decreasing          (b_e(strictlyIncreasingValues)));
-
-    BOOST_CHECK (  ba::is_strictly_increasing (a_range(strictlyIncreasingValues)));
-    BOOST_CHECK (  ba::is_increasing          (a_range(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (a_range(strictlyIncreasingValues)));
-    BOOST_CHECK ( !ba::is_decreasing          (a_range(strictlyIncreasingValues)));
-
-    // Test a strictly decreasing sequence
-    BOOST_CHECK ( !ba::is_strictly_increasing (b_e(strictlyDecreasingValues)));
-    BOOST_CHECK ( !ba::is_increasing          (b_e(strictlyDecreasingValues)));
-    BOOST_CHECK (  ba::is_strictly_decreasing (b_e(strictlyDecreasingValues)));
-    BOOST_CHECK (  ba::is_decreasing          (b_e(strictlyDecreasingValues)));
-
-    // Test an increasing sequence
-    BOOST_CHECK ( !ba::is_strictly_increasing (b_e(increasingValues)));
-    BOOST_CHECK (  ba::is_increasing          (b_e(increasingValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (b_e(increasingValues)));
-    BOOST_CHECK ( !ba::is_decreasing          (b_e(increasingValues)));
-
-    // Test a decreasing sequence
-    BOOST_CHECK ( !ba::is_strictly_increasing (b_e(decreasingValues)));
-    BOOST_CHECK ( !ba::is_increasing          (b_e(decreasingValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (b_e(decreasingValues)));
-    BOOST_CHECK (  ba::is_decreasing          (b_e(decreasingValues)));
-
-    // Test a random sequence
-    BOOST_CHECK ( !ba::is_strictly_increasing (b_e(randomValues)));
-    BOOST_CHECK ( !ba::is_increasing          (b_e(randomValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (b_e(randomValues)));
-    BOOST_CHECK ( !ba::is_decreasing          (b_e(randomValues)));
-
-    // Test a constant sequence
-    BOOST_CHECK ( !ba::is_strictly_increasing (b_e(constantValues)));
-    BOOST_CHECK (  ba::is_increasing          (b_e(constantValues)));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (b_e(constantValues)));
-    BOOST_CHECK (  ba::is_decreasing          (b_e(constantValues)));
-    
-    // Test an empty sequence
-    BOOST_CHECK (  ba::is_strictly_increasing (strictlyIncreasingValues, strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_increasing          (strictlyIncreasingValues, strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_strictly_decreasing (strictlyIncreasingValues, strictlyIncreasingValues));
-    BOOST_CHECK (  ba::is_decreasing          (strictlyIncreasingValues, strictlyIncreasingValues));
-    
-    // Test a one-element sequence
-    BOOST_CHECK (  ba::is_strictly_increasing (strictlyIncreasingValues, strictlyIncreasingValues+1));
-    BOOST_CHECK (  ba::is_increasing          (strictlyIncreasingValues, strictlyIncreasingValues+1));
-    BOOST_CHECK (  ba::is_strictly_decreasing (strictlyIncreasingValues, strictlyIncreasingValues+1));
-    BOOST_CHECK (  ba::is_decreasing          (strictlyIncreasingValues, strictlyIncreasingValues+1));
-
-    // Test a two-element sequence
-    BOOST_CHECK (  ba::is_strictly_increasing (strictlyIncreasingValues, strictlyIncreasingValues+2));
-    BOOST_CHECK (  ba::is_increasing          (strictlyIncreasingValues, strictlyIncreasingValues+2));
-    BOOST_CHECK ( !ba::is_strictly_decreasing (strictlyIncreasingValues, strictlyIncreasingValues+2));
-    BOOST_CHECK ( !ba::is_decreasing          (strictlyIncreasingValues, strictlyIncreasingValues+2));
-    
-    BOOST_CXX14_CONSTEXPR bool constexpr_res = (
-            ba::is_increasing           (boost::begin(increasingValues),         boost::end(increasingValues))
-        &&  ba::is_decreasing           (boost::begin(decreasingValues),         boost::end(decreasingValues))
-        &&  ba::is_strictly_increasing  (boost::begin(strictlyIncreasingValues), boost::end(strictlyIncreasingValues))
-        &&  ba::is_strictly_decreasing  (boost::begin(strictlyDecreasingValues), boost::end(strictlyDecreasingValues))
-        && !ba::is_strictly_increasing  (boost::begin(increasingValues),         boost::end(increasingValues))
-        && !ba::is_strictly_decreasing  (boost::begin(decreasingValues),         boost::end(decreasingValues))
-    );
-    BOOST_CHECK ( constexpr_res );
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    test_ordered ();
-    test_increasing_decreasing ();
-}
diff --git a/third_party/boostorg/algorithm/test/partition_copy_test1.cpp b/third_party/boostorg/algorithm/test/partition_copy_test1.cpp
deleted file mode 100644
index 4499f40..0000000
--- a/third_party/boostorg/algorithm/test/partition_copy_test1.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/partition_copy.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <boost/algorithm/cxx11/all_of.hpp>
-#include <boost/algorithm/cxx11/none_of.hpp>
-#include <string>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-template <typename Container, typename Predicate>
-void test_sequence ( const Container &c, Predicate comp ) {
-    std::vector<typename Container::value_type> v1, v2;
-    
-    v1.clear (); v2.clear ();
-    ba::partition_copy ( c.begin (), c.end (), 
-                std::back_inserter (v1), std::back_inserter (v2), comp );
-//  std::cout << "Sizes(1): " << c.size () << " -> { " << v1.size () << ", " << v2.size () << " }" << std::endl;
-    BOOST_CHECK ( v1.size () + v2.size () == c.size ());
-    BOOST_CHECK ( ba::all_of  ( v1.begin (), v1.end (), comp ));
-    BOOST_CHECK ( ba::none_of ( v2.begin (), v2.end (), comp ));
-
-    v1.clear (); v2.clear ();
-    ba::partition_copy ( c, std::back_inserter (v1), std::back_inserter ( v2 ), comp );
-//  std::cout << "Sizes(2): " << c.size () << " -> { " << v1.size () << ", " << v2.size () << " }" << std::endl;
-    BOOST_CHECK ( v1.size () + v2.size () == c.size ());
-    BOOST_CHECK ( ba::all_of  ( v1, comp ));
-    BOOST_CHECK ( ba::none_of ( v2, comp ));
-    }
-
-template <typename T>
-struct less_than {
-public:
-    BOOST_CXX14_CONSTEXPR less_than ( T foo ) : val ( foo ) {}
-    BOOST_CXX14_CONSTEXPR less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    BOOST_CXX14_CONSTEXPR bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-    };
-
-bool is_even ( int v ) { return v % 2 == 0; }
-
-void test_sequence1 () {
-    std::vector<int> v;
-    
-    v.clear ();
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_sequence ( v, less_than<int>(3));      // no elements
-    test_sequence ( v, less_than<int>(6));      // only the first element
-    test_sequence ( v, less_than<int>(10));
-    test_sequence ( v, less_than<int>(99));     // all elements satisfy the predicate
-
-//  With bidirectional iterators.
-    std::list<int> l;
-    for ( int i = 5; i < 16; ++i )
-        l.push_back ( i );
-    test_sequence ( l, less_than<int>(3));      // no elements
-    test_sequence ( l, less_than<int>(6));      // only the first element
-    test_sequence ( l, less_than<int>(10));
-    test_sequence ( l, less_than<int>(99));     // all elements satisfy the predicate
-
-    }
-
-    
-BOOST_CXX14_CONSTEXPR bool test_constexpr () {
-    int in[] = {1, 1, 2};
-    int out_true[3] = {0};
-    int out_false[3] = {0};
-    bool res = true;
-    ba::partition_copy( in, in + 3, out_true, out_false, less_than<int>(2) );
-    res = (res && ba::all_of(out_true, out_true + 2, less_than<int>(2)) );
-    res = (res && ba::none_of(out_false, out_false + 1, less_than<int>(2)) );
-    
-// clear elements
-    out_true [0] = 0;
-    out_true [1] = 0;
-    out_false[0] = 0;
-    
-    ba::partition_copy( in, out_true, out_false, less_than<int>(2));
-    res = ( res && ba::all_of(out_true, out_true + 2, less_than<int>(2)));
-    res = ( res && ba::none_of(out_false, out_false + 1, less_than<int>(2)));
-    return res;
-    }
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-  BOOST_CXX14_CONSTEXPR bool constexpr_res = test_constexpr ();
-  BOOST_CHECK ( constexpr_res );
-}
diff --git a/third_party/boostorg/algorithm/test/partition_point_test1.cpp b/third_party/boostorg/algorithm/test/partition_point_test1.cpp
deleted file mode 100644
index 37d517d..0000000
--- a/third_party/boostorg/algorithm/test/partition_point_test1.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2011-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/partition_point.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <string>
-#include <vector>
-#include <list>
-
-namespace ba = boost::algorithm;
-// namespace ba = boost;
-
-template <typename Container>
-typename Container::iterator offset_to_iter ( Container &v, int offset ) {
-    typename Container::iterator retval;
-    
-    if ( offset >= 0 ) {
-        retval = v.begin ();
-        std::advance ( retval, offset );
-        }
-    else {
-        retval = v.end ();
-        std::advance ( retval, offset + 1 );
-        }
-    return retval;
-    }
-
-template <typename Container, typename Predicate>
-void test_sequence ( Container &v, Predicate comp, int expected ) {
-    typename Container::iterator res, exp;
-    
-    res = ba::partition_point ( v.begin (), v.end (), comp );
-    exp = offset_to_iter ( v, expected );
-    BOOST_CHECK ( exp == res );
-
-//  Duplicate the last element; this checks for any even/odd problems
-    v.push_back ( * v.rbegin ());
-    res = ba::partition_point ( v.begin (), v.end (), comp );
-    exp = offset_to_iter ( v, expected );
-    BOOST_CHECK ( exp == res );
-
-//  Range based test
-    res = ba::partition_point ( v, comp );
-    exp = offset_to_iter ( v, expected );
-    BOOST_CHECK ( exp == res );
-    }
-
-template <typename T>
-struct less_than {
-public:
-    less_than ( T foo ) : val ( foo ) {}
-    less_than ( const less_than &rhs ) : val ( rhs.val ) {}
-
-    bool operator () ( const T &v ) const { return v < val; }
-private:
-    less_than ();
-    less_than operator = ( const less_than &rhs );
-    T val;
-    };
-
-
-void test_sequence1 () {
-    std::vector<int> v;
-    
-    v.clear ();
-    for ( int i = 5; i < 15; ++i )
-        v.push_back ( i );
-    test_sequence ( v, less_than<int>(3),  0 ); // no elements
-    test_sequence ( v, less_than<int>(6),  1 );    // only the first element
-    test_sequence ( v, less_than<int>(10), 5 );
-    test_sequence ( v, less_than<int>(99), -1 );   // all elements satisfy the predicate
-
-//  With bidirectional iterators.
-    std::list<int> l;
-    for ( int i = 5; i < 15; ++i )
-        l.push_back ( i );
-    test_sequence ( l, less_than<int>(3),  0 ); // no elements
-    test_sequence ( l, less_than<int>(6),  1 );    // only the first element
-    test_sequence ( l, less_than<int>(10), 5 );
-    test_sequence ( l, less_than<int>(99), -1 );   // all elements satisfy the predicate
-
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_sequence1 ();
-}
diff --git a/third_party/boostorg/algorithm/test/partition_subrange_test.cpp b/third_party/boostorg/algorithm/test/partition_subrange_test.cpp
deleted file mode 100644
index 3604fba..0000000
--- a/third_party/boostorg/algorithm/test/partition_subrange_test.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-#include <boost/config.hpp>
-#include <boost/algorithm/sort_subrange.hpp>
-#include <boost/algorithm/cxx11/is_sorted.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <vector>
-#include <iostream>
-
-#if (__cplusplus >= 201103L) || defined(BOOST_NO_CXX98_RANDOM_SHUFFLE)
-#include <random>
-
-std::default_random_engine gen;
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::shuffle(first, last, gen); }
-#else
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::random_shuffle(first, last); }
-#endif
-
-namespace ba = boost::algorithm;
-
-template <typename Iter>
-void check_sequence ( Iter first, Iter last, Iter sf, Iter sl )
-{
-// 	for (Iter i = first; i < last; ++i) {
-// 		if (i != first) std::cout << ' ';
-// 		if (i == sf) std::cout << ">";
-// 		std::cout << *i;
-// 		if (i == sl) std::cout << "<";
-// 		}
-// 	if (sl == last) std::cout << "<";
-// 	std::cout << '\n';
-
-	if (sf == sl) return;
-	for (Iter i = first; i < sf; ++i)
-		BOOST_CHECK(*i < *sf);
-	for (Iter i = sf; i < sl; ++i) {
-		if (first != sf) // if there is an element before the subrange
-			BOOST_CHECK(*i > *(sf-1));
-		if (last != sl) // if there is an element after the subrange
-			BOOST_CHECK(*i < *sl);
-		}
-	for (Iter i = sl; i < last; ++i)
-		BOOST_CHECK(*(sl-1) < *i);
-}
-
-template <typename Iter, typename Pred>
-void check_sequence ( Iter first, Iter last, Iter sf, Iter sl, Pred p )
-{
-	if (sf == sl) return;
-	for (Iter i = first; i < sf; ++i)
-		BOOST_CHECK(p(*i, *sf));
-	for (Iter i = sf; i < sl; ++i) {
-		if (first != sf) // if there is an element before the subrange
-			BOOST_CHECK(p(*(sf-1), *i));
-		if (last != sl) // if there is an element after the subrange
-			BOOST_CHECK(p(*i, *sl));
-		}
-	for (Iter i = sl; i < last; ++i)
-		BOOST_CHECK(p(*(sl-1), *i));
-
-}
-
-// 	for ( int i = 0; i < v.size(); ++i )
-// 		std::cout << v[i] << ' ';
-// 	std::cout << std::endl;
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-	{
-	std::vector<int> v;
-	for ( int i = 0; i < 10; ++i )
-		v.push_back(i);
-
-	const std::vector<int>::iterator b = v.begin();
-	ba::partition_subrange(b, v.end(), b + 3, b + 6);
-	check_sequence        (b, v.end(), b + 3, b + 6);
-
-// 	BOOST_CHECK_EQUAL(v[3], 3);
-// 	BOOST_CHECK_EQUAL(v[4], 4);
-// 	BOOST_CHECK_EQUAL(v[5], 5);
-
-//	Mix them up and try again - single element
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b + 7, b + 8);
-	check_sequence        (b, v.end(), b + 7, b + 8);
-
-// 	BOOST_CHECK_EQUAL(v[7], 7);
-
-//	Mix them up and try again - at the end
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b + 7, v.end());
-	check_sequence        (b, v.end(), b + 7, v.end());
-
-// 	BOOST_CHECK_EQUAL(v[7], 7);
-// 	BOOST_CHECK_EQUAL(v[8], 8);
-// 	BOOST_CHECK_EQUAL(v[9], 9);
-
-//	Mix them up and try again - at the beginning
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, b + 2);
-	check_sequence        (b, v.end(), b, b + 2);
-
-// 	BOOST_CHECK_EQUAL(v[0], 0);
-// 	BOOST_CHECK_EQUAL(v[1], 1);
-
-//	Mix them up and try again - empty subrange
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, b);
-	check_sequence        (b, v.end(), b, b);
-
-//	Mix them up and try again - entire subrange
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, v.end());
-	check_sequence        (b, v.end(), b, v.end());
-	}
-
-	{
-	std::vector<int> v;
-	for ( int i = 0; i < 10; ++i )
-		v.push_back(i);
-
-	const std::vector<int>::iterator b = v.begin();
-	ba::partition_subrange(b, v.end(), b + 3, b + 6, std::greater<int>());
-	check_sequence        (b, v.end(), b + 3, b + 6, std::greater<int>());
-
-// 	BOOST_CHECK_EQUAL(v[3], 6);
-// 	BOOST_CHECK_EQUAL(v[4], 5);
-// 	BOOST_CHECK_EQUAL(v[5], 4);
-
-//	Mix them up and try again - single element
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b + 7, b + 8, std::greater<int>());
-	check_sequence        (b, v.end(), b + 7, b + 8, std::greater<int>());
-
-// 	BOOST_CHECK_EQUAL(v[7], 2);
-
-//	Mix them up and try again - at the end
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b + 7, v.end(), std::greater<int>());
-	check_sequence        (b, v.end(), b + 7, v.end(), std::greater<int>());
-
-// 	BOOST_CHECK_EQUAL(v[7], 2);
-// 	BOOST_CHECK_EQUAL(v[8], 1);
-// 	BOOST_CHECK_EQUAL(v[9], 0);
-
-//	Mix them up and try again - at the beginning
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, b + 2, std::greater<int>());
-	check_sequence        (b, v.end(), b, b + 2, std::greater<int>());
-
-// 	BOOST_CHECK_EQUAL(v[0], 9);
-// 	BOOST_CHECK_EQUAL(v[1], 8);
-
-//	Mix them up and try again - empty subrange
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, b, std::greater<int>());
-	check_sequence        (b, v.end(), b, b, std::greater<int>());
-
-//	Mix them up and try again - entire subrange
-	do_shuffle(v.begin(), v.end());
-	ba::partition_subrange(b, v.end(), b, v.end(), std::greater<int>());
-	check_sequence        (b, v.end(), b, v.end(), std::greater<int>());
-	}
-}
diff --git a/third_party/boostorg/algorithm/test/power_fail1.cpp b/third_party/boostorg/algorithm/test/power_fail1.cpp
deleted file mode 100644
index 6a3bf5f..0000000
--- a/third_party/boostorg/algorithm/test/power_fail1.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2014.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/algorithm.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-//  Second argument must be an integral value
-    BOOST_CHECK ( ba::power(1, 1.0) == 1);
-}
diff --git a/third_party/boostorg/algorithm/test/power_test.cpp b/third_party/boostorg/algorithm/test/power_test.cpp
deleted file mode 100644
index f4372e8..0000000
--- a/third_party/boostorg/algorithm/test/power_test.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2014.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <iostream>
-#include <functional>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/algorithm.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-void test_power ()
-{
-    BOOST_CHECK ( ba::power(0, 0) == 1);
-    BOOST_CHECK ( ba::power(5, 0) == 1);
-    BOOST_CHECK ( ba::power(1, 1) == 1);
-    BOOST_CHECK ( ba::power(1, 4) == 1);
-    BOOST_CHECK ( ba::power(3, 2) == 9);
-    BOOST_CHECK ( ba::power(2, 3) == 8);
-    BOOST_CHECK ( ba::power(3, 3) == 27);
-    BOOST_CHECK ( ba::power(2, 30) == 0x40000000);
-    BOOST_CHECK ( ba::power(5L, 10) == 3125*3125);
-    BOOST_CHECK ( ba::power(18, 3) == 18*18*18);
-    
-    BOOST_CHECK ( ba::power(3,2) == ba::power(3,2, std::multiplies<int>()));
-    BOOST_CHECK ( ba::power(3,2, std::plus<int>()) == 6);
-}
-
-
-void test_power_constexpr ()
-{
-    BOOST_CXX14_CONSTEXPR bool check_zero_power1 =
-        ba::power(0, 0) == 1;
-    BOOST_CHECK(check_zero_power1);
-    BOOST_CXX14_CONSTEXPR bool check_zero_power2 =
-        ba::power(5, 0) == 1;
-    BOOST_CHECK(check_zero_power2);
-    BOOST_CXX14_CONSTEXPR bool check_one_base1 =
-        ba::power(1, 1) == 1;
-    BOOST_CHECK(check_one_base1);
-    BOOST_CXX14_CONSTEXPR bool check_one_base2 =
-        ba::power(1, 4) == 1;
-    BOOST_CHECK(check_one_base2);
-    BOOST_CXX14_CONSTEXPR bool check_power1 = 
-        ba::power(3, 2) == 9;
-    BOOST_CHECK(check_power1);
-    BOOST_CXX14_CONSTEXPR bool check_power2 = 
-        ba::power(2, 3) == 8;
-    BOOST_CHECK(check_power2);
-    BOOST_CXX14_CONSTEXPR bool check_power3 = 
-        ba::power(3, 3) == 27;
-    BOOST_CHECK(check_power3);
-    BOOST_CXX14_CONSTEXPR bool check_power4 = 
-        ba::power(2, 30) == 0x40000000;
-    BOOST_CHECK(check_power4);
-    BOOST_CXX14_CONSTEXPR bool check_power5 = 
-        ba::power(5L, 10) == 3125*3125;
-    BOOST_CHECK(check_power5);
-    BOOST_CXX14_CONSTEXPR bool check_power6 = 
-        ba::power(18, 3) == 18*18*18;
-    BOOST_CHECK(check_power6);
-    
-    BOOST_CXX14_CONSTEXPR bool check_multiple = 
-        ba::power(3, 2, std::multiplies<int>()) == ba::power(3, 2);
-    BOOST_CHECK(check_multiple);
-    BOOST_CXX14_CONSTEXPR bool check_plus = 
-        ba::power(3, 2, std::plus<int>()) == 6;
-    BOOST_CHECK(check_plus);
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main ) {
-  test_power ();
-  test_power_constexpr ();
-}
diff --git a/third_party/boostorg/algorithm/test/reduce_test.cpp b/third_party/boostorg/algorithm/test/reduce_test.cpp
deleted file mode 100644
index ec47db2..0000000
--- a/third_party/boostorg/algorithm/test/reduce_test.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx17/reduce.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-template <class Iter, class T, class Op>
-void
-test_reduce(Iter first, Iter last, T init, Op op, T x)
-{
-    BOOST_CHECK(ba::reduce(first, last, init, op) == x);
-}
-
-template <class Iter, class T, class Op>
-void
-test_reduce(Iter first, Iter last, Op op, T x)
-{
-    BOOST_CHECK(ba::reduce(first, last, op) == x);
-}
-
-template <class Iter, class T>
-void
-test_reduce(Iter first, Iter last, T x)
-{
-    BOOST_CHECK(ba::reduce(first, last) == x);
-}
-
-template <class Iter>
-void
-test_init_op()
-{
-    int ia[] = {1, 2, 3, 4, 5, 6};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    test_reduce(Iter(ia), Iter(ia), 0, std::plus<int>(), 0);
-    test_reduce(Iter(ia), Iter(ia), 1, std::multiplies<int>(), 1);
-    test_reduce(Iter(ia), Iter(ia+1), 0, std::plus<int>(), 1);
-    test_reduce(Iter(ia), Iter(ia+1), 2, std::multiplies<int>(), 2);
-    test_reduce(Iter(ia), Iter(ia+2), 0, std::plus<int>(), 3);
-    test_reduce(Iter(ia), Iter(ia+2), 3, std::multiplies<int>(), 6);
-    test_reduce(Iter(ia), Iter(ia+sa), 0, std::plus<int>(), 21);
-    test_reduce(Iter(ia), Iter(ia+sa), 4, std::multiplies<int>(), 2880);
-}
-
-void test_reduce_init_op()
-{
-    test_init_op<input_iterator<const int*> >();
-    test_init_op<forward_iterator<const int*> >();
-    test_init_op<bidirectional_iterator<const int*> >();
-    test_init_op<random_access_iterator<const int*> >();
-    test_init_op<const int*>();
-
-    {
-    char ia[] = {1, 2, 3, 4, 5, 6, 7, 8};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    unsigned res = boost::algorithm::reduce(ia, ia+sa, 1U, std::multiplies<unsigned>());
-    BOOST_CHECK(res == 40320);		// 8! will not fit into a char
-    }
-}
-
-template <class Iter>
-void
-test_init()
-{
-    int ia[] = {1, 2, 3, 4, 5, 6};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    test_reduce(Iter(ia), Iter(ia), 0, 0);
-    test_reduce(Iter(ia), Iter(ia), 1, 1);
-    test_reduce(Iter(ia), Iter(ia+1), 0, 1);
-    test_reduce(Iter(ia), Iter(ia+1), 2, 3);
-    test_reduce(Iter(ia), Iter(ia+2), 0, 3);
-    test_reduce(Iter(ia), Iter(ia+2), 3, 6);
-    test_reduce(Iter(ia), Iter(ia+sa), 0, 21);
-    test_reduce(Iter(ia), Iter(ia+sa), 4, 25);
-}
-
-void test_reduce_init()
-{
-    test_init<input_iterator<const int*> >();
-    test_init<forward_iterator<const int*> >();
-    test_init<bidirectional_iterator<const int*> >();
-    test_init<random_access_iterator<const int*> >();
-    test_init<const int*>();
-}
-
-
-template <class Iter>
-void
-test()
-{
-    int ia[] = {1, 2, 3, 4, 5, 6};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    test_reduce(Iter(ia), Iter(ia), 0);
-    test_reduce(Iter(ia), Iter(ia+1), 1);
-    test_reduce(Iter(ia), Iter(ia+2), 3);
-    test_reduce(Iter(ia), Iter(ia+sa), 21);
-}
-
-void test_reduce()
-{
-    test<input_iterator<const int*> >();
-    test<forward_iterator<const int*> >();
-    test<bidirectional_iterator<const int*> >();
-    test<random_access_iterator<const int*> >();
-    test<const int*>();
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_reduce();
-  test_reduce_init();
-  test_reduce_init_op();
-}
diff --git a/third_party/boostorg/algorithm/test/search_fail1.cpp b/third_party/boostorg/algorithm/test/search_fail1.cpp
deleted file mode 100644
index 2b9f6f7..0000000
--- a/third_party/boostorg/algorithm/test/search_fail1.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <boost/algorithm/searching/boyer_moore.hpp>
-
-int main( int , char* [] )
-{
-    std::vector<char>   cv;
-    std::vector<int>    iv;
-    
-//  Should fail to compile because the underlying types are different
-//  They are (almost certainly) different sizes
-    (void) boost::algorithm::boyer_moore_search (
-        cv.begin (), cv.end (), iv.begin (), iv.end ());
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/test/search_fail2.cpp b/third_party/boostorg/algorithm/test/search_fail2.cpp
deleted file mode 100644
index 463d242..0000000
--- a/third_party/boostorg/algorithm/test/search_fail2.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <boost/cstdint.hpp>
-#include <boost/algorithm/searching/boyer_moore.hpp>
-
-int main( int , char* [] )
-{
-    std::vector<boost::uint8_t> cv;
-    std::vector<boost:: int8_t> iv;
-    
-//  Should fail to compile because the underlying types are different
-//  They are the same size, but one is signed, and the other is not.
-    (void) boost::algorithm::boyer_moore_search (
-        cv.begin (), cv.end (), iv.begin (), iv.end ());
-
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/test/search_fail3.cpp b/third_party/boostorg/algorithm/test/search_fail3.cpp
deleted file mode 100644
index caf8335..0000000
--- a/third_party/boostorg/algorithm/test/search_fail3.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <boost/algorithm/searching/boyer_moore.hpp>
-
-int main( int , char* [] )
-{
-//  Should fail to compile because the search objects are not default-constructible
-    boost::algorithm::boyer_moore<std::vector<char>::iterator> bm;
-   
-    return 0;
-}
diff --git a/third_party/boostorg/algorithm/test/search_test1.cpp b/third_party/boostorg/algorithm/test/search_test1.cpp
deleted file mode 100644
index 3fe3b91..0000000
--- a/third_party/boostorg/algorithm/test/search_test1.cpp
+++ /dev/null
@@ -1,285 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <iostream>
-#include <string>
-#include <vector>
-
-
-namespace ba = boost::algorithm;
-
-template <typename Iter>
-std::string make_str ( Iter first, std::size_t len ) {
-    std::string retVal ( len + 2, '\'' );
-    std::copy ( first, first+len, retVal.begin () + 1);
-    return retVal;
-    }
-
-namespace {
-
-//  Check using iterators
-    template<typename Container>
-    void check_one_iter ( const Container &haystack, const std::string &needle, int expected ) {
-        typedef typename Container::const_iterator iter_type;
-        typedef typename std::pair<iter_type, iter_type> ret_type;
-        typedef std::string::const_iterator pattern_type;
-
-        iter_type hBeg = haystack.begin ();
-        iter_type hEnd = haystack.end ();
-        pattern_type nBeg = needle.begin ();
-        pattern_type nEnd = needle.end ();
-
-//      iter_type ret0  = std::search                     (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret1  = ba::boyer_moore_search          (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret1r = ba::boyer_moore_search          (haystack, nBeg, nEnd);
-        ret_type ret2  = ba::boyer_moore_horspool_search (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret3  = ba::knuth_morris_pratt_search   (hBeg, hEnd, nBeg, nEnd);
-
-        iter_type it0  = std::search                     (hBeg, hEnd, nBeg, nEnd);
-//         iter_type it1  = ret1.first;
-//         iter_type it1r = ret1r.first;
-//         iter_type it2  = ret2.first;
-//         iter_type it3  = ret3.first;
-        const int dist = ret1.first == hEnd ? -1 : std::distance ( hBeg, ret1.first );
-
-        std::cout << "(Iterators) Pattern is " << needle.length () << ", haysstack is " << haystack.length () << " chars long; " << std::endl;
-        try {
-            if ( it0 != ret1.first ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between std::search and boyer-moore search" ));
-                }
-
-            if ( ret1.first != ret1r.first || ret1.second != ret1r.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between iterator and range boyer_moore search" ));
-                }
-
-            if ( ret1.first != ret2.first || ret1.second != ret2.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and boyer-moore-horspool search" ));
-                }
-
-            if ( ret1.first != ret3.first || ret1.second != ret3.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and knuth-morris-pratt search" ));
-                }
-
-            }
-
-        catch ( ... ) {
-            std::cout << "Searching for: " << needle << std::endl;
-            std::cout << "Expected: " << expected << "\n";
-            std::cout << "  std:    " << std::distance ( hBeg, it0 ) << "\n";
-            std::cout << "  bm:     " << std::distance ( hBeg, ret1.first ) << "\n";
-            std::cout << "  bm(r):  " << std::distance ( hBeg, ret1r.first ) << "\n";
-            std::cout << "  bmh:    " << std::distance ( hBeg, ret2.first ) << "\n";
-            std::cout << "  kpm:    " << std::distance ( hBeg, ret3.first )<< "\n";
-            std::cout << std::flush;
-            throw ;
-            }
-
-        BOOST_CHECK_EQUAL ( dist, expected );
-        }
-
-//  Check using pointers
-//    We're assuming that the container implements contiguous storage here.
-    template<typename Container>
-    void check_one_pointer ( const Container &haystack, const std::string &needle, int expected ) {
-        typedef const typename Container::value_type *ptr_type;
-        typedef typename std::pair<ptr_type, ptr_type> ret_type;
-
-        ptr_type hBeg = haystack.size () == 0 ? NULL : &*haystack.begin ();
-        ptr_type hEnd = hBeg + haystack.size ();
-        ptr_type nBeg = needle.size () == 0 ? NULL : &*needle.begin ();
-        ptr_type nEnd = nBeg + needle.size ();
-
-        ptr_type it0  = std::search                     (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret1 = ba::boyer_moore_search          (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret2 = ba::boyer_moore_horspool_search (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret3 = ba::knuth_morris_pratt_search   (hBeg, hEnd, nBeg, nEnd);
-        const int dist = ret1.first == hEnd ? -1 : std::distance ( hBeg, ret1.first );
-
-        std::cout << "(Pointers) Pattern is " << needle.length () << ", haysstack is " << haystack.length () << " chars long; " << std::endl;
-        try {
-            if ( it0 != ret1.first ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between std::search and boyer-moore search" ));
-                }
-
-            if ( ret1.first != ret2.first || ret1.second != ret2.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and boyer-moore-horspool search" ));
-                }
-
-            if ( ret1.first != ret3.first || ret1.second != ret3.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and knuth-morris-pratt search" ));
-                }
-
-            }
-
-        catch ( ... ) {
-            std::cout << "Searching for: " << needle << std::endl;
-            std::cout << "Expected: " << expected << "\n";
-            std::cout << "  std:    " << std::distance ( hBeg, it0 ) << "\n";
-            std::cout << "  bm:     " << std::distance ( hBeg, ret1.first ) << "\n";
-            std::cout << "  bmh:    " << std::distance ( hBeg, ret2.first ) << "\n";
-            std::cout << "  kpm:    " << std::distance ( hBeg, ret3.first )<< "\n";
-            std::cout << std::flush;
-            throw ;
-            }
-
-        BOOST_CHECK_EQUAL ( dist, expected );
-        }
-
-//  Check using objects
-    template<typename Container>
-    void check_one_object ( const Container &haystack, const std::string &needle, int expected ) {
-        typedef typename Container::const_iterator iter_type;
-        typedef typename std::pair<iter_type, iter_type> ret_type;
-        typedef std::string::const_iterator pattern_type;
-
-        iter_type hBeg = haystack.begin ();
-        iter_type hEnd = haystack.end ();
-        pattern_type nBeg = needle.begin ();
-        pattern_type nEnd = needle.end ();
-
-        ba::boyer_moore<pattern_type>          bm_r  = ba::make_boyer_moore ( needle );
-        ba::boyer_moore<pattern_type>          bm    ( nBeg, nEnd );
-        ba::boyer_moore_horspool<pattern_type> bmh   ( nBeg, nEnd );
-        ba::knuth_morris_pratt<pattern_type>   kmp   ( nBeg, nEnd );
-        
-        iter_type it0   = std::search  (hBeg, hEnd, nBeg, nEnd);
-        ret_type ret1   = bm           (hBeg, hEnd);
-        ret_type ret1r  = bm           (haystack);
-        ret_type retr1  = bm_r         (hBeg, hEnd);
-        ret_type retr1r = bm_r         (haystack);
-        ret_type ret2   = bmh          (hBeg, hEnd);
-        ret_type ret3   = kmp          (hBeg, hEnd);
-        const int dist  = ret1.first == hEnd ? -1 : std::distance ( hBeg, ret1.first );
-
-        std::cout << "(Objects) Pattern is " << needle.length () << ", haysstack is " << haystack.length () << " chars long; " << std::endl;
-        try {
-            if ( it0 != ret1.first ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between std::search and boyer-moore search" ));
-                }
-
-            if ( ret1.first != ret1r.first || ret1.second != ret1r.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between iterator and range boyer_moore search(1)" ));
-                }
-
-            if ( ret1.first != retr1.first || ret1.second != retr1.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between iterator and range boyer_moore search(2)" ));
-                }
-
-            if ( ret1.first != retr1r.first || ret1.second != retr1r.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between iterator and range boyer_moore search(3)" ));
-                }
-
-            if ( ret1.first != ret2.first || ret1.second != ret2.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and boyer-moore-horspool search" ));
-                }
-
-            if ( ret1.first != ret3.first || ret1.second != ret3.second ) {
-                throw std::runtime_error ( 
-                    std::string ( "results mismatch between boyer-moore and knuth-morris-pratt search" ));
-                }
-
-            }
-
-        catch ( ... ) {
-            std::cout << "Searching for: " << needle << std::endl;
-            std::cout << "Expected:  " << expected << "\n";
-            std::cout << "  std:     " << std::distance ( hBeg, it0 ) << "\n";
-            std::cout << "  bm:      " << std::distance ( hBeg, ret1.first ) << "\n";
-            std::cout << "  bm(r1):  " << std::distance ( hBeg, ret1r.first ) << "\n";
-            std::cout << "  bm(r2):  " << std::distance ( hBeg, retr1.first ) << "\n";
-            std::cout << "  bm(r3):  " << std::distance ( hBeg, retr1r.first ) << "\n";
-            std::cout << "  bmh:     " << std::distance ( hBeg, ret2.first ) << "\n";
-            std::cout << "  kpm:    " << std::distance ( hBeg, ret3.first )<< "\n";
-            std::cout << std::flush;
-            throw ;
-            }
-
-        BOOST_CHECK_EQUAL ( dist, expected );
-        }
-
-
-    template<typename Container>
-    void check_one ( const Container &haystack, const std::string &needle, int expected ) {
-        check_one_iter ( haystack, needle, expected );
-        check_one_pointer ( haystack, needle, expected );
-        check_one_object ( haystack, needle, expected );
-        }
-    }
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    std::string haystack1 ( "NOW AN FOWE\220ER ANNMAN THE ANPANMANEND" );
-    std::string needle1   ( "ANPANMAN" );
-    std::string needle2   ( "MAN THE" );
-    std::string needle3   ( "WE\220ER" );
-    std::string needle4   ( "NOW " );   //  At the beginning
-    std::string needle5   ( "NEND" );   //  At the end
-    std::string needle6   ( "NOT FOUND" );  // Nowhere
-    std::string needle7   ( "NOT FO\340ND" );   // Nowhere
-
-    std::string haystack2 ( "ABC ABCDAB ABCDABCDABDE" );
-    std::string needle11  ( "ABCDABD" );
-
-    std::string haystack3 ( "abra abracad abracadabra" );
-    std::string needle12  ( "abracadabra" );
-
-    std::string needle13   ( "" );
-    std::string haystack4  ( "" );
-
-    check_one ( haystack1, needle1, 26 );
-    check_one ( haystack1, needle2, 18 );
-    check_one ( haystack1, needle3,  9 );
-    check_one ( haystack1, needle4,  0 );
-    check_one ( haystack1, needle5, 33 );
-    check_one ( haystack1, needle6, -1 );
-    check_one ( haystack1, needle7, -1 );
-
-    check_one ( needle1, haystack1, -1 );   // can't find long pattern in short corpus
-    check_one ( haystack1, haystack1, 0 );  // find something in itself
-    check_one ( haystack2, haystack2, 0 );  // find something in itself
-
-    check_one ( haystack2, needle11, 15 );
-    check_one ( haystack3, needle12, 13 );
-
-    check_one ( haystack1, needle13, 0 );   // find the empty string 
-    check_one ( haystack4, needle1, -1 );  // can't find in an empty haystack
-
-//  Mikhail Levin <svarneticist@gmail.com> found a problem, and this was the test
-//  that triggered it.
-
-  const std::string mikhail_pattern =   
-"GATACACCTACCTTCACCAGTTACTCTATGCACTAGGTGCGCCAGGCCCATGCACAAGGGCTTGAGTGGATGGGAAGGA"
-"TGTGCCCTAGTGATGGCAGCATAAGCTACGCAGAGAAGTTCCAGGGCAGAGTCACCATGACCAGGGACACATCCACGAG"
-"CACAGCCTACATGGAGCTGAGCAGCCTGAGATCTGAAGACACGGCCATGTATTACTGTGGGAGAGATGTCTGGAGTGGT"
-"TATTATTGCCCCGGTAATATTACTACTACTACTACTACATGGACGTCTGGGGCAAAGGGACCACG"
-;
-    const std::string mikhail_corpus = std::string (8, 'a') + mikhail_pattern;
-
-    check_one ( mikhail_corpus, mikhail_pattern, 8 );
-    }
diff --git a/third_party/boostorg/algorithm/test/search_test2.cpp b/third_party/boostorg/algorithm/test/search_test2.cpp
deleted file mode 100644
index eba105e..0000000
--- a/third_party/boostorg/algorithm/test/search_test2.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <ctime>        // for clock_t
-#include <iostream>
-#include <fstream>
-#include <iomanip>
-#include <algorithm>
-#include <vector>
-
-typedef std::vector<char> vec;
-#define NUM_TRIES   100
-
-#define runOne(call, refDiff)   { \
-    std::clock_t bTime, eTime;                              \
-    bTime = std::clock ();                                  \
-    for ( i = 0; i < NUM_TRIES; ++i ) {                     \
-        res = boost::algorithm::call                        \
-            ( haystack.begin (), haystack.end (),           \
-                        needle.begin (), needle.end ());    \
-        if ( res != exp ) {                                 \
-            std::cout << "On run # " << i << " expected "   \
-                << exp.first - haystack.begin () << " got "       \
-                << res.first - haystack.begin () << std::endl;    \
-            throw std::runtime_error                        \
-                ( "Unexpected result from " #call );        \
-            }                                               \
-        }                                                   \
-    eTime = std::clock ();                                  \
-    printRes ( #call, eTime - bTime, refDiff ); }
-
-#define runObject(obj, refDiff) { \
-    std::clock_t bTime, eTime;                              \
-    bTime = std::clock ();                                  \
-    boost::algorithm::obj <vec::const_iterator>             \
-                s_o ( needle.begin (), needle.end ());      \
-    for ( i = 0; i < NUM_TRIES; ++i ) {                     \
-        res = s_o ( haystack.begin (), haystack.end ());    \
-        if ( res != exp ) {                                 \
-            std::cout << "On run # " << i << " expected "   \
-            << exp.first - haystack.begin () << " got "           \
-            << res.first - haystack.begin () << std::endl;        \
-            throw std::runtime_error                        \
-            ( "Unexpected result from " #obj " object" );   \
-            }                                               \
-        }                                                   \
-    eTime = std::clock ();                                  \
-    printRes ( #obj " object", eTime - bTime, refDiff ); }
-    
-
-
-namespace {
-
-    vec ReadFromFile ( const char *name ) {
-        std::ifstream in ( name, std::ios_base::binary | std::ios_base::in );
-        vec retVal;
-        std::istream_iterator<char, char> begin(in);
-        std::istream_iterator<char, char> end;
-        
-        std::copy ( begin, end, std::back_inserter ( retVal ));
-        return retVal;
-        }
-    
-    void printRes ( const char *prompt, unsigned long diff, unsigned long stdDiff ) {
-        std::cout 
-            << std::setw(34) << prompt << " "
-            << std::setw(6)  << (  1.0 * diff) / CLOCKS_PER_SEC << " seconds\t"
-            << std::setw(5)  << (100.0 * diff) / stdDiff << "% \t" 
-            << std::setw(12) << diff;
-        if ( diff > stdDiff ) 
-            std::cout << " !!";
-        std::cout << std::endl;
-        }
-    
-    void check_one ( const vec &haystack, const vec &needle, int expected ) {
-        std::size_t i;
-        std::clock_t sTime;
-        unsigned long stdDiff;
-        
-        std::pair<vec::const_iterator, vec::const_iterator> res;
-        std::pair<vec::const_iterator, vec::const_iterator> exp;        // the expected result
-        vec::const_iterator exp_start;
-        
-        if ( expected >= 0 )
-            exp_start = haystack.begin () + expected;
-        else if ( expected == -1 )
-            exp_start = haystack.end ();      // we didn't find it!
-        else if ( expected == -2 )
-            exp_start = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-        else    
-            throw std::logic_error ( "Expected must be -2, -1, or >= 0" );
-
-        if ( expected == -1 )
-            exp = std::make_pair(haystack.end(), haystack.end());
-        else
-            exp = std::make_pair(exp_start, exp_start + needle.size());
-
-        std::cout << "Pattern is " << needle.size ()   << " entries long" << std::endl;
-        std::cout << "Corpus  is " << haystack.size () << " entries long" << std::endl;
-
-    //  First, the std library search
-        sTime = std::clock ();
-        for ( i = 0; i < NUM_TRIES; ++i ) {
-            vec::const_iterator s_res = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-            if ( s_res != exp.first ) {
-                std::cout << "On run # " << i << " expected " << exp.first - haystack.begin () << " got " << s_res - haystack.begin () << std::endl;
-                throw std::runtime_error ( "Unexpected result from std::search" );
-                }
-            }
-        stdDiff = std::clock () - sTime;
-        printRes ( "std::search", stdDiff, stdDiff );
-
-        runOne    ( boyer_moore_search,          stdDiff );
-        runObject ( boyer_moore,                 stdDiff );
-        runOne    ( boyer_moore_horspool_search, stdDiff );
-        runObject ( boyer_moore_horspool,        stdDiff );
-        runOne    ( knuth_morris_pratt_search,   stdDiff );
-        runObject ( knuth_morris_pratt,          stdDiff );
-        }
-    }
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    vec c1  = ReadFromFile ( "search_test_data/0001.corpus" );
-    vec p1b = ReadFromFile ( "search_test_data/0001b.pat" );
-    vec p1e = ReadFromFile ( "search_test_data/0001e.pat" );
-    vec p1n = ReadFromFile ( "search_test_data/0001n.pat" );
-    vec p1f = ReadFromFile ( "search_test_data/0001f.pat" );
-    
-    std::cout << std::fixed << std::setprecision(4);
-//  std::cout << "Corpus is " << c1.size () << " entries long\n";
-    std::cout << "--- Beginning ---" << std::endl;
-    check_one ( c1, p1b, 0 );       //  Find it at position zero
-    std::cout << "---- Middle -----" << std::endl;
-    check_one ( c1, p1f, -2 );      //  Don't know answer
-    std::cout << "------ End ------" << std::endl;
-    check_one ( c1, p1e, c1.size() - p1e.size ());  
-    std::cout << "--- Not found ---" << std::endl;
-    check_one ( c1, p1n, -1 );      //  Not found
-    }
diff --git a/third_party/boostorg/algorithm/test/search_test3.cpp b/third_party/boostorg/algorithm/test/search_test3.cpp
deleted file mode 100644
index e4c7661..0000000
--- a/third_party/boostorg/algorithm/test/search_test3.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <ctime>        // for clock_t
-#include <iostream>
-#include <fstream>
-#include <iomanip>
-#include <algorithm>
-#include <vector>
-#include <string>
-
-typedef std::vector<std::string> vec;
-#define NUM_TRIES   100
-
-#define runOne(call, refDiff)   { \
-    std::clock_t bTime, eTime;                              \
-    bTime = std::clock ();                                  \
-    for ( i = 0; i < NUM_TRIES; ++i ) {                     \
-        res = boost::algorithm::call                        \
-            ( haystack.begin (), haystack.end (),           \
-                        needle.begin (), needle.end ());    \
-        if ( res != exp ) {                                 \
-            std::cout << "On run # " << i << " expected "   \
-                << exp.first - haystack.begin () << " got "       \
-                << res.first - haystack.begin () << std::endl;    \
-            throw std::runtime_error                        \
-                ( "Unexpected result from " #call );        \
-            }                                               \
-        }                                                   \
-    eTime = std::clock ();                                  \
-    printRes ( #call, eTime - bTime, refDiff ); }
-    
-#define runObject(obj, refDiff) { \
-    std::clock_t bTime, eTime;                              \
-    bTime = std::clock ();                                  \
-    boost::algorithm::obj <vec::const_iterator>             \
-                s_o ( needle.begin (), needle.end ());      \
-    for ( i = 0; i < NUM_TRIES; ++i ) {                     \
-        res = s_o ( haystack.begin (), haystack.end ());    \
-        if ( res != exp ) {                                 \
-            std::cout << "On run # " << i << " expected "   \
-            << exp.first - haystack.begin () << " got "           \
-            << res.first - haystack.begin () << std::endl;        \
-            throw std::runtime_error                        \
-            ( "Unexpected result from " #obj " object" );   \
-            }                                               \
-        }                                                   \
-    eTime = std::clock ();                                  \
-    printRes ( #obj " object", eTime - bTime, refDiff ); }
-    
-
-namespace {
-
-    vec ReadFromFile ( const char *name ) {
-        std::ifstream in ( name, std::ios_base::binary | std::ios_base::in );
-        std::string temp;
-        vec retVal;
-        while ( std::getline ( in, temp ))
-            retVal.push_back ( temp );
-        
-        return retVal;
-        }
-    
-    void printRes ( const char *prompt, unsigned long diff, unsigned long stdDiff ) {
-        std::cout 
-            << std::setw(34) << prompt << " "
-            << std::setw(6)  << (  1.0 * diff) / CLOCKS_PER_SEC << " seconds\t"
-            << std::setw(5)  << (100.0 * diff) / stdDiff << "% \t" 
-            << std::setw(12) << diff;
-        if ( diff > stdDiff ) 
-            std::cout << " !!";
-        std::cout << std::endl;
-        }
-    
-    void check_one ( const vec &haystack, const vec &needle, int expected ) {
-        std::size_t i;
-        std::clock_t sTime;
-        unsigned long stdDiff;
-        
-        std::pair<vec::const_iterator, vec::const_iterator> res;
-        std::pair<vec::const_iterator, vec::const_iterator> exp;        // the expected result
-        vec::const_iterator exp_start;
-        
-        if ( expected >= 0 )
-            exp_start = haystack.begin () + expected;
-        else if ( expected == -1 )
-            exp_start = haystack.end ();      // we didn't find it!
-        else if ( expected == -2 )
-            exp_start = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-        else    
-            throw std::logic_error ( "Expected must be -2, -1, or >= 0" );
-
-        if ( expected == -1 )
-            exp = std::make_pair(haystack.end(), haystack.end());
-        else
-            exp = std::make_pair(exp_start, exp_start + needle.size());
-
-        std::cout << "Pattern is " << needle.size ()   << " entries long" << std::endl;
-        std::cout << "Corpus  is " << haystack.size () << " entries long" << std::endl;
-
-    //  First, the std library search
-        sTime = std::clock ();
-        for ( i = 0; i < NUM_TRIES; ++i ) {
-            vec::const_iterator s_res = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-            if ( s_res != exp.first ) {
-                std::cout << "On run # " << i << " expected " << exp.first - haystack.begin () << " got " << s_res - haystack.begin () << std::endl;
-                throw std::runtime_error ( "Unexpected result from std::search" );
-                }
-            }
-        stdDiff = std::clock () - sTime;
-        printRes ( "std::search", stdDiff, stdDiff );
-
-        runOne    ( boyer_moore_search,          stdDiff );
-        runObject ( boyer_moore,                 stdDiff );
-        runOne    ( boyer_moore_horspool_search, stdDiff );
-        runObject ( boyer_moore_horspool,        stdDiff );
-        runOne    ( knuth_morris_pratt_search,   stdDiff );
-        runObject ( knuth_morris_pratt,          stdDiff );
-        }
-    }
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    vec c1  = ReadFromFile ( "search_test_data/0001.corpus" );
-    vec p1b = ReadFromFile ( "search_test_data/0002b.pat" );
-    vec p1e = ReadFromFile ( "search_test_data/0002e.pat" );
-    vec p1n = ReadFromFile ( "search_test_data/0002n.pat" );
-    vec p1f = ReadFromFile ( "search_test_data/0002f.pat" );
-
-    std::cout << std::fixed << std::setprecision(4);
-//  std::cout << "Corpus is " << c1.size () << " entries long\n";
-    std::cout << "--- Beginning ---" << std::endl;
-    check_one ( c1, p1b, 0 );       //  Find it at position zero
-    std::cout << "---- Middle -----" << std::endl;
-    check_one ( c1, p1f, -2 );      //  Don't know answer
-    std::cout << "------ End ------" << std::endl;
-    check_one ( c1, p1e, c1.size() - p1e.size ());  
-    std::cout << "--- Not found ---" << std::endl;
-    check_one ( c1, p1n, -1 );      //  Not found
-    }
diff --git a/third_party/boostorg/algorithm/test/search_test4.cpp b/third_party/boostorg/algorithm/test/search_test4.cpp
deleted file mode 100644
index 997e359..0000000
--- a/third_party/boostorg/algorithm/test/search_test4.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/* 
-   Copyright (c) Marshall Clow 2010-2012.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-
-    Testing the range-based interfaces
-*/
-
-#include <boost/algorithm/searching/boyer_moore.hpp>
-#include <boost/algorithm/searching/boyer_moore_horspool.hpp>
-#include <boost/algorithm/searching/knuth_morris_pratt.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <iostream>
-#include <fstream>
-#include <iomanip>
-#include <algorithm>
-#include <vector>
-#include <string>
-
-typedef std::vector<std::string> vec;
-#define NUM_TRIES   100
-
-#define runOne(call, refDiff)   { \
-    res = boost::algorithm::call ( haystack, needle );  \
-    if ( res != exp ) {                                 \
-        std::cout << "Expected "                        \
-            << exp.first - haystack.begin () << " got "       \
-            << res.first - haystack.begin () << std::endl;    \
-        throw std::runtime_error                        \
-            ( "Unexpected result from " #call );        \
-        }                                               \
-    }
-    
-#define runObject(obj, refDiff) { \
-    boost::algorithm::obj <vec::const_iterator> s_o =   \
-      boost::algorithm::make_##obj  ( needle );         \
-    res = s_o ( haystack );                             \
-    if ( res != exp ) {                                 \
-        std::cout << "Expected "                        \
-        << exp.first - haystack.begin () << " got "           \
-        << res.first - haystack.begin () << std::endl;        \
-        throw std::runtime_error                        \
-        ( "Unexpected result from " #obj " object" );   \
-        }                                               \
-    }
-
-namespace {
-
-    vec ReadFromFile ( const char *name ) {
-        std::ifstream in ( name, std::ios_base::binary | std::ios_base::in );
-        std::string temp;
-        vec retVal;
-        while ( std::getline ( in, temp ))
-            retVal.push_back ( temp );
-        
-        return retVal;
-        }
-    
-    void check_one ( const vec &haystack, const vec &needle, int expected ) {
-        
-        std::pair<vec::const_iterator, vec::const_iterator> res;
-        std::pair<vec::const_iterator, vec::const_iterator> exp;        // the expected result
-        vec::const_iterator exp_start;
-        
-        if ( expected >= 0 )
-            exp_start = haystack.begin () + expected;
-        else if ( expected == -1 )
-            exp_start = haystack.end ();      // we didn't find it!
-        else if ( expected == -2 )
-            exp_start = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-        else    
-            throw std::logic_error ( "Expected must be -2, -1, or >= 0" );
-
-        if ( expected == -1 )
-            exp = std::make_pair(haystack.end(), haystack.end());
-        else
-            exp = std::make_pair(exp_start, exp_start + needle.size());
-
-        std::cout << "Pattern is " << needle.size ()   << " entries long" << std::endl;
-        std::cout << "Corpus  is " << haystack.size () << " entries long" << std::endl;
-
-    //  First, the std library search
-        vec::const_iterator s_res = std::search ( haystack.begin (), haystack.end (), needle.begin (), needle.end ());
-        if ( s_res != exp.first ) {
-            std::cout << "Expected " << exp.first - haystack.begin () << " got " << s_res - haystack.begin () << std::endl;
-            throw std::runtime_error ( "Unexpected result from std::search" );
-            }
-
-        runOne    ( boyer_moore_search,          stdDiff );
-        runObject ( boyer_moore,                 stdDiff );
-        runOne    ( boyer_moore_horspool_search, stdDiff );
-        runObject ( boyer_moore_horspool,        stdDiff );
-        runOne    ( knuth_morris_pratt_search,   stdDiff );
-        runObject ( knuth_morris_pratt,          stdDiff );
-        }
-        
-    }
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    vec c1  = ReadFromFile ( "search_test_data/0001.corpus" );
-    vec p1b = ReadFromFile ( "search_test_data/0002b.pat" );
-    vec p1e = ReadFromFile ( "search_test_data/0002e.pat" );
-    vec p1n = ReadFromFile ( "search_test_data/0002n.pat" );
-    vec p1f = ReadFromFile ( "search_test_data/0002f.pat" );
-
-    std::cout << std::fixed << std::setprecision(4);
-//  std::cout << "Corpus is " << c1.size () << " entries long\n";
-    std::cout << "--- Beginning ---" << std::endl;
-    check_one ( c1, p1b, 0 );       //  Find it at position zero
-    std::cout << "---- Middle -----" << std::endl;
-    check_one ( c1, p1f, -2 );      //  Don't know answer
-    std::cout << "------ End ------" << std::endl;
-    check_one ( c1, p1e, c1.size() - p1e.size ());  
-    std::cout << "--- Not found ---" << std::endl;
-    check_one ( c1, p1n, -1 );      //  Not found
-    }
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0001.corpus b/third_party/boostorg/algorithm/test/search_test_data/0001.corpus
deleted file mode 100644
index 3558075..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0001.corpus
+++ /dev/null
@@ -1,43067 +0,0 @@
-TU0AKgAfhPqScHN4dnZ2e3p5e3h7eXl4dnd1dnV3enp5dnd3dHV1dHNzd3l3eHh5
-eXZ4dXd2dHNwcHFwcXBxc3h0dHN1eHVzcXV1dXV2c3h5dHV3eHVwcHF1d3V0dXJy
-cXNzcHBwcHJyc3R0dXl7eHJycnF1dHV2d3h4eHV0cnRycXN1dXN0c3R0c3R0cXJx
-cHNxb3B0c29scHFybm9sbGpscXJ1c3NycXN0cHFvb3JzdHBycG9vb29vb29ubXBt
-bG9vcW5tbGptb3Fwb3Bwb25vbXFtbGtvbGlrbnBuamxsbW1rbW1vbm1ub3Btamxw
-bWpsbG9xcG5ua2tpamhqZ2hnam5saWhsbG1nZmZnZ2lnamtxa2ppZWZmZWlrZ2hp
-Z2hraWRmZ2psa2pmZWVkZmplZWZiYF1gZGVlYmJiY2RhY2NiYVxdW1xgXl9iZV9d
-X11hXF5gZWJhYF5dXWFmYl9hXl5gXmBhZGFhYV1cYWFiX2FgXFxgYF5iYmNiYWRk
-YWFjYGFgXl9jYmVlZGZiYV1cXFtbW1pYV1dVV1VWVlFQT1FRUVBRTkxLSktNTVFP
-UU1RUVBSUlBPUlNTUVRWWFpcVldXWldXVVhYVlpbWV1dX1tbW11fYWFdXV5iY2Rg
-YFxeYmNmZGJgYFxdYmdjYWFhYGJoZGReXF9gX2BhYWJhXltdXVtaXGJlYV5gYF9f
-XVxcX2JiYWJkZmNjZGNhYmFcW15dXmBgX1xdYF5fXF5eX15bWV1dXl9eWlldXGJf
-XmFgXVpcXF1dXlpdXl9dXWRlYF5cXFtfYF1eYl1hX1xbXV1ZV1hZWltbW1peW1lb
-XlteW1xeXlxbXVpYW11dY19fW1xfX2NhXV5hZWNfX2FjZGJlZmVoaWtlY2FkXl5f
-YWJjX19fX15dXWBjYmJeWlteXVlaXmBeXF1fXVxdXFtaW19dXFtfWllZWVxeXlxb
-XV9eW1xcXF5cW1paVVdbWVteW1pbWlZeYV1ZV1tYVlVWV1hbWVhcWVhVWlhbWFtX
-V1ZWWFtXVlZaVlRVVVdVVldWVlNRU1NTUVNXVVlZWVZVVFNWVlRTU1JVV1ZWVFRU
-UVFVWVpZWlZWWFpYWllcXVpYWFdWWFxbWFlYWVhVVVRVV1dZWllYV1lcXVpcWVhY
-V1VbW1tVVVdaXFpXVlZZW1leW1lWWFdaXVpXWlZXW1xdXFxgX1xYXVxcXFhaW1tb
-WlxcWltdWldXVldXWVZVUVJVWFlcW1tcWVpgXF9hX11aWVpdXFxeXFtbX19aW1lb
-W1paWlxdXVpaXVxeX1xdX2JjYV9gYl9cW1xbWFpeXl1gYGBgYFtdY2JhYmBgYGBe
-YGJkY2NjYmNjYGNiY2JgX19hYV1gX2BgX19hZWRiZWJjZWVgYmFiZGRmaWdmYWJj
-YmJjZGFhY2FhY2RkZmVmY2FhYWVnaWVjZGNhY2RlZWZqZ2doZ2RjZGRmaGVlZmZm
-Z2dnaGhoZ2hmY2VmZWZjZWNlaWdlZmVoZ2hpamlra2hram1samtqZWpuc2tpa21u
-bmtraWtqYWNmZmZmZGVpZ2p0kMLk8vv+/////////5drcHZ1dnd7fHt6end0c3V1
-dnR0d3l3d3Z3fH16dnV2d3h0d3l2d3h4dXZ3d3d3dnBucW5vb3BxdHR3d3Jzd3Vz
-dndzc3V6eHVyc3JydXZxc3Z1d3V0eHZ1c3Bvc3V0dnR0dXR0dHd4dnRxc3J0c3d3
-dXNycXd1dnVzcHJ0d3h0dHFwcXFzc3N0dXFxcXFwcHFwcnFycHJwcHJzc3Fwc3Nx
-cnRyc29ucHJ0cnBwb29ub3Fwc3R0cnJubnBycG1ucHBxcXBuc3FwbGpoamlsb3By
-c25qaW1tcW1qbXFua25xb21vcG9tbG5qa2doaW1sbGpqamxsaWpmZ2VlZ2tqZ2Zn
-amVoa2tvZ2ttamxpamlmZmdoZ2dnZ2traWZmaGlpa2loaGdmZ2ZoZmRnZ2ZmZ2Vl
-ZmJiZWBhYmBhZGBhX2BfXVxfYmJkZF5dY2JjYWNhY2BgXl5gXl5dXl5cXFxgZGVh
-Xl5eXlxeXl5iYl9fYF9iYWFhZWRkY2RgX2BhX19gY2VlZWRkZF1gYV9bWV1dWVxX
-WldXVlZVU1BQUE5QUU9SUU5NUE9OTk9NT1FRUk5PUVRUUVJSVFVYWVpXVldVVlRV
-V1dZXl9bW1pYWVhaXl9gXl5bYGJhYWFhX19jZGZmZGJgYF9eX19fYWRmX2RgYWBe
-XmFhYWNhYmBeXV5eW1xdYF5dX2NiX19hX11dYmVoY2RlZWFeYF5fXV5hYGFgYF9i
-Y19dXl5dYGNgXFZbXlxZWVxhYl5gYF9cXV1cXFxdX19gYVxZW11dX2NjXF5eXF5e
-Xl9eYGJfXl9eW1xcX11ZW1xdW15eW11gXl1dW11cX15dXl1cXF5iX2BgYV9iYmJf
-XF1mY2NiamZiYWNiY2JiYmVpbGdkYGBmY2VoY11dXFxcX15iYmFiYmBeW19hX19h
-XV5aYlxZWlxcW1xcW2BfW1hZXF5cXVtcXlxbWldYW1xcXVpYWFlYWllbW1paWFpe
-XFtYWFdUU1ZVWFhVWFteWFdYW1pYVlhVU1RVWVZXVVdXVVFSU1ZWVFNSVVtSUlJT
-UlNUVVVWVVVWVVJWVVVTVldXXFlXVVlbVVNUWVtXWVtVVVZVWltZWltYVVVZWVlY
-V1ZXWVVUV1dXWl1dW1tcX1lZWl5cXVtcWVVXXFxZXVxZVVRUWVlcWVZWWFhaWFtd
-WV5YVlZZWVxbXFxdXFtbXl5dXVtbWV5dYF9cXl9eXF9iXl5bWlhbWltZWFpgXlpa
-W15iXF1jYVtcYGBfXVpbXFxcWlxbXFtZXFxaWllaXl1aW11gXV5fX19fX19hXl5f
-YGFgYWBhXmBiXlxfXFtcXmBgYmJkYWJgYmNgYGJiYWBfX19jZWBeYWJgYFxcY2Vj
-ZGJfX2JiYWNmY2RjYmFhYWFjYWNpZmNjZ2ZkZGBiYmVmZ2VjYmNlZV9iZWhlZWJi
-Y2JmZWdoZWVoZmdrZ2VmZmhmZGRkZ2VmZmVkZmppZmVnaGhqamhiZmZnaWdnamZm
-ZmtubGtpZWhna21rZmRpaWVlZ2l0bGtrbG1obmdmamZnaGVkZGNobYekwePx+///
-////////l2pzdnh3fHt8enp5fnl2d3Vyc3d5eXd1d3d3eXx+eHp3eHR0d3d6enl1
-dHNzc3d1dXNwcHJ0c3Byc3V3eXVycnZ5enl3dnh3dnJxdHZ3dXh8eHl1dXZ3dnZ1
-dHV0ent2dXl3dXV1d3h3dnZ3enV0d3d0dXN3dXZ1dnd3d3h6eHZzc3JwcnN0dHJy
-cXFub3FycW9vcHBvbGxsbXFzcHF1c3N0dHR0c3Bvc3N1cnFvcW9tbXFzcm5ub3Bw
-b21ucXBxcXBvc3Bvbm5sa25sbW1vbnJ0b2trbWpqbGtsbnFxcG9vbm1xbm5tbGpp
-amtqbm1rbW9sa21taWhoZmlqaGlramxqaWdraGdqaWluam9ua2psamhkZmxpaWpq
-aWVlZWdnaGZjY2RpZmZnZmZmZ2ZnZGFhZWNgZGJhY11dXV5hXF9lZGRiY2JhYV5f
-YGZgYF5fYWJgYmBgX15gZF9eYF1gYlxeYV5fYV5eYF5gYWJkX2JhZGNlZGJjYmFh
-YWNgY2VhZGZkY2JgYWBjY15fXFtaWllaXFlYVlNRUVNTUU5PUlNSVlRPU09PUVJS
-U1FNT1JVUk9RU1NRU1NSVFRWVllWWFtXWFhaWlpeXl1hYV9cXV5fXl9gXmFgYGBh
-ZmJjYmJjYV9fX19dYWFiZmRiYWBeX2FcW15fYWBfXmNkX11cXF1cXVxfYl9hYWJd
-X2BhY2JjX2BkZGFgYWFfYWFgYF9eX2JiZGVjY19dYF9eXV1eXF9hXmBeXF1fXV9b
-XGFhY2FkX11dW1peXl9hYWJgX19jYV5dYGFgYGBdWV5cXV9hYGBcX2BhYF5gZWBb
-X19dXWBeXl1eYWNhXl1eYGFhYGFgYWFhZGNiZGZoZWdnZWNgXWFhX2NlY2NjYGBl
-Y2NjY11bXl9eXmBiYGFgXF9kYF5iYF9iYl9cW15aWV5fXFtaWVpdW1xfYGBfXV5d
-XFtcWlhXVlZWWFhYV1ZWV1daWlhbWlpcXFhaWFdYVldYWFlZWWFYWltYWVhSVFdV
-VlpWV1RYV1VTVFRYVVRWU1RUVVhWVVRUVFZXVFVXV1VWV1ZYV1lcbltbWV5ZWlpX
-V1VSWFtfW1laXV5bWFpZWlpYWVhYW1hYV1daWVlZV15dWlxgXVpZWV1hXFpbW1dY
-WlpZVldXWlpWVlZXWlhaWVlZWVpZW1xbWltcWVhaXVxdWVxZWVtaX2BfXFlaWVte
-XV5dXV9eXF9bW1xbXl9hXFdcXlxbWVpbXFpcWF1gXV9gYF9aW11cX15dX19gXFtY
-WVtZW1tdXWBiYWBfXV5cXV5dX15eXV1iZWRhYWFiYF9eYmBeXV5dYWBdXWBlY2Fh
-YWRfYF9hY2RhZGJjZGZjYmJfYV5fZGJkYWBgYGBjY2RlaGNhY2BjYF9eYWJgY2Vk
-Y2JlZWRhYmRlY2FjY2RjY2RjY2NjYWBkY2NjZGVjYWRnaGhramtqZmdnZmhoZ2pn
-aGhoZmhpa2lpaGhoZ2hlZmdqaGZsbWhpamlqamppa21qaGhmaGxqZWVra25sbW5p
-aWhraWprbW1pZ2ZoZ2hsfKXG4/D7//////////+UbnR5e3t6eHmAfH58eHZ2dHd2
-d3x3dHZ2dnl4eHt8fHh4dXVydXR3eXd2dXR4d3Z1dXZ1dnhydHd5d3ZzdHV4dXR2
-eX13dnV2dHN1eXh4eHd3dnd2d3Z3dXZ3dXZ3eXh5dnR4eHV2eHt8e3l1d3d1dndz
-dXV2d3Z1eHh0dXR2d3V0dnNxcXFvb29yc3FucnJycW9ycnBvbG5ycXF0cnNyc3Nz
-cnFydHFvcHFycnNucXJvb3FzcHBxc3NxcG1ucG9wb3NxcXJwa2xtbXFwbnJtbnFv
-b25uamtqbG1scHJwa25tbXBycG9tbW5vbnBvb2xtb2xxbmtsbGtpamtrbGtnamlq
-a2pnaGhmZmptaGhoaGxqaGhpaGhpaWlpZmVkZWdoZ2lmY2RgYmhmamdlamdhYWNg
-X2BgYmNhYV1eYWFmZGRkZF9cXWBgX19fY2NdYWFiYWJhXl5kYWJhXF5eX19hYmZi
-Xl1gYmJiYV5iY2FhYmRhX2BlYWFiY2FfYWJhYmRjZGJeYmNmZGBhYmFaWVpaXFtZ
-V1ZUU1NSVVVXWVNSUE5NT1JPTk1NT09OUlVOTk1PU1NRU1JTVFRRU1NWV1dXWlhb
-W19bX11cX15fXFpeXV1dXV9fX15hYGBhZWJhX2FhYGBeXmFmZmJhYV9fYWFeXl1b
-XFxcXWJgYGFgXV5eW1peX2BgYF5iY2RiYV9gYmFhX2BhYmNiYWBhYWJfYWFfYF1f
-YWBgY19fXV5kYVlcX2FgYF1eW11gX15dXGBgX11hXltbW1teX2FdX15cW11cXF1e
-X2BfYGRmW1tfYFxeYWNiX15dXmBcXF5gX2RfW2BjYmFhY19bWl9hXl9hYmBeYWVk
-YmRiYmNjY2NjY2FjZGNgZGJjYmBfX19eYmJeXl1eXF5gXV1dXV1cXFtbW15fXV1e
-XlxdW1xeXmBbWVpYVltgYF5dXV1eWVlbW1xZVlZVWFhcXl1aWllWWllbXVxbWFha
-WFpbW1lZVldXVVZdV1daW1hYV1hVV1hTV1VUWFlXVVNVVVJSVVVWVlJWVlZWVFRW
-U1VVUlVUV1RWUlJYVldXVFdcXVlYV1JQVVlTVVZXV1ZTU1dYV1VUWV5bV1RWV1pY
-WFlbWldZWlpaWFdZXVlcXlpcXFlcWltfW1hZWltZXVteWFRSV1hZWlpZWFhXWFla
-WFheWVtZV1laW1tcWFlcXF1eXVtYWlpbXF1dX1xbWltcXl5hYVtZWl5ZWVxeXV5d
-XltaVlhaWVxcW1pbXV5eXlxdXV9eWllbW1tfXlxeXWBeXGFeXl5gX2BfXlxgY2Ji
-YWFhZWJgXWBfYV9dXl1cX2BeXl9eXFtcX19gX15hY2NgY2NnZWFjY2FkZGJjZGJi
-Y2BhYmNkY2NhYWJjZWJiYGJkY2BhZWVmYmRkZmVmZGNiX2FkZGNkZGJjZWRlZGJi
-YmRjYmhjYGJmaGlmY2NnZWhlaWtramhmZmZqZGhpaGZnZmRnaGloZGhqbW9tamdp
-bGtubGlsbmxpbW9wbGtra3ZoZGdpamhnZmhpaGhtbW1ra21qaGt+psnk8Pr+////
-/////3ZnbnV6fHh2eHp7fHx6d3h3dXV6eHd5dXNzc3h5eXl1d3d3d3h2dnd0dXR0
-d3h5eXh6dnV1cXR1dHV1dHR2d3h3d3p4enp5c3Z3dXV0dXl4dXV4eXt5dnZ0dnp5
-eHZ4dnd1dnNzdHJ4fHx8eX16eXh2dnR0dHV2eHV0eXRycnB0cnNwc3dzb3FvcXNv
-b3Fyc3Bxc3JwbmxucXJycnRzcXBwdHV1dnVzdHJvcnFycnZxcXFxcXJwb3BvcXRw
-bXFvbXBubGpwcXFtbG5ub25xcW9rbG1tbGxta2xrcnRxcm5qaWxra25wb3Byc3Zw
-b2xsbWtub21ra2xpam5tbG9ybGxrampramlpbHVza21maGxqaGhnaWxrbHBsa2tp
-amhnZ2ZmZWViYGNjZmRjZWRoZ2VhX2JjY2JiYGFhYGBfY2dnY2FiY2BiX19fX2Fh
-YWJgY2JjYGFgYV9dXF5fYFxfYWJkYmJeX2BiYmFiYGFkZGJfX19iZGlhYGBkZGJj
-YmRlZWRhYWBhYmBcXV1dXVxbW1xbW1ZXVFBXVFNUUFRUUk5RVldQT1FPTkxMT1JR
-UFFRUFBRU1NRUFBRU1RRUlRVUlNWWFhaWV1eW1tdX2BgXllYXV1bXWBgZWNgYV9g
-X2JfX2JlY2VmZ2RmZWZiYl9fX19hX2BeW15hYF9dW15gYWRiXVlbXF9hYmNjZWBm
-YmFdY2RkYWFhXV9gZGFhY2JeY2FeXmFgXl9iYl5dYGFcX15dYF5dXVpZXGBfXl1d
-W19gYF9cXF1cXl1bX2JeXl9aW11cXF5fYGBhY2ReYF9dYWFjYmFfXlxcXVtdXlpe
-X19lY2NgYWBeYmJdXl1hY2JiYl5fYGBhYmRkZGJjY2JjYmJiYGFiYmBfXV5eXV1g
-Y2VgX1xbW1teXVpZW15fXVxeXVteW1lZW1pcXFxfW1tZWmBeWl1eXV1eXlxYWlxc
-WlhZWFlYWFdbWltdWVlZXFlaW1xaWFdYXFtaWFhYVVZZWVhYWVdWVVZZWFxXW1lU
-WVhVU1dXU1JVVVZYWVlaWllYWVZWVldWV1hWVlhXXllYV1ZYVlZVW1lYWFRUU1RX
-VlZYWVlYWFhTWFZYW1xaW1lYWVtZVVZXWVpXWltZXFxaWFpXWV9fWltYVVhdWlla
-WltZV1hYWVxbV1hWWFxbWl1cWlddWVlbW11aV1daXFlZW1xbWlpeW1pdW1lXW11e
-W1tbXFhaXl1bX19cW1tcXF5cXF1aV15eWlpbWl5cXlxcWltcXFxbWlpcXFxdXV1i
-X1tcX11gX2BhYWNhYF1eYmJfXmFnZWBeX2JeX2FfX2FjYWNeX2BfYmRkYWBfXl1f
-YmNgX2BiYGFhYmNmZmZlZmRjZGVlZWZkYF9kZGNfXl9gX2BjZWVlYmJhZGJlZGJl
-Y2JkZmRjY2ZnZGVkaGZlamJiYGRlZGNhY2hoZGNiaGVpZmtpZmBkZ2plYmRnaWdl
-ZWdpZGZnaGdkZGlta2pqaGlnZ2RoamlmZ2pta2lqa2tqbG1sbGtqZmtsamhqbGlr
-aGloa2dnbmpwZ2lra3uly+Hv+P7/////////cHR9e3x6d3Z3eX16eXh3d3p6dnh3
-e3l0dnVxc3Z3dnd5dHV2eXh1c3N0d3Z4eXl8fXyAenl4c3RzcnJ2enZ2dnV0dnl+
-e3d0c3V2dXR4eHh2dnZ1eHZ2dHh4d3V3dXR1dnZ1dHR0dHd3eX97eXl2d3R1dnd2
-dnd5c3JzdXJ1c3Z3dXZ2eXRwc3FycHBwbnBvbnB3dXV1c3RwcW9ydXJzcnR3eHZ1
-eHR0dXRzb29wc3FwbXBtb21ubWxtcnNxbm1rb3Fubm9ubm5vbmlqbmxpamxvbW1s
-bm9vb3FycHJsamtra21rbW1vcG5xb25tbGlpbWttbmxsbGxpa2hoamtrbWxqamxt
-bW9ub21rbnBoamdlZ2ppbW5tbW1vbmxqZWdmY2VlZGNjZWZlYmVjZGBiZGBfYmJp
-Z2NkZWNgX2NjYmNhX15lZWBdYF5iXVxbYWBgYmJgYF5gX15cX19jY2BeXmBfYGJi
-YV9gYmFfYWJjYF5gY2RiX2FdYWJlZGRkZWVmZ2NhX2FhXmNiXVtcXV1dXVxaWFZX
-WFVUUlNTU1VTVVNVT05QUFFNTU5MTU9RT1JTUFBRU1BRU1VUVVVTU1NWVFZaW1pY
-WlpaX11eX15cXVxdXlteXF5hZF9fX2BiYWFgXmBhYmJiZGVkY2RjYGBfXWBiXVxg
-YWJhX19jYF9hYGFfXFpbXWJjYmFgYGFiYmNiY2VhXV9fZmNkYWFiZGNjYF5cXVtb
-Xl5eX2RkZF9eXF5dYF5hXlxbXFlYW2FiY2JfYlxZXl5dXV9dXV5gXl1cW1tbWVpd
-Yl9fYl9eY2FdXV5eXlxdXFtbXmBgYGJiYmhlX2FeXl5fYF9fX11fYGBiYGFiYWFl
-Y2NiYmBiYWJnZ2FfY2NgX2JkYmNlYl9fYFxdXV5fXVxZWlpdXF1cYF9gX19eXlla
-XWBiXlxYWFxcXVtdXFpfXmFgW1lbWlpYW1pbW1tZWFtdYF1aWVpZXWFfWFZUVlZX
-WVtaWFZWWVZUV1dYWFVUVFZVWFxcXVtaW1hWVlVUU1ZXWVhbWFlWVFhaW1lYVlVU
-V1pYVllZWFdaW1pYWFtVVlpcWlRUWFdXWVlWWVtbW15YWVtbXFtXWllWVVdZVlZY
-V1hZWFlYV1daVVZZV1tcXF5cW1paWVheXl5dW1xcWVpbWFhXWllYW1hZXlpWXFxc
-W15aWVpZWllZW15dWlxbW1lbXVxaXF5cXFpaWlpbXVtcXFxcXl1aWFpcYWNfXFxc
-XF1dX2FaWVtaWVpbXVxbVltaWVpfXF1cYWBeYWFeX19dX2FfXmBfX2NqZGFfYGJg
-YGBjY2VhYGFmZGFgYWJgYV9eXl5fYmNiYWJiYF9eX15gYGBiYGFjZGNjY2JjZmdj
-ZGFhX2JfYGJhY2RjYWRjYmNiZGBkZmZmZWRkZmVmZmVlZGlmZ2lpaGZmZWdkZWRl
-aWhjY2FjZmNmaGlnZmtqZ2dkZWdkZWVkZGdkZ2hpamhoZ2ltZmZnZWVlY2dpamtr
-[... base64-encoded binary payload of a deleted file omitted (no recoverable text content) ...]
-ZmJgZGdkZmBkZmVlZGVkZWVkZGJiZGVoY2RjYmJiYWNkYmJjZmVkX2FgX2BgYWJj
-ZWNjZGFiYF9fXV5fXVxcXFteW11bXV1eX2JgYF9fYWFcXF5eXVteXV1dXF1fXV5c
-Wl1dXV5dXFxbWVZYWltaWl5WWl1gXl1aX1pfYFxbX1xbXFtdXFxbXFxcXl9eX2Ff
-XV5gW11cXlxgX2BeXFtcXWBcXWFlZWRkY11eYGBgXl9gYF5eX11fXmBhZmReXl9c
-XV5dYGJjZGBfX11eXVxeYGBfYF1eXF1eYWBdXl5fYGBlZWNhYmNiX2FjYWJiX2Fj
-YGFhYmRmZV1eXmBeW15cYF5fYmJhY2RgYWJiZ2hkZWBhYmFiYmFkY2NiYWRkY2Fk
-ZWNiY2JiZ2ZjYmNjYFxfZWVhYl9hYmBeYGJgX2JgXmFgXmJjZGJkZmdmZGRkZmRk
-ZGViZGVnZmRlZWZkZWVhZGRgYWNhY2VlZWVkZmBfYGRiYWJkY2RmZ2VlZ2JjYGNk
-ZGNjZWVoaWRjY2hpamloaGdnZmlpamZqaGZlY2hnamdnaGhnaGlqa2tqaWhqbW5q
-amxram5sbm9ta2xsa2pramlubG1sb21qa21vbWxsamprbm1rampoaGhqaWprgLjT
-3+ju8/b5+/v9/nl5fHl6e3t6dnl3eXZ5e3p6e3h4e3t6d3p6eXl9f3p7fX99fXt8
-e3t8fHt3ent9fXt6e3t9fHp6enx6fHx7d3p6fHx9fXx8fHx7fXp9gICBfXx+foCB
-fn5/gH98fn16e35/fn+Cgn58fnx7fH5+fnx+f4CAgYCAfXx6fXt7eXd3dnh3enR0
-dHNxdHRzdnZydHZ1dnNydXZ4dXZ0c3R3cnNzdXZ3eXl5c3l3dHZ3dHN0dHJ0c3F1
-cnNzc3N1dnV1dHVyc3RxdXx0cHFxcm1sbW1ub29vcnBvb3Fwb3FxbW9wcXFwcnV1
-cnJyb25yc3NwcHByc3N2cnFvbmttbnBycnFzcW5sbW1vbW5tcXNxb3Bzcm9ydXNw
-cXFubnBubm1tcG9wbWxxbm5ta2ttbm5vbGxpa2tqaWtsa2loaWhpaGhnZmdqaWdn
-Z2dkZWdkZmlramdpaGZoaWlnZWZoaGZqZmZmaWppaGdmZ2ppa2tqbGxsZ2dnZ2hn
-ZmZpaGxrbm5tbmxramlqampramhkZGNhYGFfXF1bW1hYWFdYV1dUV1pWVFVVVlVV
-VFRYVlJTU1FSU1ZVVFVWWlhWXV1cXGBcXVxeW11cX2FkY2NkZWVmZmNiZmhqZmRi
-Z2ZjYWNnZWRiYWNkZGpqaWhqbGhqaGZmZ2ZlaGZnZ2ZkY2diY2VkZWRiYGVlY2Jk
-ZWJiY2lnY2NnZWZnZmNlZGVmaGZkZWdmZmRjZmRjZmZqZmZlZGJkYGFkYmNlY19e
-YWRjY2JgYGJiYWNhX2BhYmBiXl9fX2JeYmBiY2FfXmBkZGVjYmVkY2RnZGVmaGRi
-ZGRmZ2dkYmRlZ2ZjZGVlYmdpaGljZWhqaGdmaWtpaGZlZmVoaWlpaWZkY2FjZmZn
-a2dlZGVjY2RkYl9gYWNgYGBlZGZnZWViY2FmaGVmZmZmZWJiYmNjZWViYWRkaGdi
-YWNkY19fW15jYGJgXl5gYV9fXF1eYGBjYV9hYV5fX19iYV5fXl9dXl5hXV9gX2Bd
-X19cXF1fXl1gXF5eXlpaWVlbXF5bWFhaW1xbWlpaV1lbWlpaWFtbXltcW1xfXV1h
-YGFiXVtcWlteXV1bXFxcXF1eXl9fX2FgYGBeXFtdXF9fZmRkZVxhYF9hYmJfYGNi
-Xl1dX2BeXF5eXmFhZWJgX15hYF9eW11eYF1dYF9iYGBhX11bXF5iXl1cXVtbXGFf
-Xl5bWlxeXF1gX2BeXV5eYF9iYl9jYl9eXmFhX2NhXl9eYF5eYmReY2BgYGNjZGBe
-YmFlZmdlZGJhY2JfYGNjYWJiYGFgYmNlZWNhYWJhZGhjW19jZGNgX2JgYGdkZmZf
-X15eYGJfY2NiYGVjZWVlZGNgYmRmZGhlZ2ZjY2JiZWJgYWVjZGJhYmBiYWNiYmFh
-YmJkYWRkZWZmYl5gYmFfYmhmYmVlZGJiZmVlY2VmY2ZgYWVpaGhpaGloaGtsZmVo
-aGVnaGlsa2ZkZmVnaGdoZWhnaGhoaGlqaWxraWpraWlqa2xsbG1tbmxtZ2hrbWls
-bG9wamhpamttbW5tbGxoaGloaG6Ft9Le6O3z9fn6+/38e318e3t8eXh5eXd1enp3
-eXh3dX1+e3t9eXt6eHl5dXd5eXl7eHt7d3d4dnp9fHx6e3p7e3p7eHh5eXx6e3+B
-fXl2e3p6fH19fXx9fXx+fH19f35/gIOAf4CBfX+AgH96fX2Bg35/fn9+fXp9fHx7
-e3t5e4B9f399fn18eHd2eHl4eXl1dHl3dXV2dHN1dHZ2dHN1c3ZzdHR3dnd7d3h5
-eHR0d3p8eXd6eXp2dXZ1cnF1dXR0dHVyc3Z2dXRzdHR2cnN0cXN1dnR1dHJvb3Bz
-c3FucHVzdHJva29ycnFycHJycXB0c3R1c29ucXNycnNxc3Jxc3BydXVycm9wdXZ0
-b3FxbmxrbGtucXFxcnJxcm9wcnNwcHJxcG5vb25vb3BubGpqa2xta21tbW1zcm5r
-bmxoaGxpamtraWdnaWhnZmtpaGRlZGZoY2NnZ2hnZ2hoamZlZGZlaGlpaWpmYWdm
-ZWNjZWRmZmZnaGhpbW1qampoaGlpaGVoampqbGxqa2tra2xqaWlraWZnZmVmY15k
-YWBdXWBaWllXWVZVVFVWVllVVlNSU1ZTVldVVlVRU1JSUldaWFVVWVtbXF1cWVtd
-YmNdXV1gX19iZGNiZmdmZmRnaGZnZmdmY2VkYmJkYmNnZGBfYmZnZWZkY2RlZ2do
-ZGRkaWlpZ2ZjYmRkZWNgY2JgZWZlZmZjZGdmZWhoaGdmY2VqbGhmYmFjZWFjZGRk
-Y2NjYGJgZGdlZWRjZGJgYGNjYmFhYF5hZWZkYWFfY2FiZGNiYmJkZWVkYmJeYV5f
-ZWZlY2BdYGBhYF9hZWVeYV9iYmhpZmRoaWZlZWdlZWNiZWRlZGlpZ2hpaGhoaWdp
-aGlrbGtoZ2VpZ2RoamlqaWtqaWhkY2RlaWVkZWdjYWdmZWRmZWRjYmJjZGdmY2Nk
-ZWJiaGhqZ2RmZWpmZWRlZWNmZWZlaGZkZGRiZmJkYF5hYV9iZF9eXV1gX15dYmBh
-YF9gYGFdWmBeYF9eX1xcXV1hXl5eX15eX19iYltcYGBdXFpaW11bWlpaWlpcXFta
-V1pbWVZYVlhbWllbW1leX1tcXV1dX15jZGBbW1xfXl1eXl1aWFpdW1teXFxcX19f
-X19cW1tcYmNgYmNhX11dXl5gXV5hYmBeYWBgX1xcW11cW19hX2JiZF9gYltgXl5g
-YmBfYWFiY2NiXl5hYGJhXl1bXVxeXl9gXVpbXFxgXVxeYV5bXVxfYF9iYV9fXlxd
-X2FfYGFiYmBeZWJhYGFiZGFhYGBgYmNgYGNgYWFjZmViYWFlZmZkZmViX2BhYl5f
-YF9cXWBiYmhnYWJiY2BfXl5hYWRjY2FjYmFiYmJgX2BiYmRjZWNlZmRmZmVkZGlm
-ZWViYWJjZGJhZWRjYmRoZGRjZGNjYmNjXmBmZmVjY2VkYWBkbGZoZmViZWdlYmJh
-YWJkY2FgZmRiZWhkZmZlYmNnaGVmY2pqbGtnZ2ZkZ2dnaGdnZmZlZWRnaWlpaWht
-a2ptbmloaWxtbm1ubW1tcG5tbm1sbmxubG1tbGxsa21rbWtua2traWprbZe70t/n
-7vP1+fr8/Px7fHt6enl5eHZ5eXx4eXx6eXh7e4B8e3p2e3x6fHx/end2eoJ/e3l3
-enx/fXt7ent6f36Af3l6eHh6fH99fX6AfHx7enx6fHt6e3+AgYCAgX5/f39/fn5/
-fHx+fXx9gX9/foCBfXx9fYB+fXt8enl8e3t8fH5+fHt7fn14eHp5eXp7enl4d3Z3
-eHd0cnFxcnJzcnJ1enR0dnVzcnd7d3p2d3V1dnd3dXl7f3h5eHZ4dnR0c3V2dnV4
-dnd0dXV0dXR1dXJycnFycnRwbm1wcXJwcHJxdHRxcHBybnFycnNyc3JycXFxcXNy
-cG1xc3Jzc3NycnNycXBycXJzcnV2cXFxbm1vcHFtbG5xcW1ub3Fsb29xcnNycHB0
-c29ycnFubW1ubm9ubGtqamttbm1vb21sa2ppa21qa25ra2ppa2psampmZGRlZGZk
-ZGVoaWppamlraWlrZWVoZm1rZ2VlZWRpamVlZWdmZmpoZ2lpa2hma2ppaGdnaGtr
-aGlpa2pqaGppaGdpaWhraGhlZGFgYmNiYV9gXl9hXFhYV1RUVFNTVlRTVVVVVFNV
-U1RWVlVUU1dVVVpYU1RZW1hWWVpbXF5gYl9eYWJiYF9hZGRnZ2lnZGNjZmZmZGJh
-Y2doaGZmZmJjZWhnZWRkY2dkZGRpaGdjZGZnZmhlZ2dkY2JiY2hjZGRjZWVlZWRo
-bWdoaW5saGZkamlnZmhra2dkaGVlY2RjZ2ZjZWRkZGReYGJgYmJhYmJhYGBiZmNl
-ZGVjYmNiZGRlY2pnZWVjZWNiY2NjZGFfX2BiYmNiYmJgY2NiYGJgaWdlZmVmZmVl
-ZGRjZWRiYmNkZWZlZWljYmFlaWpoZ2tsaGpoamdlYmVnZ2dnaGZoamloZmZmZmdo
-aWZnZ2NlYmJiXl9jZmNgYGFhYmNjZGRkZmhkZWdnZWVkaWNhZGRkZmdlZWJiY2Zi
-YGJkZ2VlZGVhZGJgYV5dXl9eXFxcXl9aWl5gYGFeX19hYV9dX19cW1tdXV5eXFte
-X2BfXVxcW1xYXF5cWl1bXlxcXFxcXVtcXF1fWllXWlpbXlxdXlxaW15fW15cXV5d
-W2FfXF1gXmBgYF1ZXV1cXVxcXV5cXVteY2JeW1xeXV9kZWBfYF5hXV1fXlxdXl9e
-YGBeXl5gXmFiYmBgYGFhZWBfY15eYF9gX19fX2BgYmBeX2FkYl5dX2BfYGBgXl5e
-YmJhYV5gYF9iYmJjZGNhYmFgZGBeYWBfXmBfX2FhXmFhYGJiYl1eX19gYGNlY2Nk
-ZGVjYmFiZWRkYWNlZ2ZjY2RiX2FhZGJhXWBmY2BjY2RgX2NjYGBeXl5gYmJjZmBi
-YmNkYGJjYmNmZ2RmY2FiYWFiYWFhZWNiYWRhYWFfY2NiZGNjY2JlYmJhY2NjYWBf
-YWRjY2RlZGVkZGNlY2dmZmhlYmBiXWFgXmFkZ2hmZF9iY2JkZWNiY2NkZWZnZmdr
-aWZlZ2ZkZmVna2lnaWpoZWVmZ2hqbGtnaGlpZ2tqbW1tbWtraW1vcW1wb25ybWtu
-bW1sbm5wbGxubGhrb21qaWtsk7/R3ebt8vX4+vr8+Xp7fX15enp7eHl5fH5+fHt7
-fn5/fHt6enh5ent8fnx5eXx1ent6eHp7fIB9fHx6eHt8fX19fXt6fn2BgH59gH59
-gIB+fnt7e3p9gYOBfn1+goGBgIGAfn17fIB+eXmAgYB+gH98fX5+foB/fHt6eX17
-e3t7f35+fHx8eXp8e3h5en18eXVxdHVzc3R2dXNycXJyb3Nzd3J0dHZ4dnV4enl5
-d3R0dHd2dnZ4enh3dXV3eHV2d3d1dXVzcXR1c3BxdHN0endxcXFwcXFzc29xcHFv
-cXFxcXJ0c3VwcnNxc3N1dXFycHFxcXRzdXRyc3V1dHFydHNzcW5ycnRycHBvcHBz
-cG9tbm1scG5vcHBvbW5vcHNyc3F0cW9ycnJub25tbWptb21tbWxsa21sbW1xbmxq
-a21sbGtpamtpaWlqamlpbGhlaG1mZGRmZ2toaWpsa2hsa2tsZ2Zqa2poZmVpaWxu
-bmZkZGNnbGpra2toaGtoZ2hqamdna2hnbW5sbmxqaGdrbGxraWdlZmVmZWRiZWVh
-YWFeYF9eXlpXWFRUVVZVVlNSU1VUVlJUVVFTVVVWVlZVW1lXVlddW1tbV1xaXV9f
-Y2djYGBkZmhmZmViZGdnZ2RlY2ZmZWRkZWhoZmdmZmVqaWZmZmVqaGtraWhoamln
-ZWVmZWtpaGRhY2ZmYmFoa2xnZWVkZGNlaWlpbGtpZ2Vma2hmZmlpaGZlY2NgY2Rm
-ZmVjZWRoY19jYGBfZGNiY2FhYWFhYWNiYGJjYF9hYmJhYWRlZGRjYmRhYmNlaGdh
-Y2NkYGFgYmNiY2FlaGdnaWdkY2doZGRmZWJjY2VnZWdnY2FgYWRkYmZjZWNnZ2Zm
-Y2RlZ2ppZWhqaWdmZmZpaWZlZmdiZWRnZ2RkZmJiYmFlYWNjZGNiX2JiZWNlY2dl
-aGVjYmNiY2JnYmZlZWVmY2RiY2JkYWNlZmNgY2RmZ2RhYmNhYGJjYmBfXl5gXmBh
-XFxdYGFhYF5gYWNhYV9cX15eXV1cXl9eX15dXVxcW11bWl1dXF9dXV5eX11aWltZ
-XVxcWFpXWVtcXl1bWF1fXl1bWVxbXF1fW1hYW11jZGNfYF9fXl5dXl5eXF9dXGBr
-YV9fX15dXl9eYF9gX11cYGBfXV9eYV9eXl9fXF5hX2NlY19gX11bWF5hYWNfYV9b
-X19gY2FhYV5fYGBfX2BfYWFkYmFkYl5gW1xdXl9dX19lY2RhYV9cW1xfY2VeX2Fh
-Xl9fX19iYV5gYWNjZmRkY2FgZGRkYmNjZGBcX15gYGBgX2FkZWJhZGJiZGNmZWJj
-ZGFhZGJhY2RpYVxeYl9hYWBiZWRjYV9gY11jYWFmY2RlZWNiYWJfYmRkYWBhYmJh
-YWRjYWFgYmRnZF9gYGVmYGFkYF9hYmBiZmRkZGFkZGNiZGllZGVnaGZlZmBjZWhk
-YWNlaGVlZWJlYWFhYmJjZ2pnZ2tpaWhpZ2dmaGdnZGZpbGttbGhqaWpraGZpbGlo
-aGpqam5wbmpub21ubGttcGxvcW1sbm1ua2dqbWtsbmtubW1tbGtra2+VvNLe5+3y
-9fb5+vv7fX18eXx6eX9/eXx9e3t6f4B/f35+fn15en18e3p3d39+gH96e3x6eXh6
-e3x9f394eXl6fHx9e35+fH99fX19fHx8e3p8fHp7fH19fn59fHp+f39+gX+Ae3x8
-fX1+gICBg4SAf4F/gIB9fX16eX58enp6fn58en9+fnx6fXl2eXh5eHd3dHFwcXBw
-dHRzc3NzcHFycnJyc3F0dnV1d3V0dXV1dXZ3eHl6dnh2dXZ6dnN0dnZ2dXNzdXV1
-eXZ3c3h2dHN3d3V0dXNwcXJ0cnBxcnFtbXFub3F0cnJ3dXVzcXFtcXJxcnR0cnd5
-dHNyc3JxcXJzc3JzdXR2dnVzcnJwcXF0b25tb3R0c3B2cnBubm9vcHJubW5tbnJx
-bm9ubnBxcW1pbGprb25vbGtrbW1vbGpra21taWloZ2xnamxra2tqZmRmZ2RiYmVl
-ZmhqaWhlZGZma2tqamhmZ2pmaGhpbGxqaGZmaWloZ2tsamhmaWxqam5ta2lpbWln
-a2trbWtrZ2hpa2ppZ2NjY2NmZWdkYV9gYmFjYl5bWllVVFJQT1FTUFJUU1RVVFRS
-UlNVVVVVVFVZWFhXW1xfXFpcXV9hX19iYmFjYGJmZmRkZGZkZWVlZmRiYmNkZWZn
-aWhnZmRkaWxpZWdoaGZpamplZGdqZWRkZmVkZmZlZWVkZWRmZ2hraWZoaWhmamVp
-bGpnZWVlbGlpZmZpaWloaGJiZGVlZWJiYmNjY2JjYGFhYV9fYWFjY2ZmZmJgYWFg
-YGRjYV9fYGBjZWRjZGNmY2ZkYmJkZGNiYWFlZGRjY2NhYWVpZmZmZmVmZWRkY2Rl
-Y2NkZ2doY19gYmVjZGVmZWZkZ2prZmNkY2RkZ2hkZmdnaGlqZmNjZm5nZWRmZWRj
-YmRoZWJjY2VkY2ZhY2JiZGRjZmVlaGlmZGZlZGFhYmJhYGJiYWNjYmNiYmNkY2Zm
-ZmRiZGJhYGZjYWNjYmNkZWRiXV9hYF1fXF9fW11dXV5hYl9eXWJiYWJhXl1gYF5e
-YV1dXVpZV1xaW11gWF1cWlxdXlxcW1lcX19cXGBeW1tfXFxdXl9dXFtbWVlYW1tf
-X2FhXV1fXV9dXV9dXl1eX11cXFxfYGNcXmBfYGFgX2FhY2NiZGNgY2FkYGFgYGBf
-XV5cXGBiX2NgYGFhXl9gYGJiYmViYWJgX2BhX15eYWJeXV5gYV9fYGJkX2BeZWRh
-XF5gX2FhXmJiXmFiYl9fX19dX2BfXl9fXlxdX2FhYmBiYGFiYmNiX2JiYGJiYmBf
-YWNfYmFfYGFhYWVlY2NjZGBhY2RjYmBeX15fYWBgY2JgYmJjYGBhX2NlZGJgYGBf
-YmVmZWNmY2NjYWFiYmFgZGRjYmBhZWVjY2RlY2RiYGNhYmBhYmNjZGJlY2JlZGRk
-ZGRlYWJkZGRjZmRjZGZmZ2ZnaGRlY2VhYWZnZWlkZGBhYWBgYWNkZWVmZmlqZ2dm
-Z2VlZmloZ2lpaWtramhpZ2ZnZmhqbGprbGtoamxrbG5tbm9ubG5ubm1ramtrbGxs
-bW5rbm5ub3Bra21vbGdrbom8097n7PH09vn6+/t9fn9+fH16fH18d3h7e3p6fH1/
-fXx6fICBfHl4eXx/gH9/gIF6enp6fn5+fHp9fHx7e3t+fn58en57e3t6e3p5e3l7
-eHt7fH19fHt7e359fnx9fnx9fn18fX6AfoB+gISEhICAgn6Af31+fHx7foB+fXx7
-f3x8enp8fHx6eXl7enh3dnl0cnJybnR1cXR1dXJvcG9wcnV3dHd3eHd1cHV2cnh1
-d3d6eXt8dnp4enl3eHd2dnRyc3R1cXZ6dnV1eHh0c3V1cnBxb3Bwc3R1c3JvcnJw
-bm9wcHFzc3Bzc3FycG5wcnJxcHV0c3N0cXFvcG9vb290c3N1c3FvdHJubXBxb29u
-bHBxcnJzcXVzbm5vbnBvbm5ucW5vc3NwcXFvcXNxcG5sa2praWxtb3BtbGxrbGtr
-a2tpa2toamtqaGxraGRkbGRmZ2ZlZmpoaWhnaGhoZGhoaGZnZmZmaWdqZ2loaGls
-amhobGtsaGloZ2lraWdnZmlpa2hsamhpaWlra2poaWhoaGlnZGJnamhpZmBdXl9e
-Xl1fW1xYWVhZWFZWU1VaV1VTU1JSUlJTVFJTVVhYWlpaXF1dW1laW15gYGFgYWBe
-YWNlY2NiYmVoZ2dlYmZnZ2dkZmhqaGVnZ2ZmZWlpZ2RlZGdoa2dmZ2dpaGhnZGRh
-YWFhYmRraGdkZGVoaWlmZWdra2tpaGdoaGdkZGVmaGdmZ2lpZmhnZ2ZoZmRjZWNi
-ZGZkZGRkYWVkYmBhY2NiY2VkYmBgYF9eYGJkYmFjZmRkYmFhYmFjYGJlY2FiZGFi
-ZWVmZmZlYWJjYmRnaWdnZWNgYmVjYmVoZWdnZ2Zla2VkZWlnZWdoZ2lqaWVoZmRl
-ZGJlZWdnZWZnaGtpaWZmZmlmZGRmYmVpaWdmZGJjZWJhY2NiZWlnZWVlZWZkZmRj
-YWJlZWJkZGRkY2JkZmVhYmJiYmNiYWBhY2JiYWNjYV9iYl9jY2FiX19fYGBgX1xe
-XWJhYmBgYGFfW11bX2BjZWBgYl9eXVxbXF1cXllcXl1bXX10XF9gXVxbWlpdWVpb
-W1pgYmFhX1pcXF1dXl9gXFxaW1hbXmJeXl9eXltdXV5cWlteYGBgXVxcX2BgXV9d
-YGFhYGBgXV9jZF9jZ2djY2FeXVxhZGFdW1tbXFteX15iYmBiYWJeYWBhYF9fYWBh
-YGFfYWJhYV9eX2BiYWFkYmJjX2BgZGNdWl5eXmBcXmFiXWBhYWFfXFleYGBiYl9e
-X2BhZmNiYmJjZGNiY2RgYWFhZGVjYmBhYGJhYV9gXV1hX2FkYmJjYV5fX2FgYmFf
-X19gYWBhY2VlYWFhYmNhYWJjY2RgXl5fYGFjY2NjYmNjYWFiYmFkZmhlYFxeYGBg
-X2BhYmBgXmJhYWFgX2FiYGBiZWJoZmFgYWRnZmNlZ2JiYGNkZGZnaWRjX2BfYGBl
-ZmdlZ2VjXV5eYV9iZWVoZmVlZmpqaWdmZmZnaGlnaGpraGhqampqZmdpZ2pqamlq
-a3Bta2xsbWtsb3Nxb2xtbW1ta2ptbHBwa2ttbmxsa2xpa2tub3JuhLzU4Ojt8fT4
-+Pn6+3x8fn17fHx5e3h3eHV2e359fnx7ent8fn5+fXx8fX5/fHx8e3d4e318fHh6
-fX16eHh8gXx+fHx8e3t7fHp7fHt5e317e3t9fnx7fn1+fYKBf318fXx8f318e3x9
-fYCAgIF+gH99f4F/fn5/fn98e35/fnt+e3t7e3yBfnp8fnx6end2eXl5dnZzdHV3
-dXZ3dHJxc3Nyc3R1dXRybnNycHZ3eHd2dXR2eHl2d3l5dXl7eHh3dXZ0dXRzdHl1
-eHd1dXVycXJ0cnBwdHN0cXR3c3NxcXBvbm5vcXNzcXBvcG9wb29xcHBxcHBucXBw
-cXNxbnBxcXFyc3Jyc3JvcHFydHFxbmxqcHBvbW9wcXRwcG90eHBvbm5xcHJtcHJv
-cHBwb3B0cXBra2tubm5vcnBtbG9ta2ltbG1samtqamxoZWdoa2lnaGdoaGdlZ2Vm
-ZGRmZ2NlZmZna2hpaWdpa21rZmRlZmluamlpaWlqamloaWhoaWpnaGlpbGxsbWhq
-a2ptaGtoZ2VlZ2dkaGdpamplZmRiX19eYF5fYGBeXl1dWVRVU1JVV1RSUlJUUlRT
-VVNVWFpbWFpbXlhZWFldWl1dXV5eYGJlYmZnZmVkZWNlZGJkZmZsbG1raGdpaGZl
-ZWVpaGRjYmVpZ2tmaGVlZWhnaGRgYmJhZWVjYmRlZmZiYmdmZmdoYmRoZmtoaGZo
-aGZiZ2praGhnamViaGtsaGhjZWRkZGVeXl9iYmZjZGZkY2VkZGJjY2JhYmBgZGVe
-YGFiYWJiZWFjYmFjZGVjYWFlYmBfYWZnY2ZlZWhmY2VkYWRmZWZpZGJiYWRkYWNm
-ZWRjY2ZoZmRkYmRkY2RmZ2VjY2ZqamZma2dmaGhlZ2dlZ2hoaGhpZ2ZoaWdmZmVl
-ZWRkZWJjZGBgY2RnZmNiZWVoZWZiY2NmY2VjZWRlY2JkYmRmaWVjYV9gZWJgY2Nm
-Y2BiZGFgY2NjY19dXl9dX15dYF9iX1xdYWBcXVxcYFxcW15bXl9gYF9eXl1eXl9c
-XVxcXF1dX19dXmNcXVxdXVpaWVpbWltbYGFeX19bXVxcXl5eYFtZWlxcXl1aXF5h
-YWBfYFxbX11cXV1eYF1cW1pfYF5cYFxeX2BgX2JgXl5dYF5hZHZqY2JgX2BgYFxX
-XF5eXl5eYGBfYWBgX2FgX15jYF5cXGFfYF5hX19dXlxcXV5gYGBgYmNhX19gX19e
-Xl9eYWNeXmBfX2NhYV5bXFxeYF5dYGBdXGBiYl9hYWFjY2JhX2JeX2FhYF9iY2Jj
-YmFiYF9hX15gYGBjYmJjXl9eYWJiY2JhXmBiZGdkYmZpZWRiY2NmZWNiYGFkYV1h
-Y2JgYmVlYmNjY2JhYGFmZGJhX2FhYWFgYWBeX2RiY2JhXmJlYGBiY2NlZ2dhY2Jk
-aGVkZGRjZmBjZGRmaGVjY2JhYGJkZWZkY2VjZGNgXVxeZGVkYmJmZ2NmZmhoaWpn
-Z2lpampqaWltaWpsamlrbWloZ2lqaGttbGtqaWZrbmxsbnFybGpqbWtubGltb29v
-bm1pa2xsbWxuaWprbG6EvtTf5+zy9Pb4+Pr7fXx8fHp5fX56e3h7fHt6en5/fH2A
-fn18fn59d3p/fH19fH99en5/fX16eXx+e3l+fX1+fH59fn97e3x+gH96eHt6fYGC
-gYJ8f3x+f4B+fn6BgH59e39+e3p8fXx/e3x+fHyBgIKAfHx/fnt7gIF/fX1/f4J8
-fHx+fX99e3l7eHd3d3d3d3V4c3F2dXd2dXVzdXNwc3Vyb3R4dXV0cnN1c3V3eHd6
-eHl4d3d3dnd3eXt4dXd3dXV0c3Zzcnd1dnd0c3NxbnBwc3NwcnN1c3FzcnFvcHBt
-b21ubXBxcXNybWttbG1vb3NycXRxcnBvcHFxbm5xcHBycnJzdnRxcHJ0cHBwbG5w
-b21rbm5vdHl2cXJ0b25vb25ycG1ubm5ucnNxbm1tbmxtbGlrcnRwbm5taG1sa2tu
-a2xpaGhoaWhnZ2xramhnaWlqa2lmaGhmaWlmZmVnZWdmaWhoamdoZ2tqZ2VmZ2hr
-aWpqaWtsamppZ2hkZ2Zoa2xsbG1ubmxqaWptamloZWVnaGhnZ2dnZWRkY2FiX19f
-XFteXlxZVlZXV1ZTVFVTVVVTV1NUV1ZVVVJTVVZXWVdaXFtZWF5cWl1eXF5gYGFl
-ZGVjZGNkZWVlZmZoaWlqZ2RnZWVmaWdmZmhpbGZoZmdoaGdkZGhpZ2dpamdiY2dj
-YmVmZ2loZ2dnZWZlZWVlZGNnaG9saWVnZmhqZmdnZ2loaGhmZGRlZGZmZmloZmZj
-YV9hY2ZlY2RlY2JkZWBiZmNkZWRlZ2dhYGFiY2BhZGNkYF9iY2RgX19kY15gaGZp
-Z2NhY2RlZGhtYmNlamlpZGZoZmVjZmpraGZoaGVmaGZoZmRlY2FhYWJkYmFjZmhr
-aWhoZ2dnZWZlZmdqaGdmZmZlaGhmZWRlZGZnZWJlZGhmZGNjZGRlZWJiZWZjYmVm
-Z2ZmZWpmZWVkZGZnZ2tjZGZiX1xjZGNjYWZiZGVjYWBgX2BgXV9iX19fYF1eYmNj
-Y2BfX19fXV5dXl5aXF5eXV9gYGJbXl1cX15fXFtbXV1dXVhbW1pbWltbWFxaWFhb
-X11cXltcXFpaW1haX2JfXlxdWV5cXl5eX19fXVtdXV9hXl5eYWBeXF1dXl5dXmFj
-YGFiZF9hX1xfX19mb2FhY2RiYF9dXl5hX1xfX2BhYmBdXmFjZWFgYV5dX2FeYGFh
-X15eXl9eX11fXmFmYGFfXl9gXl5gYV9eXl1hXl5dYWBfYGJgX11eXV5gYV9cX1xg
-X15fYmRlY2JhYWFgYmJjYmFhaGNkZ2hpZGRhYGFfXGFgXl5gZGRiYWFhY2FfYWVi
-YF9hY2ZmZmdnZWhlZWFhZWNjZmJfYWFjYmJiYmFgYWJjZWhiYmVlYmJjZGRiY2Nj
-YWFnZ2VkZmNhXGJiYGFjZGhnZGJiZGVkY19iY2FhY2NkZGFjY2RmaGloZWVnZmVi
-ZGVjYmRiX19eX15gYWVnaGRmZmhpZmZiYmNnZ2lnZ2hraWpramtqbGpoaG9pam1q
-a2psa2xtbWttbW9xcG1ubXJuamxvbHBtbm9ramxqbGppZ2dqb4a+09/p7vDz9vj4
-+vp6fHt7fHR9e3p6eHl/fX9+fXt6fn16e3x8e3t+e32Af3t7en2Ag396fHp9f315
-eXx7e3x8fn5+fX5+e3p6e3p9e3t9gH18gICBgYB+fX5+f356fnl8fYB+e3t/gICA
-gICBf3x+goB9f4B/gH9+fH5+f3p+gIF/e3p/f317enl5d3h7e3l4eHd2dnV0cnN1
-dHN1dHZ1c3NxcXVzc3ZydHZ1dHV2eHd2d3h2enl3e3t6dHR0dXd1dXR1dHRydXZ2
-dXVzdHNwcHBwc3FxcnZ3dnZzbnBvbm1ucnFwa25yc3VzcnJxbmltc3NzdXVycHBt
-bnRxcXBzc3NycnJ2dXZ0dHFvcXN1cGtsbG9wbW9wcXV1cW9vb21tbnFxc3Jwb3Jw
-b29wcW5tbW1tbW1rbG1tamhqa29ybm9vb2toZ2hnZ2lpaWdpaWlpam1tbGtoaGdq
-aWhnaWhpZmZpamZpaWpraGhpZ2lraGpsa2pnZ2ppbGhpaWtnaWpqbG5sa2xsampo
-aGlramhnaGVoamlkY2RlYmJiYmNgX1tYWFdaWVdWVlZXVlVXVlJTVFNRVFNTVlZX
-V1VXWFpcXFhYWFtVV11cXVxfXlteY2RgX2BiYmRlZWRkaWpnZGRlZmdjY2RjZ2dp
-Z2ZqaGlpaGdoZmlmZ2ZnZ2ZlZmVkaGVnZGVmaGhnaGZlZGZmZGRiZGhsaWlramdp
-aGhpaWhoamloa2hoZ2lkYmRjZWVkY2NhYmNlZGVjZ2VkZWNkYWFfYmRlZ2dmY2Be
-YGVhZGdlZWdlYGBfYGRkYmNkYWFjZWhkZ2JgYGNjZGVjZWdnZ2dnamlnZmhraGRk
-ZWZjZmNlY2VmaGdiZGhmZWVoaWRmZmlnZmZoZ2ZmZWZlZ2VlZ2RmZ2dlZmhlZGRi
-Y2RnZmNlZmZlZmJjZGVkZGVjZWRiY2VlZWRmaGhnZmNjYWBiZGZmZGZmZWNlY2Nk
-YmBhZGJhYGBhYWNiYGBfXl9fX15eXVxdXl9iYWNeXl9hX11eX19gYF9eXV5cW1pc
-XVpaXWBfXFtdWltcW1pZWldYW1xaW1xZXV1aW1xaW1pZX1xbXl5dWltdXVtdXFxc
-XVtfXl1dYWRjYV9eXl5cW11fXl9eXmBgYWJfZWNhYWFkY15bW1pgY19dXl9gZGBg
-XF9hY2JgX15hYWFiX2BhYWNjYGJfX2BeXV1iZGFfYGBgXWBhYF5eXl9gYV9gXmBf
-XF9gXl5fX15eYF9fXl9hYmBgYF9jYl9eX2FhZWRgY2RkZGFfZGNhYmJjY2djY2Nk
-YmZlYV5gX11eYGNjY2RiYWNiZWtnZGNiZGNjZGVoZWZmY2RgX15hY2FfYmVhYGFi
-Y2ZiYmBgYmJjZGdmZGVmZ2JgYWBiY2JhYGNgXmFkZWJjYGBhYmJiYmJkZ2RjaWNk
-aGNiZGFgX2BgX2NhYWVmZWhqa2hnY2JkZGVjYmJjYV9gYWFlZmdnZmlmZWVjY2Ji
-Y2VnZmdnZmhpaWtqaGlobGpqamxpbG5ubGpsbmtoaWtpaWxscG1pa2psbG9tbm1r
-bGtra2tqZ2VpbWttjsDU3ubs8fP19/j5+X55dnp8f359fn56eXx6fH58fn5+e3p6
-e3p6fHx9e317cnh6e3x5eHt3dXh5dnd4enx7e3x6enp+gXx8eXl7fX19fYeBfoGF
-gYCBf4CAfXt5d3x7fH1/f3x8e318fYOFhISCgIN/gX5/gICBf4KCgH57d3x+gX6B
-gX17fHp6eHp6enp6eXZ2d3V1d3ZycW9wcnNycXBvc3RxcXJ0dXR2d3VycnV5dnZ6
-eHl3eXt5eHVzdHV1dXd3dXRzeHRzc3V1dHV0cnJxb3JycHFwcXJxcHFvb29vbnBx
-dHVxb29ycnR1dXZ0cnBwb3Jxcm9ucG9vbnBycHJ0dHN0c3FzcXNxcXFvcnN2cm1q
-cHBsbW9wcnBxbWxvdHVxcnNzdHJ1cnFubm9ta21vb29uam1rbG5ubGxtbW5vb3Bt
-amhqa2toZmdlaGZlZGJmam5paGlqamhoZ2ZlZmVnZ2hnZWVoZWVoamdpa2lpaWlr
-bWtpZmtnaGdpbm1tbGppamlqbGtpbG1rampqa2ppaGlqaGdiY2JjYV9hX19eW1lY
-WldZVlNWVFVYVVNPT1RSUlZUVlZVWFZXVFRUVVlYV1ZYXmRgW1tbW1teW11fYWRh
-YmRlY2NiZGZnbGtlZWhta2ZmZWJmZ2VnZGVlamdmZ2VlZ2tpZmVmZWVnZmVkZWVk
-Z2dmZWVpZWNlZ2pqaWNnaGpqaGloZmhmaG1nZWhnaGdlZ2lnaGhmZmVnZWRmZ2Rk
-Y2JjYWZlZWZiYmFgY2JpZ2diYmJjY2JhYmRjZWNiXmJkYl9gZGNkYmNiY2JkY2Nk
-ZGNmZGFiZGNlZ2VpaGluaWdpamdkam5paWhlZGRmZ2llZmZnZmhpaGtqamhqaGdo
-aGhmaGdnZ2hnZ2lnaGdnZ2lqZGZmY2dkY2NlZ2JiYmhoYmNlZ2hmYWNkZmdjZGVk
-ZGNlZGVlZ2dqamRlZmhhY2ZnYmJjYWJjYWZhZGRiYWJiY2NiZGFgYmJkZWBdXVpc
-Xl9cX19fYWBiX11cXl9dXV1dYF5dWlhcXl9dXVtdXltaXFtZWVlZW1tbWl1ZWl1c
-W1xgW1lbWltcWlpWWFpbWlxdX2FhYV1bXV9cXGFdX19fXl5eYGFcW1xcX2FhYV9f
-YGJjYmJkY2FiYV5fYmBfYWFfYmBiYWBfYmNiY2BhYWJiYF9fX19fYGJkZGNgX19f
-XmFjYV9gXl5cXWNiXl5dX2BdX2BeXV1gX11eXl1dXV1cXl9hX2BgYWBlY2JmZGNg
-YGBcYGFgYWFhY2JiZGJiY2JhYWFiYWBjZmRiY2NiYmVlZWNhZGNjZGRkYmJiYmBf
-XmFgYWFfY2diXl9hZWVhYWFgY2ZjX2JhYGBhYGFfYmJiZGZjZGJjYmFkZWFjZWRj
-X19eYGJhY2RjZGVhYmNiYGFkYWBgYWZlYmRlZGJjYWBgX2FhYWJjZmdlZWhkZWNf
-YmVjY2VoZ2ZlZ2ZnZ2hnZmdoaGloZmRnZGRhZGdqZWhoa2ppbGloaWtubGpsb21t
-a2tqaGpta25sbG1vbGxsbGxrbG5sb2xsbG1samhrbGpqb26KvtTe5+zw8/b3+Pn5
-foGDgH5+fHp7e3x7eHl6fHp4eXp8e3h3eHp7e3x7d3h7enZ3eHl6eXh4e3x7ent4
-fHp6enp5e31/gH5+fn1/fX5+e3x/foGAgH59fXt8ent7fH5+f4B/gn18fYCCg4N/
-gIF/gIB+gIKDf4GCgIOCfn17fH5/gH59fHx8e3x7en18enl5d3d4dXV0dXJzcHFx
-c3Nyb3FycnJxcnV6enp5dXZzdHZ4d3d4fHl6eXl4d3R0d3NzcnB0dXN2dnNzc3Jz
-cXF0dHJycnNycG9tc3R1c3FwcHB0b29xcm9xcXJyc3R0dHZ1dG9xcnFwdnFxc3Bx
-cHFyb3BwcnJ1c3BvbXBtb3JvcnF0b3Rzbm9ub3Fxb25xb3FucnV3dXNzcnZ0cnJu
-bW1ucXFxcHBzbW9ta3Bua2xubm1qa2xubWxubGtjY2hmZmZnaGhpaW1qa2lpZ2ho
-amhoaGhpZmNkZWVlZ2hna2lqaGhoaGlqbGlnZmlmamxta2prbGtta2doamlqamtt
-a2pra2lsaWpnZmVkY2JhX2BiXl5eXVpYWFdVVFRUVlVXVVVVVVVUV1VSVVVUUlNW
-WVZaV1lVWFxgXllbXV1dWVpZYGVjYWVlZGRiY2NlamZnZmdnZmZoaWVkY2NiZGRl
-Z2ppaGdmZmdnZGRnZmVnaGlpaWVnZGVlZ2NkY2NnY2ZkZGdpaWRoZWlnaWZmaG9t
-ZmdnZGRhY2ZoaGZkZWZkZWNjY2dkZ2NiY2FlaWhkYF9eY2dhYmZkZmdkYmRgYGRi
-YmNkY2BfYmBgY2FiZ2NiZGVkZWNiZGdmZmZkZWJmZWZlYmRoaGVnZ2lqZmVoa2hp
-aGpnZmdnZ2dlZGdpamdpamtrZ2RjaGlpaGlnZ2ZoZWhnZmhpaGdnaGVlY2RmZmdm
-ZGRkZmVkaWtiYWNlZGRmY2VmYmJhZWZkZWVmZmZoZGNlZGRjYV1gY2ZlY2RjY2Ff
-YmRgX2JgXl1cX2FhYmNgYmBiX15gX19dX2BeXV1dX19eXl1cXF1fXV5aXV5cXVtd
-XlxbXF1cXF1bW1dZWVlaXV1aWltZW1lYWFpaXF1aW1paWlpYWVxdX2FgYV5eXV1b
-XV9iYGFfYF9cW19eXFpcXV9hY2RjYGBlY2RiY2FfYWJkYmBfYGJlZF9fX2BkZmVd
-XF9fX2FeXV5fX2FgYGJfX2BgYWJfX2BiYWJhYmJgY2JfYF1cWl1gXV9eXVtaXV9e
-XF9eYGFdXl1dYGBhYWBgYGFhYV9hX2BgYWBgYGBfXmJmZGRkZWNlZ2NgYGJiZWRl
-Y2VjY2RhX2JjZGNjY2VjYGNiXmFiYWNjYGFiYmRjYl9fX19hYGJjZWNiYWNgYmFf
-YV5fYWBgYmFkZWNjYV9fX2JjYmFhY2FfYmFkYWRhX2JiYmFfYGNkZGRiZGRhYmNm
-ZmZlYmFeX2BiYWJfYGNkZGhmZWVlZWRiZm5oZ2VmZmZpaWhmZmZmZ2lnaGloaGdq
-Z2hjZWNkaGloZ2NmZmdpaWlra2xqbG5vcHFvb2xsa2xubG1ubW9ubWppaGhqa25t
-bmpsaWhrbG1vcIi/0+Dm7PHy9Pf3+fl8foN+gX98ent9gn96en6AfXl7eXl4enx4
-eH17e3h1dHd4eXt6end7e3h3eXl9f4J+fXt6e3x9fHt/fn17fHx+gH58fnt7fX5/
-fn5/e3x9f4GEf36AfXx/goGAgoKAgYB/gYGBgIGEgYKAgIGBgoF+fX58fX19fH98
-fH5+fHt9e3l7fXl4eHZ2cnBxcXBxcXFxdXVxcnBucHF0dXd1d3h5eXh0c3V2eHl5
-e3Z2eHV2eXVxdXR1d3V0dHR1dXJxcXBxcHFzc3N1cXJzcnJvcHJyc3JxcHJzcm5y
-cHBxc3Jxc3VzcHFzcHFwbm9vcHRzb3FxcnBwcXFwcHFxcG9scG1ucnBucW9ycXJw
-cHB0cnFxc3BycHBwcW9vcW9xcG5ub29vcXV0cnFydHRwbGtqa2tub3FraWtpbWxq
-amtsa2xtaGtqa2Zoamtqamxsamlqa2pqa2hoZmhpZmlnZGVoaWdpaGZmamlpamlp
-Z2hpaWpqaWlsa2tqbGlqaWdnaWltbG1samxrbGtoZ2hlZGZkYGFgYGNfYF5hXVZV
-VFNXVlNVUlZWVlRTVVVXVFJTVVdWU1VWVlZWV1hbWVxdW1hZWV1aW19fYF9kZWlm
-ZmNiZGxqZ2ZlZ2lnZ2dmaWdkZWViZmRlaWpoZ2dmYmJkY2dmZWZjZmdpaGdmZWVk
-ZmZmZGZnaWVoZ2ZpaGVoaGdnZGhoamxpbGloZ2pqamdoZ2dkY2NiZGdlZGRjYWJi
-YWNkY2VlZ2FeXmRjZWNhZWJcYmNmZGRnZWNjYmNkYmFjZmVlYmRnZmZnZmNkYWNk
-Y2NlZmRjYGVkZmRkZWZmZmdoamlmZ2VoaWZmaWpnZGRmZ2pkY2dpampnZmpmZmhq
-aGZqaGVmZ2hqa2hlZmRlZGVjYmJlZGFfX2FkY2JlYWFfYWJnZ2RkZmJkZGRkZmZk
-ZWVraGVkYWNjZmVlYWVlZWZjYGFhY2RgYWJgXWBgX19dXV1hYmJfX2BgX19hXmBg
-YFtfXV5hX15dXl9dW15dXVxbW1xbXFpXWltbWlteXlpaXFxaWVtaWVxZWldWWFla
-WF9aV1VZWlxeXV1cWl9dYV5eX11cXF1eX15cYGBgX11cXl9eXWBhYmJhYWJjYGFh
-ZGRkZmNjY2NiYWBeYmFiX15eY2FhY2BhYF5hYWFeXV9hYV9fYWJiYGBcXF9gX2Bi
-YGFgX19fYF5bXV9gYF5fYGBgX11dXF9jY2JgYmJiYWFeXl9hX1xfX2JgYWFhXWBg
-X11eX2BfYmNhXl9gZGVlZmdhYWJgYmBfYF5fYWJjYWViY2VlYmNkZGJhYV9hY2Bg
-YGRkZGNhYF5fYWBkY2RkY2FgYmVgX15eYmFhYmNhYGFjZmJiZGRiYWNlY2NjYWNm
-YWBkY2VkYGFiYGNiYWFkZmRlZGNjY2RkY2FfX2NiY2ZlZWRjZGNkZGNhY2RkZWJk
-ZWRmZ2hnZGVjZmhmYmVoaGdmZWdkZWZmZmRnamlnaGlnZ2hnamhqa29wb2xtbWts
-cG9va2tua25ram1rbW1ucGpnbG1raWhsb2xsbm1vbW1th8DU3ujt8PP09vj4+X19
-f3x/fnx8fX+FgIF9e3x+e3t7enh5fH56enx6enl4eXt9f4B+fnx/fnx9e31+f3t8
-fnt9e3p7fHx8gH96eH5+f4B/fn1+fX1/gIN+e3+FhIaDgH59fn6AgH19f3+BfYGA
-gYGBgH+AgYKAgYJ+gH97f3t7enp7e317fH56e3p5fHp5e3h2dnp2c3JycnNxbm1x
-c3RxcHJyc3V3dXN0eHd3dnd0d3h4eHx5d3h3dnh2d3Z1dXRycHByc3R0c3Rzc3Jy
-c3J2cnBycXFzcW1vcHJzdXRzcnNzcHBvb3JwcHJycHFyb3Bwbmxub29vb3JxcXBu
-a3Byb3Fvb3FwcG1sbW9ucHN2dXJxcXZ0cnFzcXJ0dnV2eXFvcW5vbG1tb29ycHNy
-cG9zdHJvcm9sa2prb3NubW5sa21ubmttbG1qaGlramhnampraW1rampqampsbW5u
-bGpmZWRnaWdmZmdpamtpZ2hoZ2VkZmhsbGppaWttamxpaGlqaWlrampqa2tsbWtp
-bWxta2dmZmVmZ2hjZGJeX2BcWVpeXVhXV1dUU1BRUVFTVllZV1JUVVRTUlVTU1hV
-VldaW1laXVxeWlhbWF1eWlldXmBmZWZlaGJkaWpraG5uZWZoZmFkZWZnaWdnZmVn
-aGZmZmNhZGNjY2NjYmVlY2VnZmRjY2FoZGBiY2ZnaWVkaGlnaGdnZmhnaWhpZmVn
-bGlna2traWhnZGNjZmdmZWhoZWJgYGRlZWVkZWNkYV9iYmRkYmFjYmNjaGRiZGJi
-Y2JkZmZkZmRkZ2RjZGVnZ2NlY2JkZGNjZWdmZGRjZWViYmNjY2JjY2VnaGhpZ2Zn
-aWdpamloaWRnZ2lrZ2dlZ2xqaWhoZmZlZmhmYmRnZWVlZWZkZGZlZmZlaGVjZGJh
-Y2VlaGVkZGJgYWNlaGdlZWJjZGRjZGNjY2VjYmRiY2VlZGFiZGRiY2NhYWJkZWVi
-YGJhX2BjYl9eXl5fX15hYGRkZmNmZWRiYl5eXl1fYGFgXl5dWltcXVxbWlxaWFpc
-X11bW1pdXl5eXVxaWFpZWVtaWldXWVlaWFdYWVlaXF1fX15gW1tdYF9dXF5gYF9f
-YWJeXl1dXlxeYGBiY2BdX15dXV9hYWFhY2NlY2NgYGJjYWNkYF1bXVpfYGNkY2Fi
-X2FhYWFlYF9eYF5gY15fX19iXl1fYWJkZGFfYV5gX2BfYmFhYV9gYGJiYWFjYmRj
-YGFhYGFgX19eYGFiYV9gYWFgY2JhYGJiYV9eYGFiYF9iYWFfYWNiYmVjZWJfXl1e
-YGJhZmVhYmJiYWJjYWBhYmJjYmBkYWNiYWJjX2BhYWRkZGVjYmFiYmJiYWBfZGFh
-ZWRkZ2RkZGNfYWRjYWFiYmJgYGBhYGNiX11hY2BgYmJjYWRiX2BlY2JjZWVjY2Jh
-YF9gYV9iYmVhYGJjYmBjZGViY2RlZmZkYmFmZGRmZWhmZ2ZpZWVlZ2dnZWVlZmRo
-Z2psa2lsaWpramppamxrbGxsbm1qamtsbW5tbWttbWppaWtrbGlpb3BtbGlra2pq
-b2xsaGtwbG2MwNTe5+zw8vT19vj3fn9+gn5/fn19fX2Gfn9+fn5+fXt6eXx8f355
-eXx8d3h7fX16d3t9e36DgHx/gHt6eXt9fn57eHh7fX58fXt5fXx9ent8fX18fn+B
-fYF7fYCAfYGCfX5+fnx9fXx+fX5+gYB8gIGBgX9/goJ/gIB/gIGBgH59fH19fX59
-fnp3eHh4eHh6eHd2dXhwcXJ0dXN0cHJzcG5wb25wcHFyc3N1dnh6eHV1dnd3eXh3
-dnZ2dHR0dnh1dnFzc3R2cnVyc3Nyc3NzdHJzcHNycHBzcXFwcnRycHBwcHBwbnBv
-bG5wcXBxcXFxcXBxcG9vbW9ub3F0cHJ0dHRxcHBvcG9ub3NvcXJxbXJub3BydHV1
-cnN2d3ZzcnRycXJwb29zcnJxcnJxb25vcXJzcnpycXBwbW5wcXdzbG1sbmxsa2tq
-aGlra2lpZWdsa2hnaGtpamhqa2hqbW5saWRjZ2lnaWlraWdpaWpqa2lnZGZra2ts
-bm1ram5tbW1taWZnaGtqbGtqaGtsaWpqamppaWpqaWhkZmVkYGFeXltbWlhbW1ZW
-VVZYV1VXUlVUVVZWVFdXUlNSUVRVWFlYWVldX15cW11gXVpcXVtdYGRhYmVkY2Nj
-ZWZjZGZoaWpqZmZlY2RmZmdpZ2VlZmVmaGZnYmlkZWVlZGdoaWVnY2NlZmNiYGRl
-ZmJiZmdnZGdrbG1qaGdpZmhna2ZlZmZiZWZmZ2hsZmVpaGVnZWdoZWZjYGJjZGdj
-YmJiY2RkZmNlYmNkY2RjZWZkZ2NjY2VjYWVmZmVjY2RkYF9iYmRkYmNiYWRkYmZk
-ZGJiY2RmZWJkYWRiY2NiZmRmZ2lpaWlsaWVlZ2ZmZ2hoZmZkZ2hoamxpaWlmZmRl
-ZmRmaGZoZmhnZ2ZmZmdnZmVmZWBjZGJiZWRmZmVkYV5fYmVmY2RmZWRkY2NjZWNl
-ZWJjY2VjYGJhYmFmZGVjZGNjYmJjYWBfYWFdXFxgYF5fYGJeYWBiZGNhY2VlYmFf
-Xl5dXV5hYWBhXVtdWlxcXlpdWldYW1xdXl9dWltcXV9eXltaWVhcW1pbXFpZWlta
-WlpbWFxbW15eXF1eW1tcXV5cW1xbXl5iYFxdXl5cXl1bYWBeYGFfX2BdYmFgX19f
-YF1fYWJgY2FeXV1bYWReXVxgYWNlYl5fX19eYGNfXmFiXl1hX19dX11fYWFhX2Fi
-ZmRiYWFfX2BiYmJiZF9gX2FiY2FhYl5gX19hYWFiYWJeX19hY2RjY2RiY2BfYWNj
-X2BfYmNiYWJiYF5eXl5hY2RjZWNfYWJkYmNgZGNlYGBhYmFiZGNiY2NgYV5fYGNi
-YGFgYWRlZ2ZjYV9fX2FkZ2diYGBeYWRlZGZkZGJhYWFhYWNhYGNhXV9eY2RjYGBf
-YGBjZGJjZGViY2FgYmFiZWZnZGZfYGNdYGNgYmBiY2ViY2JjaGdkZGNkZmdoZWRj
-YmJkZGJjZGRiY2dmZGdoa2xqaWVkZmZoZ2dpZWVnamhnZ2hqam1tcGtraWdpZWdr
-amtrbWtta21ra2xtdnNzcGpoaGpsbXFwbWlnaGlwco/C097m7O/y8/X29veAf35+
-gH5/fHt7fH6CeXx+fH9+fHt9fXt6enx1ent/e319en18fXp9gICCgIJ9fXx8enl9
-fXt5fX16en1/fHp8e3h3e4F/fX5+fIGDfoJ/fn5+foCDgH6Bfnp9gH16fn9+gn9/
-gYCAgIKCg4F+gH9/g4CAfnx8e318f3x7e3l3enZ3d3h4dXR0dnR0dHFwc3R6dnJy
-dHRycG9zc3ZxcHV0d3VzdHN2d3V3eHh5eHR0c3N0dnd0dXN2dHZ1c3NzdXVycXJz
-dXNzdG9tcG5ycXJvcXBzcG9wb29wcnFwb3FydHNycnRxcXRzc3JycHNzcHBub3J0
-dHR0c3JvbGxub29wc3J2b29wcnZycXBycXJ0dnRwcG9ubW5wc3RzcHJyb29vcnJy
-cnJzdG9zcG9vcnBvbnFwbW1tbW1ubGlpamxubGttaGVoaWppaWtnaGhpamloZmdq
-bGhlaWtobGxqZ2dnaWlqaGloaGtqamloampnZmlramppaWdpamtrbWppamtramln
-aWdoaGdpZmdnZWNhX15cYGBaWllYWVlaV1hZV1RTUVRTVlhZXFZUU1ZVWFZXWFxc
-W1hcXF1eXF1cX1xaX2BkYmViYGVnZmdmZmRlZGdnaWhlY2JlZmRhY2VmaGlnYmRn
-ZGNqaGZmaGdoaGhoaGdkZGJhZGRiY2Npa2ZmZ2hqaGZoa2pnaWhsa2ppZmViZWZm
-Z2hnaGpmZWVkZGdkZGNjZmRoZ2VkZGJhYmNnZmVmZmRjZGNjYmFkY2JmaGhmZWRl
-YWFhZWRoZmNhYmNkZWZlYmFkZWdlaGZlYWRlZmVjY2VlZ2RmY2doZmVkZWVnaGdn
-ZGNlZmVmZGZoZ2NjZ2hpZ2ZpaGdlZmZnaWdpZ2huaWdlZWRkZWRlYmVlYV9fYWFl
-ZGNkYmNhY19hYmNnZWVkZmdkZWNla2tjZGRjZGVjY2BiZWRkY2BiYWNhYWFfYGJh
-ZWJjYGJkZGFgYWNgY2RiX2JiYV9eX1xfYF1eXV1eYV5gXl5dXVxcX19eW1haWlpZ
-W1tcXFxaXlxaXVtcWFZaWVpaXFhYXGRcW15dXVpZXWBgXmBeXlxdXV1bW2FjZWBc
-XV1bXFxcX2BiYV9hX2JhYmRgXV1eX2BcXmJhY2NiYmBcX2BiY2FfXWBfXmBfXmBe
-X2JfYGNhX2FiYmBfXV9eXmBfX2NgYGFlYmFfYF9gYWBhYWRjX2BiYmJlZF9fXl9d
-XWBhYWRlY2BgXmBgYmRjYmRkZGZjYWRlX2BgYmNjYWFfXV1eXV9iY2RiYmNlZWZi
-YWFjYV5eXV5eY2NjZmRiYWFhXFtgYGBhYGFiZGNjZGNfYGFhYWVqZmFgX19iY2Jj
-ZmRiZGRgYWFgX2JkYWNkY2JlYWFgYmVnZGJkYV9gYWFiYWFiYmBhYmJjY2VkYGJi
-Y2ZjYmRnZmViZGNhYmRlaGRgY2poZWljZmNiY2RiZGVjZWVnamdoaGlqaGllZGRq
-ZmNjZ2lrbWhpZ2Zrbm1ubWxrampwbW1vcG1tbGltbWxoaWlsbWtqbmhoZGdqa2xu
-aWloa2tvjMPU3eXr7/H09PT394F/fHx9fXx+e3t9fX98eHyBgHx7fn1+fHx9e314
-e3+AfX58fH14eXp8goGBgIB/e3t4d3t9foB/f4KBgH5+gHx7ent9gH98d3x+f4WC
-gHp6fH59foB+gYKDf3x+fX6AhYKCgYB/gYF/gYJ/gICBgIKDg4F+fHx9fX97en58
-ent3d3h2dXJycHJycnByc3N1d3V1dXR1eHZ0cnBydXNzdHZ0dnd3eHV1dXZ1dXZ2
-dnh1dXZ3d3d2eHZ3eXV2dHZzdXV1dnNydXRydHBwcW9vb25wb3J0cHF1d3d3dHNy
-c3J0cXF1dXJvb29wc3FxcnBycHFycnBvcXNzc29sb3JzcnR4cHJycHF1dXRwbW1x
-c3Nyc25pb25zcnNvcG5tb3Bubm9xdHRzcHBsb3Bwbm1wcW9wb3Bva2xvbW1qa2tt
-bW1ra2tpZWdnaGtubGtraWdmaWZnZ2pva2pqaGtqamhpaGhpaWxqbWxqaGZoZ2Zn
-Z2hnaWlnaGlnamppam1tbm1uaGloa2loa2ppZmdnZGZjY2FfXV1eXFtbW1lXV1hW
-VVZWVlVXU1hYWFhVWFpWVldYVlVVVldXV1lZV1lbXmFjYWBdXmFhYmhiYGJlZWZi
-Z2ZnZWRkaGhmZ2ZjZWtnZGNkZWRlZ2ZoZWNkaGlmaGVpZ2ZmZ2RjZGJkZWxqZWhs
-amlnZWxrZmloaWpra2lraWtpaGdlZ2doaGhoZmRlZmVnaGVmamhjY2FkZmVjZGFj
-Y2VlZWRjZmRiX19jYWJhYmJkZWVnZmdoZGRmZGVnZGNiY2ZiZGdjZGloZGNlY2Ni
-Y2NhYmVnZmZlZmlqa2lpZGFmZGZoZ2ZnZ2ZpaGhpZGRmZWdmaGdpaWtraWtqbGpq
-amdnZ2ZnZmNmZmVmZWVjYmJkY2VlY2FmYF9hY2hpaGNmYmZnZWZmZ2diZGRlZ2Nj
-ZWdkZWRhZmRiY2NfY2VjY2NjYmFgXlxdX19gYGJhXVteY2JiX19jYWFgYWNeY2Fg
-YFxbW1xeXVtbXGBiX2FiXl9dXV1fXVtcWltYWVpZWFhaWVxeVlNSVVhYWVlZXFtb
-Wl5aWVxbXV9fX1xaXFxbV1hbXV9fXl1dXWBhX11gYWJhYWNkXmFfYV9gYVxdYmNk
-YmFhYmFfYF5dXWBiZWViX19fWl9fYV5gYGJhY2RjYGJgX19hYmFiYmFgX2BfX2Fh
-YWJhYWBfYmJiYF9fYWFhYWNhX19gX2BeYWBjZGNiZGRkY2JeX2NjY2RkY2JjYmZj
-YGJhYmJgZGNgX2FfY2BiYWJkZGRmYGNlYmFeYV9gX11fY2RkY2VjZWJhYl9gY2Bg
-ZWRkYWFhX2FjY2JhYl1gYl9eYWJjZGRjZGVjYmFgYmViZmRhY2JjaGRhZGFkZGRj
-ZGNjYWRiXl5eYGFjYmBjZGVkYl9jZ2lnZGVpZGJjZmRkYmRkZWRjZGRkZGlmZmVj
-ZWFhYmRlZGJkZGRlaGZnaWlpZ2hnaWdoY2Rpa2trbWtwamlsamppa21ycm9pamlr
-bG1rbm1taWlpbnFvbWtrcG9oZmdqbW1tcG1raHCPwtPd5eru8fPy9PX2fH1/fHl7
-fXx+f319fHh5fHp8fXx9gH9+f36BgoB7e3x7e3t6eXt/fXx9fX17e3p6fX17e3t8
-f399f3+AfoCAfnt8fH58fYB+fH1+foCAfXt7fnt6g4iDgYF+fH5/gIGCgYGAgoKB
-f4GBgX6AgIB/gIKCf399fHt8gICAe359fHp3dXV1c29ycXBucHJ1dnRzcnNzc3R0
-dXR0dXR1dXd3eHZ1dHV0dHVzdXZ2dHNzc3NydHV1dnZ3eHd2dHRydHV0dnh2dXJz
-c3JydHJxcG5tcHFwcHBycnFzc3V0cXFxcm1vcnJvcnFvbm5wc3Jxbm9ucG9wb3Bw
-cXJycXBxcnN0cW9vb3BvbXBxcnNzb25ubm9wbm5vcnNycXJvb25wbnBxcXJzdnV2
-cm5ub25vcXFxcnFycG9tb3FvbWtsamtraGtoamhramlra2hoa2tuamdjZmdoaWtt
-bGhmamlrbGtqZ2tramtsbWxqZ2doaWprbWpkZmZoa2lramtqa2hpaWlra21saWZp
-aGhpZWRkY2ZkZGJdXFtdXV1ZWFhXVltYV1tXVVhUVlRSVFVVVldYV1VUV1daWFla
-W1pcXFxdXl9iYmBdYWNhZGRkZGdnZmlmY2JfYWFkZGZmZGVjZWRkZGRkZWZlZmdl
-ZWloZmVjZWNjZWZkZWRiY2FiZGZnaWluaWdmZmVnaWdpamtoa2hqamlobWxubGxq
-ZmRjYWVlZmhnZWVlZGRoZ2VlZGdpZmRiZWVlZWVjYWFiYWVmYWVlYmFiZWZkZGJm
-aGVlYmFhYmdmY2dmZ2NkZWdmZGFhYmNkZWdlY2VmaGhoamlqZ2dtaGdoZmdnZ2Vl
-Z2hpaGVnZWRraWlmaWtoaWpramdnZ2lmZ2ZmZ2dlZmRkZWZmZGVkYWBlZWlmZGZj
-ZGNiZWZmZGRlY2VjZGZlZGJjY2RjY19fY2FgYV9dYGNjYmJiYWBhYmFhX19dYGFi
-Yl9dW1xdX19hYWBhYl5gYWFhYGFiY2RgXF1eXFpbWlxeYGJdXmFdW11dX11cXVpd
-XVtbWVlZWFhdWlxdWldVWFlYV1dWW1xbW1xdW15dX2FgYF5eWldYXl5dYFxcYF5e
-Xl9kZGFiYGNjX2FiXFxfX11kY19eX19hYmBhYF9fYF5eYmVmaWZhXl9hYmFiYV9f
-YmRiYGBdYGBfYF9gXmBgYl5dYGFhYWJjZGFgYF5gYWFgYGFiYGFfXF9gX2BfXl5e
-X19jY2NhY2NjX2BeW11gYmJjYGRlZWVkY2ViYmRjZV9gY2ZiYmBgYmJiY2FgYWFh
-YGFgYGFjYmFiZGRiYWVkZWdlZWJhY2ZlY2JgYGFiXl1hYmViYF9kYF9eXmBjYmFk
-ZmNhY2FiZ2lhY2BhYmRiY2dkY2BgYmFiYmNnZWNgZmRgYGNiY2JiYWNfYWFiZGNj
-Y15fX2NhYGRmZmRjYmFjaGRjZGZnaGdnZ2ZmZ2ZmaGdlZGNmZmhoamtqaWdmZ2Zo
-ZmhpamtpamloaWdram1paW1qbXBqaWpubGtramlvcXJybGxsbGpub29qaGlqbXFu
-bW5qbZfC1d3m6+7x8u309vZ8fX+Bf316fX59fn18enl6e3t6fHx8eoF/goGBgHt9
-fXx7e3yCf3x9f4B9ent8fX6Af358e359fHp7foCAgIOBf39+f39/gIB9fX9/fH1/
-fXp/gH9+gICBgICCgYOCgH5+foB/e35/fYB+gIKDfn2Af318foF7e3yAfn5/fHt8
-ent7d3d3dnNwc3t2cnNzc3Fyc3V1cnFzdHZ1dHd6e3h3eHdzc3V2eHd1d3Z2dXVx
-dHJyc3Vzc3R0dXN1cXNyc3NzdHV0dHJzcnByc3NycnF0dnJvcHFwcHJ1d3d3cXBz
-c3FwcXBydHFycnJ0cXN0cXFwc3VxcXJycnV0dXRzc3Bwb3FxcnFvcHBvcnRxbW1v
-bnB0dnFzcXFubm9wcXFvcHF0d3Nzd3hydHNvcXJzc3Fxb2xsa3Btbm5tamtpaGlq
-aWhmaWxraGdnaGpram1pZGRoaGhoZ2dmamtsbWtsa2dmZ2lsaWtpa2xoZmRoamlo
-ampjZ2dobGhpa2xrbW9qa21tb2xqaWhqaGhnZGNiYmRgYF5bXF1cWVlYWFhaWFhY
-V1pYWllXV1dUVllcW2RaVlZWV1dZWlhZWFlcXFxdXGFjXlxfYWFjZ2dlY2JmZmZn
-ZmhnZ2dpZmZjY2VkYWRoZmVoZ2JhZGppaGVkZ2loZGVmZWVnZmNhY2RlZmZoZ2to
-ZGVoaGZpaGdoaWlmZmdmZmxta2tpaGhlZWhoZmVnZWdpaGZkZGZpZmRkZGNnaGZh
-Ymp6ZGFfX2JlY2NnY2VmY2JmZGdnZGVlZGdnZmRiZWhkYmNkZWBiYmJjYmFjZWVp
-aGhnaGdjZ2ZkZ2hnZWVnZmdqaGRpY2JmaGhpZmNnZ2dnaWdnaWpqZ2ppZmdmZ2Vm
-Z2VnZ2hpZmViZGdpZWdqamZnaGZjYmZjZGRiZ2RmY2ZjZGVkYmNjZWNlZGNgX2Bf
-XmFhYl9gX2FhY2BgYGBhYWFhY2NlZWNiX1xdXmJhYWBgYl9dYl9gYFxjZmJhYV9f
-YGBeXVxbWFlcW19eXmBdXF1cW11eWlxbXVtaVVlZWFldXFxaWVlaWVhZW15dW1xb
-XF9eXFxdX2BhX11aWl5gXl9cW1teX1ldYGFhX19iYGBeYGBeXFlaWlteXl5fXVxe
-YF5hY2FiYmRgYWVkYmNiYmFfYl9fYWFeYl9eYmNhYWBfXV9hXVxdX2BgYF9dXV5e
-YGFeXF1gZGRjYmBgX2BhYGBiYGBiY2BeW1tdYF9dYGFkYWFlYmBgYWFhYWJiZWNk
-ZmZlZGViYWFfY2NiYGBhYmFhZWVhX2FgYmJjZWVnYWNiY2RiYWNeYmNiY2RkZWNi
-X2BhYmJiYGBiZmJiY19fZWVlX2BhYmJpamdlYmFjYmBgYWFiYWRmZ2NjY2JkY2Ji
-Y2NhY2FkZGBhYmJhY2FhYGBkZGRiZGVkZGJhYWJnZWVjZmNgYGFgYGJlZWRkZWZk
-ZmZnZ2hlY2VkZGRlaG5qaGtlY2RmZmhpZmloZ2dmZ2loaGhmaWloaW5tbWxsaWpq
-a2ttbW9vbWhpbHBram1sbmpra2xra2lwbHJxmMDT3eXr7u/y8/X19Xx9gIF/f399
-fnt6fHx7ent6e3t7fHl6fHp7gHt9fnx7fHt+f318e31+fXx7e31+fX5+f4B/f35/
-fHx8fXp8fH1/fHt+fX59f319gYOBgH97fH18en2Bfnp/g4SEg4WEgn5+fIGDgn5+
-foGAgIF+fn58e3p8fHx+f319gH57enh7eXl5d3V3d3VydXRycHFxcHBxcHBwcnNx
-bnF1d3d2c3JwcXNxdnZ2eXl3eXd3dW9zc3JzdHJzcnJycnR5dHV2c3NycnFzdHFx
-dXRzcXFyeXZ2dnNwcnBxcHJxc3V1cXJyb3JxcHN1dHR1dXNzc3J0dHNxcHJ1cnJz
-dXZ4dHBvcHBwcHBvbXFzb29xc3JtcHFyc3RxcnFyc3RydnVzc3VydHZzdHZzc3Rx
-b25xcnJvcG9tbm1rZ2tsa2tvbGlramlpa2pnaGZoa2ZoaWlqaWhpa2psaGZoZ2Zq
-bW1qaGlpaG1rampqaGhnaGhna2xmZ2lpZ2lnaWlnaWdpamlsa2hqbG9wbm1samlo
-Z2hmYmNjYV9fX11cW1pdWVhXWFhWU1VWV1dWVlZTU1JTU1ZdWFhWWlhUV1dYWFlZ
-W1lcXlxjX11eYF5cYWBhYmJkY2NgZmdnaWdpZ2ZmZ2dnZWNkZ2hnaWVlZGZnZ2hm
-ZWVpampnZGJjZWRkZmNmZGlnaGdoam1oZWZlamVkampsaWZmZ2hta2hpaGpqaGhn
-Z2lnZ2dkaGhkZWdqZmZlY2ZiZWRjYl5gYF1bYWRkZWRkYmNlZWRjZmRkZmdlZmJl
-Y2ZmZ2hoZ2NlY2RlY2VkY2dnZWRmZ2draWhnaWVkY2ZmZmlraWZkZWdoaGZpaGZm
-ZmpsaWdkaGZmZGVnZmVlZ2ZnaGZmaWhnamtnZmlpaWVjYmRoZ2ZoamhlZmZkYWFj
-Y2JjZGNmZmVlZmRiYWJkZGZlZGJfX11YXmNgY2JfY2JgXmBfXV5hY2RkYmBfYGFi
-YV5gYWJiYmFgYmFgYF9gYWBgYmJgYF5cXV5dW11fXVtaXVtaXV5dYV1dYF9eXV1c
-XV1dXVteXVpbW1pcXV5bWFlZWVpcW15cXF1bW11eXVpaWlheXlxfXl1dXVpfYl1c
-YGBfYF5fYWBcX1tcYF5eXmBiYl5eYWFeX19gYWJiYGJiYmFiYWFhYWFfYmViXlxg
-YGJhYGJjYV5gX15cXF5cXlxcXFlZYGFhX11dW15hY2NiYF5cYGBeXl5gYGRkYV5d
-XmBhYl9jYmBgZWZjYGBgX2BgYWFfX15hZWNiYF9jZGFgYGBfX2BhZGFlY2JgYGRh
-YmFhYGFjZGRhYmFiYmRhZGJiYV9eZWRgYmJgYmNkYmNkZWVkY2FmYGBfXl9dXGBk
-ZGBiYV9eYmJjYGNkYmRjY2NkYmBgYGJeXmJkZmBeYWNgYmNgYWFhZWdlZF5eY2Rk
-YmJkXmVlZGVjY2JiY2NjY2NkZWNjZ2VlZWRjZWdnZ2ZnZmhlZmZnZ2hkZmZna2pr
-ZmhnaWhpaGdoaGtpamdpbW1ta21rbWxvbGttbG1vdG5sbGxsbW5samlqa2xqbWpv
-cHKZvtPc5ert7/Hx8/X1f36BgYF/f399e3x+fXt8e3l4fn54d3h8fH59gHl6fn2D
-gH58e3h7fX99fHx6fXx8fX59f4CCgX96eH1+fn56fnx8enp+f3x8hX9/fX19fYGC
-e31/fn5/f4CEgoODgYCAgoKBgISBgIB+gH9+fYF/fn5/fn14fH58fH5+fnx7ent5
-d3h3dnd4dnV0cXBvcXJvb3BvbW1vb3FucXd0cW9xdHV2dnJzd3Z1dXV1c3N5eXV2
-d3d1dHR2dXFwc3N0cHBwcXFzc3VzcHJzd3V0dXR1dXV1dnNwcXNycHJzc3d1cG9v
-cHJxdXBvcHJydHFwc3FydHFzcnJzcnFycW9wcHNzcXFxcm9ycXBwcXJydHRzc3Vy
-dHBycXF0d3h2dnh1d3Z0cndzcnFxcXNucXBxbXJvbnBvbW9wbGtoaWtpamprbWhq
-a2ptb2ppZ2dnaWhqa25ta2loaGloaWlqbGlnaWtra2xta2hoaWdnZmlqampmaGhp
-cG1rbW1sa2dqbWpqa21ra2xta2xsa2lqZmRmYmNhYWBdXVtbXVxaWFlYWFlVVFZZ
-VlVXWVdRVFBSUVBQU1NWWVZZWFZZWllcXltaXVxdX15fYmNhY2VkY2JlZGJjZ2hm
-aWZrZmVnaWhmZmZjYmRhYmlnZWdmZWRiY2NlZWVlZ2ZhY2NnaGNmZmVjZWRpaWdo
-Z2hna2hnaGpqZ2lnZ2dka2tnaWdkZ2dnZ2ZnZmhlZmhnZGRjZWRiYWJhYWFjYWJh
-X2ZpamtmZWNiYmBhYWJiZGZmbGhnZmZlZ2ZkZmdnZGdkZWdkZGNkYmNoZmJkYmJk
-ZmRqZmNjYmZlZmloZmVjZGRlYmNjY2NmZmRkaWhjZmVmZWZoaGRlaGdnZWZnaGdm
-aGdmZ2RmaWdnaGhmZ2VkZWZmZGRjY2RkY2JhY2NkZGVkZWNjY2RiYmVlYF9fYGdh
-X2NjYmNiZGRjZWdhYmJjY2FjY19fYmRkYV9gYWFjYmNjYmFgYWFhXlxgYGFiYF9e
-XmBhYWNgXlxaW1xaXl1eYF9cXWBfXlpZWlhYXV5dW1peX19eX15ZWVtdX11dWltY
-W1tcXV5bWFdXW15fW1xfXl5XWVtiYGBgX15fXl5jYmJgXV5bXFxeXmBhYF9fXl5f
-YGBfXVxaW2FiY2FkZmRjYmFfYF5iYF1iYWNkYWJfYlpdYGFgX15cXFxdXGBfX11e
-YV9fXV5fY2FjYWJfXmFhZGFgYWFhZGRiYl9fYGBgYmFhYWBfX2BiZWFhYWFeXWFj
-ZWJhX2FeYWFdX19fYGNgYWBeYWJiY2JjZWFjZGNiYWVkYl9fYmJiZWVhX2FgY2Vi
-Y2JkZmRhX2JkY2JlZWphXFxcYF5fYWFiYF9gXmBhYWFjZ2VkYmFiYWFiYWJjZGdm
-Y2JfYWNjZmZkYWRjX2RlZGVlZmZmZWZmZmZmYmJhYmJkY2ZnZWZkY2BiYmZlaWhn
-ZGJmZWlmZmhlZmVkZGRmZ2ZmZmdnZ2ZnZWZmaGhoZmZoZ2hpbGxsa2xsaGlna3Bu
-a2pucGxvb3Btbm5wcG51b25oaWpoZmlpcJ+90tzk6ezv8fLy9PSAfnt9foB+fX5/
-gIF/fX18gX1+fX1/f3x9goN8enx7enl5eXx/gn6BgYKCfnx8fXx+fn59fH1+fXx4
-eXuAf318fYGCfoB/fnx8fXt9gYGBgn+EgIR/foCAgICCgX6AgoCAg4SBgYKBgYGB
-fX18foKDgX9+fn5/fXx6fIN8eXp7eXp6enp4dHJycnJybnJwcXFwb29ycG9ucXZy
-b3BwdHN0dnl3dnVzc3NzdHV1eHd2d3RzeHZycXN0d3Zzc3JycnJ0c3B0dHNzdHV0
-dHRzc3Nzc3Z2dHJzcG90cnNyc3J0dHFxcnJvcHFzc3JxcHJzcHFsanV2dHR0c21s
-b29zc3N0cXJzdXVzc3N3dHN0d3VzcnJzdXRxcXJvcXFydHV4d3Z0c3NwcXJycXRx
-cXFvbm9tb3Buc3JtbGtsbG5sbGxpa2xtbG5saWlpaGdoam9raGhoZ2ZoZ2Zra2tp
-a2trbGtqaWxsamxraWZmY2doaGZpa29ubWxsamtsbGpram1tbm5raWtpaGppaGlm
-ZGVjY2FhYWBeXVlaWl5bV1dWWVpVVFVWV1NUVFZTVFRSVFZUV1JTVVhXWVpbXFlZ
-WFlZW1tcXl9fX2BfYGJoZ2hqaWVnaWdmaGZqbWdoZmdoZWRjZWhmZWRoaGhkZmdm
-YWNlZ2ZjY2VjZ2dpbGhnaGZoZ2Zna2dmaGVoa2tpaWpscGpsaWpmaGlraWRiYmNm
-aGdpZmVmaGVlZWJkZmRlY2NiY2JqYWBhYWNnZmRkYWFdYGJiZGNmaWxqamZoZmdk
-ZWdlZGloZmZpaWZmZ2pmZmRkZWZoZGNjZmZmZGRjYmVmZWViZmZmZmNiZGZnZWRn
-ZmZoZ2dnZ2hnaGlnaGpraGdpaGloaGdnaGdoZ2VlampoZGVlaWdlZ2ZjY2JmZWVj
-YmJjZGdoaGNjZGFgYWJhY2RmaGNfXV9gZGdkZWRgYGVkY2FiYGNfXWBeYWRkY2Zg
-XV5hYmFjY2NgYWNjYV9eX11eYGBiYV5fYGBgYGFeXVxgYF1gX11gXFtcW15fXVtX
-V1taWllVVlxfXlxbXFtdWllbXF1dXFlZWl1dXVxhZFxeYWRhYGBcX15hX11cX11e
-YGBgXl1gY2FjYWRjYWJgYWBnYmVhYWFgYGBfY2FfY19fZGVnZWNlZmVhYGFiXmBh
-Y2BhX19gYFxcX2NgX15fXl5eXmBfXl9hX2BhYmBfYWFhYV5gYV9gYmFfYWNhZGFf
-X2FkX19gYWRmY2FjZGJfX2BkYmFgYWNgZGVhYF9fXF9dW19iYWFhYV9fX2BjYWNk
-YWNjZGBgY2NmZGJiYWJiY2JlYV9gZmZjY2VlYmRhYV9eXV9gXl1dXF9hYmVlZWNk
-Y2NhYmBhYGRiYGBhYWJiYmJiYGJhZGZmY2ZhYmRkZGNgYmRkZGNjZmhraGVmZmZm
-YF9mZmNiY2hmZWVnZ2RjZmRkZGNkZ2ZlZ2RjZmRiZGVkYmJkYWVraGZnZ2ZnZWRk
-aGtpaGZnaWdna2hra25samppam1sbGxpanBxbm1tbW5vcG1rbm5ta2lpa2toaGp0
-osDT3OTq7O7w8vT09H59fn1+f35/f359e3x9e32AgoCBfX2Afn5/fn57fnp3dnd7
-fIGCf4B/fX18gX9+fn1+fXt7fXt7e3t9fH5+eHl9fn57fHx8fHx6eXl5f4SBf36A
-fn5/g4B+fX9+fX2CgoCBgX59f4CDgX1+fn5+f39/f3+Ag4B9enx/fHt5eHh5e3t5
-eHZ0cW9vcHFxcHBxdHRycnJub3Bwc3NxcnJxdXZ3dXl6dnB0c3Z5eHd1dXh1c3Z1
-dHNwdnN2c3V3dXp1dnV2c3N0dXR1dnR1dHV1cnR0dHJ0c3F1cXFzcnFyc3R3c3Jz
-cW9wbW5xbm5ycHBydHFxcHBycnFucHFycnR1cXNycnJ2c3J0cnNxcnR3dHRycHFw
-bnF2c3JvcnJ1dG9xdnR1dXRyc3N0dHRydnRwbm5tbW5xcm9sbW1tbm1tamprbG1r
-aWpramhqaWlpamxrZ2hpa2tqaWpsbGlpbGtsbGtsbWppaGlra2ttaWpqaWhsbm1t
-a2loaGlpbW9wbGpsbm9ta2xqaWdoamhnaGZjYWRjYGBaWFZZWlpaWVVTVFdWVFRU
-VlRUVFVWWFlYVlhVVFFUVlhXWFpZWltZWVpdW1pfYmFhZGZnaGdmZWNhYmVlY2Vi
-YmlnaGdnZ2VlaGZoZ2ViZWpmZWZobGhnZmhpaGRlZmhsamVnZWZoamZnZ2ZnaWtr
-bGxqaWtpaGpsa2toaWpsbGxqaWloaGhpZ2dnamdnZWdoaGhqZmFlY2JjZWRlZGRn
-ZWdmY2JmZ2xnZWdiY2NmaWdnZmZlZWxoZWdjZWhpZmRlZGNkZWVnaWloZWVkaGhk
-Y2VnZ2ZkZmRkYWJkZGdkYmVnaGhpZ2VnaGluaWdoaWxsa2loZ2lpZmdoZ2RnZmdo
-Z2dqaWlpZmNiZWRlaWlpZmhmZmloZmJiYWRkYmJiY2FjZmRjYGNkZWVlYGFlamhl
-ZmVhYWFfYWNiZGNiYWFfYmJjYmFfY2BkYWNhX2BfXF5iY2JhYWBhXl1fYGBgX11d
-YGBfXV1cW19gYVxcXFpbW11eW1paW1peXl5cWFdbW1pbWlhbXl5dXltcW1tbXFpb
-X19dXF9hXl1gX11fXF5bXl5dX2BdXV5gYWFfX15jYWBjZGRkX1xbX2FhYWFiYGBg
-YGJkaGNiZWZiYmNiZWVmZWRkZWNhX2NiYWBeXl9eXFxgY2FfYV5dXmBfXl5cXV9j
-ZGJhYWBeXV9iX2BdW1tgYGJjY2RkYmFiYGVkYmFiY2RlZGJiYmBiZWNhY2diYGFh
-YGJiYFxeXmFeXl9hXmBhYF1gYWJgYmNiYWJjZGNjYWNiYWBeYWBiYmVlZWVnZGNj
-Y2ZnY2JiYWJhY2NgY2NhX2FjZGRlY2BiY2BgYl5hY2FgYmBiYGFkYmJeXV9dX2Ji
-YGRmYmJjYWFgZWhkZWFjaWdlZWVjZGZnZmJiZWNiY2ViYmFiYmZmYmZkZWRjZGRj
-ZGRjY2VlZmVkZ2ZkZ2dqZ2dmZWZjZmprZ2NkaWtqaWVnaWlnaGtraWdqbWxvbWtr
-bXBvbGltb21ra2tua2ppaWpoaGtoaXWYwdTe5ens7/Dx8vLzgYKBgH59fXx8fn17
-fX5+e319fYB+fHt7fH5/fHp6fHx6eXp6eH1+e3h6enl8fX16eXp6e3t6eXx9fn59
-e31+fX1+fn57e3t+ent7eHx/gIB+fHl3fYCDf35/fn5/fH17fYGEgoB+gYOFgYB9
-fX59fYB/gYN/f35+e3t7en58enp3eHV0c3R0dXNxcnNyb29vcHJzcHBxc3NxcXNz
-dXh1d3V3dnV5eXV1dnd0dHN0dXZ3dnR1dnZ+eHZ4enp5dnd3d3h1cnJydnV0eHd1
-dHZ1dHNycnN1cnJxcG9xcHJ0dHR0dXJydHNzcm9ub25ub21wc3N0cnNycXBwcHB1
-dHJzcnFxdHN0dXN0c3J0dXN0cXR0cnFzc3J0d3l1c3Nzd3Zzc3R3eHd0c3FycXBw
-cnFxb29tbW1sb21qcG5rbGtpampqbW1raGxtaGhramhpaWpoamtsbGxramhramxt
-bXJrZ2lqamlpaGtrbGtqbG5taWtqam1sbG1sa2tqbW5raW5rbGlmZ2lpamZoZ2dm
-aWNfX2BgXl1bWFhZWFpYV1RWVFNVVlRUU1JTUlJSWFdaWVNUV1NVV1hVWVtbXVte
-XVxdYGJfYGBkZmRjZGFkZ2ZnZWZlZGZqZGVmamVkaGhmaWllZmhqZ2lnZ2dmZWhp
-aGloZWZnbGloaWdmZGVnaWhmZWdsbGlraGpqaGxpaGhpZmZra2drbWtsbGxnaGZk
-ZmZmZ2drbGppbmpvZmNkZWVlZWNiY2RoZWNkZWRmZmRmZWdmZWVnaWZnamhpampo
-Z2dlZGZlZGZlZGRlZ2hqaWdkZWhmZ2VmaWdlaWhkYWNnbGRkZGRmaWplZWhmZmdn
-am1rZ2lqaGhmZmZoaWdnZWZkZmZnZ2doaWprbGllY2VkZWZmZWdmZ2ZmZGRiY2Fj
-YWRjYmRlZGRkZGBkY2JjY2RlY2VkY2VmY19bW2FgYGBdXmJhYGNlZV9gXl1dYWBi
-YWJhXl5cXmBgXlxeYF5eXVtbXl1eYWFcWltZXF9fXl5fXFtbXF5bXl5cWl1eX15e
-W1hYW1xcXF9gXmBeW1xZWVtbX15fWVlaXFpYWVpdYGZjYV5eYWNhYWNfXV5dYmJg
-X2NgX2BiX2BlYmBhXF9fYGFjYF5eX19fXmJiYV9dXWFgYWJiYWFiYWFiYWJhYmJg
-X15bW11bWltaXV5gXl5gYFxeXF1dX15lZGFfXl9dYV5fXl5gYmNkY2JhYmNiYWBi
-YmJhYWNjY2JjYmNmYl9gYWNjZGJeWmFiYWFhYF9fYV9gYF9gY2RkY2FiY2JfXl1g
-Y19gZGRkZmJgYV9iZF9fYGRnZmFhY2NkY2RgYmJkY2VkZGdkZGJjYmJjYmNkY2Nd
-X2FjYmJhY2JiZWFgYGFiY19eYWBiYmNjY15fYGBfYmJjZGdnZWRlZ2VjZGJiZGNh
-YGNjZmZkY2BhYWJkZWVnZmJiY2FjYmFkZ2RiZWZmZmRkYmFkZmlmZmRnaWlpZ2Zn
-aGVlaGtsbWtramxqam5tbWxram1rbWtqam9wbGlsbWxqbnBsa21rZ2dpamhscZm9
-0t3k6ezv8PHz8/R/gIF/gH99fnx9foN/gIF+fHp9fn59fnx5eHh6e3p3e32Ae3t6
-e3x/fnt5e3x7ent6ent6fHp9e31+fX5/gYJ+e3t7f317e3t9f398f315fX19ent+
-gX1/f358fn9+e3x8f4GCgYN/goJ+fn6Cfnt9fn9+fX17e3x6enl6ent7e3d2d3hz
-dHV2dHBzdHNycW1vcXFzcXRydHFwcXR2dXJyeHp4eXh4dnRyc3l3dXV2d3Z1d3Z5
-eXl3dnd2d3d4dnZ1dXZ1dHR0d3Z1d3p5eXVzcnF0dnZzcXNzc3Fzd3d1c3RzcnJz
-dHd1cXJwb29xcXJxdHJwb3Fxc29wcHNzcnNwdHh1dnV2dHJwcnJ0dHN1dXNxdHh1
-cXJ3dHJycXBxcXN1dHJzdHVzc29vcXBwcXBvbm5wcHFvbW5vbmxrbGprbm5sbG9u
-aWpsbWpraWhnaWdoaWprbWpqaWtua29wbWlpamhmaGtqamxuamxqbGpra2ttbm9w
-amtubW9tbWxsa2lnY2dnaGlqaWlpaGhqaGJhXl5fYF9gWVhbWFZYWFpZW1dZVVVT
-UVFTVVVTU1ZZWVVXW1dcW1lXWVhbX19eYF1gYl9kZWRjZGVmZWRnZ2dmZ2Zlb2tk
-ZGhvbGppaWdmZmVpZ2VmZ2pnZGdoaWhkZGRlZmhmZmZmZ2NjYmRmZmlpamtrZmZp
-ZmdnZ2dpaWdnaGxta2hpbG1qa2tnZ2hqaWZtaWdmZ2dpbGlpamFjZmRfZGVkYmVk
-ZWNkY2NiYmRnZmZlYmJlZGRkZGZmZ2dnaWljZWdnZmlsamlnZ2VkZGJmZWZlZ2dq
-aWZmaGpmZWNmZWJjZGZmZmVmaGhnaWtra2tsaGZlZWdnZmZoZWZmZmhlZmdoaGlr
-aGdmaGdoaWdkZWZmZWRmZWRjX2RnaWZmaGNlZGZoZmplYmRhYl9fZGdnY2JiYGJh
-XmFhXV9fXV1hXmJjZGBiYV5dYWFfX19eX15gX2BeX15eX2BeW1pcWlpbXmBhXF1c
-XVtbXmFcYF9gXV5aW19dXV9dXF1eYWBfXl9hYF1dXV5kXltfWllaW11aWVhfYF5e
-XFpdW2BgYmJcXV1fYmNgX15eXl9cXF1gYmFeXmJiY2NiYV9jY2FhX2RiXl9hY2Jk
-X19iYF9iYF9eYGFhYGJiYWBiYGBgYF5fX19hXVxdXF9fYlxbXl9iYmBfYGFcYGJh
-YGBgYWFhX2BjYmVmYmJlYmBhYGNkYWBhX2BiYWFgYWFhYGJnZ2FfYWNiYGBhX2Bl
-aGVjYWJjYl9eX19gYWFhZGRiYF5cXV1qYV5cYWRiZGJkYmFiY2NgYGFhYmNmYWBf
-YmRiYmBiZWJlY2VlY2VkZmRlZGZmYGBjYmNlY2BhZGNiZGJiZmRgYGJiYmFjZWJe
-Y2NjZmJgYF9hZWVmZGNkZGFlZ2llZWRhYWNkZWFhY2NkZWJgYmJlZWFiY2BiZWRm
-ZWRlZmJjZmVkZGVmZ2loamZpa2xqaWxsa29raGtsbG1ramxrbmxubWptaGtvaWln
-aWtsbWxraGpraWhoamtnZ2VrbG5wnr3R3OTo7O7v8fHz9H16e3x9fn6Agn+Bf36A
-fn17e3h6fX9+e3x5eHp8e3p3fHx6e3x8fX58foOAfXp4eXl9fnt8fHx+fXt7f358
-fX19fXt+gH5+fHp6f359fIB+fn18gH+Afn1+fYB+fX6Bg3+DhIKDgoCBf3+AfXt8
-fHx8fnx9e317enl5enp6enp5d3d1d3R0c3dycHFvcHFvbm5vcXFvcG9wb3JxdHZy
-dHJ0d3ZzdXd4eXt1c3Z4d3V1dXV2dXd1dXl3d3V2dnh2dXZ0dHV3dnl3dnV1d3Z0
-dnZ0d3d1dHNzcHNzdnZ1d3h2dXJxcXJ3dXNxcXJubm9wcHJ1dXdycHFzdHN1c3N0
-cXd1dHV2d3Z2dHJyb3FydXZyb3Fzd3d4dHVzdHNzdXR1d3V0cm5zdXVycXFycW5u
-b3Bub29vcnNwbWtubWlqbW9ubm1sbm5ramtrampraGloaGZqamlpa2xpaWptbGxt
-a2tsamtpaW1rbGtraWlqa2xubW5sbG5xcW5ubWtqbG5raGdqa2xtbWlnaGhoaWZj
-ZGRhXl5hXllWWFdbXVtaWFdWU1RWWFRUU1RTU1VTVFhWU1lYV1dZW1tZWl1fXWNg
-YFxhY2NjY2ZoZ2praGNkZmdmZmhpZWhoaGpqaWdqZ2dmZmZlZmRoZmVpZmlsamZk
-Z2dmaGVjZGNnZGhmY2NlZmZnaWppaGVmZ2dpa2hnZGVmaGxvamprbm5saWhmZmhm
-aGhkZWZmZmdoamllZ2lmZGhjYmNjZWRjYWBiZGZmZGdoZmZlZWNkZWZkZGZpZmdq
-ZmhmZWVnaGdnamdmZGZmaWZlZWVkZ2VlZWVoZWRmZ2ZmZGVpaWhoaGVpaGlqZ2dn
-bGtoaGdmZWloZmVma2dmZWRmZmdpaWlnZGVmaWpoaGNhYmVnZmVnYmJhbW1nZWNl
-ZGNlYmVlaWhjYF5eX2FjZGJhYGBhY2JjYmBeX19gYV9iYWFlZGNkYl5dXWJjYWBh
-YWJjYGBhXlxdYFteYGBfXV5fXl5dXF5dXF9eXl1cXF5gXl1cW1xeXlxcW1tcYF9f
-X15dXV5aW1tcW1taWFtaW1laXl9hX15cXVteYF9iX15fYWJiYGFjXlxdX2JfYWJd
-Xl9iYWRgX19fYWJhYV9dXF1fXltaXF9gYF5eX2BkYGJjYGFlYWFhY2BfXV9cWV1c
-YWBfYmJhYmBgY19dXF5hYW5hYWBfXWBjY2FiYV9dYmNiZGNiYmVlY2JjZGJjYF9g
-YWFiYmFhYV1bXmJjYGRlYV9fYGFfYmVlZWVkYmFiX19gX11eXF9fY2RjZmRkYmFi
-ZGdhYmFjY2VmY2RiYGVjY2NlZ2JgYGBhY2ViYWJiYGJkZWNjY2RmZWZmY2VlYmNi
-Y2ZkY19iX2BkYmBhYGFhYmBiYmNjX2JhYWNkZGViYWJnZWJgX2JmZ2dmZmhnamhi
-ZGJiY2BgYGFhY2RlYWFkYGBhYmRkZGRiZWZkYWZlZWdlZ2ZnZWhoZmZmZmVnaWtp
-aWppZWttbGxqZ2dsbGxvbGxucXBva2tqam1paWlramhoa2xqaWZjZWtsamySvNLd
-5Ofs7+/w8vPzgX59enp7fX58fn1+f319e3t7fX18fX19fHp9fXp7fXx7ent4ent8
-fHx9fnx8enh5e3yCgnt7fH1+f3t9fn59fnt+fn6Af3+BhH98enyAgYKAf4F/g4J/
-g4B/gH+AgYF/fn9/f4CBfn18gIKAgIB+fXp8fn99fnx5fXt7fXx7eXZ1dXNzc3Fu
-c3JycG5ubm5tcHBvcW5vcHBycXJzcnRyb3J0dHZ2d3Z2eHt3dXh6d3R1cnZ3dnZz
-eHV0dXRydXZ1cnR2dnd5enh5d3R0c3N0dXd0dHV1c3J0cnFwcHN1c3V1dnRzcnR0
-eHVzcWxvbm9xcXJ1cXJzdXVzc3RzdHR0c3Z0c3Nzc3J0dXRyb291dHF0c3Nzdn1z
-c3Jzc29wdXZ1cnNydXRzcXRzdHVzcXBvb29vcXNxcnBubm1sbGtvbWpra2tqamps
-bGhoamppZWVmaGdpaWloZmtra2ZraWtraGlraWpqbWtrbW9sb21qaGprbGxqam5t
-bGprbG9ydnNtcHBsa2hnZmZkZmRmZ2dmYV5eXGBcW1taWlpZVlpaV1ZWVFBOUlNV
-VVRVVlVUVlZVVVVUV1lYWFtbXlxfXl1dYGBhYWBiZWZqZ2hoaWJmZWdnaGdoaWtr
-aGhnZmZoZmtsamZmZWZoZmpqZ2VoZmVlaWZnZWViYGFnaGVlZmRnamdoamtqaGds
-aWdoaGhnZmtqa2xqaWppaGhqaWppaGhpaWdmZWZlZGZpaWxrZWRlYmJhYmdlZ2dp
-aGlqaWhjY2doZWZlZGVmZ2pmZGRnZ2ZmZmdmZ2ZkZ2hnZWRnaWloZ2poZmNlZWdo
-ZGRmZGhraWlmZ2lpaGNpZ2VkZGRlZWdoZ2VobWhoZ2dnZGRoZmloZ2hlZ2VjZWdm
-Z2tpZ2dmZWJiYWFlZGRlYWRlYmNkZGVlZGNkZGZlZmRfXFxeYWJkYmBkY2NkZWFj
-Xl5eXmJiYWFfYGNkY2BhYmFfXWBlYmNjYGFiYWBfXVxdYF1dXl9hXlpdXF1fXl5f
-XF9fYGBeW1taW1xcXFpdW1tcXFtbWl1dXlxbXFtZXGBcWVZZWFtbWltfXl5gX2Jh
-X15bW1xfX2BgYWJdXV1dXl1eXV5dW11iYmJhYWJfXlxhYWFkY15dX2FfXl5dWVxf
-YV9gXmdiY2JhYmNhYGBgX15eXF1gXGJkYmJgXV1fYV9gX1xcWFhZXV9eXWFeXVxg
-YWNjYV9gYGJlZGBhYmZkY2NlZGJjZGNjYV9iY19gY2NjYmFgYF9fYGBgYGJkY2Jh
-ZGZjYmJhYmFhYF5iY2FfXmFmZ2VgYF5hYF9jY2FhYWVjYmNkY2NiY2BgYWNlX2Fj
-YWJhYmFkY2JhZWhqY2RjY2VlZGNkZGRjX2BgX19fYmVlZ2FkY2JiYWJhYF9fYV9f
-YGBlZmNjZGRiYV9kaGVkZGNmaGpmZ2lnZmNiYmJgYGFjY2JkY2JlZmRkZWRkZGln
-Y2RkaGhlZGVnZ2hnZWVlZmdnaGloaGpoamplZ2ppbWtnaGlrbW9ubW5wcHFtbW1u
-amhraWdlaGdnaGpqbWlnaWhoaYW80t3j6Oru7vHx8vJ+goF+fnx7fXx7fHx8fHt6
-fHp5fX57e3x7e3p7fX6CgHx+gHx2eXt6fn99enh5e3t7fIGCf3x7fHx8e3t9e3x9
-f4F/gX9+fn6BgH9/f4CAgYKCgX9/fYGEgn9/gIKAfn5/fX1/f4B+fn1+hIKBgYJ+
-foJ/fH19fX1+fYB+fHp5eHd4dXFxcXJ1cXBvbW9ubG5vb3FvbnBxcHBvcXFxdHN2
-c3JzdnZ4dnZ3eXl4eXd3dnVzdXh3eHd5eHF2d3Z2dXR1cnN1d3d3eXZ4dnRzdHNz
-dXVzcnN0dHd3dXNzdHN0dHN1c3JwcHJycXFvb3Jyb29zcXBwcHR1dHV4dHNzcnFx
-cHFycnNzcXJyc3Nyc3V1eXh4dXZ1dHFydnV1c3N3eXl3dHZ3dXN1dHR1cnNxb3By
-dG9ucHJxcXFxb2xsbWppa2lpbGppamtoZ2ZmZ2dmZGZoZ2VobWtsbmhpamxsampr
-bm1ub2pta2x6bmxsZmtpaGhraWlsaGdqZ2htcHNubm5sbmpqamdoaGRlZ2RmY2Bj
-YFxeXV1hW1tZWVhWV1hUVFNSU1NUUlFUU1ZWVFlYWFpaWFdYVVlZW1pcXFxcX2Fe
-X2JgYWJlY2ZlZWRlZGNlZ2hqZWdmaWtmZWdqZmVmZGdoZmpqZ2ZpZ2dmaGloaGRj
-ZWRlY2NmY2RlZmZkZ2VnZ2doZmlnaGdoa2hoZ2lubGloaGhsamhoamlnZ2lqamdn
-ZGNnZ2loZ2VlZGhoZ2VmZGVjYV9jZ2hpaGZkZGRkYmRnZWVmZmlmaGhpaGllZGdn
-Z2hmaWdmZ2ZjZGZmaGppZ2doZ2ViZGZmZmRoa2poaGhlZWdmZ2hsaGRkZWprZ2dq
-a2hnaGdoZGNpZ2RlaWdmZWpqaWtpaWdmamhnZ2lnZGJmZWJiZWdnZWFhYmNjYmVm
-ZGZlYWNjZGNmYmNkYmJkZ2NlY2NnY2RmYl5hYWBgYWFkYmBiY2BiZWBiYV9gYWJm
-YmJjYF9dXV9gXV9gYWBdXFtbXWJfYGBeX11gX2FfXFpaXVtbW1xgXF1dXlxbXF1Z
-WFpZXF1bWlxdXVxcWlpZX19dW15fXmJlYF5dXl9hYWJfXmBhYWFhYmJgX11eXWBi
-YV9cXl1gYGFkYWJiYGFjYWVkY2BhX19gZGJhZ2NfYWBgYmJhXl9dX2BgYmFfYWJl
-ZF9eXV9hX2BhY2NjXl9cW11fYWJeYV9fXV9kZGRkY2FiYGNlYWBfYWFiYWJgX19f
-YF9hYV5fZGZlY2NfX19hYmJjZWBjY2BjY2JgYGVkZGZhYF9cYF9gX19iZWFhYV5g
-YWFkY2BhY2RiY2JgYGBiY2NkZmRmZmNjYmFkZWJjYWdnZ2djYmNkYGFjZWNjZGNi
-YGFdXF1gYmJiYWBkYWJiYGBjZWRhYGJiYWNjZWRkZWRhZWRjYmJjY2hpamxpZ2lk
-YmJgY2RjYF9hZGNjZWlpZGJlY2ZnaWVjZGVlZmVobWlpZ2dpaWVlZWpoZmlqbGdn
-a3BnZ2loa2xmZmptb25tamprbW5ucW5vcGpqZ2lnaGhnaG1ta2Vqa2lliL/R3OPn
-7Ozv8fHy8n18gICBfn16e32Afn59e3x6enx9e3t6eXl9fX59g39/e3l7eXh4eXl9
-fHt5eXl8enp7fX1+e318e317enl8en5/gIGBf3x6eXx/goCCgYGAfn56en6BgH6B
-goKFgX+DgoCBf3+Bf315gH+Af36Bf39/gYF/fX18fn6AgH57eXl4eHN0dHN1cXNz
-cXBvbW90cG9ra2xqam1vb3BvcXR0c3JwcXFxdnZ1dnV3eXV0dXd3dHZ4d3d2dXh4
-d3l4eHd2dnV1cnR2dHd2dHVzdXV0cnV2eXVycHR0dHh3dXJ0d3h2d3J0dHZxc3Fy
-cXR1c3Nzc3BtcnFucHJyc3Rzb3Nzcm9sb3J0dHNydHNwcnNzc3Z5eXZ2dXZ0dHV2
-d3t5dXN1dnl3eHd1c3V1dHFzdHRzcnNzcXBvb3Nzb3Fwb3BtbmhpaWhpbGpqamlo
-Z2lmZ2dlZGVnZWhna29tbGtsamxtbG1rbW9vbWhpaGdqa2pwbGtqa2xraWtramlm
-aWxvcGxram1tbGxubGhnZ2doYmNhYGFfY15eXl1cWVdXWVlVVFdWT1FTU1RWVFRU
-V1RWVltZV1daXFhXVVVWWFhaXV1eX11fYWBkY2VjYGRmZWVmZ2ZmZ2RlaWtpa2ps
-amloZ2ZiY2ZnY2VmZ2lnaGdnZ2hnZGVjZ2ZkY2ZoZmdmY2VmaGhnZ2VmZ2poaGhp
-bGlpbGtoaW1qZ2pqaGVnZ2hpZ2ZlZ2lqaWdjZGZnZmVmaGhoZmhoZGNjZmVmaWhj
-ZGVkYmRnZWNkZGVmZWdmZmVkZmZmZWZnZmlpZ2hoaGVmZGZoZ2ZoZ2NnaWhkZWVm
-aGlqaGdmZmZkZWZpamlrZmRmZmhmaWhlZ2lpaGhpaGhoaGhmaGdpaGlqamhnaGVo
-Z2pmZ2RjZmZiY2hoZmVkZGRkYWFgY2BhY2NlZGJkZWVnYl9iZGdmZWNhY2JlZmRg
-YWNiY2NjYF9hYmNiYWJiX11gX11hYWFgXV9jYGJjYWBgYGNfX15cXl9eXl5kaGNh
-YF1fYGFgXV5gW1xdXV9bW11fXl5gW1tbW1laWllYWlhcXl5cXV5eXV9fX15cXWBi
-YWNfX2FgYVxbXl1hZWBgX11gZGFhYWBeW1taXV1gY2JjZF9fYF5jYmBmZ2FiYF9h
-YmNgYFthYmFhX2FiYGNiXmBfY2FfYWFjbGdkYV9eYF9gYV9gYWFgXmJgYF5gYmJg
-YGNhY2RjYmFfYGBcX2JiYWBgXFpdXF9gYWNiYmRgY2NjX2JiYF9hX2BgYGFfYGNk
-ZWNiZGdrZmpkYV1gX2JiZF9iYWBeYGJiYWBlZGBeZmhhYWdiYWRkYmNkZGNkaGRn
-aWRfX2NkY2NjY2ViZWVjZGFhY2NiYmJgYF9fYV9iYWNhYGBfXmFhZGVlZl1gY2Bi
-Y2RkZGFhYWNkY2FkaGRkY2RlZmZiZGFjYWJjY2FfX15fYWRjZWZnZGNjZWRjZmlo
-ZWVlZWVpZ2VlZWZmamdmZmpoaGVoaWZrbGhnaWlraWdpbGxubGxta2ppamppbG5u
-amZqbG1pa2lpaGlwbGpta21/utDc4+fr7O/w8PHyg4GDgoGDgX6BgoJ/fHp8fXx/
-fHh5eHt6fHx8eX1/fn59fH19d3d2e3p+fnx5e3t4d3l5enl/f4F9fH18enx7e3x+
-fn98gHt6e31/gH19e318fn59fH19e4CBgYOBgYKFgn6Af399gIOAf3p9fICAgX9/
-gYKCgICAgH2Afn17enh5eHV4dXR2dHFxb3Fvb3JzcG9tb29ub2tucnJzc3N0c3N0
-cnN0c3d1dnZ3dXNzdXZ5eHd0d3Z5eHZ3enl1eXh3eHd4dHV0eHRzc3N1eHh3dnNz
-c3NydHJ0dXR0dHV2dnh4dnd2eXV0cXJ0dXNxdXNwc3NwdHZycnNzc3Jxb3F0cnN0
-dnNwcHFxcHV2dXd2dnN1dXV2dXJzcnNzdHZ0cnVzdHV1dHR2dHR3eHd3dHNxdHB0
-c3FxbnFxcm1vbnByc3Bub3BtbGxrbGpsbG1rZ2tnZWdpamtsa2tta21wbGtta2xs
-bW5qb21ramlpam5ucG1sa2xsaWxpaWlubW5ubnFtbG1wbW5sa2pqZ2VnYmFjZGNg
-YV1dW1xZWlhVVlVTU1JUVlVWWVVSU1RXWFVVV1hYWltZWFVaWFtdW2BeW11cXl1e
-Y2NlY2NmYmVmaGhlZWlnamloZmZmZmZoaGlpZWRlaWxqZ2hoZmdoaWppaGhmZ2ho
-aWltZWRkZ2dkZmtqaWdnaWdmaGppZmJkaWppampra2poZ2dqamhkYWVlaGdnZmVm
-ZWdkY2BfY2VkaGdmaGlnZWVjZGVjY2dkY2VjZWdlZWVnamVlZ2VlY2NjY2ZmaGpr
-Z2RmZWlpZmhnaGlmZ2VlZmRmamdmZmhnZmdlYmdmZ2hpaWtpZ2RjY2VkZmVlZ2lp
-Z2tpZ2dnZ2tubGlsbWdnZWZoaGhramdnZ2dmY2dnY2RpZ2VmY2NjZGRmYmRhYmFm
-ZWVnZmJkZWVkZWJiZmhlYWBgX2FiYF5hY2BfYV9jYF9fXmFiYGJjYWJiYGBhX2Jg
-Y2JjZGNkYGNkYF1gYl5gX1tdZGFhYF1dXFxfYGBeXl9gXl9bXFpaXV5eXV1dXV5b
-WltcXl9dX1xdX15dX15dYF5eXl1dX2FhYGJhYF1cXl9dXGFiYWBfYF5hY19gXF5e
-Wl1eYmJiYGBgYmVjYWBjZ2NdX19hX2BiYWBhYF9iYmJhX19eXV5hXl5fYGFhX2Fk
-ZF5eZGFgYGBjYF5dXF9fX2NgYWJiYWVlZWRiY2NiYV9cWl5iYF5hYmRgYF5cXmBj
-YWJpZGFhYmRmYV9gYGJkYmJjY2JjY2JiX2JgYGBjY2RlY2dmZ2VmZ2VkY2BhY2Rn
-ZGRkYGBiYWJjY2ZmZWdjZmRlZWRiZGJhYmVhX2BjY2BfYGBfYWJkZWNiYWFgY2Vj
-ZmFjYmBhYmRjYmBiY2JhYmNlZWNmZmFhYGJkYWRkZWRkZWVlZ2VkYmVjY2RlZWRn
-Z2NhXl9gYGVkYWJkZmplYmZlY2ZmZmZlZWVkZ2lmaWlnZWVoZmVnZ2hoaWhoZmlp
-bW1paWdpaGZqbGxtbW5qaGhramxvb29vbXBub2ttaGhqZ2xtbWlncoK60dvj5+rs
-7vDw8vN/gX6AgIF+e4GCgXt3dnZ8e3x9eXp6fXx7e3t+e3x9fn59fnt5eHt9fXyA
-fHt9fXt6end4enp6fYKAf3x7ent9gH5/gH99e3x9fX6Bfn17f4CCfnp7fX2Af3yA
-goJ+gIGAfn9+f4KChICAg4CAgH+AgoKDgH9+f39+fXx7eXl6enZ4enZ2d3d2c3N0
-cXFvcG9wb21vbm5scHFzdXV0d3V0dHR1dXV0dXV3dnZzd3V0dXl4d3p4enh2dXV3
-enp2dnZ5enl5dnV1dHZ1dXh2d3l2dW91eHd0dXR0dHR0dnV3dnd6d3Z0dnR0c3Z0
-eXV3dnZ1cnF0dnV0c3VzcXBwdHV1c3Zzcm9zdHNzdXB1eHN0c3Rzc3V1dHV0dHR2
-d3d3dHRydHRzcnJydHV3d3V1c3RycnVycnR3c3Nvbm5ub3F1c3FwcG9tbGtramtr
-bmtpaGtqZ2pxbW1qZ2lraWhnaWprbGlqbG1ua21ramxsbm1sb25ubWpraWprbWht
-cG5tbW5tcG1ram1ob2xqaWZgZmRiXmBeXFtbWlpYVlhWV1RTVFZUVlZUWVhWVlRX
-WllWVlZaV1daW1lbWVpaXVtcXltcXF1hYWFlaWZkZWVlZWRmaGpqZmVlZ2VmZmZk
-aGhraGNlZWZna2hoZ2dmZ2ZmZ2VnZmdraWZmZWRjaGZnaWttbWppZ2djZmZoaWVl
-ZWlrbmpra2lnaWlramtmY2Zpb2pnZmVlZ2JlaWZjYmBiZGViZmhpZ2ZiYmNmZmVm
-Z2hmY2VnZ2dmZmhoZWNkZmVoaWpsbmpnZ2hmZmZmZ2lqaWVkZGVlZ2toaWZjZ2Zl
-ZmNmZWRpZGZoaGZnZmdnZGVkY2NkZmZlZWVnZ2tpamlqaGloa2lpZmdlaGloZ2Zo
-Z2ZmZGZkZWVlZ2ZmZGJjYGNlY2FhZWJhYmRiYmRkYmJjY19jZWZkY2BiX2FgYF9i
-YV9iY2JjX2JhY2RiYGJhYWFgXl9gYF9hYmVkYWFeYWJiYl9dX2JeX19eXWJfXl5c
-Xl5hYl9dXl5dXl5cW1ldYGJhXl9jXVxdXl9fX2BfX15fXV9fXlxbWVxcXl5eX2Bg
-YGJgYF5gYV5gX19fXl9eXl1fXVxjYmZnY19jYVtcXl9jZmdkYF9iYl5gY15hYmFi
-YV9eXWFgX15eYWBfWV5iXmFeXF9eYF5fYmJfYGBgX2BjZGNiYGBgYmRkZGFkZGNh
-YmFgYGJhX11fYmFfXF1fXmNiY2JfYmFhYmVmYVtdYWRhYmFiZWVkZmJjY2FiYF9j
-Y2NgXl9nY2hpZWZlZGJjY2RfYGBhYmRjZGRkZGRlZmVlY2VnZWZmZGNkY2JiYWFi
-YmRjYmJgY2RjZGRjY2NjYmNjYWNjZGZnYWJjYmNgYGFhZGRjY2JiYGFmZmVhYmBh
-YmBkYGBjZWVnaGhkZWVmZGNkZ2RhZGdoZWFhX2BjYmZhYWNlaWZnZ2ZiY2NkZGVj
-YmVlZWZkZmhqaGNlZmhlamtpaGlpaGZqbW5sbGtqaWxqa2hqam1sa2dmaWxsamxu
-bnBua2hqZmhoaWpoZ2l1iLzQ2+Ln6evu7/Dx8YeDgYGAfn97e3x+enh3eX16fXp8
-eXp8fn57e319e3p9fHt6e3x+f36BgX19fnx9fXt9fHZ4dnl6en2AgH99f399fXx9
-gX57fnyAgYB+fYB9gYKGgH5+f4SAfn9/gH+Ag4OHfn5+goODgX19f4B/gH19gYJ+
-f318en15e3p6enZ1c3V0dXN1dXV2cnR1c29qbW1vcXBucG5xcHFucHJ0c3Nydnl3
-dHR1dXdzeHh3dnR0dXZ3eHd3eHh2dXd4eXh3eHd7eXZ4enh5d3d1d3Z2eXh5d3h4
-e3Z0dXN1dXZ7dHd6dXV2dnV2dnd1dnh2d3Jwc3JycXJvcXNycXFycHJxc3Z2cnBw
-cXNzdHRyeHRwcnVzdXVydHZ4dnd3c3R2eHp3dXZ1cnFwcHFzdHR1c3R1cnN1dXNw
-cHJ1cnBybm1sbm1ub21ucGtqa2hqbmtpbGxsaWxqamlubWxta2poZmdkZ2tra2lo
-bG5pb2xrbWxqbG5sbGtsampramtqbGtpZ2trbW1sa2tra2pta2poaGtqZmJfXmFg
-XVxaXFxXVlhYV1dWVVZUVFdYVVRVWVZXVVhXWVdcWVhVWFtaW1lgXF1aXGBfYF5h
-YWNlZ2dlYmRlZmRlZmdnZ2RlZGFiYmdoaGlqZGNiZWdmZWdmZ2RlZGVkZ2dnaGll
-ZWRiZ2VkZ2loaGdpampramVkaWdra2hoZmZnaWxsaWVoampnaWZmZGVmaGloaGZo
-Z2JkY2ZmamRlZmdkZmRlZV9gY2VnZGVmZWdiZGZoZ2dmZWdmZmNkaWlqamxraGdo
-aGllZ2dnZWVjYmBhZmZiY2lnaGZmZ2lmaWhmaGZkZGVkZmhjZWlqZmNjZmdnZmZp
-Z2hqaWhna2hnaGhsa2lmZ2hnZWdpaGppZ2dqZmRmZGNlaGdjZmNjYmBfYmdmZGVj
-YGFiYmFjZGRkY2NlZ2NhYWJjYGRiYWFhYmFiYGBgYGBeX15gY2NiX2BiYGBfX19e
-Xl5hY2NdXF5gX15gXV9gYWBcXmFhXVxbXV9fXV1fW15fXF5fYVxeYWBfXV1dXV9d
-Xl1gYF9eXlxdXF1eXV1cXV1eXFtcW15gYWBhYmFiY2BcXmBfXV9eXF5iYF5dYWVi
-ZGBeX19eYWRjX2BiX2FjZ2hnZGJhXl9cXVxfWmBjX1xeYGBlY2FgX15iX2FhYWJi
-Y2RiXV9hYWJhYGFhYl5hY2ZiYWFgYGBjYmFjZGNjZmReXFxhYWFeX2NkZWVjYWBk
-YWJjYmNkZmNkZWNiYmJjZmBhZGJiY2VlYmJlYmFjY2NmZGZmZWRjY2BeYGFgY2Vl
-ZWRlZGVjYmRiZGVjZWJmZGNhYGJkYGJjYGBgY2ViZmNhYWRjY2JiY2RkY2JgYGBh
-YWBiZGRiXmFfX2BgYmBgX2JiYmNlZWFhYGBfYGNlZWZlZGRmZmRkZGZlZGZmZmdn
-Yl9kYmZjYWBkZmRmamhjY19iZGVkYWFiY2NjZGVwaGhoZ21pa2lpa2ppaWxubWlq
-a2xtbXBya2praGhqbXJubGxoZ2lrbHBwcW5rZWZsbGtqaGloaG6Ou9Hc4+fq7O7v
-8PHxgoB+fH19fX18e3p5enl4enp5eH5+e3t/f3+AgX9+fn19e3h3eX5+e3x8fHt7
-fX57fHt+fXp5d3h6fX59fHt9fXl8fH6BgH17en1/goOBfYOChYODgoCAgX9/fH6A
-g4WMhYB+f3+BgH6Bf4B/f318fn2Af31+f3t+e3p5fH18dnZ0dnd0cnRzcXNxcW5t
-cXRtbW9wb29ub3Jxb3FxcG9ucnR0c3N1dXV3eXl6dnV1dXV0eXZ1eHV3eHV4eHd2
-dHl6eXl3d3h7fHt4d3d4d3h3d3Z2eHh4dnRyc3R2dXZ5dXNydXV1dXRzdHV2enl3
-dXV3dXRzcXBxdHJzcm5vcW1ydHZ0cnF0dHN0c3l0cm5yc3BvcXp2dnZ2dnd4dnNz
-c3FzeHdyc3J0dHZ3dHNxcnJwcnRzc3NycXFvb29ubG1sbXBycXFxcm9sa21oaGts
-a2tra2xramhqaWdmZ2doamppamtpb2tqa2psb2tpamttbm5ra2pqaGpmaW5wb2xt
-bHBtbGltbmxvbWlpZmNmZmVmYWBeW1pbV1pbWVhYVlhbWVhWU1VVVFVaV1hYWVdV
-WFpWV1hZWlhYXFtbW11fXGFeX15eY2JfYGFjY19eYmZoZWNkYWFiYmJlZmFkY2Fo
-Z2poaGloaWtoaWhmZmloZWZqaGloaW1lZGdlZmhqa2lkZmdqaGhpaWVnZmVoZ2do
-amppaWlqa2doaWlqbWlpZ2lraWVnZmhpamhlZmRmZWRkZGRjZmJjZWFgYmRlZWNg
-YWdnZ2ZkY2RiYWNkY2RkaGVmZ2dlZmpmaWhnZ2ZpaGVpZ2hlaGdkZ2lpZmVoa21r
-ZmVnZmZmY2RkZWVkZ2hnZ2NmZ2dmaGlnaWdqa2hpamZmZ2ZmaGpoZ2lnZGhsZmdo
-ZmZmZ2dkY2FgYWNmYmRhYmJlY2NiY19fYWJiYF9fYmVlYmJkZGRkY2NjZGFgYWNe
-YWFcW2BfYGJgWl9gYGBgXl9gYmFgX19cX2FgXl9hXl9fXl9eYF9gX2JgXl9cXV5d
-XmBeXVtaXF1fYF9aXV1cXF1eXFpbXV1cYF9eXl5fXVtbXVpeXltbWltfXlxbXmBg
-X2FhYWRjYWBgXF1bWl5fX2NjX2BkY2FhYGJjY2ZhY2JfXWBkYmJlZGRlYmFfYWJg
-YWJgXV5hYWNiY2dhYmBeYWJfYF9fY2RjYl9fX2BgYV9fYWJjYV9mZGNiY2JgX2Ji
-YmNlZWhpaGVgX15fYF9hYWNmZWJlZ2NiX2FhXmBiYmNhYWJhYmNiaWJiY2FhYV5g
-Y2RhXl5fYmJkZGRjZ2ZmY2JgY2JgY2RjY2NiY2ZjYV9fYGJkZWJlYl9fYmViYmRk
-YmJiYWJhZGNjYl9gYGJgYWJiYWNhYGBiY2FiYWBfYF9jYWJfYmFiYWRjYmJjYV9f
-ZGVkZGdlY2VjYmRmYmJiYWJkZWNmZGRlYmRhYmNiZWVnZF9kZWVkZGVlZGZnY2Bh
-Yl1eYWVlZ2htaGxpZWRnZ2ZoamtsbGhqamtqbGtrampqaWpqbm9vbnBtbGhqbGln
-aWdra2xwbmlpaWllbIG30Nvj5+rs7e/w8fF+fXx6enx8e3p5e3l9foF/e3t/fX18
-e3t7f3+BfH59fH9+fn19fHx4e3p4e3x9fHt+fn1+f4F9fHp6fX19e3x5en1+e4N/
-fX5/fn2AgIKDf36Af4F/fn5/f3+BgX+AhYSCf35/fn6Af4GBf31/goKBg3+BgYF/
-fn57fXx9end3dXV2dnd2dHFvcHBvbHFscW9vcnFwcXBycnBvcnFwcHRxcnJ0dXN3
-dnd3eHZ1d3R0eXp3d3h3eHt7enh1d3p1dnt0dHh5d3h7e3h4eXx4dnZ3d3R2dnZ2
-eHd2dnN2dnZ2dXV0dXNzdXJwc3Zzc3d4eXh4dHRydHNycnFycXRzdHR2dXVzcXBy
-cnR1dHV0cXJxcnN0dnh0dnd2dnd0dXV2cnJ0c3RxcXFxcnJycHByc3FzdHFzcW9w
-cXBub29ta2lpbHBvbG1vcG9tbmxrbW5qa29sbGxvbWxvbm5uam1rb2tpa2xqbmtq
-am1xbGpoaGlsamtqbGxrZ2lobG5ubm1vbWxqbGprbGtpamhjZWRoY2NiX1teXllZ
-WVlaW1lWVlNUWVhVVFVVVFBVVllZVlhXWFZVVVlZWVpbXl9dWlxeX15eXVtcXmBj
-Y2NjZmdmZ2llZGRkZ2hnZGVkamNiZ2dmZmxraGpsa21qbGlpbWdmaGtpamdqam5q
-Z2ZoaGhpZWdlaWptaWxsaWhmaGlraWxtbGpqamlqamloZ2hsb2pmZWdnaGhpaGdr
-amhjZWZmZGNgYWRoaWZjY2JkZGVmZGNiZGhnZGRnaGZlZGdnaGRmZmVkZWRlZmdk
-Z2lnaGdoamlraWVnaGdlZ2NhZmVnbW1rZWNhZmdjY2Vma2VoZmZqbGxoaWtmZWho
-aGlnaGlqbGplaGRlZmZlZWVlZ2hpZ2lmZGVqaWhkXl9iZGNkZGVlXWNhXmNkaGVn
-Y2JkYmJgYmRmaGRjYmJgYmRlZGJjY19gX2NiX11gXl9jYV9gYWBfYF9hZWRfXmBg
-Y2BfX2BiXmJlZmNjYmJjY2FjY2RjX2BiYmFgXl5aXl9dX19eXV5dW1pbXV9dYmNi
-Yl5fYF5dXV1dXl1dXl5gXl1bXV1dX19gYGBjYmFfX1xfXl9bXl5gXmBgYGBgYWFi
-ZWdkY2NgYGBiZWRmYWFhX2BjX19iYl9gY2JhYmZhY2NlZGNlY15eXmBdXl5fXl9f
-YmRhXl1bXFxiYmdjY2RhX15jZWJhYWFiY2JkZGVlaGRjY2BfXmBfYmZkZGRgX2Fg
-YmFiYGFjYmBhY2FhYWVlYF5gYWRjYmJfYGBhX2FhYWBhYGRxaGViZWJhY2NiYmJk
-ZGNkZGNiZWBhYGJjZGVjYmJjZmZjZmZpY2BiY2RmZWJgYWRjX2FhYWRgYGBhZGRk
-YWFkY2FhY2RkZmNgYWNlZGJkY2FjY2NjZmJkZGRoZGNhYWFiYWRjYF5gY2RjY2Nk
-YmJhYWJiYmJjZWNjYmJiYmFiY2RkZGJiZWRjZGZoZmVoaGpnZ2VmZ2pnaGZnaGZn
-amtraWdlZ2ltbGtra2xtbG1tbWxqaWhoa21ta2lsbWlsamlrdrDP3OTn6ezv7/Dx
-8nx9fX5+fH12eHl5e3x9fHt/gHp8fnt8fHp9gIF+fX18e4CCf3x5dnZ4enp5eXl5
-enp7gIB/gYB9ent7enp7e315e31+foB9foR+fn1+gICAf359f4KAfX+Cfn1+gICD
-gHx+gIF+fX5/f4CBgoSEf4KAf35+fH18eHl6fnt5enV5eHh1cnJub25vcW9sbm1s
-bm5vb29ydXh2cnRwcHFvcHRwcHV3eXV0dXV5dnd1c3N1dnd2eX18ent7eXd2eoF7
-e3l5d3p1dXh8e3l0dXd4enZ0dXR2dXd3eHd2dnZ2dXN0cXBycXRycnFzcnV0cnN1
-dXVzdXV1dXNxcHN1d3Z0dXRzc3VxcHFzc3NydHR1cHJzdXR0cnp2dHR2cnN1dXR2
-d3VzdHV1c3NxdHJxcW9wcnN1dnJvb21vcG1vcnFvbW1sbm9uampqbWxscG5rbG1q
-a2pubmxqampubGlrbHBvbWxta21paWpsamhpa21rampoampubmtram1sb3Jwampt
-bWpub2pna2pnZWZlZmNjYmBeXVtcXF1aWFVZWVpZVlZWV1dWUldXWFdYVlZYWFZZ
-WFlYWFhbXF1cW1lbXFxfYWJdX1xfYWFiYmFlZ2hnZmhoZmdlZWdlZWVoZ2ZoZWdo
-aWxoZmppamlmZ2ZmaGpsa2VnaWZoaGtraGxqaWVrZ2ZoaG1ua2xqa2trbHBubmxp
-ampnZ2hpaGppaGptbGpoaGhobGpnZmdmZ2RkZGhtaWliYmZpZmVnZGJlZWRlZ2hn
-aWdnaWpoZmVlZ2praWpqaWZqZmZlZGRoaGlsa2dpZ2lta2tpZ2RlZmhoZ2dqZGZm
-Y2VkY2NkZ2ZoZ2hmZWlra2xraGZoZ2RkZGRlZmhsamhoYmNnZ2dkZ2hnZmdlZWZm
-amtnZGJiZGRkYGRmZGNiXmFlY2ZlZWJjZmRjY2FdYmJhYmFiZWNhYmRkZGFfX11e
-Xl5dXlxcX15fXWJiYWJbXGBhYF1fYWJhYmhkYF5gZGRjY2ViYWBeX2NjZmBgYF5f
-YF5cXFxdX2FeXF5gX11dXmBgX15dX15bXVxeXVxaW1tdX2FfXWFgX2BjX2BfX2Jh
-YGFhYF9cXFxhXV1eXWBgX2JhXl9iYWRkZ2RjYVxbVltdXl9fX15dXmBdXV5eYF1g
-Y2JjYmNgZmRhZWdhY2FjZWJgYWNkYmFiZWRfX1tdYF9hYWJfXl9fYWJkY2BfX2Fi
-YWJjY2NhYmRiYmBgYWBiYmVmZGNjYmFhYmFhYWZiYWFkY2BhYF9cXmBjYWJjY19f
-X2JgX19eX19eXmVoY2ZkYF1gY2NjYmFhZWRlY2ZlZGFhY2RiYWFkY2RlY2RjZmdl
-YWFlZGZmZmVkZWRlY2JgYWNiYGJjY2JjYGJjYWFiZGJiYWBeXF9hYGFhYmFhYWFi
-ZGFgY2VjY2JiZGJgZWZlZWRiY2NiZmRiYV9iY2NjZGRlZWJjZWViYWBjZmdjYWJi
-Y2FkZmZjYmZlaGdnZ2ZnaWZlaWtoamtqamtqa2lpbm5va2lsbW9rampqb25samxq
-bGtvbmpqZmVqa2h0rc7a4+bq6+3u7u/vfX18fXx8e3x5enp8eXp5fHx8e399e3p5
-eoB+f4GAgH17fX5/fnx6dnh6ent6eXh5d3l7fIJ/foB/fH1/e3l9f397fHt+f4B+
-gYKBgIN+f4GAgH9+gYOBfoGBgIGBgX9+gIB/f359eHx/gX+Agn+Bgn99fH5/fnt6
-e318eHd5d3d3dXRycG9wcG9vb3BrbW9xbm1sbnFucXFxdHBwbnR3c3FzdXh4eHlz
-dXR0d3l4dnR2dHh3dnh1dnd7eHp3eHl4fHd3eHd0c3Z4dHV0dnR3d3Z0eHd3ent8
-enl4dnR3d3Z1c3F0dnV3d3R2dnV0dXNzc3Byd3h1dHR4eHp2dHV3d3V0cnFxcW9v
-cXRzcXFzcnN0c3Jzc3Z0cnN5d3Z2dHh1cnZ0c3V2dnRxcXFzcXJ0c3Nxc3BxcXJx
-cHFzc3Zxbmxtb21sbGpsb29xcmxsb3JwcXBta2xtbm9tbG5wbWxrbGtqaW1qaWhr
-aWpsa2tra2tqaGdqbGxqb3FtbmxtbGtubW5sbmhpZ2ZkZ2dlY2NfXmBhYV9eXFxZ
-WlhXWFdXVVdYV1ZXVFlaWlZXWFlWVVdYWVhaWllcXV5bWlpaXWFhY2FgYmFjZWZk
-X2ZmZWZlZ2hnZWhnZWZmZ2lsbGtpaGpnam1xamtqaWxkZmdmZWVoaGdqaGRpaGZo
-bGtoamtnaGpmZ2dpaGxrbmtqa21qamtraGdqbmhmamdnaWloaGdoZmxqaGloZWVk
-YmFkZWlpZmhmZmdlZWdlZWNnaWVpZ2NjZmdpZ2VjZWNiZmZoaGhnaGdmY2dqbWlr
-amtraGdnaWpqamlnZWRpbGZnZWdoZmdoamplZWRnaGhmamxnY2VnaWhpbWhlZWNj
-ZWZmZ2ZnZ2dmamtraWRoZmRlaWhna2ppZmhoaGhiYGJmZGRkY2pkYWNjYWBfXWJh
-ZmZmY2BiYl5gYmJjZmRjYmNlZGNgX2FgXmJkYGFfXWBhX2BgZGVkY2NfYFxeWl1e
-Xl9iY2NjZGNiXmFfYWVjX2BcXF5dXlxhY19gX15bX2BkYl9fXV5hYGBfX15dX2Bd
-XVxeXFxdXmBdXF1cX2FiYmJeXV1fYGFhXl9hYF5eYWNiYV5fYGBfYGBdYmNkYmNi
-YV9jX2JmYl1dYWJiX19eXmNgXV5iYV5fXl5fX19gYmFhY2ZlYWFkZmhjYmJiYGBh
-YWFfXVtdXl5iYF9gY2FgYWFiX2FgX2BfYWJgY2VjZmVlYWFhYmNjYmBeXmNjYl9h
-YmFiYWJhYV9fX2BgXmFfX2FjYmBgYF9gX2BhY2FgXV1cXmBmY2JhYWBfYmRiYWBi
-Y2FiZGNlYmFeYmVmZ2JjYGFjZ2RlYmJgYGNmZGRkZWVmZGNiYmBgYGBhYF9hYGFi
-YmRjY2JgXV9iY2JgYF9hY2FhYmFgYWFiY2JkZGVmZmNiY2JiZWRoZ2VmZWVmZWZo
-ZmRmZGdkY2JjZGRmZ2RlZGRmamlmY2NjYmViZGRlZmpoaWhpamhsbGhmZ2lmamps
-aWdoamxqbGppaWtrbHBvbGlqamxtbG1ubWluampoZ2dmZnmyzdnh5urq7e7w8fCA
-fnt+f3x6eHl8fn18fXt8eXl7eXl3eHd7eXt9fn5+fn58fH18enl4ent7e3x5dnd6
-fn9+fX9+fn+AgYCBfn19goF8eXh6gIKChIKAfnx+gYF+gIN/fX+CgH+BgIB/gH9/
-f39+f31+fYF/f3+AfH1+gHx6f3x9f357fHx4enh3d3V0c3N0c3NydG5vbm1zc29t
-bm5wbnJwc3JwcW9tcHRzdHZ1c3N3eHp2d3Z2eHh3end0dHV3dHd6eXd4eHh4enp6
-eHh4eHh4eXh6d3RwdXh3dnZ4d3Z4eHl6eXd3dHd4d3VzdnZ0dHN3dnZ2d3V0dXR0
-c3Vzc3Nzd3l2d3R2d3RwcHJzcnJ2dHBvcHNzcnNxcnR1dXZ0dXJ1dnZ2dXd2dXd4
-dXR0cnR3dHF0dXBzdXR1dXZ2dXJxcHFwcHNxcXBvb29tbm1sbW9wcW9xb2pobG5t
-bWxtbG5sbnJwbm5ra2hraW1ubGpraWlqaWlpbGxrbGppZ2hra2ttbmprbG5tcHFw
-b21rbG1oamdoZmZnZ2deYWNgXllXWFlZWVRWVVZZV1ZUVVRUVFdXVldXV1dWVlZX
-WVtcWlxcXFxeYl5eX2NkYWBfY2NjYmVoZGNmZ2dlZWRkaGVmaGdoaWtnaGtpa2pp
-amxpZWdnaGZlZ2trZmZlZ2hnYmJlamdkZWdqaWZmZmZpamdnaGlqbGlramdmaGtr
-aGlqa2dnaWZlZmZlamdnamlqamZkZGNlY2NhY2VoZWRjY2VkZGJlY2NlZ2hnaGhn
-Z2RiZGZlY2RiZWVlZ2dmZWlsaGNpaGdlZmhpZ2lnamlnZmRkY2hnZGJiZGNoaWho
-aG1naGNjZmltbGhmZmRjZ2ZlZGhpaGZnaGlqbGpnaGpnaWtvb2dpZmVmaWhlZmJj
-ZGVoZ2VkYmNmZ2VnZmFhYGBhYWFjYWJiZWRjYmFgY2FhZGRkZWJgYWJmZWNgYmBf
-X19hYmVkZGBgXmNiYmFdXWBhZF1dXmJhYWBhYmNkYl9gXV5kZGRiXF5hYV5fX2Jj
-YmFhX2RhY2lgXWJfXlxfYV9hYF5eXFxcXV1gX15iX15bXF5fYGFiYV1bXF1dX15c
-WltdXWBeX2JhX2RhYmFhYGRiY2RiYWBiYmFkZ2ZiXVteYGBhX1tdXV9eXWBkYGFd
-Xl1gZWRgX2BjZWNkZWNhY2ZiYV1eXWBiYF9jYl1bXmBjYV9gYmFfXl5gXmBfXmFi
-YGBjZWJlZmRiXWBhY2FhYGJiYWBgXmBhX2BfX2FeX15gYV9gYmJhYGFiYF9eYmBe
-X19gYmBgYWBhYGJhZGNjYF9gY2NlZGJkYmJjYmJjYWJiZGNiYmFiYmJlYWFjYWBj
-YmNhYWFiZWRhXmBhYmFgYmJgX19dYWFiYWRnZGFjYl9gYmNjZGNiYmNjYmJlYWJi
-Y2JjZWFlZWVlYmNkY2VjZGNlZmZjZGRjZGllZmNkY2dpZmZlZmVlZWNmaGRjZWRi
-YmZlZmZnaGlrbGtqaWdoaGZpaWVlaGtsamtqa2lqbGxqbG5vbm1tbm1tamxvbmto
-aGZpZmVnZmZpfrLN1+Dl6Ors7u/v8H17eXt+fXx6eXp7enx8e3t8fnx+e3h2d3l4
-eXl7enp+f318fn97eH19ent3eXd8ent6foCBgH98fH9/gH+AfoGAgH18e3p/goCA
-f397e35/f39+foF/fn5+goCCgICBfn19fX9/gICAfHx/fnx8fH57f39+fHx9fn58
-f3x5eXl1d3Z3dnNzc3Jyc25wcXBvcHJ0b29tcG1wbXF0cXRzc3R1dHRwb3R0dXd1
-dnV2eHp4d3Z3dXR2d3l4dnh4fHp4eXl3eXl4enh3eHl7d3Z4eHt5enZ1eHl5eHh4
-d3Z6dnd3eHZ3cnN0dnR3dXV0dHJwc3V0cXFzc3V1dHV0dHR1cnV1cnRzc3Fzdnd2
-dHVyb3Nzd3Z2d3V0dHF1dXZ1dHR1eHZ0dHJ0cnV0d3RycnJxcXN0cXN1dXJxb3Bu
-cG9wdXRva2poa2xrbXBxbG1ubmppa2xva2pnaWtrbW5oa2trc3Nqa21pZ2loaGdo
-aWtoa21ta2psa2hpa2hlZmZqa21vbW1vcXBsbWlramtpZmNlZV9eYWRfXFpXWllY
-VlNTU1dYVlVTVVVUVldXWFRWVlVUVVhZWFdaX1xcXF1dXF1cYF9hYmBiYmNjZGZj
-Y2dpaWhmZmZpZmhoaGRiZWlnaWppamdnaWhoZmVkZmZmaWdmZGZlZWVmZWViZGln
-ZWhraWloaGhraWtsa2tta2lrampraWZsamlraWdnZ2dkZ2hoaWpraWpoaWhmZ2dl
-Y2NlYWFiY2RjZGhqaGdnYWFmaGhqZWZjX2pkZ2hlY2NmZmVmZ2doaWhnZmRnZ2Zn
-Z2dnZ2hoZ2VpaWdnaWhlZWRiZGZmamdoa2hoZGNlZGdqa2xlY2NmZmZmZ2ppaWdq
-aWlraWplZGZpbGtramhqaWZoaGhlYmNkZmRmZ2VmZ2VjZmZkY2JgYGRlZGVlY2Fe
-X15fYGNhY2RiYWNlYmBgXWNiYGFjYWBhYV9hZGVhYmBgXV5iX19fXmNkYmNgYGNh
-YmBhYmJjYV1eXl9gYV9eXmBhYGFjYWFhYWJlY2RfX11gYGVjZGNhYV9hYWFeW1xc
-XV5gYV9cW1pdXF1fX15dXV9dXl1cXFxbXV9iYWJhYWJiYF1fX19hYWNlYmRjZGRi
-YWRgXF5fX11fX15dXV1cXV9fYGRkYmBhX2NiY2FgYGNhYWNjYWJhYmRkY19hYWFg
-ZGJiX19eYF9fYF5gY19gYWZiYWBeYGBhY2BiYmJiZGRhX2JiYWJiYmFgYF1dXmBe
-YGNhZWReW15fYWJkYV9fYGFhYV5dX2RjY2JjYWBiY2JiYWFjZGBfX2BgYmVmZGBj
-Y2BiYWRmY2FiYGFjZWRlY2JhX2FiYGBfYWJjYmJhYmNlYWRjYmJgYmNeXl5eXmBj
-ZGNkZ2NiY2FiYWVkYWNlZWRnZWBgYGJiZGZjZGdlYmhmYmJlZGFiY2RkZWViYmRh
-ZGRlZmVnZ2dnZmRjZWNiY2FjY2dlYWVmZGNmaWhoamhoZ2poampsbGhoZ2hqam1w
-bWpra2trbWtna21tbG5ubGxtamttbW5pZmZmZ2ttbW56s8zX4OTo6+vt7u/wfn17
-eXx8f3+BfXh5e3h6eHl5eHx5dHZ4eXd6enp6en2AgXx6fIF+enp8fHl5eXp6e3t8
-fH6BgH98fH1+fXx+fX19enx9e398fXx+gHyAfYCBgoGEg4J/f3x+f3+Bf36Bf31+
-f4F9foGCfn19gH97e3t/gn58fX59eXx6eHh6d3V2d3R0dHFvcXFxcm9tbXFybm5x
-cm9xcnFxbm51d3Z1cnR1dXV5dXd4d3R0dXZ5e3t5eHp7eHh3eHd2eXh6e3p5eHd6
-eHh3d3d3eHx5dHZ0cnV2dnd4d3h2dXh3d3VzdnN2dnZ1c3BydnV1cXJzc29yc3Z2
-dHV1dXVycXN2eHZ0dHZ0eHp1c3N1dnh2dXR4dXVzdXZ2c3N1dHNzcW5tcXBxcXRz
-c3NycXR2cnBycXBxcHBvcnJycXFta25tb29sa21taWtrbWpoamprbWlqbmtrbGlp
-amxsbWtra2pmamtsa2ZnZmZoa25sZ2dpa2trbm9tbWpnaG1ramhrcG1ra2hub2tp
-bWxqamxqZmZmZ2JjZ2JhYFxcWFpXV1hYU1dWV1dWVVVUVlZSWFhXVldUVVdVWlpZ
-VFRYWltZW11eXl5eXl9hYF9fZGJjZGRnZGhoZGZoZ2ZoZ2dpZ2dnamlnZWVna2tn
-aGZkYl9hZmdmZ2ZoZWZkZWVnaGlkZmppZ2VlaGloaGhqamxpZ2tra2pram1tdWtq
-Z2hraGpoZWZmZmVmZmhoZWNkY2VnaGZmZmVjZWVlZWRmZmZlZmVjYmRmaGZlZGVp
-Z2hoZ2dlZWdkZWVnaGhlZGNmZ2hmZmhpZmVlZGZlZGlqaGZlZGVnY2NjZmhoaWho
-aGlqZ2VlZ2VlZ2VlZWdlamxpZWloa2lpaWhqaGdnZWhuamlnaGlqamhlZ2VnZmVl
-ZGZnZmVlZmNkY2VnY2NjYmVmZWVhY2JfX15hYmBiYmNjYWJiYWBfXmFjYWFkY2Bf
-X11eYGBhYl5dY2BlYF5iX19gYGJkZGBgYWFgX2BiX2FhX19fX2BfZGNiYmFiYWBi
-YmViYWJiYl9gYGBiX19eXl1eXVxeXF1fXl9dW19gYGBeXl1fXl9cW11dXVxbXF9g
-YWFiYGBeYGFfX19hYGFhYWJkY2NgYWBWWV1gYmNfXFtcW2BgX15fX2FfX2FjYVxc
-XV5eXWFhYmBiYWFhXl9jX2FjYl9eXV1hYGFkYmBiZGViX2FhYF9fYGBiYl5dXl9g
-YWNiX2BhYmFhYGFgYGBgZGVlYGBgYmJjZGBgY2FhYF1hZGNjYmBgYWBhYWJiY2Nj
-YmNiYl9gXmBmZV9hY2JjYWBhYWNiY2FiYGFmZGRjZWRiYWJhYmJfYWBgX2NhYGJi
-Y2ZlYmNjY2RkYmJkYmVhYV5cXmFfXmFgYWBjZWNiZGFgY2RkYmNlY2RoY2JhYWNj
-ZGJfZGFfa2tjZWRkZWViYF5eYGNiYmZjZWdmZWZlZGJkZWVoaGViY2FkZ2hsZ2Zn
-ZmdlZ2hqamhnaGhnaWpoZmtqaGhrbG1ta21uaWhqa2lpa25ubW5uamtraGdsa2hn
-aGhoaGdma3u3zdjg5Ojq6+3t7+6Ae3h5fH5/fX19fnx7e399f3t/gHt+e3l2d3V5
-eXh7e3t7fICAg318fHl5fXt7eXx7fHx/fn6Afn19fH1/fX18fH2AfH58enp+fXx8
-foB/fn1+gYGBgYCBgn+AgH9+fn5/gX1/fHx/fX+BfXt5end4fIB/fHt8d3p7eXp6
-eXl2d3h3c3Ryc3Rwb3Btam5tbW9vcnFxcHBwbnNycXFycnZ7d3Z2dXl4d3h3d3Z0
-dnd5eHl3eXd4eHl4dnh5e3x6dnd4eXp4enl3eHh4eXt6enx3dHN1dXd3d3Z3d3Z3
-dnN2eHh4dXRzdnV2dXRyc3BwcXFxd3h3dHJycXF0dHd2dHV0c3d4end1eHNzdXNy
-dnZ4dnZ0dXJwcnZ3eHBxcW5yc3J0cnN2dHR0dXN0b21ubnJvcHNwcnVycXFybm1t
-bG9sbnFtbGprbGxpampoaWhlaGpxbmpqa2hra2xrZ2ZmaGtpamhnaGprbGtsbGtu
-bm9vbm5qbW1vbG1tcHFvcW9sbGpraWpoaWxra2VlZ2hmYWJhXl9dYl9aV1hZVlVX
-WFhUUlNUUlhWU1JWVlhXVldXV1haWFdaWVZWWVpbXF5gX2FhYFxhX2JiZGVlaGdj
-Y2JjZmRkZGdnZ2hqa2ZlZmhpaWlqaGtqaWhlZWZqaGpsamloZmhnaWloZ2dmZ2xm
-aGpnaGlua2tsbGppamppbGtraWxsampoaWlqa2hkZGdoZ2ZnaGlmaGZoZGNlY2Rh
-ZGdnaWZkZ2dkY2VlZ2loZGZnZ2JkZmdoaWdnamhoZ2dnaGZpZmZnZ2hnZWZpZWdo
-ZGZnZWRlZWZoaWhlY2RnaGZpamhlZWhpaGpoamppaWhlaWxrZmVlampuaGtoY2Rn
-amppa2pqa2pubGppamppZWVkZGVlZWRnaWZoZ2doaGZlZWRmZGNiYmNkYl9fX2Bj
-ZmRgYWJiYmJfXWBkYmNiYWNgYGBhX19fX19eXmFhZGdiYmRgYV1cXl1bX2NgYGBh
-YWFgYGJnYWRgX11fYmNiYmJeXV5iYWJiXV1iZGFfYGFgYmNgYGFdXFtcXFtbXl5c
-XF5fYGRjYl5dX15fYl5dYWJgYGBiZGJiXmFhXV1fYWRkYF1dYmNhY2JiYGJhYF9g
-YGVfXl9hW2NjXl5fYmJiY2VgYF5cX19eXWJhYGFiYWFhYmFhYV9iX19cXV1dX19h
-YGFfYl9gYWFfYF5bYmJfY2VhX2FgXmFhYF9hYmRlYmNhY2NkZ2RlYWBgX19iX19g
-YGBgY2VhYF9hZWJlZGNiYmNiYWRiZGJkY2FjYmBgYmFgY2BjZGRmZWJiYWJjYmBg
-YGNkY2VjYWFkZGZlZWRlZmdhYV5fYmJgXmNkZWVjZGZiYmhjY2VhX11gX2JiYV9f
-YWJgYGBiZWJhY2FhYGNiZGRgYWFiYmJkaGdjYl9jZmJjYWJjZmFgYF9iZmJlZGdn
-ZWVlY2JhYWNlZmRmY2NhY2BkZ2ppZWVkZGVpamtpZGdnZ2hoZ2pra2prZWZpbW1s
-bWtoZ2lrbWtsa2pqbG5taWprbWpqamprZmZlZGJsgLjN1+Dk6Ors7e7v8HZ5eHl9
-fn6CfHd3d3l7fHx9fXp7en18eX18fHd3enp5e319foF+gH98enp3enx8fHx8fn99
-fYCBgIKAfn1+fn18fXx6eXt/fn18e32AgIB9e3t9f4B+f4CAgoF/f3x9fnqAg4B/
-f3x9f357e3p5eXd7eXt8e3t9e35+enp3c3J0dXN2eHdycG9ubm5ucnFvbnFycG5v
-bnFwcXVzcHJ0c3h5dnV1dnh2dnV7eHh4d3l2d3h4d3l3eHZ3eXl3d3p4eXl4d3d3
-dnR0dXZ3eHd1c3RzdnZ4eHh3dnV3dXl2dXR1eHl2dHV0dnV1c3V1cnNvb3JzdHh4
-dHZzdXV1c3R2d3R0dHR3e3d3dnN0dnd1dnV0dHR3dnJycnN2c3NvcXF1cHF0dHJy
-cnV0dHBwdnRydHJ0cnRycG9vcnBvb3Fxb3FxbmxqamtvbWxraWhnZWxsaGdsampr
-bWdpamtqa2xqamlqaG1ubGlubWxxcXBsa25sbG1tcXBzb3FxcnRvbm1ubWtraWpr
-Z2hmaGVnZGFmYGFhX15cXFhcW1hWV1ZVU1RVU1JYVVVaVFNWWFdaW1xcXFhXWVhb
-WFlYWVpbXV1eXl9iY2NmYmBiYmJmZmJiX2JhY2BkZWZkZGVlZGJjZmdqa2dmZmVm
-Z2dkZWdsa2hnaWlrbGlqaWhpaGZnZmdnZ2dlaGpsbGpta2poZmprbm9sa2lraWpp
-amloamhpaGhnZmdkZWZpZ15fYmdnZGRjZWhnZmJgZmlkY2RnZ2hoZWRqZWNjZGVl
-ZGZlZmhkZGZlZmRkZGZlZ2lmZWVlY2Rma2lkZWpsZ2FmZGRjZ2lraGppZmRjZWNk
-Z25sa2toZ2Zpa2ppa2pramhoZ2dpZGRnaGdpbGpqa2xqamhpamhnaGRjZmhkY2dm
-aGhnZ2diY2ZjYmNiYmRlYWJgYGBfYWNhX2FhYGNiYWNiYWFiZWNeX2BdXlxeYWJj
-YF9hYV9hYmFiYGRjYmFfX19gXWBiYmBiYGJhYGJiYF9eYF5eYWJgX19gYWJiYV9e
-XmFfYWJeXmFhYGBgYF5dXVpbXF5eXVpcXV5dX2BgX15bXV1fXF1hYWJiZWNjYmBf
-YmBhYGBgYWJfYF9kYWBdYWVjYF9eYWFiY2hhY2FeX2JiYGFlY2FjYWFeXmViYmFg
-YWBjYWNjZmNgYWBfYGFiYWJhY2JfYGVlX2BhY2JgX2BfX2FdXmBfX19gX2BiYWBg
-YmFfXmNjY2JfYF5kY2FiYl5fY2NjYl5cYWNlY2VlZGBgYGBhY2JfYGNlYWRkY2Fe
-W2NlZGNiYGBhZWZmZGNhYmFgYmJhZGRkYmFgYWFfYF9iY2JmaGZlZGRkZGNiY2Jf
-Xl9iY2ZjY2JhYmNkYGFiYF9hYmNhYl9gYFxeYV9gY2JjYmJiYmVlYWJkZGRjZWdk
-Z2VmZ2ZmaGVlZWJjZGZkYGJiYGJiZWRjZGNiZWVlZ2VlYWNjYWNiZmtpZmhlZGNk
-aGhnamhmZ2ZnaWlqbGtpam9xamtqaGpra2lqa25vb2xqaGtramtramlpZ2hqaGZl
-ZWVjZmqBuM/Y4OTn6uvt7e7ue3t5eHx7e3t8dXd4dnd6fnx9fXx6eHx9enp7end2
-dXh6eXt9fn1+fH18ent7fX19fnt8e32AfoSAfoB/fn1/enmAfXx8fH59fXx8fX9/
-gHt+fn18f35/gH9+fXt+fH1+fH19fHt8fHt8fnx7e35/fXp6ent7enl5fH56d3Z4
-dHV4dnR2c3Vzb29ubnFxcHBwbnBwbnBycXJzc3J3dXJydXV2c3V3d3Vzdnd3eXh4
-d3V4enx7eXh5dnd3eHl4dXh2eHp6eHV2dHRzdnV2dXNzdHZ5dXV1dXR4dXR1dnV1
-c3Jzc3V1dnV2d3VzdnNzdXh2dnNxc3R5dXVycnRzc3R3c3N1dHV3eHh3dnl4eHV0
-cnR1c3Z1dHNzdXRycHF0cnRzdHBxcXF1dHNwb3N0d3h0dHRzcnJubHBycnNyc3Js
-bW9tbGtqampqZ2dqZ2ppbmtnaWhnaGdnbGtoaWtsa2psaWlqa2lqbGlvbmtvbXBt
-b290cGttcHJwbW1vcG1tb3BwcW1qa2loZWRnZWRnZGNhX1xeXFpaXVZYV1hVVFVR
-U1BQVFhZU1VWU1VVV1haXFlXWFlZV1ZXV1lcW1tcXV5cX2RlZGFfYWNkY2BhZWJh
-YmNkaWZnZmRpZ2lmZmZmaGlmZ2ZnZ2doamtnaGdoZWhmZmdoZmdqZWVlaGlkZGhl
-ZmhnaGVnaGtrbGlpaGpsaWpoaWhoa2pqaWpqaGVkZGZlZWZkZmlkZ2RkZmlrampn
-ZGZhXmNjZWNkZGRjZGVlZWRjZmlnZ2RnaGhmZWZlZWRkZWZmZmVnZ2ZoaGpkZGZo
-Z2doZ2RmZ2RlZ2ZqaGdoZ2hoaGhmaGdpaWhnZ2hoZ2ZoZmZoaGhoZ2ZnaWloZWZm
-aGhpaGdoZmZmaGdmaGZkaGRjZ2djYmhlYWJfX2FjZGNjY2RnZWRkZGFhY2NiZGVh
-X19fXmFhYmJiXV1eYWJfX15dXWBcXV9fYF9fYF5gY2JhXmFhX15gYl5eYGJlZGNi
-YF9gXV9cX15hYmRjYWBjX2BgYGNkYl5fYmRkYmBcXF1hYWJhYF5dXV5eX11cYFta
-XF1aXV1gYF5eW11eXVxdYGFhXl9iYmBeXV5fX2BhYGFfYWBdXWBeYmRgXmBhYWFh
-YmFdX2NgX19eYmFkZWVjYVxeXV1iYGBeYWFgY2JiYWRjYWFiYmFgYmRfX2BhYWFg
-Yl9hYl9gYF5cYGBfXl1bXmBhY2JhYGFiYGBgXV9jZWBdWlpbXF5hZWVhY2NgYmFi
-ZGdkYmVlYmNhX19jY19fYF9iZGFhX2FgYGNjYmNkYWNiYmNkYmNhYmFhZWRnZGFl
-Z2ZiYmJiY2JhYWNlY2JlYmJmZ2ViZWRiYmFhYWRgYWNkYmFgYmJiY2BkY2VkYmBc
-Xl1fX2FiY2FgYF5dX2FiZGJiYmJiZGRlZmJlZmRiY2VlYmZoZWFhX19iZGNjY2xn
-ZGloZ2VkZWJkYmRmZ2ppaGhpZWZiZGdmZ2dnaWxpZ2hqa2tmaGZnamtpaG1qam5s
-a3Fvbmpsa25wbGpraGlraGlpbm1taGdjYmBlaH+2ztjf4+fp6+zt7/F9fH97enh6
-enh4dnR4ent6eXx8fIB9fHx7fn59fHt6d3Z4en5/fHp6d3p8fX98e3x8e3t9fn+B
-f36Afn17fHt7fH5+e32Bf3x4e4CBfn9+f4B/f3x9f4F/gIB/gH1+fH59fn99fn17
-fXp7fn5+fHp5eXp6e3p9fXp5enp6enh7eHd1bnBzcnBub3F1dnRxcG1tcnR0cXNw
-cHNxdXZ5d3R0c3d1dXV3eHh4dnd3eHl4eHd3d3h6eHh2eHh3eXd1d3l6enh7enZ2
-d3V0dnZ0c3R2end4dXR1eHV1dHJ0dXV0dHV4dnd4eXdxcHN0eHV0eHh1dXJzdHZ1
-dXd2cHRydXV1c3JycnR0eHp5dnV2dXJzcnNwcHR0d3R1cnZycXBzdXR1cHFzcXJy
-cXBvcHBzdXh3dHJ0c3FycXFwcXFycnBubGpqbG5sbGppaG1samllZGhqampqbGdn
-amdna2ttZGZqbG1pZ2Zra21ramxsam1ta2tubG1ub3Bsamxta2ttbmttbG5wa2to
-aGhmYmdmZmVjX1xeXldaX1taWFhWVFJSUlJTVFRXVVZWVFRUVlhWVlRWV1hWV1dc
-XVtaW1pdXF1dX2FfYmFgX2NiZWFjYGRoZ2hqbWtpZ2RlY2RnZWVnZWdqaGZnZ2dm
-aWlmZ2lqaGlkZGdnaGdmZ2doZWhraGtqamdnZmdqaWlsbWxsZ2poaWtsa2xtamtp
-Z2lmZWZmZGRmZmVkaWplZ2dkZmlmaGdkZGVnYmdmZGRjYWVoZmFhZGVkZ2hnZmZk
-aGpmZ2hoZmhnZ2ZqaWdmZGRkZWZnZ2loZ2hkY2dmZGRna2dqaGdlZWdqbGtrZ2ho
-aWhpZ2draWdoaWtqZ2hpbmtpZmZoaWZoaWZnZmVmaWpnZGVnaGhkY2RkZGNiZGRh
-YmdpYWRkZmZmZWdlZmZiZWNiZGNhYl5fYWRhYF9fYGJfXV1gX15gYmBeXFxfXmFe
-YGBgYl5dXmRkYV9eXl5dXWFhYF5iY2JgYmBfXV9eYGBiYmBjY2BhYGBjY2JiYWFe
-YWBiYl9gXGBeX2BfYGBhYV9fX2JfX19eX1tcYF1fXltcX11bXltbX2BfYF9fYWBg
-X19eYWBcXmNgYGBkZWBhYGJiYGFjY2JiY2JfX19gZGBgYF9dXl9dXl1cX2BiX2Bd
-Xl1fYGBgX2RmZGNfYWBdW19gX15hYV9gX15gYGBhYV9fXl9cX2JgXV5hYGBgX19f
-YWJjYF9iY19eXVpcW19kYmJgYGJiYGFiY2NjYmFjYGBfX2BkYl9cXV5gYWFgYGVh
-YWNiZGNiYmBfYGFgYmJjYmNjZWNiZGVlZmViYGNhX2BhYmRjZ2RhY2JjZWdjZGJi
-YWJeX15hYWFiY2NjYGRhYWBhYmRiYWJkYmFhYWNkY2JgXl1bXmFgYWJgY2RjZGNl
-ZGRhYWJiZmhmYmNjZ2VmaGdmY2BkZGZlZGNjaGFkaWdoZ2dramZjZ2llZWZiZmVl
-ZWZoaGpnampoZ2dmZ2hoaGtqa21sam5vbGtta2ppaW5saGZmZmdoaWdoZ2psbGpp
-aGRngrbM197i5unr7Ozu7n5+e3x+eXl7eHp5e3t5ent8enuAfXt8fYB/f4CAe3p3
-fHx6eXt6eHt8eXx+fn57e358e318gYSBgIB/gYKAf3x9gYOIhoJ+gIN+gn18fn9/
-gIKBgn6AgX9+f4CAgoF+fn59fn99f357eHp8enx8enx9e3x7d3Z5e3d3d3h4dnd1
-dnZ5c3Nxcm5yc3BvcG5tb3FucXNxcHF0dXR3c3V7dXV3gHl4dHNzdnl8eHh6enV5
-eXh0eHt4eHl6end2dnd3eHh2eHd6eHd4c3Z3d3Z2d3p2d3Vydnh3d3J1dnN0dXV2
-dnZ5end3dnN0dHd3d3l6eHd1dXV3d3l1cnN3dnZ2dXZ4dnV1d3N0dXZ3dXN2dnd1
-dXVzdXR2dnJycnNydHNwdHRxcXFyc3BvdXNwc3NzcnFycnV0cnJzcnJybm1ubWxr
-aG1ucG5sa21ua21rbWxrbGdoaWtqaGZoaGpqbGlqamlqamxubmxmaWtsaGprbGtt
-bGpta21oaWxsbG9sa3BubWprbG5tbGtpaWlpZWVkY2RgXF5bXF9bWlhUV1pXWFVW
-WFZUVVVWWFZXVlRVV1lWWFhYWlpYV1lbXVpcW11fX15bX2RfXmJkY2RkYV9mZmln
-aWtraWhnaWVnZ2JmZmdoZ2dpZ2dnaWZoZ2ZmZmdnaGltaGhoZ2VlY2VramZoamxq
-aWVkZmhpaWpmaW5vbGloZmxtbWtqa2dmZ2hlZ2hmZmNiY2dkZWhlZ2RlZGVnZmZh
-ZGZnZWRkYmJjYmNkZmNiYmVkZmZnZmRnamhmZ2lnZWNgX2RoZmloZ2dnZmZoaWhn
-ZWdmZmZoaWpnaGhoZWVlZWdnZ2doZmVmZWRlZ2doaWVoaGhnaWtqaGdoZ2dmaGdp
-aWVoaWdlaWhrZ2ZnZWlkaGpkYmZoaGdlZWVkZGJiZGVkZWRjYWRiYmZiY2BiZWZi
-Xl9fXV1cXV9fY2BeX19eYF1fYmBgYF5eX11hYF1gYWBgXWNjXlxcX2BfYWBgYWFh
-YWJlYF9eXl9eYmBjY2NlZWBhYWJjZ2piZWNlZmRlYmFfYGFhXl5hYWFeXmJfYF5b
-W1tcXV5eYF5eXl5eXF5eXmJhYGFhYmNfX2FfYGNjXGBgX2BjYmFcXl9gYWVkZ2Zl
-ZWNgYF1eX1xdYF5dW11fXV5eX15gY19eX19hX2JgY2ZkYGJiYWJjYGBhY19fYF1e
-Xl5hYF9dXV1eX19gX2NmZGJhYmFeYGFiY2RkYF5hZGNhYlxdYV5hYWBgZGRjYGBf
-X2FkYF1eXmNjYWRnYmJhYV5fYGBgY2NhY2JgYWNiYV5fYGBhYWBhX2FiYWFfYWRk
-ZGRmY2BfXV1gX2FgYGJjZWVjYmZkY2BjYF9fYWBeYmRkZGRjYmBiYmFgYmNiZGNg
-YGBiXmJkY2RhYGJiYGNhX2BhYF9fYmVjYmBkYmJhZGZlYWRkaGZnZmViYmNjZWdi
-X2VnaGZmaWloZmhoZWZkZGVmZmRiZGZnZGZnZmdmZ2dmZ2dqaWdoZ2hoamtpbGxt
-amxubGpraGlsamtnZ2xta2tqaWpsam5vaGV+s8vW3+Pn6ers7e3ven2Bfnl5eXp4
-fHx+fHt6e3l4enh5fX2AfoF+fH19fX1/gH18fH57f4B9eXt6e317fXp9fH17fIF/
-fX1/f4GBfn6ChoSDgoGCgH98gYB9gYCCgYCBg4KAf35+fYGCgIB8fn59enp+fXp6
-eHx9fXt8fH6Afnt3d3l6eHZ3dnd2cnRydXp3cXFxbnFvcHBvb3JtcHFxcXNxb25x
-c3Z2dHR0dXl5eXh3dHJ0dXd2dnZ2eHV0dnV0dnZ2eHh5end2d3h2dHh4eXp5eHR3
-dHR4eXZ4dnl1c3N0eHV0d3Z4dnZ2dnV2eHl4eXd1cnR3d3h3dnV3d3Z2dHZ2dHZ0
-dHV1dXZzdXN0d3ZycXR1dnd2c3J1dXV2dnd3dXZzcnNycXFycnBzdnRwcnV0dXV0
-dHZ1dXNzdHVzcXFwcW9vcXJubG5ua2prbW5vbW9taWpsamlsbWtsamhmZ2NnZmho
-amtqamxuaWxrampra2hrcGlmZ2hpZ2xqbG5tbm5sbG9ybm5sbm1tbGxtbm5ubWtq
-Z2VmZmVjYGJiYF9hW1tZWVldWllXWVVYWFdWVFZWWlpWVlZYWVdZV1hYWVZWWlZb
-XF1eYV1hXmBfYGBgYWZnZWZkYmloaWlnamdmaWlpaGVlZGRkZmhpbGlpZ2pqbmho
-aGZoaWZpamlpaWhkZmZlY2dlaWhqaWZlZ25pZ2hpaGdlamtrampqamppZ2doamhn
-ZmRjZmVkZ2hlYmBiaGlnZ2ZlZWdmZWNnamdnZWNnY2RkZWNgZWhoZWZpaGdnZWRj
-ZGdpaGZoaGZnbGhqaGhjZmdoZWdnZ2ZnaWRkaGdoZ2ZmaWdpamZlYmRlaGhoZmdl
-ZWVpZ2doaGVpaWZpaWxpaGpmaGpoZmpqZ2VkZWVnZ2hoZWdqaGZmZWlpZWVjYmNh
-YmJhYGFmZmNhYGNhY2RhYWJjZWdkYWRmY19fW15gYGFjYl9cX19jZWZgX2NiYWBh
-Yl5cXmFfX2BnYmNiYF5cXl9hXl1gZGRjYmBgYWJiXV1eYmFjZGRjYWJmY2BkZWVk
-ZGNiZWFgYF9gYF5aXF9gXl5dX15cXFpdXF1eXl5fYFxfX2JjX15cXl9dYGBgX15h
-YF9gYmNjYGBiYWFhY2JgX19hYmVlYWFiZl9fXl5fYmNjYl5dXlpdYmFiYGBkYmJh
-YWVhYGFfYWFfXmNiYWJlYWNeXWBgW1tdX19hYWJiXmBiYWBfXmBkX2BgYmJiY2Fk
-ZGJfYGFiZWNiYV9hXWBiZGNiY2FeX2BhYWFjY2ZiX2BhYWJkYmFjYV5fX2NgYF5f
-YGFiYGFhY2JgYGBgYF9iYmNkY2RiYWJfYWNjZGJfYWFiZWRgY2ViYGBlY2NjY2Fh
-YmNkYF5iYGNkY2RlZWBfYWNiZGVlamRkY15eXVxeY2VlZWBgYmZjYGBkYGFhY2Ji
-YmJjYmJhYWNmaGRlZWNlZGJjZWRkZGVjY2RjZGRmaWloaWdnZGJkZmhoZmZjZWZp
-aWpqZmhlZGdraWhmZmZsbGpsamtpb21tbW1tbGllZWdnZmlqZ2traGlmZmhpaG5t
-bnq0y9jf4+fp6uzt7u57e4OCeXl6eXt8enp7e3h7eXd1eHl9f4CAgH19enl4fXx6
-eXt+gIF8fHx6enp7fX18fnx4d3l9e3t7fH1/f4CAgH6AgX+CgX+FhH99e3x9fn57
-ent+fn5/f4B+f3x7fHp6fH6Afnx8e3t9gHt7fH1/fnt6enp5enl4d3V3dnV4dXNy
-c3NzcnN1cXBvbnFycnNyc3Rwb25zdXBwcHBwcnB1dXR1eHZ2dnZ6d3R1dnZ1dnZ6
-enh4eXZ4d3d5d3h4e3h5d3p3dXV1dXV1cnV4eXZ2dXd7eHd2dXd4eHV3dnZ3eXh3
-dnd4eHd6eXp7dnd4enh2dXh2c3RzdHR0cHFycHJycnR1d3Vzc3Z3dnR0dHR0dXJz
-dHVzc3NzcHJxc3Jxbm5yc3B0dnl3dHJ4eXRzdHN2dXFwcHByc3NubmxtbGpsa2xr
-ampwbm5raWloa25ubGxraWpqbGlramtsbGxpa2xsa2hpZmVlZ2poamhoamppaWtw
-bm1ra21sbm1tb2xsbW5tbm5ubW5tamhqamhmZWVjYmNfWllZWVlYWFlUV1VWV1ZV
-VVNXVVRXWFZTUlJXV1RVVldXWlpXV1ldXVxeXV5fW19dW15hY2FhY2VmaGdnZ2ds
-Z2RjYmZqaWhqZ2hramhqZ2ZnaWxsaGRlaGZoa2xqaWpqaWNiZmRjZ2VmZmlpZWhr
-aWppaWhqaWhqZ2ptbGtqamlnZ2doZ2dqaGZkZGZpaWdlZGdnZWRkY2VjYmNka2ll
-YmJiYmloY2JiZGJkZmVjZGZlZGZnZ2RnZ2RkZ2dpaGloaGVnaWZnaWZpaWloamlr
-bWdkZmZoaGlnZmloaWhlY2VmY2VoZWdpZ2ZmZ2llZWdoamhnaGdoaWxqamZpaGpn
-ZmdmYmlnaGllaGhnZWVmaGlkYmFhZGRkZGNkZ2NmZGJiZGZiYmBgZGViYmNjYmhl
-ZGFgXmFmY2JhYV5gX2NjYl9hYGJjX2BhYWJfXlxeX2FfX2JjX15fYV9eX2FhYmJe
-X11dYF9fYmFgX2FhZGRlZGhlYmBfY2NgX2FgX2BhYF1aWVpaXV9eW1xdYFxZXV5i
-XltcWl1gYF9iYWJhYFtcXlxfYGNiYF9gYV9fYmZkYF9hYWFiZWZmYV5gYmNgX2Fs
-XmBcX11eYFxdXl1gYmJiX2BiX2JjZWRhYWNiY2JhYGViX2FgX2JkY2FgXl5hXV1c
-YF5fX19gX19dXl9gX11fYmRiY2FjYGBiYGFhXmFxYmFhYmFeX15iY2BgYmBgYmBg
-XmBiZGNkYF5fYmVkYmFhYWFjYmFgX15hYWNlZGFhYWJhYGBlYl5cX2FjYWNhYGFg
-Y2RjYmFgYV5gZGVnZGRlZGNhYWNlZWRkZ2NgYGFiZWFhZmZjYmVgX2JhZWZmY2Vj
-ZmJfYGBeX2JiYWBgZWViZWVlZGBhYWRkYWFjZGJiYmRjYmNlZmJkYmJhY2JiY2Rj
-YmVmaGdnaGhoZ2VkZmdlZmZnaGZlZmpqZ2ZnZ2VmY2RpaWZlZ2hqbG1ubW9ycGpq
-bWtramhmaGhma2psZmhsZ2hnaWtraWppe7jN2N/k5+nq6+zu7oB9fXuAeXh6en5/
-fHx5dXVzc3Z3eXp7fHl5ent5eHl9fXx8e319fn5/fX16e31+fXx8e3x8eXh7fHx8
-fICAfX19fH5+fH+CgYF+g4B8fHx+fnp6fX18fH58fXx8eXh5e3p6e35/eHyAf399
-fnx9fn16enh8enl5end2dHR2d3h2dHd4dXNzc3V0bWtydHJxcnJxcW9wcHFxbm9x
-c3Bxc3V1c3R1dnl4enp4dXR3eHx8eHh7eHh5eXh3dnh3d3d3dnh5eHZ3dnl3c3R1
-dXd5dHV2eX9+gXp2dXN2eHh2d3d3dXZ0dXl1dHh4eXp6eHp6eXdzdnd2dXR0dXJw
-cnV2dXNzcnV1dHNzc3V0c3d4dXRycnJzcnN0c3NvcHBycnFvcHJ1cnV1eHd2d3V4
-d3R1dXR0dHNzcnRtbWxsamtrbGtsbWdpa2xsbnBvbWxrbm5ubm1vbmdmZ2dqbWtt
-amhtbmprampqZ2ZmZGVoamhqaWpqbm9uaWtobGxsa25vb21sbW9ua2tsa2ppaGhn
-Z2NjYmNhXl1dWFteXVtZVldWWFhbWVhZWFVUUlRXU1JSU1FVWldaWFlZWVxdXVxd
-Xl1dXl5gYGJiYWNhXmBfYmRlZWpoZGBiZGdnaGpqaWlmZ2dmZWZlY2VmaGpsZ2Vn
-ZmZoaWtrbGZnbGtoamZlZmtraWtrbWtna2tqbW1uam5udWhsaWtpbHJsaWhmaGtp
-Z2VmZmdoZ2lmZGZmY2ZmYWRlZGVnZGVjY2RoZmVnZ2VlaGZnZmZkY2JnZ2Nfa2ho
-aGloZ2dkaGdoZ2hoaWtpZ2dra2pnaWdobmtmZGRnZ2NlZmdrZ2dmZ2ZqZmVlaGZn
-Z2lmZGZnZ2ZmZmZmZmdpamhmaWppaGlpamlnZ2ppaGdoa2dnZmdlZWNiY2NlY2Fk
-ZWNiY2RlZGFiY2RkZWFiX2JhX2FjYGBgX2FjZGRkYGBkZGBhX2FkZWJfYGBjY2Jh
-Y19eXFxeXmJhYWFgXl1gX2BiYGFfX15dX15fX15fY2JkY2FhYWNiZGRiX2BgX2Ji
-YF9eXV5dXlpcXmBeXGBfX15cXVxdXV9bWVxhXV5fXV5fYV9fXF5dXVxdYmRiX2Jg
-YGNiYmFgYGBfYWNgYGNhYmBeYWJeYWBfXlteYGBdYV9fYGFiXmBhXl9kYWJiZ2Vh
-X2FiYmFfYWRjY2ViZGNiYmFfX2NjYWFdXmJdXGBfXl1eY2FgYWFhX15dXV9fXlxc
-X2JgXmNhY2FiY2FhXV9hYGFkZmNeX11gX15lZWNgYWNtY2NkZGJfX2JkZWNkY2Ff
-YWFhYl9fYl9eYmJjYmRiYmFgYGJlY2JjYGFjZGVjYGFhYWNlZWRjYmNjZGNkZGNj
-ZGNhYWJjYmVmZGNhYWFkYWFgYmBhYWRjY2RhZ2NmZGJhYWNiZWVkY2JkZGFgY2Vn
-ZmVkYmZlY2JiY2FgYWJlY2NkY2RiYmRkY2JmY2JjY2NjZWZlZ2tpZmhoZ2VnaGhn
-ZWVnZWZpa2hmZ2lsaGlqbXBtamxtb2tramttbG1tbGlnZ2VhYmVlZGVkY2hqbW+F
-uMzY3+Pm6ens7O3tfn97fnx7e3t9fH15e3x9e358en1+fH98fHZ4d3p7fH19eXx6
-fH99fXx8f39/fX16eXp5fH97enx/fH1+fH1/g398fH5+foKAgH6AgIB+fH59fn18
-fH57e318eHp8e3p7fnt6fnx9fX19e358fXt7e3t5e3t9e3t7enZ3dHdzdHR1dXd1
-dHZzb3Jzc3BxcXBxcW9vcG9xc3JubGttcXJzcnN0dHV5fH58eHh7dnZ6enp3dXh7
-d3h4enl4dnd2fHl5dXd2dnh0ent1eHl5dXZ3eH16eXh3eHV0dHR2d3t5dnZ4d3l1
-dHJ0dXVzdHZ3eHd3eXh4eXd0c3FzdnVxc3R3dHN1c3V2dXVwdnRxdXd7d3R3dnNx
-cnN0cXBycXR0cHN0cXR0b252dXd0eHd4dnZ1c3FzdHNxcHBycW1ubm1ub2tramlp
-aGhpampqbG5tbGpqbW1qamdmZGhobmxramtqbGttbGtraWhqZ2lqamdsbWxsa2xt
-bm9sb25wcXBvbmxsbGxra2tpZ2pqaGhnZWNiY2VkXVxbX2JeXlpYWFhWWFlbXFla
-WFdZV1RWV1VVWVlcWFhYWFlZWV1dXVxgXl1dYGBfYmJjYmNfXmFiZWNkZWZlY2Ni
-ZWhpaWpmZ2hnZWVmZGRmaWdpbmtkZGZlZWZoaWVoaWhtbm5qaGVnbmtoa2loaWxo
-a29tbnFxcG1sbGhoZ2hobGhmaG5tZmRlZmhpZmZlZGlraGdmZWVnZGRlZmVkY2Vn
-ZmdnZWVlZGZpZGNjY2JjZWVlZ2dmZ2dkZWhpZ2dlZ2lqaWhtamppaWdnaGdlZGRo
-Z2ZfYmVlZ2RmZWVpaWxpZmZnZmZjZ2ZnZmppZmZlZWRjZmRnaGhnZmpqaWhpa2xo
-aWpoaWprampoZmdnaGpqZmVlZGdmZWZlZGJgX2BkZGVmZGNkZmJgXl9fXl5eXGBi
-YGBgYmNkYWNlZmJlZGRjX19fXWBeYV5dX1pYWl5gZGFcXVxfYF1cX2FfYF5dXlxh
-YmFhX2FhYWBjY2JoZGNgX19jYF5fX2NkY19eXV5eXl1eX1xdX2NfXFpbW15ZWVtb
-XWFeW11fXVxfXl9gYGFhX2BhYmNiYGFhYWBfYmFeXGBeW2JiYWFdXF5gY2JhXF5c
-X1xfX2FfYF9eXV1fX19fX2FiYWJiYmBkZGRiYl5dYWBiZmVjZWFiYmBgYWBgX2Bh
-Y2JgYmJiYF5iYmBhYV1dXV5gYF5fYmFfYGFfXWFjY2RlYWRkXVpeYWFhYV9fX2Fo
-Z2RmZ2NhYmtjYWBgYGBhXmBeY2VkYGFhYGBoYWFgYmJgYWJlZWRjZGJhYWJkY2Bg
-YGVkY2RjYGJlZWNlY2VgYWJlYmRiY2NkY2JjYmRkYmFjZWJgYmNfYWBjYWNkZGJj
-YF5iYmJiZmNiYmZmZWJhY2ZlZGJiYmVoaGVgY2NjY2dhX19kZGVnZWJhY2NjYmRh
-X15jYmNgY2dmZGNhaGloaWdmZ2poZ2VmYmdoaGpqZWZlY2NnaWxpbW9ubW1ub2ts
-bGppaWptbGxramlrY2dpaWVlYWhscYe3zNfg4+bo6uzt7e2AfHx9fHt7fXx5dnl8
-eHh8fn5/gX97enx+e3t8fHx6eXt6enp7fn55fn17e3t7e3t6fH19fX57e3p8e318
-fIGBf316fH5/gn2AgHx9fn18fH5+fX18fH17e3x8fn98fXt8e317eXZ8enp5enp8
-ent6e3t8e317d3h7e3d2dnZ1d3V4dHVzdHN1dHNzbnFxcnJxcXF0cXZ0dHZ0c3Jv
-cXBxcnV1c3N3fHx8fXh5eXh4e3l7e3p6eXl2fX58fX54dnl6d3d5eHl5enl4end2
-d3h5enZ3eXd5d3V0c3V3d3h2d3Z5eXZ3dXV3cnB0c3R1e3p3d3h3dXRzc3d3enl0
-dnZ3dHV4d3h0cnJ0cXJzdnd3dnV0dHNvc29tbnFycXFwbnJxdHhzb3BydXdzdHd3
-c29xcHJydHFyc3BwcXBtZ21rbWpsa2toamlpaWltb29raGppaWRkZGhqamhmaGts
-amtqa2ZmbG5ta2traWtuamltb25ucXNwb3FtaW1ubmxwcW9wbm1paGhqZ2hpaGdl
-YmJiYGJiYFxdZ3BrXl1aWFZYWFdVVlVWVVVXVlhZVFNVVVVYWVtaV1pcXV1eX11c
-W1xgYF1dX2BkYWRhY2JjYWRiYmVlaWhmZmVmaGdnaGZlZmhmaWpoaGZpaGdmZmZl
-ZWlqbWtsa2dqa21wbWpoZ2pqaGloaGtqamhramtrbWtqZ2pnZmpoamhoam1pZmZm
-ZmRma2lnZmdmZWhraGdlaWVpZGRjZWhoZGNiYWBiZGNjYmNnZ2ZlZGZkY2doZ2Vk
-aWtpaGxoaWZlZ2loaWxqamhmZ2VnamZjaGhnZWVkZWZnaWdnaGhpaGpoZmlpaGdn
-aWhnYmJjZWhqbWppa2ppbGpqa2hoaWVoZmhmZ2hmZGdoaW1paGZmZWNjZGdpY2Vl
-ZGVgYmJhYmRjYGNgYGJkY2FeX2RiYGRiYGBhYmFiZGZkYmFfXmNhYF9fXmFiYmFg
-Yl1cX19gX2FgXV9hX2JgYl9cXV9iYmBeX15dX15eXV9jY2NlZWRjYWNnY2FfXmBf
-X2FhX2BgYl9eXF5fYF9fXWBeXVtbXF1eX11cXV5fXFldYGZlZmRiX1peYGNfYGBg
-YF9eYGBhYF9hYWBgX19gY2FgYl9dYmVkYl1gYV9dXF5gXV5iYmFgX2BkYmNhY2Jl
-ZWRhXWBeYWNhX19iYmJjY2ZjY2RjYl9iY2JiYmBkYWJiYmJhX2FgX2BfXlxfYWJh
-YmNiY2JiYmZlZGRgX2FgYmBhYWBhYGNkYGJkZmZiX15dX19iYmNfX2BfYmJhYWFi
-Y2BjYmBjX11fYmRiYWNkZWRiXV1fXmBeX2JjZ2RlZGZjY2NhYWNiY2RiZmNiYWNj
-ZWVlZGJgYWRkZGBgYGJhYV9eYGBjYWFhYWJgYWFgY2NiYmBlYmFjZGNmZmRmZmRl
-Y2RkZGhpZWRkZGFiY2RkZGRiY2FhY2djZGZjYF9mZmNjZGVlZ2tpZ2lpZWppZ2Zo
-ZmZqaGlpZmZmZWppa2prbGxraWlmZmdmZGVmZmttaGpua2htbWpoZWVkaGdsi7XN
-2N/k5+jq6+3s7n16eXp6e3l8eXl6eHl7fHp7eXl9fHt7ent7fX59fH19eXp5enx/
-fXt6eHl5eXl6fX17ent6enp7fXt9fXt7en19fXl5fXt8fnyAgH5/fn9+fn59fHx+
-f359foF/fnx9fn+Bfn18e3l7f3x5d3d4eHp7fHx6eX55fXt4dnh4enp3d3V1dXRz
-dnNwcXRvbW9xbm9wb3FucHBwc3V2enl1d3V3d3Z1dXd2enl7enp7enl6eXp3eXx7
-fH15enqAenV5eXh3dXZ8fnt7eXd5eHp6enVzc3d9fHd3dXV4eHZ5eHZ0dnd8fHx4
-eXd1cnJxdnV1eHl5dnZ2c3d2eXl4eHh5enp2cnZ2cnJ1c3J1cnFyb3Z1dXRxcnBv
-dHRxcW9vcHV7c3JxcG1vbXJwcnV3eHVzcnJvb25vcHBxb3B2c21sb29tbnBta2lm
-Z2ptbG1sbG5samhkZWRnaHFraGdoaWdqamdpZWdna25xb21tcHBvbXBtbm5xb2tw
-b25ub3FtbXJwbGxramhra2poZ2hpaWZkY2FgXVteYFpeZm9mWllYWFdUVVVWVlNV
-VFdYVlhYWFZUVFZWVlhZWVpbXF5eWlxgX2JkYV1eXl9jZGRmamllZmJlaWZjZmZl
-ZWVmZ2hoaWRmY2RlaGhoZWZnZ2lpaWZpa2psaWZoaWttbGhmZmhoZmdqamZoaGho
-ZWdpaGlqZmdoaGlpamdpZ2poZmhnZWNlZGVlZ2pqaGRkaWVmZWVlaGdpZWRlZ2hn
-YmdpZGNkZmViZmZnZ2ZmY2VnZWZnZmlmZGFmZ2tnZGVlZ2dnaWppbW1oZGJjZGhn
-aGdpZ2VkYmZnZ2ZqbXBqaGlpZ2hoY2ZnZ2dlY2VnamtoaWhsa2lnaGhobGtpa2tt
-amVjZGhmamtoZ2VlaGhoY19fYmVmZGJkY2JhYGNiYF9fYWFjZGJiZmRlYWFkZWNh
-YGFhYmJjYV1fYGBgYGFhYWBeYGFfX2BjYWBfXl5eX2FeX19cYWBfYFpeX2FfXl5d
-XV9fXl9gYWBgZGVkZWRnZ2hoZWJfX15iX19eXl9eX2FgX1tbWlxaXmNfXF9dXlpb
-X15ZWV5fXmFkY2NkYl9eYF1fXl5eYmJfYF5hYWNfYGFeX15gXl9eX2BiYWJeYGJf
-XVxfX19iX15hX2BgX19hYWJjYmJhYmNiY2FfXFxdYWRhXmBhYWBgYmJjY19cXVxg
-Y19gZGJhXl1dYWBgX2BdWl9hYmBgX19gX2NiYmBfYWFiYmJhYmBeX2FhYGRiX2Jj
-YmNiYmFfXl5jZGRkYWRhYmRiYmBeXmBkYmJgYmRjYWBfYGFeX2FjYmBkYmNiX2Bg
-ZGNhY2NkY2RiYl9jZGRkZWNkY2RmYmJhYmNjYWNjYV9dXWBiY2NgYGFhZGNfYGBh
-YGFjY2JhY2JhXl9gYWBiYmVkZWNhYmJlZmdlYmRjYWRhYGJjZGNjY2VkamdlZGVo
-Z2Voa2RfYmNnaWdlaWpoZ2dkZGVmZmhmZmdoaGdnaWtqaGZmaGtoaGlnamxqaGpr
-cWlpaWtra21tampoZWVkaGhnamd8sszX3+Po6ert7ezue4B9en59fHl5dnt5en17
-eXd5eXt9fHp3e3x7fHt9enp9en96eHl4d3h5fXp8fn17f317fX17e3l8fX58gH9+
-fX18e3p7fHx9fXx9fn5/fHl8fXt5eXp7fHx+fn5/f31/gH1/fHp5eXx4eXh3eXh7
-fnp4eXt5enh6en57enl4enp2c3J0cnRvbm9zdXJvc3FvbmxwcXFxb3BzdnV2dnh4
-eHd4eHl4d3Z3eHh7eHd3enl3eHl3eHl4enp8fXd3fnt4dnh6eHt9fHp+fHd4eHR2
-c3Z2d3h4eXR4fHx6eHZ2e3p6d3V2e3l6eXd4dnd4eHV2eHl3dXZ3dXRydHV6eXd3
-d3d1dnRzdHN3dHZzb2xwcHJyc3R2cnBxdHRzb3J1dXZ1c3BwdXBxb29yeHd4dnNw
-cHJxcW9wdHBxcHR1cXJxc3J0cW5pamdpaG1ta2xrbW1sa2hpaGdrZ2ZpaWdqa2hm
-amdmZ2xpbGtucndsa3BvbWppbW1rbW1ucXRyb29xb29tbWtvamlpZ2loaGZlZGZk
-YmBgYF9dXVxdXF1WVVZUVVNTUlJXVVdVVFRVV1dXVVVUVVhXV1xcWVpbXVtdXVxf
-YWJiXVxiYGBmYmFjZWRmZWZmZmlmZ2RnZ2lpZ2VkZGRiY2VoYmdqZmZsa2lpaGlm
-aGtqaGppZWlqa2hoZWdkY2RnaGdmZ2ZqbGlpZWdmZmlra2poaGhpaWZnZGVnaWRm
-ZWZnZmViZmVmZ2VjYmNmZmpsaGdmZWVmZ2hlZ2ptaGVlaGdoamZlZWRlZGdpa2lo
-bWtkZmVmZ2lqbGtsaGppaGlpZ2VlZmZnZ2ZnYmBlaWZlZGNmZ2xsa2xoaWppZWZm
-aWhoZWdnZmRjZ2lqaGZnamdnaWloa2poaGRkaGZnZ2ZlZ2ZraGdobGRkYWNjZWJg
-YWJjZGFhYGBeX2JjYmFhY2FkYGBgYWJiYF9hZWJgYmNhYWFfYF5gXmJgX15fXl9f
-YGJfX2JgXV1gYF9aXWBhYmBfYGJjXmFfXl1eYmRhYWFiYGFgYmJiYWFhX2JgYV9e
-XWBfXl1dX2FgXVtbXV5eXl1gXV1fXV1dX2BeXl9hYmFiYGJgXmBdXWFgYWFgXl9h
-X2NgXl5eXV9gYFxcW1xeX2BhYl9eX2BeYWBgZGBgYWBdYGFlZWFgXl9fYWFgX2Ff
-X2BhW11iYl5gYWJeXV9gYGRjX11eXGFkYmFjYmFeYWJeX15gYWRgXWFgYl1eYGFg
-YmJeW1pgYGBhYmBiYV9hYWNjYF9hY2NjYmVkY19iYl9iZGBhXl9iZGNhX2BhXl9g
-Y2FeYWNkZ2JgYV9gYWFkZmZjYmRkYWFjZmRkZmRgYmBdYGFiZ2NkZGJlZmRjYWFi
-YGJiYWNgXl1eXmJkY2NkZGJiZGRfYWNgYWVmZWVkYVxfY2JiZGNlY2NjZGZjY2Vm
-ZGRjY2JeXmRkZWZnY2BgY2JlZmVkaGVoaGlrZ2hjZGZmZmVlaGZlZWRkZmZnaGln
-ZmZmZmZnaGtnZmlsZ2hsaWZiaGtsbm9ta2lpaW9sbGloaWdnamRoaGhoanmuy9jf
-4+jp6evs7e1+fHp3fX9+eXp4d3p7eXp7enp8e3t6e3t6eXl5fH96eHl0d3t5ent7
-e3h7e316e3t6enx+fX15eXl7fH6Af3+AfoB/fHt7eXp9e3t5eXx9eX16eXp6eX59
-fH6Af31/fX2BgX1+fX99e3l4enh5eXh5eXh3eHp6enp4eXh3dXZ5d3d4dXNwb29s
-bnFycnFyb2xrbG1ub29ydHV0cXB1e3Z4d3t9fHd0dnZ3eHt5dnl6d3p5e3l4enh4
-eHh5dHd1dnl5eXx+fXp6eXt6dnh1d3V4enh6fHl4d3Z9f3p4d3Z4eXh7eXh7enl5
-eXd3eHd6fHl4d3dzdXd2dn14dXR1d3V1dXh1c3JxdXR1dnVxbnByc3NycnN1cG10
-cnBzdHV3dXRwcXFzd3ZzcHBycnJzdXZwcnBta21xcHBydXRycm5rbXBvcGxqampu
-b25ubG1ra2prbGxvbmppaWlqbGlmaWhoZmdnaWlnaGhmZGZqbmttaWpqbGlqbW1t
-b25xc3JycnJub25sa2lpaGhnZWViZWJiYWBfXV1gX1pYWVhWVlRUVVRTUlJUU09R
-UFVWVFZYVVZYVVZXWVlYVlZcXl5eXl5fYmFhX15eYWBkYmNhZ2ZjZmtsaWZlY2Zn
-aGhnaGRiZmdpZ2VlaGtqaGlramhpZWdkYWVmamtoZ2xtaWhramtqaGdqamprZ2pq
-aGpsZ2poaWlmZWVlZ2loZ2dnZ2ppaGVlY2NlZWVlZWVlZGJlaGZlZmZpaGZmZWZm
-aWZjZ2lnZ2VmZmZnZ2dmZGdkZmpra2xsamxnZGVpa2hrampoZ2ZoamlraWdmZ2pn
-ZWZmY2RlZWJlZ2ZraGlpZmdoZ2hpZ2lqa2ZpZ2VjZGdmZmhpaGptbGZpamdnaWVm
-ZGVnZ2ZoamVkZGdramxsZWJlYmJhY2NjZWFfYGBgX2VkYGBhYGFgYGJiX11eYGBg
-YGFiY15hYF9hYF9cXF1fX1xcXV1cXl1dXl5bXl5gYF5gXl5fYWFiYF9gYWNjYmJf
-X2BeYWFhX19gXl1fYGJhYmRlY15fYmBgZWRfXV9hYl5bW11gX15bX15dXl5aWlxg
-YGFhYV1iX1tfYmNiYGRhXWNgXV1fXmBhX19eX1xaXmNkYF1eXl1fXF1cXl1eYWFg
-Y2ZkY2FgYWRiYWBjZGNjX2BfYWFhYWJjYmJiX11fYF5fX2BeW15fX2NgY2BkYmFi
-ZGNiZGlkYl5eYGNmZ2ZiX2VgXF1gYGJhYGFlYV9fX2FiYmJhZWJjZGRiYV5gYV5e
-YGNiYWFkYF1eXl9gXl5hYmJhYWJhYWRjYWFdYWJiYF5hYWFgYWFjY2NkYmJkZGJi
-ZGNjYl9dXWBhYmBkYmNiY2RiZGJhYmJiYmRkYWFhYWBhYWNkZGZkYmJkZmRkY2Rj
-YmRkZF9gZWJgZGdlY2JiYWNhY2VlZmdkY2llZmZjZGZnZmRlZmhnZmJlaGtqa2Zn
-Z2VlZ2hnY2VmZWZlZ2doZ2dnZ2ZnZ2pnY2VkY2RlaGhoZmdoamxsaGpoamxra2pq
-a2tsbWxqbGpmaGpsa2RmaWlod63L1t3j6Onq6+zs7X1+fH6Af3x7enp6d3l7fXx+
-fXh6enl5enp5eHp6ent4eHp4eXt6eXt3d3Z8fXx8ent+fHx7e3t8gX1+foKAgH6A
-gH9+fnt7fH59gXx8ent6eXx7e319e356eXx8e3x/f35+fX19fn19e3l6eXh5enl6
-d3Z4enp7e3d3eHZ2dXV2eHV2c3JxdHJvcHB0dXJubm90dHF1dXJ1dHRxcG9xc3N1
-eHd1dnZ2eXp2eHl8e3p4enp7fHx6eHh2dnZ2eHV1en17eHt8eXd3d3t7e3t7eXp6
-d3h4eXd2dXp8eXd3eXZ2d3d7fHt7eXd4eXZ2d3t8eXR1dnh3c3Z4fH13dnZ0dXd5
-dXV2dnh1dnRycnV2d3RzdXV0dnRycnFwdHJ3dnV1cnJycnVyb3B2cnJ1c3R2dXRz
-dXFwb3JycHByc3FycG1tamxubWpqbGxsa2toaGxsbmxva2lqaW1ramhpaWpqZ2Zm
-aGppaGdqamZmaGpra2tqaWZsamtoaGxtbG5vcnFxcG5tbm1oZ2hoaWdkZGRiX19g
-XmBgW1pWVVlXV1ZWVFVRVFlUUlNRT1RTVVZUVlZVVldWV1lZV1hZXVxbW11gYF1f
-YGJlYmRkY2JiZGVjZGVlaGdnZmViZWZnZ2dlZ2lnaGZnaGlnaG1rbGtpaGdqaWdk
-Z2ZlZ2ZlZ2ppaGppaWppaWtoZmZlZ2lqbmpubm1qaWhnaW5sbGppa2dnaGdlZGVm
-ZmRlY2ZkY2NiY2JmZWNjZWVmZWVjYWZmZ2ZlZGlpaWZmZ2ZnaWZoZ2hoaWppbWtr
-bGpnZWdlZ2lmZmhqaGlramhoamZmaWNlZmdlZGJlY2NlZmRjZWdpZ2VoaGdobGpo
-ZWVlZ2dmZ2ZpaWlqamlqaWlpaWpoZmlqaW1pZ2hpaGZlZmdoZ2NpZ2RiYWFhYGBh
-YWBfYGFkYWFjY2NjYF9gY2VlYWBgYGBgX2NhZGRiXV9cXGBiY2FhYV1dX11eYF9e
-XV1eX2JfXWBkZF5fYWBeXmBeYWRhXF5gYF9dX2JiXV5hZF9fX2BhZmZhY2FjZWdn
-Y2RhYWBeW11fXmJfX15hXlxbXVtaXmJfXF1dXVxdYVxdYWRjY2NiYF1bW1xZXF5f
-YF5dYF5cX2FeXmBeXmBhX19eYGBgX2FhX2BcX11dYF9iZmdkX2FgY2NhX2BgYmFi
-Y2JhX1xdX15eX19cXlxdYmRoZ2RmYmBhYGFlYl5gXmBfYWJjZGRhYF9gXV5fYGFj
-Y2BiYGFhX2JgX2FgYF9gYmNhX19fXmBhY2RiYGNiYF5dYGBhYWBfXl1dX2NhYWNh
-YmJhYWJiYV9iX2FkZGJkYWRlZmVhX15gYmJiZF9iYWBiYWFfYGJjY2NiY2VlZWVl
-Y2NkZGJkYV5fXmJlZmVlZWVlZ2ZmZmNgYWFhX2BiY2BiYWJjY2RhXmBiZGVjYWFf
-ZGVmZ2hjX2JjZ2xpZmRiZ2dhYWRlY2JiZGVpZWJiYmRiZGNiY2JlZmZmZmZmZWhm
-ZmdmZmhoaWlnZ2hpbWxsb2xrbGlnb2xqa2doZ2lnZ2NlaWlpaWlnaW10r8rU3OTn
-6Ons6+3tf4B+fH18fHl8fnp7ent7fH97eHd5eXd7fHl3eXp5d3h3eXl6enl4d3d3
-eHh6e3x7fHt6e3x5en19fn+Bgn5+f3x/fX2AgX99fX57en18d3V5e3qAfXt6en99
-eXd5en57fHx8gH9/f3t7enp5e3p6eXl7fHx8e3l5d3h1dnd4eXN1dXN0dHV2c29v
-cHBvb2xub3Jzc3Jxb29wcnR1cW9xcW91d3VwcXF1dXZ2eHd3eHh5eXl6eHt6eXd2
-e3t4d3V5fH98enx7enp3d3R4end5eXl8eHd4eXp4d3V0dXV0dHd4d3l6eHd5ent+
-e3l7eXV2dnZ1dnNwcnN2eHZ0d3hzcnh3cnN0dXR1cnZzeHl4dXV1cnRzdXVzdHV1
-dXNwcnR3eHR3dXVzdXR1cnFtdHV2dnJvcXJydHRzcHBwcXFucG1raWtsaWtqbGxp
-amhpa2tpaWhqamZpZ2ltamdpaWZoaWpsaWhpaWlrbW1ra2tpaGpta2dpampoamxr
-bmxub2tqbmxoamhpaGZmZ2RiYmBeYF9fX19fW1xbWVdaV1VXV1RWV1lWV1ZWU1ZU
-VFdYVlhXVlVaWVxXWlxaW1pcXF5eXV1fZGNjZ2hlYWJjZWRmZGZpZ2hnZ2doZ2do
-Z2pnZWhnZ2ZnaWxrampqa2ppbGhnaGlpampqZ2ZmZ2praWtsaWppbW1pZ2hoaGhs
-bWhqZ25rbG1nbWdqZ2prZmlpZ2ZmZWZlZGNhZGRoZ2dkYmRnaGZjYmJiY2dlZGVm
-ZGNlZWhpZmNpaWlmZWdrbGxoZmhrampnaGhmZ2tnZWRnaWppaGhoaGppaGlpa2hq
-amhmZmZoaWJkZWZnZ2hraWloZ2pobGpqamloZ2doZmhpbGtqampraGZmZ2lpamto
-Z2doZ2hqbGpmaGdoZGRnZ2JjZGNgYmRlY2JiYmNgX2FgXl5fYmJlYGNjYmBhYWJh
-YF9gYmBlYl1cX19eYmFiYF9gX11dXl1eXl9mZF5gYGBhX2FhX2JkX11bXV5kYWRh
-W1xhYWFgYWBgYmRhYmhnZWRkZWNhZGRmZWRkYmBdXGBiXmBdYF1hYV9eXl1dXF1d
-W1paXl9fYGBgYWNjZWBfX19gX15dYGFhYF5dXl1eXl5dXFxcYGBfXFtfYF9fX19e
-Xl5eYF9cYGJjYWJiY2JkZWBeXV1eYGJgX15gYmBjYV1fYGFgXV9eYF5lZGBgYGFg
-YWFgYF9eYWFgYGBhYmNiY2FcYF5fYFxeXF9gX2FfX2BfX19eX2FeXV5dX19eX11f
-Y2RkY2RjZGFiYmRhYGFjZmNiYWBgXV9cXl5gY2FmYmFfYGRjYmBjZGNjZWFeXl5f
-YV5gY2NjZWVhX2FhYWBgYGFkZWZkY2BjZ2ZlZmRlYmBhYmFjZGNkZGNiYmRhY2Ng
-YmNjYmFlYmRlZWNjZGJgXmFiY2BkaWdiYWRlamhkZmJiZmVmZmZmY2ZjZGdraGlp
-aWdlYV9gY2NlZWZjZGVkZmVlaGlmZWZnaGxrZmZnZ2hoampvb3BtbW5rampub2pn
-Z2VpaGZlaGZoaGtsbW1saniwytXc4ubo6uvr7O18f3t6fHp4eHp5enx8ent7ent5
-eXp5d3l5eXd2eHx7fH5/fHl3eHh3eXp4eXd7en9+fXx7e3h7fYB+f399fX18fX18
-e3p+fXp7f317ent5e3h9fXx/fnt+fn18fXt7enx8ent7fn58fX9+fXl5fHl1d3l6
-enl6fHl5eXh3eHZ3d3J0eHd2dHRxcXF0cm5vb29wb29xcnFubHBydHNycG5xcnNy
-dnVxdXN1e3Z2dnh5eHt6eHh5fHp4dnp8fHl4dXV0d3p5eHt9enh2c3R4eHl7enp7
-eHh5e3l2dnl3eHd3dnl5ent4eHl8eXp+fXx5ent7end1dXl3dnR2dHR1d3Z1dnh3
-dXZ1dHNzdHZ3d3N0dnV5d3JvcnN1dnVzdHRyc3B0dHV1dXR0dnFxcXV2c3Rzc3Ny
-dXJycXByb21rbm9za2tqampqaWxtbGlqaGlnZWVjZGdpZmhnZ2VmaGhqaWprZ2tq
-aGlpampoamtramxsbGlwbWpubGlqa2xsbnBraGtrbm1oZ2ppaWhnZWJjZWRhYWFg
-XF5fXF1dXFlaV1ZWVlFUWFZZVllYV1VVVFVYVlRXVVhaV1taWVdXXGBfXl1gYFxg
-YmJiZWVoaGVjYmJjZGVoaGtqbGhoZmdnZ2lpaWhmZmhoaWxqamtnamlqbWloamtp
-bGloaGZnaWptaWlrbGlpaGxrbG1naWlmaGxtaGhra2dlZmhpamxqa2tqZmRlZmVk
-ZWRkZmdnZWJoaGhqamdmY2RlaGdnZWRlZGVnZ2VkaGlqamlpZmZoamllZmZnZWVm
-ZGZmZ2RjZmRnaGloZmhoampsa2VoZ2RlZWVkZ2dnZWZmZGNkZ2dnaGlna2tqaWdl
-ZmhnamtmYmdlaWtoaGppaGpqamtramlqaGhnZmVmZWNhZGVoZmZnZmRlZWFjZGRj
-X2JgYWBfX2JfX2FhX2FgYWFhYWFiYV9hYGFfX2BhX2FbW2BhX15gXl1dXlpeXF5f
-YGFeX15dYF9fXl5cXmNhXl9gX2FiY19cXF5hYGViY2FlZWJhYWFlZWZnZGFfX2Bg
-XWBgYmRiX19eYF1aXmBeYWFeYF5bX2BfXl9cXGBfXl5eX2BdXl5cXl9eXFxfYV9d
-XV1dXFpZW1tcW11dXGBfXmBgYmBgXF5fX11hXl1fZmdlZGNgX15gYGBfX15fZGNk
-YGBhXl5fYGBhYmJiYl9gX2BiXmBfYGFhYWFhYWFhYGBiYF5eYGFeXmBdXVxeYFxe
-X2BfYV9dXV9eX2FhXVlaWVpbXV5iYGBgY2NlY2NiYmNkX2FhYWFjYmBiYWFgY2Zg
-X2FiYV9iYmFgYWJfYWNlZmZjY2VhYmRmYmBjZGNlZWZiYWJkZGJhYmFjZGRkZGVl
-ZGNnZWRmZmJfX2JhYV9fZWVjYWBiYmNfX2JgX2JlaGFkYWFhY2VjYmFmZmJjZmRj
-ZmRmaGdmZmdiXmJlZGRiZGRmZmZjYmRiY2NjYmJkZWRjZGZlZWVlZ2dnZ2djZmlr
-Z2loZmhra2lsbG1uamxraGpqaGlqa2loZ2hmZmZnaWdtaWlraG1seLHK0Nnh5ujp
-6+vs7Hx9fXl2enl8eXp6fXt7enp3fHp4enp7enl4eHh4fH16fHx9enl5d3h6eHd3
-dXZ6fH9+fHx9goJ+fX9+fX19fHl6fHx5eXh6en5+fX55fH5+e4B7fH58fHt8fnx8
-fX15enh3eXh7e3x8e31+fXp8enh4eXl5enp7fHp4eXh2eHl3dHV1dXRydXNzb3Jz
-c3V1dG5ucG9vcnJycXJzdnV2dXR0cXN3enZ3eHl2d3Z2dXd4e396fHt5eXV2fn56
-e3h5d3l5fHt7enl4enR1d3Z6e3Zzd3l4d3d4e356end1e3h4enl4eXd2enl3eXl7
-enl4fHx7enp5eXZxc3N0c3NzdHZ3eHl5eXNwcnZ2d3l4eHd2dnNxcXV0dHVxdHNx
-cXN0cnFxcXN0dHVycXBxdHNyc3NwbnF4cW5wcHFybGptbXBua2toZmtwbm1mZ2xt
-bWtmZGJiZmhpZ2VnZ2ZnZmVlZmZjZGdmZWhpaGhqbG5saGlsbWxtbG5ubmttcG5t
-a2xrbmttcGppaGloZ2dlZmJkYmFkZGFgX15cXFpYVlRUVVdTUVJUV1ZWU1VVUlVX
-VlhVV1JTVFZXWlpcWFpeXl5gX19gX2BfX2FjYmRmZ2RkY2RkZmhqaGhnZmlnZWho
-amppaWloaGZlZ2ZkZmhoaGtrbGxtamdoZ2ZobWtpZWtpa2pqbGhpbGxrampoZWRl
-a21va21lZ2hnamlpa2lkYmRnZmVmZWRjaGlpZmVmZmViY2ZoamZlZWlnaGZiY2ho
-Z2dlZWpnZWllYmNkZmZoZ2lpaGZkZmppaWpoZmhnZmlqa2loaGdnaWpnZWRlZGNm
-Y2NkZ2dramloZ2VlZmlmZ2VpaWlqbGxqaGZlZGRiY2ZnaGhpamZna2xpa2tpZWVn
-ZWZmZWJiZWNhZmZnZGVmYmJmZWFiX2JiYWBhYGFeXl9eW11dXFtfYGJjX1xdXl5g
-X2BeYGFjY11eYGJhXVtbWVtbYWFfX2BgYWJgYGBeX19dXV5iYWJgY2JiX2BhYGFd
-X2FiX2BiYmJjYWBjYWBhY2JkYmFfYF9iYWFeYF9gXl1eX2BeXV5gX2BfXlxbXV1e
-XFxdYWJhXlxeXFxdWltcXGBfXlxfYWBeXl1dXV1cXmBcXVpdXV5fXV9eXWBfXmBg
-X15hZGNhYGBhYF5gX15jY2NfYWBdYmFkYmJeXWBjYF5fYWRkY2VlY11eX2BeXV5j
-aWdmZWBiYGFhXF5fYGJjYV1hXWBfX2BdX19hY2JgY2JiYWBhZGBgX19fY2JhYV5d
-YGJlYmJiZGJhYmFhYmRjXl5eXWJlZGVkYF5gYF9eXmBfYGFgYmRkY2RiXWFmZGNk
-ZGJmZGNiY2NhYWRkZWFiYmJjYmRjYmFhY2NjYGRkY2ViYV9gYWVhZGNjY2NjYF9g
-YGJiZWVjYmJjYmJhZ2dkZWVoZWJgY2RkZGVqaWlrZ2FhYmJiZGNiZWZqbGloZWFi
-YmRoY19hYmFlY2NlZ2dmZ2RlZGRoamhsa2lmZ2hqaGdjZWlqamtnaWhmZmpqaGpp
-aGhmaWxqa2tqaGlna25+tsrS3OLm6Orr6uzse3t7eHd4eXt7eXp4eXp5e3l5d3Z5
-eH18eHh4eXl6e3x8e3p3eXZ2eHh4d3h4enp8fX59fICBfn17fHp8fH6Ae3p7e3x+
-e3x7d3t/fXx7fX+BfXx7fHx8fn57en16enx4d3h6eXt8d3h7f4F8ent7fHp4dnh2
-dnl6eHh4d3h4dnV2dnN0dXNycXBwcnR1dnZzbm5vcHR1dXR1c3FzdXl6fHl3eHh4
-dXl4e314dnZ2d3h6e3p4eXl3eXZ5eHp5e3x5eHl2c3V1dnd6fn5+e3h3dHR5e3x4
-e3t7e3p4eHZ5eXl5ent5fHp5end5enp5dnl5eHh4d3d3dnV4c3R2dHV5eXl3dnZ2
-dXh1d3Z1dnJ0d3h1cnF2dHFycXN0dHJydHJyc29xc3JydHl4cnFzcXFycnNwcXNx
-b3BwcG9wbWlrbXBvbGtpam1uaWpqampra2ptam1paGxsaWZnZGVjZWZnaWhoaWhq
-aWppampsb25ta2ptbGlqam1vcGtsbG5wbnJwa2xrbm5ta2ppZWNjZWRiYmJiYWJd
-XV1bV1hUVldVVlhUUFFUU1JVV1RWVlRVVFFVV1tZVlZZWVhYXmRiZGNcX2FfYGBj
-YVxeX2RmZmVjZWZkZ2llaGdmaWZnZmdoZ2hoZmZlZWZmZmRhZ2hrbGtpbWxoaWZq
-aGxqaWhsamxqZ2ZpamZoamhqbW1qZmVoaGpnaGtoaWhrZGVoZ2dmZ2VkZGNkYmFk
-ZmVmZmZnZmZmZGJlY2VkY2NkZGViYWRmZmNjZGZpZmRmamhnamZlZmVlZmVmZmpq
-Z2dnaWpmaWpnZ2RkaGRnaWhoZ2Zoa2xoY2JnamxqaGlmZ2pqaGdmZGJjZ2xua2pp
-bWlkZWVoZ2poaWptcGxoaGtmZWVnZWNhY2ZnZGVnZWRpaGhmY2BjYmBhYmNmZGVj
-ZmNlZmRfXl1iX2FhXV9fYWBfYF5dXl9eYF5cXl5dW2FdXV5dXVxgXl1eXl9fYWJj
-YWBjY2FiXltfXV5gYmBeX2BbWlxgYGFhYWRhXmBmYV9jYWJiYWFiYGNlY2NkZGJg
-YWBeXl1cXFxeYWFeXFxeX2JdX1pcXV1dXF1fXGBfX11aWlpbXV1dX2JjXV9hYWNg
-X2BfYGFgYmNhYF9ZXWBiYWFhXmJhX2FeXGBjZF9eYGFhY19eX2FiYmBgYWNhYmJh
-X15gYWJhYF9hYWRmY2VgYV9gX2BfX2JmZmRiYmJcXWBiZWRlZmZjY2BjYF9iYV5h
-YmBfYWBdX1xdX2FfYmBfYWJkZWNhZWJfYF9hXmBjYmFhYGBfYGFiYGFeXl9hX2Bh
-XV5iY2FgYF5gYGJkYl9gYWNjZGRhYmFiYmJjZGRgYWVjY2RmZWRiYmJgY2FhYGBf
-X2JhX2RlY2JgYmZiYWRhYmJgX2NkYl5gX2FfYWRnZWJiYWBiZGZkZ2VjZGNmZGFh
-ZWNmZ2dkY2NjY2JkaG9rZWNlZmZmZmVma2VhX1xjZGZlY2RjYmVkZGhmZWZnZ2ls
-bGpnZ2hnaWplampqaWhoamVoaGhqamhmaGhpbGxrZ2tramttbX2yy9Xd4ebo6Orr
-7Ox4eXp9fnp6dnh3eHp6eXt7enl4eXd2eHd3eHd3eHp5fXp7d3p6eHd3eX57enl5
-enx8fn58en18fX98fH19e3p7fX59fn19fnt7fH96enh5fX58d3Z2eHp8e3p9e3l6
-e3p6enl6fn14eHh5enx5enx9d3V3d3l5d3l5enh4eXl4dnZ0c3JydHBycHN0cnRx
-cG9wb25vbnBxc3N0c3V7eHZ2eXt6d3h9fYB9eXt4dXZ2eHh3eHl7e3t6eXd4eHp4
-eHV4dnd3d3Z9fnZ5enh4end8eXV7e3t6e3h7eXZ5enp6e3l4dnh6e3l5fX15eHN1
-dnZ0dnZ2dXZ4eHd3d3l4dnN4e3d0cHd0cnNzcnN3c3J0dnRxcnN0c3V2dnZ0dXFy
-d3Z0cXFxcXJ1eXlzcXR0cnNycXNzc3BwbWpsa21sa2ppbWxsb29wa2psbGtlaGZm
-amtrbGpqa2hoa2lnZWVjZWdqbGxtbm1oZmtpa2pra2tqa2poa2doZmltbmxvcW9v
-b21xb25qbWtsaWVmZmRnZmRkYGBgYF5aWllXVVhYV1ZVWFNSU1BTVFNTVFRWV1VV
-V1hVWFhVVVZZWV9eYF5fX11cYF5dXmBhYmFfYWViZGVlZWZmaGdnaWVnaWloaGVn
-amlnaWloZmZoZmRmaGxsbG1oam1oZ2lraWdoaWpqaGppaWdpaWpra2puamlsaWpo
-Z2psamhnaWhsbGhlZmdpaGhmZGRjZGNlYWNiYWZjZGFjY2NjZGVmZWVlZGNjZGZm
-ZWFlYmVnaGpsamdmaWdkYmVnZmdnaGhnaWdqamxoZ2hmZGRlZGNjZWdnZ2lsamlk
-ZWdlZWdoamlpaGdmZ2dmZWVnbWxraGtraWtpZmZnZ2lpamhqaWZmaGZmZmRiY2Fj
-ZWZoZmhmYmRnZmFgYmNgYWhmYmRmY2VkYGVnZmFhYWBfXWBfYGNgX2RhX11cX19e
-X11fX1tbXFpZW11fXWFfXl9gYGBfZGVlYF1dYGBfYmBgYmJjZGJfYF9gYmFkY2Jj
-X1xeXl1fX2FiYWJhZGVhYWNiYWRjX2FiY2FjYF5bWVheXV5eYGFfX1xbXF5cXV5e
-XV1eXWFdW1lbXV1iX1xfXmFhXV9gYWBhXmBiY2JkY2JjX2RjX15fYF9eXV5aW2Bg
-YWBhYWFfXmNjYGBfYmJiZWFgYGJlYWFjYWJfXl9eXl9iYWNiYmJhY2FgX2JiZGJk
-Z2NgY19bX2NiY2FiYWBfYmJiYmRkYGJgYGNhX2FhX2BjX2FeXV1gZGFiZGJiYmBe
-X2FjYmBeZGVgYGFgX19gX11eXV1cW15gX19fX2BeXWBhYGFjYmFfX19gYmBhZmRh
-ZGJiYmJhZGJiYmNlYmJiYmNiYmFiX2BfY2JkZWNkY2JlY2NkZGNiYWJfXmBiX19j
-YmJjYWRjX2BjZGRkY2JiYWVlZmZmZWRhZGRjYmJjZGBgZWNjZWVkY2NiYF9bX2No
-ZmdjZmNkZWRlZWdkZGJjZ2lnYmJkZGdpZ2ViZ2hlaWtqampqa2prbmhoamlpbGln
-aWhoZ2lpamhpa21uhLPL1t7j5ejp6+rs7H58e3l4fnl3eHh3d3l2d3l6e4B8eHZ2
-eXh3eHh7enl5e3t6fHx5eHd6f359fXx7eHp7fXx8f358e3p7fn16d3p7e31+e35/
-goOBf318fXp7enl2dnl7enp5e31/e3d2eHl7fH18enh4eXd5eXl3eXp4dXh3eHh4
-eXt8e3h6e3l7e3d1c3N2dHN0c3RzcXJxdHVzcHJxcnR1cnNydHJzdnh3eHl7fX15
-fXx5enl3dXh5eXh5eHh3eHp5d3l1dHVzdXV3eXZ4eXt6eXp5eXV3eHd2dnl8enh5
-ent5e3p5e3h7enp1eHh2eXZ5eXl4eXh2dnV3dnd5dnh4eXp6enp4eXV4d3d2c3R1
-cHR0c3FwcXFyc3NxcnJ1dnVycnNyc3R0dXJ0dHVzc3V6dnFub3FzcHJ1d3hxbW5u
-bm1tbWtsbGxoaGxwc3BtbGtqbGpqaWplZGhrbGtqZ2hnZ2dnaGdna2lpaWRoa2hq
-aWlnaGhpa21ram1raWhpaWhucHBxbW9ycGtsbWtqamllZmZkaGVjZWNgX15cXFpY
-V1ZZWFVVVllVV1dXV1ZaVldWVlVTU1JTUlNVVltbV1hZXmFhX1xdXF5fX2JeXmBg
-X2NhYGBgY2NmZmZmZWVmZWZlaGloZ2dkY2NlZ2hoaGVqbGtsZ2ZnaWlra2lnZmdq
-amlqa2hma2tpaWptamlqaWlra2xqaWdramdpaWlnZ2ZnamlpaWdpZWRiZWRlZmdl
-YVxcY2NmZWVjYGJlbGdmZWVlZGJkZmdnZmhqaGhoaGZpaWxoZ2lnZWdmZGJpamhp
-a2hmZ2ZnZmlqZmVlZWZlZmdoaWloZWdoZmhpamloa2lpZ2hnaWhmZWhnaGhpaWlu
-amdpZ2RjaGtnY2dpamdlZWVmZGVmaGdkZmpmZ2VjY2VkZmZkYWJiYWVlZ2VkZmJe
-X2JjY2FiYWRjX2FeXVxfY2NfYF1eYWFeXWBhXlxeYF5eXF1fZWFhXVxaXF9jYWFg
-YF1eXV9fYWRiYWNlYmRkY2BfYWNhYV9iY2JdXWBhYF9gYWJlZF5fY2JgX19fX19f
-YGJgXlxcWlphYWFhYGBfYWJhXFteXVtbWlxcW1tbW19fXV9iYl5fYF9dXF9fX2Bi
-YmNgYGBfYGVlYl9gX1xhY2BiYGBfYGBfYGBhYmJhXV9iYWFgXmNnZGFmZGJiX2Bi
-XmJiXmFkZF5gYWFeYGFjZGNkZWJjY2dnZWRiZWJhYWBfX2BfY2VgXGJnYV9gXF5e
-YF9fYWFcXWBgXmBgX19eX2JkYmNjYl9fYmFhYV5fYGFgXl9eXV9eXlpbW11cX2Bh
-YV5gYF5gYF9eXmNiY2ZjYV5hX2BjYmFlY2FiYWFhYWBiX2BiY2ZjYV9eYmNiYmJi
-YmRjYmBjY2RkY2JjZGNhYGBfY2NiYmJiXl1fX15jY2BgYGFfYmNiZ2dkZGdlZWBj
-Y2FeYWBfZGRhZWJgYmVoYmJgXV9fZGdkZGFmZmVmZGFjZWNlZWdnZmVmY2VnZmhk
-ZmdnamxqbmtoaWZnamlraWlqZ2hobGpoaWpnaGhta2poZ2uDscrV3uLl5+nr6+zs
-e3p6eHl6eXx7eHp5eXt5eHd5ent3d3l5eHx7e3t5eXl5enp9fX15e3l7fXx7fHx5
-e3t8fXx7e3t7eXh5eXp6fHx8fHl6en6BgoOCgXt9e3p4eXp8f35+fHp5e357eXl5
-fHx5eXV4enp8fHl5enh7fnl1enp5eHV3fXp5e3p4dnl5dXd2cnFxc3RvcHFxcXFy
-cXRzdHR1cnZ0dHVzcG9ydHV3dXV5fH15d3d2dXV4d3d4eHd9end5ent7eXd0dHd3
-d3Z4enl3eHx8fX18eHZ2d3l5enh5eXp4eXx9e3l6enp6enl1dnh6enZ5dXR3eX16
-enZ1f3p4e3l6eHl6enp6d3V0dnd1dXZ1cnR1cHBvb3Byc3Nzc3N0c3JzdnZ3dnR2
-dnRzdHVxcnNxcXBzcnJ0dHRzcm9tb290cG9ub2pqbWhrb29ucHFuamlqbGxuaW5q
-Z21ra2ppaGhqaGptbW1vbGltbmdlZmlsbW5nZWZna2lsbG1sbGxqbG5ydXFtbG1t
-amlubW1qamhoZ2RhYmVlZF9eXl9cXVlaXFpXVFZWVVRUVllYVlZUVVdVU1VVVFZX
-VVZXVVhXWltZWlpcXl5fXmBiX2FfYWRmZ2FjZGVlZmJlZWZnZ2tnZmVmZGZlYWVk
-aGhnZmhqbWxqaWtpZWVoamtqaGdsamtoaWhqaGdpamltbGhoZmZnaGpobGlraGtq
-ZmdraWZnamhpampmZmZpZ2tmZ2RnaGViY2NjZmVnZWNkZGRiYmNjZWRlZGFkZWRq
-Z2djYWVmZ2prZWdpaGdpZ2ZoaWZpaWZoZ2hlZGVmaGhlY2VoaWtnZWhlZmRkaGZn
-ZWhmZWlsZ2VmZWdmaGpnZGhta2toaWpra2lnaGhpa2tqaWhnbWdoaGpramhlZWhn
-aGhkYmVmZWJiY2RiZGRiYWNjY2NgZGFeYF9hYWFhYWFiYVxdX15gYmRdX15eYGJl
-YGBgXmBfYV9gXl9gXFxdXFxgXF1eX19gYF9jY2JjZGRgY2RmYWBgYGFiZGNlZWFi
-YmBiXmBhX2FkZWVmYWFiY2JfX15eYF9eXWJhYV1eX15gYl5eXl9hYmBkXltdXF1e
-YFtbXFxeX2BfYGFdYGBfYF1dXmJfX2JfXV1cXFpcXV9eY2FgY2JfYF9fX11eYGFh
-Y2FjY2RhX2FiYl9jYmJkY2RqaGBfYGJiYF9dYGJjX19gXV1eYl9hX19hYWFiY2Rk
-ZGFiYmFhYGNiYV9gXlxeYF9gX2BjYGRhYV9dXV9dXF9gYWBfX2BfYF9hX19iY2Ji
-ZGBjYV9eX2BjYGBfX15jYl1cXF5eXV5fX2NiX11eXl5gXmFgYmJdYGBfY2NkZGRj
-Y2JiYWFmZmNhX2JhYWBeYWNhY2FjZWRiYGBjY2RlZGNkZGJjYV5dXV9fX2JhYWFg
-YF5fXmBlZmJhYWBhX2NuaWNiZmZkY2FfYmBeX2NgYWBgY2FjZWFjYmJlZWNkZ2do
-ZmJiY2FmZGVlZWVkZGVnaGdpaGhoZmdnaGlra2xqamxqa2tramloZmdraWdpbGtn
-Z2hpZ2tua2hoaX+rytbe4eXn6evq7Ox4eHl4enx6eHh3ent7enh6e399f3t8e3t+
-fHx9fnt9fXp4enx8fnp8e3p5eHt8e3x8fX5/fnx6e3l9e3l8fHt8fH5+fHt7fX5+
-gH+AgHt5fXh5e31/fYB/f3t+fn14eXp6enl1d3Z5e3p5e3x8enx5e315eXh3dnh5
-e3h5eHl8enl4dnd3dHJwcW5wb3BwcHJub3N1dHV4dnZ4dHFub3F0d3h2enp5enp5
-d3V1eXt3d3l4eXp4eXl3eHd5enp7enx5e3l4eHp7e3t7eX58dnF4dHh6eHd5eHl6
-eHh7e3l5eHl3d3V4enl2dnZ2dXR0d3p5d3p5e3t7fHl2eXh5d3p5dnR1dXBzdnVz
-c3RwcnJwb29xc3V3dHR0dXd2dXZxdnR3dHVxcG5ta29xc3NzdHd4c3NwbmtvcnBx
-cXJzb21sbW9ra21tbGlpamtraWhpZ2xvamVoaGloZ2hpaGxsamtqbG9rbWVlZWpp
-ZmdmZWlra2lla25tbnBubW9ua2tsbXFxbWttbGtnampnaGdkZGRjX11hYF1aWltZ
-WlZYV1VVVlZWVVVTVVRUV1RUVlZVVllaVlhZVlVXWldWWFxeXF1fX19kZ2pkZGBh
-ZmlpaGdmZmZlZ2ZqampmZGVlaGdna2tramhqaWpqZ2dpZ2dkaGloaGhoamdnZ2lo
-Z2hnaGpqbGtpaWlpa2psbWpraWhsamxraWlqaWtqaWhmaWZmamxsbGhnaGVlZmZk
-ZWRmaGdmZ2dlYmRiY2VkY2VpZmFmaGZkY2RnZGZmZmVoZ2hnbWtoaWpqaGhramtq
-Z2VlZWVoZmdlZGZmZ2dmZ2NlZWVnaGdoZmVmaGlpaGZlaWpsaWdqaGpqZ2ppZmhq
-aGhoZmlqbG1qaGhoaWlnZWhoZmhmZWtsbWdlZGNlY2VkZGdiYWFfYmFnX2FhYGFe
-YWBhYV9gX2BgYGFgYGFfXWFgX19gYGFhYmRiYF5eXV5bXV1cXl1fYFxZXmBfX2Bg
-YmNhX15iYF9hYmJhYmNhYmFhYWJlYF5hYmBmYmJjX19gYmRmZWNiZGJkYWBhYF1d
-X2FgYWBiYV1eWl1dXmBgYGBgYWBeXV9eXVteXFxdXlxcXV5fYmNhYGFhXV9fXV1e
-XltZWFpfX19eX15gYV9dX2BeXWBiY19hYmJgY2RgYGVmaGVgYWJnZmJhYF5gYmJi
-Xl5eYmFgYWFhYV9jYmRkX2JhYGBfYWFjYmBgXl9kYGFhXV9eYF9fYGJgZGRjYGJl
-YV5gYWJfXV9kZGFeXmFiZWViYmNiYmJlY15iZmBhY2JkY2FjZmNiYV5fYV5jYV9i
-Xl9hXVxfXmBeXl5iZWJgYGNjYmZnZWRfYWBhYGFhYGJiYmRmZGNkYWBhYmBgY2Ji
-YmJkY2NiY2NhYWFkY19eYWFeX15jYmNfXV5hYmVlZGJfX15eYWRjZ2RiYmVjX2Fi
-ZGBfZmdhYWFgY2NkZmRjYWFnZGRjZWNjZ2ZnaGNpZWRkYmNlZmhramdpZ2ZlaGhn
-aGdpam1ta21oZWVnamhoa2xpamlnZ2dlZ2VpaWpqaWprgrLJ1d7i5ejp6err63l5
-eHp8e357eXh4eHp6fH18fX19fXl1eHt9fX+Bf3t7eXp6e3l9e3d6eHd5enl7fn98
-fH59eHh5eHt5fHh5eHl3e3h7e359foF+fHx9fH18ent6e318fX19fHx8e31+fXx7
-fHt5eXl6fnt7e3p6enl7fXd3end2eXZ2dXJ0e3l6dXZ1dHJ1dnVxcnFxcXRzdXRy
-dHNydnNxcXJzcnV1dXZ2eXt5fHl4eHd7e3p5e3h3dnV6e3d5eXp+eXh4e3t8e317
-eHh8e31+foB+fX58dXl5eXh5d3V8enp+e3l4enl6eHh7fHl5eXh3dXZ1dXh5eHp8
-end4d3l2d3l4dnJzd3Rwc3Z1d3Rzdnd1c3FxcHJycnJycnNvb3N0dXRycXRxcXJz
-dHRxcXBwcXNxdXNwdHVxbnJtcXFycnJycGxtbGxwamlwb21qbGppaWlnZ2dlamxq
-aWZoaWtoaWtramtpaGpua2ppZWVnZmtrbG9sam1sa2tsbGttbGxubW1tbW9vbm5w
-bm9saWxuamhlZWNhYmJgYGBfYF1ZV1hZXFpaVlVYWFpWVFZUVU5SVFVVV1RPU1lY
-VlZZVlZWWFhbXl5hX15dYGFlZGBhYmRlZmdpZWRkY2JgZGZna2plaWxqa25sa21t
-b2xtaGlsZ2VnaGppaWhoamtnaGpnZ2tqa2loaWxvamxnaWpsamhramhpb2traWxt
-amlpaWllZmhmaWloa2dnZmhiZGVmZmhnZWVkZmZoZWRlY2FjYmVkZmZmZmhoZ2dq
-aGZpZ2hqaWppaGlsampoZmhmaWdoZWZmamlnZ2dnaWtoaGloaGVmbGZlZ2dnaGZk
-ZWZlY2hsZmRjZ2hobG5paGlvbGppaGhlaGhrZmVmZWpoaGlmZmhpZ2drZWNma2pn
-aWdkZ2ZnZGZjY2JjZGFjY2JgYGBgXlxfX2FjY2JgYF9fYmJiY15cXVpaXl9dX2Fh
-X2NgYGRgXlxdXV5fXV9hYVxcX2BgX19iYWNhYWFiX19fXmBfYGJkYWBhYWJiYGBi
-YGBkYmVjYGBhY2VjYWFhZWVmZWJhY2djYF9dW1hdXl5fXl1eXVlbW11dXF1fX2Be
-XV1dXF5dW1xcX2BfXlxgXl9fYV9aXV9eW1taW15gXl9fYWFhYWNfXmNiYF9fYGFh
-YF9fYWFdYWZoZGNgYmFgYmJgXWBdYF9hX19iYV9fX2FgZGBkZ2NkYWFgYWBeXmBi
-YFxdXV5eYGFfXFxiYF9gYGJgYWFgX15fYmJiY2FdX2JjYmNiYF9fYmFhYGBiYmNi
-Y2JhYmRhYF9fXmJgZWZhYF9fYV9dXl1dX2BeXl9eW2FiX2BhZWJiYWJkY2NkZGRj
-YmNhYGJjYmJjYWJiYGJhZGJgYWFhYmFiYWNkZGNgYWFhYWBiYmVhYF9cXl9hYWJg
-YmJiYmNhX11dYGFdX2BhYV5hX2NkYmNlaWdlYmRjZWVkYmFlZWJlZWReXmJiZGNm
-ZmlsamhlZmZjZGdnaWxnZmZnZmpqa2hnZmZoaWxsbmhmZ2hrbGlqa2tta2hpZ2ho
-aGZoaGdkZGiCsMfV3uLk6Onq6uvseXh3enl7fH15e3l4d3l4enx7e4B9fnx6enh6
-e3x6eXZ3eXp4fHx8enp4eXl4eHl4e3l7enp7eHp8ent7fH59fnx6e3h5fHx5e3x9
-fHx8fn97e3h6e3h5e316eXmAgn9+fX17fHt6e3p8fn18fHx+gXx5eHl6enp4fHp6
-dXZ5eHhwc3VzdHR0cXJvcHBwcG9xcXlxcXBxcHBucnRzcXVzdHZ1d3Z5d3d5eHl8
-e31+fX15dXh6eXd4dnp4dXd5e3t5e3t7fXx4e3x4eX59fXx4enl4eHd7fXh4eXl7
-e3x5eHh6f3t9eXZ3eHh2eHZ3dXh3e3h3dXt5dnF3eXl3dXR3d3h5dXh3dnV1dXd3
-dXZ2eHV2c3N1cnh0cnNzcHFxcW50dHVycHNwdHJydXNyb29zbm9xcXFxc3Rxb29u
-bm1qa3BubnJycm5rbG5ta2hqaGdnamtramlnaWloaWlpaGVmZ2lqaWlqaWtua2tq
-bGdpa21uamttbmltbWxpamxtb21ua29xa2tubGhmZ2RkZ2ZjYF9fXVxbWllbXlpb
-W1hWWVdWVVhVVFVRVFBXVldWXVhUWFpbW1lbWFhbXl1bWltgXmBfYF5eX2FgYGRm
-Z2dlZmRlZGNlZGZqaWpqamlrbm5tbW5wamhpaGpqZmlra2dnZ2hoa2hraWdna21s
-bGxnZ2ltbGtramlpbmtubGpraW5wbmttbG5pa25oZmJma2ZnZmRkZGRkY2NmZ2Vm
-ZWVkZGRnaGRjYGRlZGdmZ2ZlZmVnZWZkZWZjaGhpZmdoamhmaGlpaGhpZ2dlZGZo
-a2loZ2doaGlnaWlraWdoZ2dnZmhoZmNkZWZmZmdnZGRlZWdra2lrampubmhpaWlr
-a2xtamZjZGdpamhnZmdmZ2hmZGZrZmZlZWVkZWVmY2NhYl9hYmBhZmRgXV9fXmBh
-YGNjYWBfYF9fXl1gXF1eYGBeX11cXGFiYGBeYWJgXl1dXl1dW1teXllaWVldYGFj
-YWViYF9gX2BgYWFeX2JjY19gYmRjYWNhXF1gYmNfYmRjYmFhXmBkZmVlY2RhY2Vh
-YWFeXVtfXV9gXl1dXFxdW11hYF9eX19eXmNgX15eW15hY19gXV1fX11fYGJfXV5g
-XmNiYGBeXl9fXV9hX15gX19hYV9fXl9dYGBgYWVhYGBhYmNjX2BgYmJfW1tbXmJl
-ZGFfX1xfYWJjX2JgYmFiYF9dYGBgXmBfXlxdYGBdXmBiZWJeXF9gXlxhYGFfYWFg
-YF9fY2RhYWZkY2JgYGBiYl9fYmFgYWFgYWNhXVpgYGJhYWNcX2FkYWBgXl9jX2Bf
-YGJfXV1eYGNfX2JhX19iYmJiYWFkZ2hoZmRkYWJgYl9gYWBmZGRjY2NiYGBhYmVj
-Y2FhYWBiYWFfYGRnYV9kZGFhX2NeX2FjYWVjYGBhYF5fYF9fY2FfZ2VkYmRjYWFi
-Y2BhZmdkY2VkZGJkYmNjZGNkY2RjZWFjZGdmZGVnaGhnaGlpaGdmaGdmaWloaWpm
-ZGVma21ubGttbWxqaWlnbW1ra2lqaWhoZWRnZ2hoaH6sx9Td4uXn6Onq6+x5eXd3
-eHh4eHt7fHd2eXp7e3l7fX56dnV3e3t3d3l6dXl5fXh5enp5eXl5enl6eXp7fHp6
-eXp8fXt7fX59fnx9fn9+enl5ent8fn18fHl9f399fXp6eHp7eXl5e3x/fn19enp6
-fYF8e3x6fHx7eXt9fX15e3x7d3h5dnR4d3h4dXV1d3RycnJxc3BxbXBxb2xvcW9v
-cHBycnJxcXN0cXJxdHR2dXl4eHd0eHd2dnZ5enh8enp5e3l5dnd3dXh6eHp6e3p4
-eYN8fHl7eX57eXh7e3p6d3Z8e3p3eHl9fHl3d3d2eXh4eHd5eHd4e3uAeXd4e3h4
-dnVzdXd4dnd3dHR0eHZ1cnR0dXNxcXd2dXZ2dHVzdHFyc3N0cW5ucHFxcHJxcG9z
-dnd3dXNzdHJwcHBzcXR2c25ub25ubmxrbW5ybWxsbm5ua2xtbGxubGxraWppaWlp
-Z21paGZmZmZmZWZjZ2lramlraGltbmxsamxsamtsamttb29ramtsbWttb21sa2pq
-bG1wa2ZoZWRmZmJiYF5fWlhZWlxZW1pYWl9aXVdVVVZUVVJRVFVWVlZYVlRXWlpZ
-WFlaWFldXFxdXmBjYV9fXlxcYWFkZmNjZ2RiZGNnZGNiZWZmZWZmZ2pqa2pqa2to
-Z2VoaGhmZmhpa2pnZmlpaGlnaGtraWpsaGdmZ2Zoa2hnaGlqampsam1raWloaWtr
-amxpaWdoZ2dpZ2hlZmRjY2NiZGVoZmdnY2ZiZGRlY2NhY2ZmZGRlZWRlY2NgYmRl
-ZWllZmhmZWZqa2loaWlqaWVnZ2RlZ2hqaWhlZGVpamZnZ2ZoaGVkZGRoaGZmY2Zl
-ZWZjZmxrZmloaGdmaWpqa2ptbWhoaWlra2prZ2hmZmZoZWZlZ2hpampmZ2poZmNk
-ZWRmY2JhYWNkYmBhYmFgX2BgYGBhYWNkYWFiYmFhX15eXl1gX15eXV1bW1teW19i
-YGFiXlpeYGFhYFpdXF1gXltcXF5eX2BjZGRiYWFhZGRlZmJeXWBgY2JjYmVmY2Bl
-Y2FgYGFjYWRjZGdnYmJiZGNiYFxfX15iX19cWVxaXV9dYGBfXF1fXV9fX11fX19f
-XWBiYmJhYF9fY2JeXF5eX11bWltbXV9hYGRgXVxeXl9gX2BfX2BgYV5dXWBgYGBe
-YWRlYWFfX15fX15dW1xgYWBbXF9iYmFdXF9iX2NjYWFfXVxeXl1eX15fYGJkXF5g
-YF9eXF1dYWprX2NkYF5fXmBmYl9gYGJiYF9hZWNgYF9iYGBfXl1hZWRfX2FhY2Jh
-YWFjZF1dYWNgYF9gXWJlYl9fYF9hYWNjYV9eY2JhXl5gY15eXmNjYF9eYWNiZmZm
-ZWRhY2NkZWVhYl1hY2JkZGFiYGFhY2NkYWJiYGJhYmNiYmBiX2FkY2FgX19kZmJf
-YmBgYF9iYF9gX2BjaGRkY2ZlZmVkZmhjZGJfZGJgY2RkZWVjZGVkYl9fYmhoZ2Ni
-ZWZmZ2dmZGVnaWhoaGlmZWZoZGZnaGdnamlqa21wa2tpa2trZmdqZ2hnZmVlZGZl
-Zmhqamlqgq3I1d3h5Ofp6evs63x7enh2d3l5enl7e3l4e3p7ent8fIB/dnt8fHp2
-end7eXp6eHh4dnp6eXh4fHt7e3p7fXt8fX18e319fX2AgIKDgX19f3t2en17fn5+
-eXp/f318fX1/ent7enx/e3t7f3x5eXt7fn99fHt3enx8ent8fnuAe3p7ent5eHd3
-d3t8e3l3d3ZzcXFvcHFwb3Bzcm5vcXFwcnBycnFzdnVzcnZ1d3d0dnh6fXd6fXh2
-dXZ3dnh1dnp8dnR3d3l4d3d5enl7fHh5fn17e3x5eHl4e3p7eXt3d3l4eXp2eHp8
-eXh6eHh5dnp5eHt7fHl7fHl4ent6en93eHZ3d3Z3dnh6dnV1c3RydHRydHJ0c3N0
-c3N0dXh4dXFycnJydnNxb21ubm9xcnFzdHV1dXZ1c3N1cnRycXNzeXJycG1tbW5v
-cG9vb25vbG5ubm1ubG5qa25ua2Zoamlpa2tpaWxnamloZ2VnamtsbWlsbWpsbm9q
-amtrbWtramltbXFubWtsampraGdoaWxtb25tZ2doa2diYmNhX1xaV1taV1hZVldX
-WVZXVlRUWFdTV1hWVlZXVlZcWldXVlhZWlxcXVtZWV5eX2VfY2FfX19gZWRkZ2hl
-Z2ZiY2RjY2RobGloaGVma2hpaWpraWhkZWVnZ2ZkZmlnZmpqaGhmZ2poZWdna2tr
-bWtobWtoaGZpamxpamxtb2xraWppZGVrampmaWhkZGVmZWVoZWJmZWZmZmVjYmJh
-ZWRiZGJlamZiZWZlYmNmZWJjZWhqaWlnZ2VkZmdjZ2dmZmVnaWdlY2RlZmlnZGho
-aGJkaGdlaGVoZmZpZmNiZGZnZGRpZmRiZWdjZ2lsaGZnaGhoZmhraWdoaWdoaWhq
-aWtqaWlqa2hnZGVpbGpmaGtnZmVlZWNhY2RjYV9fX2FiX2FiYmFgYGBiYmBfYWFh
-YWFhY2JkYl9cX19hYF1bW1xZWV1fXl5fX11eYF1fX19hZWBeXVteYWBiYGBfYWNi
-X2BgYGBjYl9fYGJiYGBhYWNkY2VkZGdlX2NlZWFgYmNkZGVhYGJgZGJgYV1gYGBj
-Yl9cWl1cYF1cXF5gYF1dWFlaWV5fYGFdXmBeXV9dXl9fYF9gXV9jYF9hXl5eXmBf
-X19fX11dX19gXV9gX11eXl9ZXF9gYWJfYWFiX2BiXl1gXl1fXV5hYmJhYmBgYGFh
-X19dX2BhYl9fYWBgXVteYGBgXmFkYmJhYWFhY2FedWVdXmBbWV9gXWBgXl9gYmRk
-Yl5gZGdiYmNjZmBeXV1dYGFfY2NjY2NiYmBhX11dXV9hYmJkYF9eXGJeX2FjZWRi
-Y2NiXl5eX19cW11fYWJjY2FjYmRnamZjZGNhXl5jYWJjYmBfY2FhY2RfYmNiYmJh
-X2FhYmJhY2NjYWFiX2BiY2RgYGJiYWFhXl9gZWNhXmFlaWVlYmFhYmJhY2VlZmNf
-YmRiZGZkZGRkZmdnZGRkYmJhZGRhYWBlZGViZmlnaGloaGhkZmllZmZlZWdnZ2lo
-ampqbGtqZ2dqbmxpa2lpZ2VlZmdqZmhkZWhnaGp9r8rV3OLk5+rp6uvreHp7enh5
-eXl4d3p8end6eHh3fXp7fH15fHt4eXp7eXl7eXd2dXh1eHh6eXl4e3x5enx8fX5/
-f3x8fX19fnx9f3+AgH58e358fX17fn97enp/f3p/gH1+fH58fnx7eXl5fHx7eXd6
-fH17eXd1eHp7e3p9fXp7eHh5e4B/eHd4enh3eXZ3dnZzcXRzcm9ucHNzcnBwb3By
-cnBub3R2c3R1dHN0dHJ0ent6ent6d3N0dXZ5dXV3e3h0dnd5enp4eHt7eXx8fHx5
-e3h1eXp6enp5eXh2eHx4d3h6d3d3dXh7eXZ5eXl6enp9e3h5enx9d3l6dnZ5eXR1
-d3Z3dnZ2d3V2dHR1c3R2dXNxcnV0cnNycHFzd3d4dHRxdHd1dXRyc3Bvb3N1c3V1
-cnV0cXJ0cHJzb3Vxb25vb3Fxbmtqb3NvcW5tbGlsbW5sbGtramtrbG1sampsa2ts
-bWppa2tqaWpraWVoZ2ZlZmtpa2xqampqaWlnaWlram1tbmtoaWloamdmZmlnaGlr
-aWhoZmZlY19gX11bW15cXF1aWFdZVlZVU1NRUVNWVVRXWFhYWVdbWltaWlhaWl1a
-XFtcXVxdXl9dXGBjYV5fZV1fYWRmZmZmZ2JmZ2VjYGJlZmZoaWloZ2pqZ2lqZ2Zn
-ZmRraGdqaGVraGZkZmZoZWdpampvaGZoZ2hna2xoZmptbmtubm1rbGxsbWpnaGZl
-bGxpaWpiYWNlZmViYWdnaWVlZWRjZWFiZ2dlZWZmZGRkYWRkYWVjYmFkaGhoamhm
-ZGNmZGJjaW1oaGVmZ2poaGlqaGhoZmRkZ2dmZ2RjYWJlZWlnZWJjY2FjZGNkaGpk
-ZWZoZmdkZ2ZnZWVmZWloZ2VoaWllZmpnaWlnZWhpZ2RlZmdpaGhrZmVkZmdpZGRj
-YV9hY2JiYGJhX2FgYmFgX2BfYGRhX19gYmRhZGRhXlxeX2BfX19fXl5fXV1cXVxe
-YF9fX19dYGFjYGBgX19eW19hYF1eZGNkYWBeXl5gYFxfYWJgYF5hYl9dYF9kYmJm
-aGZhYWJjY2RjZGRjZGRiY2RkYWBiYWBiYmBeX19bXF5bXF1eXVtcWVxfXl9iX19g
-W15gX11gX2JjXV9gYGFhYmNfX19eXl9fX19fXl1dXV1gYF1bWl1eYGNdXV9iZGNj
-YV9iY2FgY19gYV9dXWBjY2RiY15dYmNhX19eX19cXV9jYGJhYGBfYWFhXV1cXF1e
-XmFhYmFwXFxdX2JiXF9hX2FiX15eYWFfXV5eZmRkZmZgYF5dX15gY2djYGBgX19g
-X19gXV1dXl9jXl5eX2FfYWRlYmJeXl5dXl9gX2FhYGNjYl1fYmFeX15kZGVlZmVk
-YmNkYV1fXmBkZGZgXF9kZWZmYmJfXWBfZGFhYGBgYWNhYmNeXWBhYmBgYV9gYGFh
-YmFhXmFfY2VkY2JjY19gYmNiY2JfYWNiZWhnZmNmZWVjY2RnYV1eYmBfYGBiZGZh
-YGNgZGZmZWdmZmloamVkY2dnZmhraWtqa2tpZWRmaWhrbGtpZ2lpaGpqZ2RkZmZk
-ZGhmanusytXd4uTn6Orq7Ox8fX19e3l4eHZ7fHt9fHt+fXl5dXZ8fHp5eHV5ent5
-ent4dXd2eHd5eXh4eXd6eXl9fnx6fHt9f358fX9/fYB7en5/fXp8fHl8fXx/gIJ7
-enp7f3yBg399e3p7fXx5d3p+fH16eHt6eXt6eHd5enl5eXh8fHx8e31+f359e3l4
-enh2ent5eXh0cnJxcG5ycHFvcXBycG5wc3V5eHN0dXNzdHV3eHd5enl+enl5d3J0
-eHd2d3h3d3l7eHl6enp6enx7eXx7e3p6eHt5e3x+end0c3Z3eHx6eXR4eHR4dnl8
-enZ2d3Z2fHp4eHp9fnt5dnV0cnV4enl2dXZ4dnZ4eHRyc3d2eXh4d3Z0d3N2dHJ2
-dXRwc3N0c3FwcXBxdHN2c3JzdHFxcHVycnNyb3Bxc3Nyc3NwcG9saGpubGxubm5v
-b3Fwa2lnaG1ubm5ta2ZoaWpubW5ua2tpamlraWhlaWhsaWltbGhpZ2pqamhoaGps
-a2ttbG1rbGtucG9qamtpam5sa2poamlnZWhpZWRjYmBdXFxcXl9dWlxYVlZWVVZW
-VVpYVlhXWFxcXVtZWl1dXl1ZW1tdXVtbWllfXlteX2FeYF9gYF9fYGFhYWJjY2Nl
-ZGNkZWJjZGNlaGdpZmhnZGVnZmhmZWhoamlta2hnaWloZmNkZmdnZWhna2xrcGxr
-amdnaWxubGlpamtvbW1tbG9ua2praGdlZWVhY2hqZWRkY2ZkZGFiZGNmZmRkZ2Jl
-ZmRkY2JjYWFiZWhoZmVkZ2RnZmhpamtsaGRnZGhoa2xpZ2dqampoZWZmZWdmZmlp
-aWhmZGVmZWVmZWZkY2JjY2NjY2hqZmdmaWloaWdmZWJkZWNmZWZrZmdpZmRlaGdm
-Z2ppZmNkZmloaWpqZ2ZmZWVnZmVnZmJhYmFjZWFlY19fXmBgX2FjY11eYmJiY2Ni
-[base64-encoded binary file content elided]
-enh1eXZzc3Jxcm9xc3FwcXd5cnBvbHRzbnFqbHBvc3Z0cnJzcnJ0dXd6d3R0cm1t
-cHZycXBwbnBxcW5ua29ua2xwcXRycnFuc3Nwb25yen2EjJCOi4WIioqRmZuZlZiU
-ko6QjpGWmJeUkpCQlJWYmZian56eoqKnqKiqqKKin6GlpqaioKKkoaOlpaOhoqCe
-oKGhoqGio6CfnJuZm5mYlJKPjo2IhoSGhISEhoaEhYyQlZWTlI6MiYWEh4eFg4KD
-hISFiIuNioyOkJKRjoiIiYeBe3uAgoKEhod+dnJrbXR3dnl4enl6en6Afnh1cXBw
-c3Nwc3Vzb2ppaWVoaWpwbW5qaG1vaWdlZmllYmVlZGRmYmRoaWZmaWtnZWdmZmdp
-Z2ZnZmRiYWRkZGBlZWNjZmRiYWFiYmFhYWVkZGRnZGZoaGdmZmhoZWlkZ2RmZWJj
-ZGdmYmRnaWVgYWRjZ2ZjZmRjZmRhYWBkZGNnaGVka2loZ2ZlZmJfZWNiYmJiY2Ni
-Y2ZoaGNkZGRjY2VnZmRkZmRlZWJkY2JkZmhkZGZmZWBcX15fXmNkZGJgYmJiY2Ji
-YWJiYV9hYV9hYmNeXWFnYmFeXl5fZmViXmFfXVxeXl5fXl9eXV1eYGNeYWBkY19j
-Y2FkYWBfXlxcYV9dXF5bXmFgXV1aXF1bXVtcYFtaXFhbXV1bW1laX19fYmFkY15a
-WFddXltdX19cXlpdYF1dXmBfXl5eXFxaW15dXF9fXVtaW1taW15eXVxfYV9fXl5e
-XVxeYF9dXF1hZmBdXV1fYV5cXV5eXWBgX1xbXF9eXF5hX1xfXV9hY2FdXl9fYGBg
-X19eXV5jZWJiYFxcXWFdX19cYGNdYGJgZGRhYF5fW1tbXFxcX2RgWFtcXlxcXmJh
-XVxdYGFgX11dYGNjY2BgYV5eYGFjYWFhYV9hY2RiYWFiY2BhXV9fYF9dX2FhX19i
-Y2NfYGBkZ2ZkXWJiYmZkYWNlYl9fYWBfYF9eYGFfYF5fX2BgYGJmYmRiX15iYmRn
-YWBiYF9gYmNnYl9gYGFgZF9gX2BhXmBhYmRiYWFfYGJlZmdoZWNhZ2RjYmRhYWVh
-ZWVnZWNiZ2NlZGFjZGZjYWRmYmJkZmVjX2FjYmVnaGRiYmZnZWVjZmhoZ2lsamxs
-bGtsaGpqZ2NmZWZlZmhsZ2ZnZ2dkZGZjZGZmaGpnZ2pmYmmBrsrV3uLl5+nq6+vr
-fn57e3h5en18e3p5fH2Af3l4e3h3d3h7e31/fHt8eX9+fHp6fXp4dnp5d3l3eHd4
-eXl8d3l4dnx7e3p9fHx+e31+f4CAfnx8f4KDhYCBgYCAgYGBhYKAgYSCgYKBfYCB
-g4N9hYCAf4KBgICAgYR9fHx9eHx8foB8fX5/fnx7fYF+fHp6eX16dXNxcnNzc3d3
-dXVzdHN0dXd2dHd5dXh4eHd6eXh4d3p9enl2c3R0dnV3dXR2eXd1dXNwdHl7e398
-fnd1dnd1eHh4eXt4eHZ4dXl3dXR1eHh3dXNzdXh4d3VzdHh4eXh3d3d5dXVzdnh4
-d3h4enl1d3p3dHR3dnZ1cnN2dnd3dnl3eHl3eHZ3dXFvcXNyc3VwcG9wb29ubnFy
-bm9scHFyc3Fxbm5zcXJwcG9zcXBvdHFva3BydHNwcHBvbWxrbHBwbnBvcHFwcnJu
-b3FzdXp9hIiLiYmGhIWSmJybmZKUmZWVjI2SlJWRj4+QkI2PkpicmZqcnaCjpqau
-q6Sen6Kkp6WmoJycoJycnqOlpaWjoaGho6SprKmln52ZmJmZmJaTkpCNj5CRjI2K
-hYeEhIOGjo+XnJeTk5CJiouMiYaEg4SIi42OjIqLiIuOkJKMjIuKg4KBgX6BhIWG
-hH12c3d6e3h0cHF1e3p8f396eHh2cG5xdX16eHdzbWZlaGZoa2lpaWlrbWxoaWdp
-aGZoaGppZmdmZGZjYmFjZGNiYWRiYWBhYmVnY2FjaWllZGZhYmdkZmVkY2JiYmVj
-Y2NkZmVoZ2ZnZWVqZGJiYWlqZ2FjZmhqamhqamtqZmNmZWRnaGZmZmRkZWhlZGZj
-YWNjZWZoZmVjY2RlZWdlY2FiZGJiZGRmYmVnZ2ZmYWNjZGNkYmRiYGVkY2NnZ2Vi
-Y2ZlY2RjYGBgYGBhZGJhY2BiYmNiZGJiX2FgYV9eXF9eYWJcX2JgX15cX2FhY2Rl
-YF5eXF1eXVxbXF9gXl1gYWBgX2FhYWBeX19hY2BeW2BiZGBdXmBdXl1iX11aWlpa
-XFxaW1pZWVteXV1bXl1eX11eX2BeXVtaXGJfYF5eWllYWlxdXVpbXF1aWl5eXFtb
-W15dXlxcW1haXFtbXVtaWlxaXF5eX2BeX15fXV1bXV5gZGBbXWFgXl1ZXFxcXl9f
-XV5eX2BfWlxeXl1fYmFgYmJiYF5cXl5eX2JhZGFgY2FeXlxdYV9dXl9gX2JfYmBg
-XFtdXFxdXVlZXF9eXl9eXlpeXV9fYGJfXF1eXl9fXl5fYWFgXmFgYmFgYWJhYF1e
-Y2FhYmFgYF9hYmJiX19dYF1dYGFfYF9jZmFgYmNkZWZiYWJiYV5eXF9kYV5eXl5g
-XWFgYmJlY19dXl5fYWNiYmFhYmNmZGRjYWJlZWRjYmRkYmJgXGBjYWFkZmVhYF5f
-YGJfX2BfYmVmZGNjYmNiYmNiYmRiZWJiYWViZWFiY2BhYV5hYGBiYWJgY2NhY2Vl
-ZGNkZGdmZGVqaWdlZWhoZmdnaGZnamtqamxqamhkZGRpZ2lpZ2loZmhoaGlmZGNi
-Y2dqZWlpZ2RnZnisytbe4uXn6Orq6+t7fHt7e3t9fHl5e3l6enx9enh5e3l6end6
-fnx7e3t5eHl5fXx/fXp2dXd8eHh2dnV0d3d5en57gH19e3t9fH58fXx+goF9gn5+
-fX+AgYGCg4WFhISEg4CAgH+AgYGAgoSAfn9/hIOBfoCBgYCAhIB+f356fH1/gYGA
-gIF8fnl7fXx8fn99fnp3d3h2dnRyd3R1c3R1dHJ1dXV0cnV1dHJ2dnl6eXd8fH17
-eHd2ent5dnV2dXl5eXh2dnd5eHp5d3t6e3t5dnFzd3l2d3d6eXZ2dXd4d3Z2dnVy
-cnV1d3Z3dHF0d3d0enp5eHpzd3h3dnp2c3R1d3h5d3l6enZ6e3d1dnR3enJ0dXR4
-dXNzc3R3d3p0c3FwbnBtcHBtb25scHNydXVxbm5vb3BxcHRycG5ubmxsbm9zcHFv
-b3BxcnFxbW5wbm9vbm9ubW5rbG1vbW5vcXV+gYaMi4uIh4iLj5eampuXnpuVl5aU
-lZKQjImOj5eVk5SZmZmfnJyhpKGkpaempKemoqKkpaCfnp+foJ+enKCkpqOjpKWj
-oaSlpqajoJ2cmpqYmZqbm5eUl5aVj46OkI2Ljo+Pk5eXmJWTj46KioyHgnt8gImO
-joeHi42JiIaFhIiJiomJi4aAfoGFhYeDgH1+fn96d3V1d3d1en+AgHl3dHJ0cG10
-eXp5eHVyc3BtbWloZmZnaWtqaGtraWdqa2lqaGZlZmhjY2NiYWJiYGNiYGJiX19l
-ZGNhY2ZnY2VnYWNmZWNmZWNhYmVlYmRlYmVmZ2lnaWhlY2RlY2dnZ2hnZGRnZWln
-a2tqampraGdpa2pkYmVmZWNjZGJhZmdkYmJhZGVhZGNiYmNmaGVkZmdjYWRmZ2Ji
-Y2JkZWNnZGNhYmNiZWRhY2VlZGRmZmRlZ2JhYWJjY2NnZWRhYmNhZGBgY2RjYWBg
-YV9gYFxfXV1eXF9gYGBhYV9eYGFjZWFhYF1dXl9eXl5fYF9fX19fYF9jYmFfX2Bb
-W11eXV9gYWJfYGJhX15fYWJhYl5bWltcWVhZWltbWllbX11cW1paWltdXVxbXFtc
-XV5fXlpcW1pZW1lYVlhcXGBeW1tcXVxaXl5cXFtcX1tZXV1eW1hZV1pbXV1fXVxd
-XW1dXl1cW1leZ2JeXl5fYV5bXV1jYV9hXVtcXlxdW15fXVxfYF9gYWJgX15dXl9e
-X2JfX2FfX2FgYWFfYV1dXFtdX2BfXV5eW1ldX2JgY19eXF9eY2NfXF1eYF9gX11d
-Xl5fXVteYGFiYl1eXV9gXmBfXl5gX15cXV5fYmJeX15gYGJeYWBgX15iYGJhYWFi
-aGFeX2JgYWNiZGRhX15fYWRhYmBfX2BgY2FgZGViY2FiYGFhY2NkZGJgYmFjYmRj
-Y2NgX2FeYmVjZF9iYGJkZWRkZGRfXWBhYF9dY2JhYmFjYWJhX15gY2FfX19gYmRk
-YmFhY19eX2NjZGJhYF9jYWFlYWRjYmRkZGJiYmFiZmZmZmhoZWdnZ2dnZ2dpaGho
-aWpnZ2lnZmVma2tqaWpsaWpoaWhoZmRiZWhoZWRjY2dpeq7J1d3i5efp6err6n57
-e3p7fHt+fXp5eXl7ent9e3p9fnt6enh3e359eXx7eXt8e3t7eHd2eHl4dnR1eHd1
-eHd6ent8e3x/f3x6foF+f4CCfoB9f358e3x+f4CAg4aFhoaEgHx9f35/h4SFhYSA
-f3+BgYCBhH9/f36BgH2Afn+CgICAgoGBgH+Bgn19fHyAf3x5d3t4eXl4d3Z0dXV1
-dHV3dHBvdHZ0c3Nwb3J2eHd5dXV3d3d4e3t4dHh4d3Z2eHt8enh2eHd1dHV4fn17
-f3p4enh3d3ZzdXR2enl4dnV3dXR0cXN2eHd0dnh1dnh2d3V1dHR2eHt5fnh2dnl2
-dnd4d3d6fHx9eXt6end1eHNxYm11c3ZzdHJvc3d4dnNxcG5vbm5ubm9xcXNxcXJx
-cHFvbnF2cXNxcm9wcHBxcXFxcXBvcnN0cW9vcnBvb3Fvbmxta29vbWtqa2lvcXJ2
-fYKKi4yLiYiKjI+UmJqZlZaZmJeVmpOSjomIiYuMkJacmpeSlZ2dnqCmpKOjpaSl
-o6Ofo6SgnJudnqCgnJqcnqChpKempaShoaKlpqKioKGho6OkoJyYl5aYmJeVlpSX
-lJCQj4uJhIWJh4WIiISFgoF9e3t8foGEgYOLjIiHg4OFh4WCgoKDfH1+foGBgICD
-gH16d3V8f4B9eXZ5e3t6eXt3cW9tbnB0eHt8fX58eXh0cWxra21saGloaWpqZ2pp
-aGhpaWdoaGdgZGVlY2NnZmNiY2NkZ2dmYmNjY2RlYl5dYmNiYWRjY2NkZWhsZmRl
-ZGZkZWRkZGVlZWdmZ2ZoZmhlY2Zpa2hlZ2lmZWdpaWppaGZlZmdjZmZkX2JmaWhi
-YWRjY2VmY2NiY2FiZGVmZGVhZGpraGRhZGZnZGRnYWFhY2RpYWFmZGFlZWJjZGFi
-YmFgZGNiYmJgYmFhYWJkYmJjYmFiYF9gX1pZXWJgX15eXV1cXV1hZF9gYGNlZmRk
-YWFgXl9hXl5eYGFfYF9hY2BeYWBfaGtcX1xeYGBcXl5fYWFiYV9gXl9eYFtcW1lZ
-V1paXF1dW1pdYF5fXl1cWVpXWVxcW1xbXFxcXV9hYl9cV1paWl1fX15bXl1eXl1e
-XF1cXF1gYGBcXV5cV1hYWV5aW1xaW11eXl1cXF5dWldfanBiX2FgYV9aXV1cXVxe
-X15dXFxbXF5cW19iYmJfX11eXV9fYF5dX15gYmFgYV9eY2JeXltbXVxcXV1eXl5e
-Xl9iYmNiY15eXV9iX15eX19fYF5cX15eX15eXWBfYF9gX2BhX2BjYGBiX19fXF5g
-X2NhYGJiZGNiYWJfYWJdXF9iX19fX2NjZGRiYmJgZWFgZGRiYmRjY2JjYmJhYGJk
-aWRhY2RjZmNjYWFiZGRhX19gYWFhYmNlYmBfYWBgY2FiYWRgY2NhYGBiZWJkYWBh
-Y2NeYGFgYWJhX19gYmBfXWFeYGJhYmNjZGJkZGNgYWJjYGFhYWBgXl9iYGJiY2Nj
-ZGRiYWFlaGpnaGVjZWVmaGZoamlqaWtoZ2ZlZWptamloam1sa2lpZWZnaGVoZ2Zp
-aGZoZ2hlZmV0q8fU3uLm5+np6uvqenl5e356fHt+gHt8end2enh5eXx6e3x9enh3
-enx/fXt5ent5d3l5eXx5eXl6d3Z9e3h2eHx9fXt4enx/e3t6fX1+f36BhoF/fX2B
-gYCAgIB9f4J/g4GHhX5/gIGEhoOBgYCBg36AhICBhYGAf39+fn+BgYKBfn6CgYOA
-gH5/fn59fX19eHh5eXd4dnZ3dXJyc3R1dHR1cXZ0c25tbXNyc3V3dnV0dHR1dXl9
-fXl7enx4d3d4eXx6e3h8e3d2fHp5ent7dXRzc3JydXV0dnV2d3l3dXV2dHVydXR2
-c3B0d3d4dnN1dXRycnJ1dnN4d3Z2eHR0dHZ3dHd7dnd4eXt5eXZ1dnNxcXNyc3J1
-dHJxdHRyc3Fwb29xcW9wcHFxcXFxcW5xdXFxcHJ2dnNxbnB1dW9zeXN0dHh0dHBu
-cG9tcG5vbWxucHJycHFpaWxua3B0eX6Bh4uLh4WFiomNkZaVlpebmJeWkZSOjYuF
-hYOGio6QlJiXl5aWl5udoKSkoqCgn52enJ+kpJ2XlJeXl5iYmZean6Gfo6WmpaCh
-oaKmqKemoqOhqaehnpycm5qamJSVk5eXk5GKhYCChYeIjYqHhoiDfHt6e3l3d3p8
-foKIjoqMiYaGiImIh4SDgHp6fHx+e35+dnZ2en+Dg4J/fn2AfXl1cG1qa3B2dXl7
-f4KBgn99fnZwb3Bta2psbW9saGlrZmVnZ2doZ2ZnaWRjY2NlZmZmZWVlZGZlYmVl
-ZGRiYmVmZGNiYmNkZWFhYGNiaGZoaWdmZ2hoZ2lnZ2VjZmZoZmRkZWdpaWZmZ2Rk
-aWdmZWZlZGNkZGViY2VmZmNkZGNkZGJkZ2RjZGVjZGZiYGNkYmJjZWNkZ2hoZGRm
-aGVkZWRiYmFhYmBkY2ljYWFjZGNjYmBhYGJhZGRhYWNkYmJgX19fX19kYWFiXl9c
-W11fYl9fYWFjYV9dYGBiZV9gY2JhYGFkYmBkYmNiYV5fXl5gX2FgYGFfYF5hZWBg
-YGBfYGBeYGFjY2BgXl1cXVtcWlxaWltaW1tbWlxdW1xfYGBgXVxbWVdcXl9cXFlb
-W1peX2BgXV1bXV1dYGJfXF5eXV9eX15eXV1aXl5dXl1cW1peW1pZWlxdWllZXF5f
-X1xeXl1eX1xbaWZkXVxcX19dX19gYmBcW1tfYF9cXVxbXVtcXl1eX2FgXl1dXF1f
-X19gW15gYGJdXV9eXlxcXl5dW1taXWBgY2FiXl1fXl5cXFtdX15jXGBhYGFfXl9f
-X2FfXV9hX19gZGFhXmFgYGBhYGBjYGNhYmBfYWJhX2BdYF5fYGZmYmBhYF9fYGFh
-Y2JiYmVjZWNhZWVkZWRmZGBgXmRlZF9hXl9fYWNkZWRjZGVgYmJiYWNgX19hYmJg
-YV9hYV5fX15kY2JgX15fYmNjZGNeXl9jYGFgYl5dX2FhYmJhYGBeWmFkYmNjX2Fh
-ZGZkYmBjYmJhYmJgYF5eXF9hX2BjY2JjYWBjY2NlY2RhYmRkZmdnaGhoZmlpamdo
-aGdnZ2tuamhraWxsamloZmVqaWdmaWZmZWRnaWhqbHiryNXd4eXm6Onp6up8fHt8
-e3x9e319fXt8eXh7fXp7en17fXx7eXV4fH99f358e3t3enp3d3h4eHl4dnh8eXh5
-ent6fn1+fHt6gIB7fn97foB+gn99gH9+fn+Ag4OAf4ODg4OBgICDgoOAfH+ChIF/
-gYKBgoGCgH+Af4B/foGCgoSDf39/f3x8fHx+fX17e3l5dXh5eHV0dnV0cXJxcnh1
-cXZ4d3ZycHJ0dXh3dnd4eXdydHZ4ent9e317eXp9gHx3eXp5eX59d3h5d3V2eHp3
-dXV4eXJ3eXh3eXV2dHZ4eHh6dnV2dXR0dXJ3dXZ0cnR4d3hzcnV2dnd4end4dnR1
-dnZ4dnd4eXZ0dHR0dXV5dnJ1dHVxc3d2enZ0c3R0c3N2cnFzcXFycnNzdHFxcnR1
-dHRzcXFwcXJucHJycHB1c3d1dHRxcG9wb25vcG9tcHJwcHFvbWprbGtvdXp+g4eK
-iIOFhYeIi4+UlJWUk5aWkY2Mh4WBgHyBhoqNkJKVmpiVmZmboaCgpKakoJycoJ+d
-nqGemZaVkJCRkZGVlJOXnpufoJ+enqGhpqmsqKalpqSnpqOfm5mYl5aUk5OSk5CO
-i4aFg4qLiouMh4aJhYd/eXl6enVycHB2e4OKjZCTj4yOjZCRjY2Ff3t5ent7dnZz
-dnqAiIqIiYmIg4aFfXdxcG1yeHx/goKAgH+BfXp5cG1rbWpoaWxra2hmZWdmZ2Rn
-ZmNlY2NjZmVlbGZkZ2VjZmVoZGRhZGZoZmlnZmVnaF9mZWZkZ2ZjZGRkZ2ppYmRl
-ZmpmZGJjZGRlZ2VkZGVoaWtpZmRkZGVoamlmaGpoZ2hoZGJiYmRmamhjYGFjY2Fi
-ZWJla2doZmNhYWNhYmVlZWVhZGZmZmRkY2NjZGRmY2VjY2ZqZmBiYmFjYF5hYWFi
-YmNjYmJfX2JhX2FgXVxbXmBiYF9fX1tbXmBeX19gZGRhXmBiY2FfXlxfX2BiYWJk
-YWBfW1tbX11bYWBhYF1dYF1dXVxhYGJgXF1fYmFfXV5fXV5dW1paXFpfXF1bWlta
-W1paW11cWFtdYF5eXV1cXFxcV1hbXFpaXlxeX15dW1tdXV5fX19dXllcXVtaWlxe
-Xl9eWlxcXVxcXFxbXVtbW11bXFxbXl5dYF1eXGJgYWBcXF5dXF5eX11eXV1fYWFf
-X19iX1xcXVtcX1xcXF1eYWBgXWBdXV5eXVxfXl5gYGBfYF5eXVpbXFtbXFxdYF9f
-Xl1cXF9gXl9eYVxfYmFhX2FhXl1cXV9gYmNgX2BiZGJjX11eXV9hYmJkYGNiYF5e
-YGBgYF5dYGJfYWJfXV1dXl9hYGFiYWJiZWJfYGBiYWJkZWZmY2RmZWVmYGJjZ2Rj
-YmNfYmNhYWJiXlxeYWRhXl5gX2BhX19hYWBfXl5gYmdiZGJiYV9gX2BhYmJgX2Fp
-YmBgXV9fZGVjaGVkaGVkZWdlZWJkYWFjZGNiXV5iZGNgYV9eX2BiYGFhZGFjY2Nj
-Y2RiY2NjY2VnZWdoaGhqaGlqaWdlampmampqamppZ2lqbGxta2psZ2lqZmhlY2Nm
-aGloamlwj7LF1Nzh5ubn5+np6nx+fn17eXt/f39+fn9+fn2Afnt7fnx9f356eHd4
-eXp8fXx6ent6enp6eXp6fHp5eHh5fHp5enl7f396fH99e39/fX+CgIB8fYKBfX+C
-f32AgH+BgYCFg3+BgIGAfn6AgYOCgX+DgoODg39/fX5+fn5+f4GChISDf4CBf35+
-f35+fH59end2c3J0dHVydHJzc3ZzdXZ0c3Z0c3V2dHd3eXh6e3l0dHd3dnp5d3V4
-enl4eHZzdnh2dXV7fHx6dXd2dXd1d3d3d3p2c3R3eXd4eXZ2dXF0dnh3eHd2dnZ3
-eHV1dHR1eXp5enl4dXp9en16dnh5enh4eXt7enZ1dHFxdHN2enh3dHV0dnV0dHh1
-dnZydHRvc3d2dnJwcW9vcXV0cHFvc3R0c3RxcG9vcnRybW1ubG9vcHR1cnB1cG5t
-bXBtcm1ucHNvcm9wcnFwcXZ5fYOGhYqIiIiHhYiOkZCRlJSSk46JioiDgn1/goaL
-j42PkJiXmZybnqGlop+gop+eoKGem5malpSSkY+Jh42PjI6OkpaamJqanZ+jpqWl
-paKjoqiopqWko5+ZnJmYl5aVkY6Ojo2JiIyMiYqHh4eFhYaGhIKCf3x2dm5mYmZq
-cHqDiYqMiYmKjY6KhoGCgH59fHh4eXp8fH6EiIaJiISGhoKDf3p4eXl8fnx4e3t/
-gX59dHBvampoaW1ramhoaGlpaGlrZ2RkaWRkY2ZnZ2VmZ2RkY2JjYmJgYGRiYGBh
-ZWVoaWZnYWJhY2NlZWNkZmZlZWVnZWZlZ2VnaGVmZ2ZlY2RnaGhpZ2ZmZWZpaGZp
-a2traWlqaGhtZ2JjZWNlaWdiYmNgYGBiZGVkZGlmZmVmYmRkZGVnZWRhY2VlZWRi
-ZGZlaGhoZ2dnZGVnZWFgZGRjYWNkZWBjZWRhYmJeYGJhYmBgX2NgYWBiYmBhYV5f
-XltfYWNiX2FfYGBfXl5eXlxfX15dYGBgY19fXVpdXlxdYF5gYV1fXlxeX11fX2Fi
-ZF9cX1xeXl1dXVpdZGFdXV1gXVpfXltZWlpXWFlbW1xfX19dX2FhXFlbW1tbW11g
-XV9gWllbXFxbWl9fXlpYVllaW1paWltcXV1fX11gXV5gYV9eXF5dWlxdXlxcXV5h
-ZWFeXVxdXl9cWVtcXV1eXF5fXVtdX2FhX15dXVxgXlpbXFxcXVxdXVxeWlxfYmBd
-XVxbXWBgYF5dXV9cXFxcX15bXVxaXl1eW1paXF9dXF9fY19eXV9gX1xhXl1fYGJh
-YGFeXVxfYWFjY2ReYV5eXV9fYGBhYmJfX15dX1xZXFxdW1ldXmJhYGBiYF5dYGFg
-X2RfYWFhYmFgYmFdXF1gYWFgYV9dYWRjYmNhXl9iY15hYWBfYGBfXFxgX19iYmNj
-ZWVlYWJjYmNlY2NiYF5eYF5hY2RkZWhiYmJhYl5eY2VjYWNkaWllYmNoZmZkZGVh
-Y2JfYGFgYGBfXl9iYmJjZGZhYGFhYmNlZmNhY2FjZ2dmZGVmZ2lqaGtqbG5oZ2ho
-cWpua2toamhraWdtbmhnZ2ZmZ2dkZGdpaWdnaWuPtcjU3uHk5uXn5efqeXp8ent5
-eXx7en5/gH58fX59e3x7fHl8fn17fHh5fHp3ent8eHl6eXl6eYGBfXh6fX16e3t5
-eHl5eHd2eX18fn19gICBe3h5f3+AfXyAgYGBgYOBgICCgICAgoCAfoGAfoF+fX6A
-fYGAfn5/f4B8e3+BhIN+f4GAfn9/gH5/gIB7eXt8end1c3NycXRzc3J0dnV3dnV1
-dXV0dHR2dHZ3dnV2enh6eXp6d3V1d3h3dnl3dnp4eXd0dnd5eHp1dnR4eHd4eHZ3
-eXd0dnZ2dXd7eXh1dXR2eHd4eXl4eXp3d3d3eHh2dnl5d3t5ent6fXl6eHh2enh4
-e3p6e3hzdHJydXRydXRydHN0d3d4eXV2dHFzbnB1dnR2c3Nzc3JxcXJub3BxcXNw
-cXFzdW9xdXNtbHBycXV0dHZ1d3BtbnN0c25rbG9tcXJycXBxdHR8f4GDhISHiYeH
-iIiJi42OjpCQj5GMiIWFhIWAhIiJjY2Sj5CSkZCSm6GioaGgnJqZm6Sjm5qalpaS
-jI2LiIOFiYqNjY6OkJOXmZ2goaCfn6Kjp6WjpqWipKGgnZeampiXlZGPjZGTkpOQ
-jYyJhoKAgoJ9eXx7enp7fHl3dW9uaWdlbHJ4e36CgoGEgIB8e3t/goB9eXl5fHx9
-eXx9goKBf39/gYOCfH59fn97eXh6eX18eHNwcW9tb25ubWtpaWhpa21oZ2ZmZWZk
-ZWZlZ2hiYmZnZGZjYmFfYWFiY2NiYGNiZGVmamdoZmNjZWVpZWBiY2NjZWhmZ2po
-Y2JmaWhnZmJjZWVmZmdnaGpmaGdnbGloaWhoZWVkYWdpZWNhX2FiZGRjYmBkZmRn
-ZmRlZmhkZGJiZmVlZ2VlZWdoZ2ZmY2NlZmZmaGdnZWNiYmVjY2JjYGFjYl9jYmBj
-Y2FiZF9hYGJhYGFiYWBiYV9iYWBfXl1eXl9cXl9iYWFgYF9cXVtdXV5dXV9dYWJh
-YV9dX1xfYV5fXl5cXF1gYV5dXmBfXmJhYWBfXl5gXltZWVpbXV1cXF1dXl9dWVxa
-Wl1aW1tbW11dXV5fXltcXFtaW1xcW11eYF1fYF9cXV9aXVtbXFtaWVldXVtaXl9g
-YWFhXVxeYWFfXVxZWFpaXF9eW1tfX11fYmFfX15fX2FfXVlcXF5gYGFgXV1fYWBd
-W1taXV1cWVpcXV5fXV5cXV1cXV5eX19dXFxcW15eXFxgW1xdXVtaXF1bW1xZWFxc
-W1xaW19hX15cXFxeXF1fXl1gYF5gX11cXV9dX19fXVxhYmFcXV1eYGBdXV9eYGFg
-Xl9gYV9eXl9eXmFgYWBeX19gY2VgYGFhYGBgYGBiY19dXmBfX2ZjZGJiYV9gX2Nk
-YmFfXl5eYmBgXl5gX15fYF5cW19hZGVkZWNkYWNiYl9fYmRhYWNgYWFjY2JhY2Rg
-YmNkZF9hZmJjZGNkY2RiZGRiZGRkY2RjY2VhYV9eXmBfYF9gY2JjYGRlY2BhYmFh
-ZGJiYmFmZmZmZ2hoaGhqaWloaWxqaGdnamlqampqaWhoaGlnZmhmZ2doaWdjZWhn
-aGhnao21ytbd4uTk5ujn6Ot9eHx6fH58fX18e3x5e3t9e3l6fnx7e3l4en16enp9
-fHx8e3t+fHp7eXh8fHl8ent8gX55e3x5e3p3d3t3e3x5en99ent6g39/goGAfX6E
-g4SEg4ODf4GDf4GEhIGAgoOBgoKDgX2EhH9/f39+fn5+f4CDfn9/gH+AgIGBfn99
-en16eX96eXZ0dG5wdnV0c3d0dXN0dXh4d3R1dXV2dHR2dnd7gH9+enp7eHZ5enx3
-fH57eXl4enl2dHd3eXl3dXZ7d3d8enh3d3Z2d3Z1d3Z3eXh6d3Z1dXV1eXx5e3d5
-e31/end5eHVvc3V2eHp3d3l3e318eHt6d3R2eHV2dnV0dXRzcXNzc3Vxc3N1dnZ0
-cnBxcHNzdHNxcHFxcXBvcW9xcXFxb25vcnNycXR0c3BycHFycXV1cnBvb25wc3Jw
-cnRxbXFucXFycXJydn2EhoOEhIOGh4aIiIqKi4uLi4yLioiHhoCEhIiHjJGRk5SU
-lZaTlZqgo6Cgn52am5+goZuWlJKOi4iMjouIiIeIiYqLjIuMk5iZmpmWlZidpKaj
-paWmo6Oem5iYlpKSkpOOi5CRk5WWlpGMiImFhYN+e3h1dnN0dnZ2eXl3dHFqa2po
-ZmlweX+GhH98eXZ1dnx7gICAfn9/e32AfX57e3t6dnh5eH9+fn96eXl2c25vdHNz
-c3N0eXx6eXVybmlpaWlramdoZ2ZpZWVma2hlaWhlY2JnZWVnZmNkZWZjY2NoZWNj
-ZGRjZmplZmFiZ2ZmZGJiYmRjZGRlZmRlaGVpa2hnZGVoZWdlZWZramZlaGlqb2ln
-ZmZiY2NqZmNmZWNjYmRkYmJjY2FjY2dlZGVlZmhnZGVqYmNiY2VmZ2ZoZmVkZmZn
-Z2ZlZmZmZmVkYmJmY2FhYGFjZF9jYWFjZGBhYmFiYmJfXV1iYGBhX2FjY2NjX11d
-X2FhYGFhZWVhXGBdXGBeXV1fXV1cY2JjYmZhYl1eXlxdXF9hYGFgX15fYl9hYGBe
-YF5gYF9bXFxbX1xcWFxeX19dWVpaXF1bW11cXVlbXWBfXFpdX1pcXl9fYWFeXV9f
-X15fX11cXFpaWVddX2JbWl5fYWFhYl1eXFlbXVpdX11dX15YWF5hWVhXXV5bWl1f
-YV9cXV9eYGBgX11gX15eXl5dXl9dXl9fXFtdX11cX2BdXl9fXlxbXWBeXV9fX15d
-W1tYWVteX1xfXl9gXltaXF5dXltaW1xbXFtbXV1dXlxcXV1bXF5iX15fYF9cW11e
-X19dXFxdWltdXmBfXmBjZGJeXmBgX2BiY2FhYV5fYGBgYV9dW1tcXl9jZmJhY2Jh
-YF5eYl5fXl5fX2BkY2JgYWBgYWNjYGJhX19hYmBfX2JhYWFgX19eXFxbX2JiY2Jk
-Y2RkYF9fYF9bXGBiYWBfYWJhZGZiZWZlYmFhYmBlZGNmZGNjYmFhYmNiX2BiYGFk
-ZWBfYGBfYWRjYV9jXmNjYWNiZmZmY2BgYmNlZmZlZGdmZWVnZmdnZGZnZ2loaWtp
-bGppbWppZ2lkZWtqbGtraWZlaGZmaWhoa2hwprnM197h4+Xm6Ojn6nx5fX1+enx6
-eHt8fH98enp8fHt9fnl4eHl5fXp8fHp6fXl4e3l5eHl8enh4en1+fnx8fX5/fHx7
-e39/e3h8fn16fn6AfoF/gIB+f35/gYGBgX+Bgn+BgoOCgoGDgYCDgIKDhYN/goSA
-gIB+fn1/gYOCgnp9gIF/gX1+f359fXx4eXl8fX14d3d4fH11d3h6c3NycnZ4dnd2
-dnZ3dnBydnd5fHt3fH95eXZ3ent7fXx7fXl3dnZ8e3p4eXZ3eHZ0e3l6e3t7fHl4
-d3l3d3l7dnZ5eHp1dnZ1eHZ2enp1eXp8fIF7eXV0dHNyd3t5eXhzeHd2eXp5eHZ7
-eHh+enl4e3hydHV4eHdzcHJ0dHV0cnJzb21ub3Jyc3d0cnBydXVxcG9vbW9ubXJ0
-c3JxbW1ubnJvcHFwcXByb29vbm9tbXJydHNwcHFvb3N0dHV5fH6BgYCAhYiHi4mL
-iYqHi42OjIuHh4SFhIKFiYmLlJaVkpiTlZubnaCin52bnqCgpqeflpKKhoaJjY2J
-h4qJiIOHiIeFiYqRk5CNkZKXoKWipKSmo6Gdm5iVkI6KjI6Gg4WIi4yRkIyIiIOD
-g4WDfn17fX53dnh3dXl7eXl5fHp4dG1oY2hvdHt9end2cG1xb3N6goODg356eoCF
-g4J9eHFvcHBydHVycnNzb2xnYWJkaW5xcHF0eX14dnRybWtpaWdoZ2ZraGlmZWhp
-aWhoa2dmYmJgZGZlZ2doaWtnZmdnZGVkYmRhZGNhYGBjY2FkYWFkYmRjZGJiYmJn
-aWZoZ2NkZWVpaGRkZmZmZmNkaGtoZGFlZWdnZWhpZ2VoZmdiY2NkZ2VpZmVjYmFm
-ZGZmZWdkYGFmZ2VlZWZiY2RmaGdlZWVkZ2ZmZWdlZmVnZGNhY2dmY2NjZGFbXV1i
-YWBhYmJfYF9fYmBgYmFiYmVjX19eYmBhYmVkZWNjYmFfXV9hX2BeYWFhYmNfXV5j
-ZGFfXFtaX2BeX11hYGJhYl5fXFxcW11fYWBgX2BfXmBeX1xZV1pcXFtZWVtZW1hY
-WlxcW1xhXl1bWlhZW15eX15gYGBeXlpcX1tfX1xbWltdX2BiX2BdW15dXF1aWVpd
-X1pbWllZWltcXVpaXVxeWlpaXV5eXFxcXF1eXV5eYGBgXl5eXWBdXl1dXF1fXFxe
-XV5fYV5dXl1dXF1cXlxdXV9fW19eXl9bWFleX2BfX19cXltbWlldWldZYGBgXlte
-X11cXV5eYGJeXl9gYWBjX19hXl5dXV5cXVxbW1xfXVtdXmFdYGNfYmBfYGBiX2Nj
-YV9cXl5eYGFhX1xbXF9gYWNjYmJkYmNkYF5hZWVnZF5eY2RjY2JhYmBhYWNkZGJg
-Xl9jX15dX2FjYV9hY2FhX15cYWFiZGFhYGBgX15eYGJiYGBjYGBeX2BgX2BgYWNi
-Yl9dX2FnaGRhYGBfXl5hYWBiYmNjYWFiYWJiZmJjZGFhYmNjYWFhX19fX2BhYV9h
-Y2RhYWVjZGRmY2RmZGZlZmhraGpqamdpa2ptbmhmaGloZ2JnZmZoZmZpaGZnam1o
-a3Wqt8vW3+Hk5ebo6OXpgIJ+fHp6fHp5en18fYB+fX59fX6AfHp4eHd6e3l5eXp9
-fXp9fXx7e3x8ent/gIKAfnl8gIB+gH99fXx/gIF/fXt9gIGCgYKCf3+EhIB+gYGB
-gH9/fYCAgYKFf4ODgIKBgYGCgYGFgYF/gn18fn9/gYB+fX+CgIKCf399fHl6enl3
-eXl1dHZ3dnV4d3VydnVzcXJydHRxcXR2dHJzd3h5dnV3ent6eXh6eXp8e3x8fXp7
-fHp4d3l5enh3dnl3dnd3d3d8fH5+fX56fHl6eHZ1c3N0eHl4eXRyeHt0dHh3eXd5
-eXt4d3t3enV2eHt4eHd2eHV1eHt5fHl7eXZ1dnZ2eXp2dnd1dnFvc3R1d3VycXN0
-cnNzbnN1cHFvb3Bzc3Jxc29vcXJybHJydHNwbnFybnBub3FycG1sbm9ubG5vc3Fy
-cHBubm1wcXN0dXt9gH99f4OFhYiGiIiJjIqNko+Mh4SBgYOEhYaLj5OVmJKPj5OY
-mZqenZqXmZyjpaKgm5aSjoqJi4uKi4eKiYOCgn9/fYGFiouLjI+Rl6CkpKKioaKd
-mZKQi4uGgoB/fnh2eHx8fYB9gYN/gHl2e3x9gIKDg4J+fnl5enh3fX6CgoR8dnBt
-bG1uc3l6endzcW1ubXR5fYGCgIB5fYCCgX12b250c3Jxb25saWhnYFxeX2JoaW1p
-b3B0eXt5dHh2dGxraGdnaWZraGZpZmZkZWRjZWRjZWZjZWNkYmBkaGpoZWZnZGRl
-ZGZkY2ZkY2NkZGRkYmRkaGdkYV1eZmNkZmljaGlnaWRkaGZmZWVkZWhoamhkY2Zk
-ZWZlZ2doZmhmZmRiZGNkZWZmZWRqYmJiYmNjYWZkZWZlaGVmZmRkY2JmZWRkYWFh
-ZmZmZmVmZ2RgXWFgY2NiYmFiYWJdX2BjX1xgZF9bXWFjYmBgYWJgYmZnY2NkZmRm
-ZGRiYmZiYmNgXV1eX1tgY2NhYmFgXl5gYF5gXl1eXV9gYV5hY2dmY1taXV5hX19f
-YGFhW1tdX15eXl1bXF5cXl1aWVZYWllbW11dX2FaW1xcW1haXFxbXVxeW1xXXFtZ
-WlpZWV1dXV1gXl1dXl9cW1xdYWNeWVxdXVxbWVlcW11dXV9eXVxfXlxeXlxaWFhb
-XV1bXl1fXV5cXl5eX15fXV1bW19gYV9eXV1dXGBdXF1eXV1eYGNfXV5fX19gYGBd
-XVxdXV9fXVtYXV5cXVxcWlxgXl1cXlteXFpcXF9dYF5gYGFeYl1eYWBhYF5dXF9h
-X15bXmVgXltcX2JfX15iZGJgYmRkYF1eXFxdYGBeYGFfXmBgYGFfXmFhYF5hZmFf
-YmNmZ2ZkZ2VkY19hZWJhY2JiY2JiYWJhYWFfYGBhYWBiYGNkZmFiY11eYF5gYWRl
-YV9eXVpcYWJhYGJhYmJjY2NlYmJjYGBjZGFeX2FhYmBfYWNgYGFiYmNgYGFhYWFg
-YWFgYmNjZ2VhYGBfYmJiYV9fYWFgYGJiZGRkZWNkZGdlYmdmZ2hlZ2hra2tqaGdp
-aWhramhqbGpnamZoaGVmZmVmZGdra2lrb6C0ydfe4uXn5+jp5+p9fYB9fnx5fX58
-fH19fn9/fX57fXx+fHl3d3h3dXh7eXZ8e3t9fH9+fn99fn6AgX17en9+gH9/fXx8
-eHt7e317fXx9gYKBgYGCgICCgoKDgoGFhIF/gX+Bg4SEgoKEhIN+fn6BgX9+f4GA
-f4B+gIOEgoKBhYCAgX6BgoB+fHx7fHt4eXZ3eHd0dXR3dHN0dnVxd3ZzdXZyc3R0
-dHN1eHd1dnh6eXl4d3l7e3x7enl5enp2eXd3eHp9fHp5eHl6d3l4enp7fX57e3l7
-e3x8eHR0cnR1eXd2eHN4d3R3eHZ3e3h2dnl4dnZ2fHl3dnZ3d3d5d3V4dXl5enx/
-eXd4en16d3l5ent6dXR0eHR3dHVzdHNzcHR0cnJ0cXBwb3FxcnB1dHNwb29vcHBu
-bm9xbm1tbm9tbW9ucXFwb25tbXBubW5vc29tbG9vdHh8eXx6en6Ag4GHiIiFgYSI
-jZSTk42De3+Bh4WIiY+Uk4qLjY6QlJmYnJucn5yfn56clo+Ni4qKioqIhoWEg4KB
-gH98e317f4OHi4yOlZyeoZ+goZ2alpKMioiFg399fXlzcnFxcm9tc3V1eXZ2dnh6
-fX2AhIOCfnhydHR1dnh8gYaKh4iCe3hzbm9vcXp5enl2dHJxcnd5eXl3dXl4d3Z3
-dnNwcnRzdHVwbWRfW1lXWF5iY2ZpZ2ZpaW52en18fHp2cWpmY2VlY2dpaGRnZGRj
-ZGJiZGRjYmRjY2VmZ2RhY2dkZ2JiZGZmY2NmZWNiY2BjZWhnZWVkYmRgX2BwZmNm
-ZWZmaWhmZmNmY2JjY2RmZ2VkZGppZ2djZGRhZWdoZmZoZl9iZmJiY2RhYWFiZGJi
-ZWNiYWRmZWRjZWVkZWNoaGRkZGNiYmFgYmRjZWZiY2JgYF9gYGBhYGBgX15iYmBl
-YmFeX2BkYmNjYGBjYmJhYWFiY2JjYGBiXl1eX2FgYmJiX19gYV9hYF1cXV1bXF5j
-YmBkX11cXlxhX15gYWBgX2FeXV5fXl5fX2FjYWFhYF9cXF1bV1VZXGBbWllaWlpd
-XFtdWlpcXl5gYF1bWVdaXV5iXl5cXFpbWllaXV9dXV1dXWBeXFxaW11eX1xdXltV
-VlhbXl9hX19eYF9eXV9gYFpdWlhYXFtdXV5dW1peXFxcYF1dX15eXWFjYF9dX2Ff
-XFtYW1pcXl9gX15iYF5bXV5dX19fX1pbXF5fX19eXmFdXFxfXV9eX15eW1tdXV9g
-X19cXlxcYV1gX2FhX1xfX11hYF9eXF5bWl5fY2FhXFxdX1xfYWFgYF1fYF9fX1xe
-ZF1hYWNgYWFgXmBgYGJeXl9eXV5dXmJhYmRiYmVmYmNhYmZlZWVjZGJfYmVjY2Bg
-Y2FfX2FhYWFjYmRjYmJhXl5eX2BiYmFfXl1fYF5eXVxeX2VeZGBgZWRiY2ZlX15f
-YGFhX2FjYl9gYWNkY2BfYWFgYmNjYmBgYmFiYF9gYGBjX2FiY2JkY2VkYWBeX2Fh
-ZGVmZ2JmZWZlZmhoaGtqa2lpamlraGZmaGlraWdqaWptaWtpZWhpamVkZWhqa2dp
-h67J1t7j5ebn4+np6nx8fXx9gIB7ent6fH58fXx9fH16fHl6e3l2dnl5fHp6eXl6
-fHt8f3t5eX18fX6AgYKBe3x7fn+Bf3+CfHx5fHt8e3x8f4KAf359foCCgX59fIKF
-g4KAgH6DhIN/hIKCgYWFg4SCgoGBgoKBgoOCgIKFgYGCgoJ+f4KCgoB/gH1+fn16
-fXp4d3h2eHZ3dnR0dHV1dXd0dHNwcXZ4eXh4en98eXl3eXd2eHt8fHh4d3p7e3x6
-dXh6ent9fHt5dnh5eXp4d3h4enx8e3x5enp3gHx6eXZ6d3V6end4e3h4dnh5d3h6
-dnR1d312dXZ3dnd7e3l4eXh6d3d5eXZ7fnx5eX17e3x7eHZzcnN1d3RydHl1cXFw
-cnNyc3JwbnBzcG9vcXBvcW9xb3Fvb3h2d3FucHBycHBub29xc3JxcXBtbW1qaWtv
-cnJwcXJzd3x4eHd6f4WCg4eJhoaHh4iNko6JgYSChIeJiIqRlpONjIeKk5mgoqGc
-mpqfn5qalZSMjIuIiIiHhYWBgoF9d3l7e3l5eX1+g4mOkZOanJ6YlI+Rj46MiYmI
-hoN/fH5/enhzcW9rbGxudXV0dXiAfoKBhYmIg4F/fXt2b21udXyAf4CFh4aFh4F6
-cm5vc3Z4dHV0cnN2dXZ2cm1scXFyc3Fzc3BwcXZ6enNsZGFcWVteY2dnaWhhYmNn
-bXF3foZ/dnJta2loaGlvaWhmZWVkZGZjY2RjYWNkZmZkZGVkZGVkYWFgYWFlZWRj
-YWFhZWJiZGRjZWVkYmNiYGFfYmdmZmVkY2RmZGNjZWRkZWZlaGhlYmFnZWJkZWRi
-ZGJhZWRjYmRnZmJkYWBiYWRjYWRkYWFjY2JkYmFhZWNkYmNjYmJkZWZlZ2RhY2Vi
-YWFhYmFhYmJkYV9fYF9fXmBiZGBfZGdlY2JhY19dXVxfYl9iYF5fYmFmYmJhYWFi
-XV1gY2NjYmFiYF5dXF9fXmBcXVlbXWBhY2NhYmJgXl1bW15fXV5iYF1cX2BfYF9f
-YGFgYF9eXF5eYFxcYFxcXV1aWllbXFtaWllXV1tdXl1cXlpaXFpaYWBbXVxbWVdZ
-WFlZWldVXFpdYF5bXFtcXVteYF9eXWRfW1pcX19dXl1cXFxbXV1eXFtcWlhZXl9e
-XmBcW1tbXmFeXl1fXV9eYGBgXl5hYWFdWVxbW11bXlxdXl9fX15dXF9eXV9dXl5c
-XmBcW1tgYF1dX2BcXV5fX2BeW11gYGFeXV1bXmFeXFtfXl1fXltbW11fXl5dXF1e
-Xl5eYWJgX1xdYWBgYmJhYmBfXl9aW1xeYF9eYWFgYmFgYl9hYGBcWlxfXFtdXl9g
-YGNkZWdnZGJmY2FhY2FgZ2ZgYGRhYGBiY2RgYV9fXl5iYGBfYGFjYGBiYWFiYV1d
-XVtcXF9gX15gXl9fYGBjZmRlY2JgYmRjZGhmYmFgXmFkZGNkYmNjYWRiY2JhY2Fd
-X2JiYWRhYGFkYWBgX2BiYmRlYWFfX2JlY2BjY2VnaWloamhmaWloaGhoaGhoaGpq
-aGVpaWptbWppaGpnaWllZmZpZWZlZ2t5rsnX3eDl5ubo6evpf315e32Ae3x8e3l6
-e3p7e398ent+fXx4e3p5eXp6e3t4ent5fHt6ent5e3x9e3x/f4CEgoF/f4KBgYmI
-f3x+f4CAe39/f3+AfoCAfHyAfn5/fYCAf4N+gX5+goKAgoSEf3+Bf4KDgX9/goOB
-gH+CgoGDg4KDgH+Df4GBfn18fHt6fn+AeXZ4dnN1dXhzcnNzdXd1c3NxcXBzdXh5
-enV5fHp5e3t5fHt5eHp5enl4fX1/fHl2eHt6d3l6e317d3d2eXp6enp6dnp4eXt5
-eHV2fH55dXh5d3h5eHx7d3d3d3d4ent6eXd5eXV1eH16eXp5eHV5eHl4eXl4eXd4
-fHx+e316eHV1cnRydHZ3eHV3dnFyc3N0dXV0c3BwcW5wcXB0c3RxbnNzcXFwbnB0
-dXFxcnByc29xcXFycXFwc3Bqa21vb25xcW9ucHBydnl2eXl5e36EhoiGhoeJi42I
-gn6DhIeIh4mOkYyMjoyPkZSXnp2Xk5WZn5mVjJGSkpGOiImIhYOCf3x8end3d3h5
-enh5fIGEhoiMkJOVj4qIh4SFiIyIh4V+eXZ4dnl4d3R1dHBzd3t6fnt7gIaHiouO
-jYmCf318e3p2dn5+fn15e39/goiLioaBf3p4enh1dHNwcnV1dXNxdHFtb2hoa2xs
-bW1ydnt+fHNzc2xlZmlpamdnY15jaGxxeH+Dg4B3b21uam1qaGZmZ2ZjY2dlZmZk
-X2NfX2VjY2RiZWdhZGJiYmNkZ2ZmZWdmY2NjY2JhZGZgX2BhYWFiZGRjaWhkZWRj
-ZGRhY2RkZWhnamhmZGRkZGJkZGRkZGVmZmVmZ2doY2JkZWJjYmBmZWJlY2BiYmBl
-Y2VqZmRjZGNjYWBgY2ZpZWZmaGpkY2FkZ2dlZmVlZGJgY2RlZWFiYGFgYmNhY2Zk
-Y2JiZGVeYGNjYmJeXmFjY2ZhXV9gYF1eXl1dXl9jY11cXGJlYWBeXV1gYF9eYF5j
-Y2JhYl5bXF1dX19eXVteYFxfY19hX11bXF5eXV1gYFxaWl1eXFxaXFlZXFtcV1hb
-WlVYWFpfXVtbWVpZX2BiXFpaX1teWl5dXl1fZWBaXF1eYVxdXVxbWltdXFxeXmBc
-WlldXl5eXFpbXl9dX1xcXFxdWltfYV9fXV1bWl9fXFtcXl5eXV9cW11gYV9eYF9g
-X1laXl5fXlxdW1xeYF5cW1tbW1tcWlpeYGFeXV1dYGBdXFtbXVxaXFtbXV5dXmBf
-XmBfYl1fX2BfX19eXVxbXltdXFxeX2FeXF1fYmJdXVxcXGJjYmFhYWNgYF9fYGFh
-X2FiX19dXmBhYV1dYWBgXl9iYl9gXV1cXWFjZGZkYWJhYmNkZ2VoZWNfX2JiXl5h
-YWFhYF9eXV1dX2BjY2NiY2BhYWBdXl1cXWJiX19hZWBhY11fXmJiYmZkZGNlZmZi
-YmRmZ2VkYmJkY2VnZWNiYmJiYmJhZWNgYl9eY2NiY2BjZmNjY2FjZGVoZWJiYmRk
-Y2NkZGVnZ2ZmaWpoaGlmZ2doaGdmZ2ZlZmhobGtoaWdmZmdpamhoZ2dlZ2ZsbHew
-y9Xb3+Xn6Onq6eh8fH19e3p7enx7fX18fHl7fn19eHp8fn98e3l4d3h7fH1+eXp3
-eXp6fHt8eXp7fnx/fHyBgH57e31+fXx9enyAf4CBgYCDgX6Bfn59fH5/gIB/fXx+
-gIJ/gHt/fYGBhIKGgn6BgoKAgIB/fYGDgoKDgYGCgIGAgYOAfn57fn1/gX98e31/
-fHh1cnR3d3h4dnZ3dXR0cWxtbnF0dnd4e3h0dXd2eXh6f3Z4eXp4eHp5eXh6fHl5
-fH58eHd2dXV3enp4d3p7end5eXp5enh2d3t7e3Z5eHd7f3l5eXt+eXV0dXh4enx+
-fXt6eHR3eHl5eHd4d3Z5end7eXp5eXd2enp6d3p1dXZ3dXN0cnR3c3JxcnNzdnZ1
-c3JycXBwb3Byc3Jxc3FvbXBwcG5ycnFvb3Jzc3Rxbm9vbnFwc3BucGxubm9sa25x
-cHBzcnx8gH95eHh6foKEhoeEioyKg35+gISFhoiNkJCMio2LkJOWmZiVmJiZnZ2d
-mpKOkZKTlZCKi4eBe3t6eXh1dXZ3eHl1eXuBgoF/foCJjY2KioiKjo2Hg4aHg3t1
-cG9xeHp5dndzdnR+goOAgYKGi4yNj4yNioiDf3t4e3t5d3d1d3l2eHl8goaGhYaK
-ioeGgn15d3NwbnBwbm9xcHFtZ2VmaWlrbXJ0f3l4fXt5d3l2cW1qaWtrbHF3eXmA
-hYaFfnh4d3N0cXJvbmlpZGdnZGZmZmZlZGJnYmNjZGNkYmNkZWNmZWdnZWVkY2Nj
-YmNlaGhiYGBfY2VmZWRpamloZGVoZmVhYGRmZGJkZ2VmZ2RlZGNjZWVoY2FjY2Fk
-YmNkZWNkY2dkYmJjYmRlZGBiZ2diYmBkZGJmZmNjZGVlY2RiZ2lmZWRmZWpmZmhm
-ZWRlZWRkZGhlZWFfYGFhZGNiY2FiYmBjZmFeXmNkX2BiZGdiZmhlY2BeXF9gYF1e
-X2FgYWBeX15aWVpgYF9dX2BeX2BiYGBfYWFgXlxeX2FgXl9eZGBgZWRiYV9gXFxb
-Xl5hXl1fXlxdWVhZWVpYXFlZW1lbXltcW1xcXFtdW1dbXFxaXWJdXF1eXF9iXWBc
-W11cXlpbXFtfXF9iXlxbW1xbXl9fXl9dW1tcX2BdXFxeYV1bWVtgYF5eXF5fYF1c
-WlpcXV9bWWBeXF9eX19eXGBeXVtbXV9cXFxdXl1fXl1eXl9gX1xeXVxcXFpZWl9b
-W19fYGFfX2BdWltZW19bX15fYF5eXV1cW15fXl1fXl5eXV1eXV5dXVxbXl5eYWNi
-X11fX2JiXltcXV1fYWFfX2BfYV9dXVxhYWJhX11cXF1eXl5eXV9hYWBfYmBgYVtc
-X2BhYWFeXl9iY2FiZGNjYl5eXmBfX1tdYGFhX19eXl9dXVtdX2FgXmBeYmVkX19d
-XmBgXl5cW2BjZmRdYGNkY2JjYmRjYmNgYmJmZF9gY2NlZmRhYWFhY2RiYWFkZGNi
-YGBfX2BhYmBgYGBhYmFiY2ZlY2ViYmZmZWNlY2RpZmRmZmZnZ2ZmY2tsamloaGZp
-a2traGloa2lpaWVmamppaWZiYWRofbHK1d7h5efn6urp6H1+fH17eXl7e317enx+
-gX+Af32Afn1/fYJ+e357ent6eHl5e317en18e3t5fHx5e3uAf4OAfXt9fn6Af3x9
-fnx9fn5/f35/fX+BgIKChYSBfn19fX9/goR/hIGBgn1/gIKBg4OAfn5/fn+DgIWH
-hYGAgH9+fn+BgoCBgH99foB/fH1/fnp3dnd3eXh2eHR0cXN2dnJ0dnV2c3V3dnh4
-dnV3eHp3d3Z3eHh5eXl6e3l3eHZ4fHt8enh1dnd3dnZ1eHp4eX14eHx6eXx8fnp2
-d3h4fnd4dnZ4dXh3eHt9fnp6e3t6enp5eHp8fXt5eXh4eXh6enl5gH99enp3eXh3
-enx7eXh3dHd0dXNydHRydHB0dXZ3c3NxcXFydHFvb25wcHFwbW1sbm5yc3F0dnJx
-b3BvcXBwcnNzcnJvb2xpamhtbG9ra29vdnN4end7enh5eHl6fH9+hIeHhIB+gIKA
-f4CGjIuJgoaKjZCXm5ualo6VmJ6dmJiUk5GRj42OiYeCfXl5e3l2dXp7eXd3dnl3
-e3x9enV5gIKCgoSIh4uIioiNi4eCfnt8fnl4dXNycHJ4f4SIiYmHiomMjoiHiYaE
-gn55eHh3ent3dHFvdnl2fH58goWDg4aIhoaDfoJ+enZya2hpbGxwbW1samdhYWNl
-aXJycnl8e3h7enh1cnF0dHV4fn98fYF+gn97fH17eHd3dG9ua2tpZ2lqZWNlY2Nl
-ZF9gYmNjYFtiZ2diZmViZGJjYWJhY2JkY2NkZGViX2ZjZGRmYmVlZ2VmamdnZ2pm
-Y2doZmlkZWZkY2JlZWZpZ2dlYmRkZGVlZF9hYWBjY2ZlYWFmaGVjZGNiZGRiY2Bh
-X2FgYWJhY2ZoaGZiZmdlZ2hqaGVkY2ZmZmRlY2NkY2RhYl9fY2RlY2hmY2NgX2Bk
-Y2FhXmBlXmFiZ2dlZWZlY2NgX15gY2FhYGBfXWBeXmFgYlxbXmJiYGBeXGFiYWJd
-X19kXl9fX19gYWNlYGFhYF5eXF1bWmBbXF1eYl5eXFlcXFxeXltZW1dYVVlZWFlb
-WlpaV1hcX11cXV1aWlxbXF1cXV1bWl5fXV1cWltaXmBeXV5eXVtbW11eXl1eYmJd
-XFtcXFtfXVpaWVhZXF1fYF1dXFxcXVtcXFxfXVtcXV5cX19gXl5cWl1dYF5dXGFg
-XlxbXmFfYV9dXF1eYl9dXlpgXlxcXV5cW11cXWJeXFxgX2BfXVpdYF1eYF1cW1pb
-Xl5fX19cXV5dXl5gX11eXlxeXWBiYGFhYWNiYF1cXFtbXV5eXl5iX15gYWJgYV5e
-YF9hYl5fXV9dXV5fXV9gX2BjYmFhZmJkY2BhYmBiZGBgXV5gYF9gXFtcXWBfYF1d
-XWBeXl9eX11fY2BfYF1fYGJfX2JeX2JgYV9hXmBfXWBjYmNhYWFeYGFhX15kZGRi
-YmFiZmViZWNiZGJiY2JjZGRjYmRkY2JiX19gYF9eYGFeYF9fYmNlY2NhYmFkY2Fl
-ZmNjY2RmZ2ZmaGdnaWpoZ2psbGlnZ2dlaWhmaGhoaGhqaWdoaGtra2lrbWp4q8rV
-3eLk6Ono6erpe3x8f316e3t5en+Bfn+BgIB8e359fX18foJ/e3t2dnp9gHx8e3x7
-e32AfX19fHl5enp9fX99enx/f36Bfnx/fX9+gH98fH+AfX19foKDgYKBf3+AgH5/
-foCCg4OAfYB/f3+BgH+Af4B/f4B/foGAgYCCgICBgoKBgYB/gIB/fHt+fXt6d3d5
-enl4dnJ0eHd2cXB2dHZ2eXx7eXh4eHR2eXd2dnh4eXd4eXp6e357e3h3d3t5eXp9
-end6eXZ4eHZzdnd1d3l9e3h3eXl7f3p6eHh4enl6fXh7e3t7fH18fH17e3t5enp6
-fXx8fHx3eHh6e3l7gHt5eXh4eXl9fX19f355eHx9eHd2dnV1dHNwcnR1dHJ0cnFw
-cXJ1c3Nyb3FycnBxdHJwbm1ucHFxbW91cXJ1cnNub25ucG5ramxxbW9zcGxsbnFy
-dnl5eHN1dHh9fX9/f4CAgIB8e3+BgIB9gYWEhYJ8f4SMlJmampaSkZSZmZ6Zl5WP
-jY+Nj42KhYB6eXd1dXZ4enp3c3Bwbm5ucnVzdHh6enp+gYKDh4iJjI+OiYWGhoN+
-e3p3ent8f4aGhIWGiYyKhYaFgH59fHx5en5+fHl5enVycnN3e3V3enx5e3x5d3Z5
-d3h5fH58enJsamhpaWpoaGZlZGVpZGJkZGltcXV3dnNzdnZ2dnZ2d3p7enx9fn1/
-gIWFhoKAeXRvbWlsbG1tZWVlZWVoZmRiYmFhY2JiYmFhYmVjYWRfX19hYGJnZGZl
-ZWVjZGRgYmVoaGRkZ2RiZWZiYmdmZWNkaWloZ2RjY2ZnZWVjY2VmZWNjZWhjZGVl
-ZmVlZmNiZGVjZGdkZmViY2FeYmJiYmBgYGJgYWFkZGVpaGloZmdnZ2ZoZmVkY2Zm
-ZmlmZmdoZmJkZWRiZWVkZGNiZWRfX2FjYmBeX2JlX2BiZWFhYmNkY19gYmBmZ2Je
-XF5eYmJiYmBgX19fYGJgYl9gYF9iY2BbW11gX15dXmFdX19eXl5eW15eW1tcXF1d
-XF5dXF1ZWVtbW1xdXFlYVVNYWFlYV1lYWFlZXFtaW1taXF1cYF9bXV9fXl5fXl5e
-XFpaW1tbXV5cXltaW15bXl1fYmBgY19eXF1dXl9dXVtfXFtcXF5fXl1bXV1dXV9e
-X11dXF1dYV9dX1paW19eXV1eXltZW2BeXV5hXV5eXVpdX15eXF5gYF9dXF5eXl9e
-X15cXFxbXFxdXl1bW1xdYV9eXV1dXFxbXFtdXl1eX1xZW1xcXFteXl5dXV5gYWBg
-X2BgYWBkY2BgXl1eX2FeXV9hYGJhX2FhYGBeXl1fYGBgYGNfW15gYWFhYmBiY2Bg
-YGBkZGhtY2FeYGFfXV1dXF5dW11eYWFiY2ZkYmBeXmFiYGBhY2JfYGBgYGNhX2Fi
-ZGRgXl1bXF9hZGJeX19gYF9hYWBgYWNlYmRmY2RlYmJjZmNjYmJkY2VlZGhnZmJh
-X2JkY2NgXmBhX19hYmNjZWdmZGRjZ2RkY2NgYWNiYWVnaGhpaGdnaGpqampoZmZn
-aGpoZWhoZ2VnZmZqamdqa2tsb3mrydXd4uXn6erp6+t4eXx/fn5+fn9+f4B8e3t8
-fnx9fnt9fHp5fH97e3p7fXx8fHp1eXx7fH6Ae3p6e3l8fX9+e3t8fXx5fXx8fXt5
-d3x7gH5/fn59e3x9fX9/gH9+gYGAfoGHgYF/fYB/f32AgYB/gIF/gIODhIJ/foOC
-gIB/gH+AgX9/f317fHx6fX57eXh0dXd2c3R0dHJ2dHZzcnJ0dHZ2dHd0eXp3d3p8
-enh5eXp7end7enp6fn19e3t9fH96eXuAfH16ent9eHZ2dnR2dnp7eHV2dXN5fXl4
-eHh5eHx6f358enl4ent8fHt4eXt4d3l5e3p9fHl4enl5eXx6enh4eXp6fXx7fnx6
-enx9e3l6eHh3d3d1c3FycHN0dnVxc3R2dHNzcnJwbnBycnN3dXFsbW5ubW5wbnBw
-cnJxcnBucHFvbWprbW9wbW1vb25sbnJ1cnFvcXJ1dnx+goGBfHx5eHl+fXx2d3x7
-eXp5foGEh4yPjIyUlZGVlJmdm5eYj4qLjY+Oi4iEfXd2dXRycXNzcWxraGZnaWlp
-bXJzc3F0dnt/f4CCgYeOkI+LiYuMiYSCg3+CgoOEg4aGiISDgH+Cf35+fXt8enx8
-eXd0dXJ0d3d2dXR5dXZ3dnZ0cXBtam9zdHh3dnd1c21ta2loZmNhXlxeYWNkYF5d
-YGBma2xta21xdHd4dXRzeHh6fX+DhIKLioWGhYF8dHBxbWpqamRlZ2NmZWdlYmJj
-Y2NeYGFgX2RiYmNhYWZlYmJmZ2ZnY2VmZmZlZWRnY2RnZWRiYGNkZGFkY2VkZWlm
-ZmNjY2RlZmZlZWZlYmJmaGtsaGhnZWdmZmZmZWVkZGJhYGFiYl9gYWFiY2NjY2Vi
-X2FiZmRiZWZoZmhkY2ZkZGVlZGJiY2VlZGVmY2NmaGRiYmNiYWNjZmZkZmdjYmNk
-ZWZiYGFhZGNiYV5gYWViYWBiZWVnZmVnYGFhX15gY2NiZF9fYF9fY2RiYl5eXV1c
-Wl1hXl5bXl5hXl5hX15eXl5dW1peXltfX11dXF1eXlxYW1teW1lZV1RZWllaVlhY
-WFlXWFlXWlxdXl5eYF5cXF1dYFxfYF9bWllZWVlZWltcW11cXF5eYV9fW11dXV9d
-Xl9gYWBgX2BgXFxcW1taXl5dXl9eXlxdXVxbXF1eXlxcX1tbX2FeXV1dW1taW11e
-Xl9iXl1dXF1gYV9gXV1dX2FeY19eYF9fXlxeX2FhXltaXFxfXlxdXVxdX15cXVxe
-X15gYF9eXVxdXV1bW1teXFtbXV9fXl1gYGFgYGBhX19fYmJeXV5dXGBjX2BjXl5d
-WlxcXF5fXl5cYF5cXmFhYWBfYV5hZGBgYGFjYGFeYmhgXl9hYWFgX2BgX2FeW19i
-Y2NgYV9gYGFhX2BeXmBjYWRiY2NeX2NhYF9cXF1aX2BiYWBhYWBdYmFmY19gZWVl
-Y2RkYmJlYWFgZGFiYmVpZmZkZGFhZmJjYl9eXmBjYF5gX2BhYmZkZWNlZmViZWlo
-ZmVjZGZlZmlna2loaGlkZmhqbG1paGpsbGlpaWdoZ2dpaGdpa2ppaWpqc6bI1t7h
-5Ojp6unp6np7e3t/gYB+fnp6fXp2eHl6e318eX5+fn56eXt8enx5enp6e3t/fHx+
-fH58enp5fXx/fX98fX15enp7fX95eXp7eX+CgIGBgHx6e4KAe4B/fn9/fXx/fH5+
-foCBgIB9foKAgIJ+foCFhIGEhISCgH6AfoKCgoN/gIGCgoCAf36Afnl4ent7e3h6
-dHh5eHZ2dXZ0cHFxdXdzdXh3dnl4fHx7enp6fHx8fHt7fn99ent7e3t8fHx+fn58
-e3t8fHp6enl7dXd2eHd3eHh4eXp5eHZ7fHx5d3p7e3p5eHx8fHx6eHl7eXl3eHh5
-fH17fX57enl9ent6eHl5enl5e3l7fHt8fHd5eXh7d3N0dnZ3dXd1dXNzc3Fyc3Z3
-dXFwcnJxcHBwb3Bwc3V2cXBvbXFxcXJxcHFxb25tbW5tbW9wcnN1c3Bxb2prbmpp
-a2xxc3Z4enl7fHp8fXZ1dXR0cG5wcXBzd3l6gYWJjYyMj5OSkpaYlpiZlJGSkI2O
-jo2If3t5dXJ0cnBuamxmZmVjY2JjZWtwcXFycHB2d3d4eX2AhoaHhYeLko+Oi4iG
-hIeHiYaFiIeBg4KDfn59gX97d3p7e3t5d3x5eXx+fHVzdXd4d3Zyc3NvbG9xc3R8
-f317e3h1c3V6c29saWJgX19jZGJfWFdaZGlqa2poaW5ydHRycnh+fXp5eXt9gIOG
-iIqKj4h/eHJxbmljY2VkYmJjZWdnZmdjY2FiYWJiY2ViYWRjYmNlZmVkY2RkZGZo
-ZmJkaGhlY2FkY2NfYWFmamdmY2JjZ2ZoZGJiY2dmaGhnZWFkZGZlZmZlZmNlZWNi
-YWVkYmFjY2FfYF9hY2ZkZGNkZGNhZmRkY2BhY2ZpZmZnZ2hqaGVmampmZGNlZGJl
-ZGVmZGZkZGRjY2FiY2FiZGVlY2FiY2RkYl5iY2VhY2JiYmRmZmNiYmBiXmRhZWRi
-YWBlZGVkZmJgX2BeYmNjYGFeXlxeX19dXFxeYWJfYF9gYV9fX11aXVxfXVtbXl5b
-VlhYWl1eXFxaXl9bWFdcWFdaWllZWldZWVpbYF5dXFxYWl1dXV1eXV9cXlxbXF5d
-XVhZWVpcXFpaW1xaXVxcW1tbWVtaX2FgYGFfX11cWVtdXFtbXFpaWltaW1pcX15e
-XVxbXV5fXl1bXFxfXmBgXVxbXFlcXl1eX15fX2JhX11eXmFiX2BaXGBeXWFiYFtc
-XV9fYGFgXV1dXV5fXl1cW11cYWBdXF9dXl9gW11eYmRgX1tfYV9dX19cXF5hYF5e
-Xl5eXV5iYWBhYFxdXV9hX19gXl5fXlxYXF9dXlxbXWBfXl5fXl5eX2BfYWBfXl9e
-YWNkYGFiYmJgXV1gXmBeYmFfX15hZGZjYmFhYl9gYWBfYGBhYGFhYmNgX15aXl9d
-XF5fX19eYV5hY2JjY2JkZWRjYmBfYWNiYWFiYWRiY2VjY2JhY2JkZGNhY2BhY2Nh
-YGFeX19fYF9gYWRgYGBjZGRhYGNjZWdlZmRiZGhqa2xpamxoaGRlaGhoamtpamps
-bWtuaWVoaGhoZ2hsamloaGp4qcjU3eLl5ujp6urqf4B+fXt/fX1+e3l5ent6enp5
-e3x+fX98enx9fHt7e3p/gX+BgH99fnx6enp/fHt8fn96enl6e3t6e399fn97fH9/
-fn6Cg4CChYR9gH9/f4F+f397fX18fn9/fn+AgYGDg4SCgYGBf4GEg4CHhIGAfn5/
-gYKBgX6Afn9/gn+Af36CfX1+eHp7eXl3dnl5eHd1cm9yb3V3dnZ0dXh3eXt7enh5
-e3p8fn18eXx9fHt5eXh4eHl7fX59fXt8e318fHp7ent4eXd2dHV2d3t6eHp6eHl8
-gH16enp+fnp4eXp7eHZ1eHl3eHh3eXp6fH19en55fHl2d3Z4eHt7fHx7enp8end5
-eHd2eHZ3c3RzdXV2eHRycnJycnN1c3Fwbm5wcXFydHFycHBzcW9vcHFxb3F0cXJu
-bm5vbW5wcnBvcHN0dXBubW1uaGhlZmZqbG9ydHV0d3l8f3x1dnVzb2tqaWlpbHFy
-dn+HhoWJio2WlJGUlZOUjY+Pj5CUko6LhH55dnZ4dHN0bmtqZWJjXFxcWl5iZmhm
-amxrbG9ubm5wc3h8e3uBhoqKiYuIhIGEhIWKh4aBgoSDh4F/foKAgH+Ben17en1/
-gYGFhH95dXZycG5ucnNxcW5wdHd4ent9fHx4dnh8eXl3dnJvbGlpZ2ZmXVdSVVxi
-aWtra2lnamhmaGpqbXJ1dXd4eXl/goWGiImMiIF/eXZvaGtqZWZlYmBiZWVmZmZk
-YV9iY2BgY2JhYmRjY2VlZWRjZmVkZmdlZmRkY2dmZWZpamRkZWZkYmVjZGJhZGVl
-ZGZkZWZmZ2hna2ZkY2ZlYmJjZWJhY2djY2RjZGViZGRjZWRiYGJjZ2VmZWRhYGZm
-ZWZlZmZpamxnaGdqZWRkZ2dkYWRiY2hlaWdjZ2VjaGZjYmFdYWNjY2JhZWJgYmFf
-YWBjY2VkZGFhYmNiXmBiXl9cX2VjY2JjZl5dYGFiZGJhXmFjYV9eX15fYF9hX2Bg
-YGBhYmJgYmBdXV9eXVxeXVxfXltbWVlcVVhZW1xdXlpaXVxbWFhbWFVYWFpaW1td
-XF5bXlxbWldWXFxcWltcW11eYF1eX11dYF5bWF1cXVtaW1paXFtbWVhZWlpZXV5d
-XFtcXF5jXVxeXl1bWlpcWVlaXl5dXl5dXl5eXFpdX15fXltgX15cW1tbX2BeYF9d
-WlxcX2BfXV5eX15eYF9cXV5dWlxcXF5cXl9fXF1cXFteX1xbWVlbXlpZWltbX19g
-W1xeXF9eXF5dX2BhX15dYF5eXV9eXmBjXV5eXl1eX11eXl5eXV9gYF5eYV5eWlZc
-Xl9eXV1dXl5fX11fYFxfX1xdYGBhYGFkZGRjYmJgYF5iXlxfXVlXXF5fYV1fX19f
-X2BfYmNjYmJhYWJjYmNiX2BgYGNeX19fXl9fXVxcXV5iZVxcYWJhYmNiX19lZGRl
-YmNkYmRlZWVkZWRiYmRjYWJiY2FhYWFjYmBfXl5jYWJiYl9eYWNjaGhlZ2NkZWVl
-ZGFiZWZnamttbGtoZmlsaWhoaWhpaWhra2lqamRiZGhnZGRnZ2ttb3WmyNXd4eXo
-6Onp6umCf4ODgoCBgn1/fXl3eHx6eXp6ent9fHx6fX59fXt7fn9+fH6Afnx7eXl6
-fHt8fXx8enp6e3t6fH19fn9+gH9/foB8fICBhH5/gISBgYCAf36AfoKBgn9/f31/
-fYB/g4F9f4CCgYB9gH9/goSDhIKDfn+Cg4GBfn1+gYJ+gYGBfn9/enZ4dXV0dHh5
-e3p3dnR0dHBxc3p3eHd2dnp5eXh6en5/fnx9fn56e3x9e3x5e3t+fHt7fn97d3h4
-dnh6e3d6eHl4d3l5eHl4d3Z4eHR2d3t8f4B8e397enp6ent6dnV2d3R5fHh2d3d3
-eXl6eHx7d3d2c3N1eHt8fX58d3Z1c3R4eHp5eXl8dXN1dXZ3dnNwb25xc3FxcXBx
-cHRycXNxc3Rzc3Jyc29ucHFyb29wbWxtb3BrbmtvcnV0dXJvbnBta2lnYGJfZmtw
-cW9xcG90fHx5cnRycWxoZmRiY2htdXuAgoKBgISJkJaXlJeXlZKPkJSTkZKNiYJ+
-eXh4eXd2cm5tcG9nZV9bWFleX15fYGJmZ2xva21vcHF2dnZ3d3l7gIKBgH57ent+
-foCAf4CDgoaIhYSEg3+CgYB9fn57gIiMiouKh4ODgXpzb250dHRzdXN2d3h4enx9
-fHx9enVzdXVxbW1ycm9uaGNcWFZYWmBlamtuamlnZmVlY2Noa2dobG90eXl7fn6I
-iomMhn98dm9sa2lqZmRkYmJmZmZnZmdmY2FgYGBiY2FiYWJlZWJhZGZmZWdoZ2Rk
-Y2JkZGZoampqaGZlYmZnZ2hlZmRkZ2dnZ2JlZGZnaGhmZGZlZGhnZmRlYmFjZGRh
-X2RkYmJiYWZkYmFkZmRkZWVlZmNkZWZmZmhnZGRkZ2tpaWlpY2VlZWNmZWNhZGJh
-YmRjZGVmZGRgXF5eY2JfYWJhZGViYmJfYWNgYF9fX2BfX2FiXmJiYWhhX2JjZGJf
-YF9hYl9fYF5fYl9fXl1fXmFiYWJfYWJfX19hX2JhYmBfXF1dYVxcX11dX2JeXFtZ
-WVxnXVtcW1dZW11bXVdaWFVYWVlXWFxcW1pZX15cXV5dXV1cW1tdXl5cXFxdXFxd
-XV1gX1xcWVlbXF9dXl1fYV9aXFlcXFtZWVxaXF1bXFhZXltcW11dW1tcW1pbXV5e
-Xl1dXl1eYl1dXVpdW1xcXF5gY19dXF1fX1pbXl5dXV1bXl9eW1pbW11eW1xcWVha
-W1xbW1teXmJdWl1bXFlcWFldXlxdXV1dXl9hXV5gX19dX19eXV5dX11dX2BhXl5c
-WlxbXVxbXWBjYWFiYF1eX2BgXl1bWlxgYGBiXl5fXF5iYmFeXF9gXlxcXl9hYWRj
-YmNgYmJiZmFdYGlgYF5ZXWNiYF9fXl9iZGRkYGRhYGJjYWJhYWFkYGFeX2BgYWBi
-YF5gXl1fYWJiaWpiXmFiX2RkYWBhYmZpY2RlY2NiY2JiY2NjYGBhYWJhYV5fYGFl
-ZWRnYmFkZWJgYWBhY2VmamhlZWRkZWZqa2hnZmZmZ2lnaWpuampmaGdmZGVmZGRp
-amxramVmZmdpa2lpa2xueKbH1Nzh5efo6erq6n6AfX1+foF+fH19end2dXV1end2
-e3p6eHl4enh7fX99e3t7ent9fX58e3t7fX18ent5eXp7fXx9fn57e3yBf39+fX6B
-goF/fHx7gIGAgX9/gIGAgIGCgYGAgH1+g4R+foGAgIKBgH+BgYGAgn9+fn5+gYKB
-gH5+fX5/f4OEgH98e3x6e3p4dXZ4eHZ5dnR1dnNwcm1vc3V3eXd1dnV3eHh6eHt6
-fX18f3l4e3l8e3p7eXd5enp5eXp3enx4dHl6eXh3eXp2eHZ4e3l5e3p5dnt2dnd7
-fHp8e3x4d3d1dnZ0dXV3dnl6end2dXh2e3p6eXp3d3Z3dXd5enp4eXp4dnZ5enh3
-dnl6enl2eHJ1c3d4dHRwcnNycHJyc3Jvb3JxcXNwcnNxcnRybm9xcnJycW5vbm9u
-bW1ucG9ydXV0cnBuamhkX11gX2Nna2psbXBzdHZ2cm5rbG5taGJiYGFja3F4gIGB
-foGIiJCTlZOSk5CSlJWSj5GOi4WBgXt5eHZ2d3Z1dHV0cGhjXmBdX2NjZmVnbXBx
-cXBydHZ2eHZ3dXh2dXV0dHRycW9ycnV4en19fn+ChIOCgHx8fHt7ent4foOGg4eI
-iYeFhImIhYaAfX59fnt5dHJxc3R1dnp8f355eXx4dXB0dHR0c3JuaWVfYmRlZGRt
-dHJwcW5ramZlYmRjY19faHFzenx8fYKGiYmGgoJ9d3BtamdmaGlpY2VlY2NiYWJj
-Xl5iZ2ZmZ2JiZWJhYGNjZGZmZGZlZWRkZWJkZmZmaWhqZmVnZ2dmamVkZWRkZWZk
-ZGRna2hqaWZjYmVjZGRnZGJgYGNkYmVjYmFkYmVjY2JjY2VnZGRlY2VlYmRlY2Rk
-Y2RmaGZnaGlqZ2xsZmRiZGZkY2RmZmNkYmRkZmJmZGRhYWJfYmdkZGNgXmFmZWZf
-XWBgXVxfYWBdYGNiZmRgYmFfX2FiZmRiY2JfYV9eXVxhYFxdXV9hY19eX2BfYF9g
-X11cXWBgZGFfXV9fXl9gYF5cXV1cXWBdW1xcW1haWVRYW1pZV1VZWllaVldaWFtd
-XVlbW1hbXVxdXl5fXFlaXFpcXV1eWl1gYmFhYl9dYV9eXFxdXV9hX11cWllaXFxd
-XV5cW1xbYV1bXFxbW15dW1xcWVtZW15cXF1eXl5dX15eXVtbWmBhYWFgXV5fX19d
-XFxeXmBcXV5fXVxdY2FeYWNiXVtaXV5aWFtdX11fXVxeXl9dWVpdWl1fX11dXV1d
-Xl5bWlteX15eXV5fXVxdXFpcXF1ZXFpbWmBgYV9cXF5fYmBfYWJeXF9hX15eYGRj
-Y2FhYGBfX2FgXV5eYWFhYF9fXV1cYGFiZWhiYmJhXmBeYl5bW1pdYGNkYl9eYGRj
-YGNiY2JiY2JhYGFhX11hYWFhY2RhYWBkY2FgYGFeYGNlZGFeXmBgXmBgX2FgYWFh
-YmJiYGNkY2VjYmJlZGZqZ2RfYF5eYGFkZ2NhY2NjYmJfXGBkZWRjX2FkY2NjYWVp
-Z2RkZ2ZnZ2doaWhoaWZnaGJeXlxdW1xiamtqaGtsamhjYGNmaGh2psfU3eHk5+jq
-6erofHx8fXx7fn98fIB+gIB8fX12eHh4eXd5enp9e3h7eXh5e317eHl6fXt8fn58
-eXd6e3l5dnh6fX6AfH1+gH5/f3+BgICBf36Af3t+foCBfX59fH98gYJ+gIKCgIOD
-gn99fYOCgoKCgX5/f3x8fYCAf3x+fX5+gYOCf3+AgX+BfX5/fX99f318enl5dnVx
-cHV0dHNydHFxdnh3c3Nzc3V2eXl5fHp3eXt7fnx5eXd4eHl8e3p6eXZ3eHh5e4B6
-eXt8eXp6enl4eXd6fn18fHp4eXt6d3h3eHx4eXd4eHZ3eHh5fHx4en1/fHl1dXN4
-e3p5enp5dHd6eXl5dnZ5eXZ5eXZ2d3d6fHx5enh0dnZ1dXd2d3V4c3R0dXZ4dHBv
-cHRyc3Rwb29vcnFwcHJxcXNycG9vbnBubWxsbXBycXFzcW9saGNZWVpcYWRlaW9u
-cnV1dGxsbW9oZWRiXVpdZWhtdHh5eX2DiY+SkI6Pj46MkJGQkIuGhYaFhoN+enp6
-dnl4d3V3dXNuZmRjZWhrbW1ucHJ2c3Jzdnx9fHh3dnVxcG9taWltbW1rbG5xdXV6
-eXuAgH58eXt8eHV2d3l6eXuDh4WAgn9/gH+CgoaJi4uMjo2Jhn+Af3h6eXh2dnh1
-dnp6fnt8fn58eHl6eHhxcnFta2twdHV3eHd3d3RwamtoZmdlZ2lwc3t8fH9/e3+E
-hYaChYN+enVzcG9qamdpZ2dkZGNiYV5iYmJkZmZkY2RmZmRjY2NiZGRkY2NkY2Rl
-ZGRkY2FhY2RoaGdhZGNkY2NkZ2ZmZGNiZGlpZ2lnZGVlZWRjZmdmZGZmZGRjZGho
-Y2ZlaGJkYWRkZGJmY2RlZ2hlYmRkZWdnZWZqZmZlZGZnZ2lkZWZlZGJiZGNhZWZm
-ZGZla2RjZGRhYWFkYl5fYV5cWV9mY2FjY2RgXlxeYV9eYGFhYmNfX2BgYWBfYF9i
-XmBgYGBkYVpYW1xdYGJhX2FgXl5eXl9eXFtdYWJhX19hXlxcXWBgX19bXF5cYGBe
-XllUWVpYVVVZXFlaVllaW1pbWldVW15dXVpbW1laW1tcXV1aW1paW1xcW15eXFxc
-YGBfXl9fYF5eX11eYF9fX19dXl1cW15cXF5gX2BhYV1fXVpaXV5cWVlZWFhbW11d
-XV5fX2JgXV1gXl9eYWBfXFxdX2BhXFxgXV5dXV1dXFxfYWJeXF5gYF9gX2BiXVtc
-XWFiXmBdXV5eXlxcXVtdWl5dXV5dW1tdXVteYWFgYGFeXVxdXVpcXlxcXFxaWlta
-XV9fXV9eXV1fYGJgYGFgXV9gYmJhX2BhYGBgXVxeX15cXF9gY2NiYV5dW1tdYmFi
-YmFgYGBaW1lhXltYWFxfYWJgXlxcXmFhX19fXmBhYmNgYF9fYGBhZmNhYWJjY2Vl
-YmBfXWFhYWFfYVxhYWFhY2JiYF9eYV5eX2BhYWFhYWNhY2NlZGhoZGVjYV5hYmNm
-YmBhYmJiYWFfX2BhYmVqZmFiaGhmZWZiY2NkY2JiZ2doZ2doaGRiXFtcWlFZXWJm
-am1saWxpaWllYmlpaXSqx9Tc4+bm6enp6ud/e3l6e359fX9+gIKAgYCBfX57enp4
-d3l4eXt6eHZ4eXp4eHt6fXt6fn16e3l3d3R3ent6d3t7eXp8en5+e3p8fn+AgHt9
-fH5/gH5+foB+fH2FgoF9gISBh4ODg4GBgH9+gYSFgYB/fn17fX6Afn5+foGBg4KA
-hIKCf3t9fH19f3x+fn16eHl5ent2cXN0dXV0dnd0dHJydHJ0dnR1eXh2eHp7e3t6
-fH19e3l5eHd5enh5enx6enx7fXh2eXx6en99fHl4enl7eHh6ent7fH2Be3x7eXl7
-eXl3dnd2d3h4eXh7fH+Bfnt4fIF3eHl6d3Z5eXt6enl4eHl5d3h8fHl3eXt5d3p3
-d3d4dnV4dXZ3dHVydHRycnJ0dXV2dHNyc3NxcG5tb3Bvb3JycG9vcHFxcHFxcWxq
-aWhrb3Fwc3RzbmhkYGBaW15gYmdrcXJ0dHJubGxqZGFdXl5eYWFjaG9tcHR4gIWG
-i46Qh4aFh4uNiomIiYiHg4SAe3t6fHd3c3JycHNua2doaW1pamtvdXdycW5ra3J4
-fH16eHRzcGpnZ2VlZWdnaGVoZ2xvc3d4eHZ5eHZ3d3d5c3JydHl6fn6CgH59e3t/
-f4OIioqLjZCSj42Ni4WAfn5/fXx/f3x2c3Nxc3d4enhycXR1d3l3enh1cW9wcHN3
-d3h5eHl3dHJ0b25ydHR6eXx5d3V2e39+hIeHh4WDf3t3cm5pZmllY2JjYmJnaWNi
-Y2NlZWZrZmVmZmZlYmRkZGZkZGNkZWZjZGRjYmRhZGNiY2VlZWdkaGVmZmVnaGlm
-ZmRjYmVnaGhnZmZkYmdnZGRlZWJkaGZoZWRkZmVlZWVmZWVkY2NjYmViY2ZmaWhn
-amhnaWZlZWdnZmdlZGZlZWRkY2RkZmZnZmNhX2NeYmFgYWFgYWFfYGBkYF9gYGJi
-ZGRgY15hY2FhX2JjZmZjYF9dXmBgYWJhYmBgYWFiYFlcXF5dYF1dXWBfXWFhXmFe
-Xl5hXl1fX2BeYF1dW15eW1laXV1bWVhZWFdWVlhaW1laWFhZWFhZXl1WVlhaXmBi
-XFtdXVxaWlpZWV1cXlxdXVxbYV1bWlxaXWBfXV9dXl1bXmJhXlxeYF9eXlpeXF5e
-X19hX11dXl1cWFldXl9cW1pbXFtaW1xdXV5cXV1gYGBgYGJdW1pcXV5eXV5bYF9d
-W15fXV9gX19gXl5fXl1cXl1gYV1dX2BiYmJhXl1fXl9eXF1eYGBiXl5cW11bWFlc
-X15eXl9gX2JdYF9bXV5dXFpeXl5fXltbW1xcX2BfXlxbWlpdXlxdXlxfYV9gYmFe
-Xl9eXl5bX19eYGFgX2JgYV5hZGFeYGBiXl9hXldWW1teW1lYXWFhYGBgXl5dYGJh
-YGBeYGFiZGRhX11fYGFgYWFjY2NkYmJfYmBlYWFiYWJhX15cX2JiYWFeXmJiX19f
-YGJiYmRjZGJgY2NhYWRkZWVgX15iX1xdX15gX15hXmJhYV9iZWVjZWJgZGZjZGdk
-ZGVlZGRpZ2dqaGdlYVxdXF5UNj9bX2VoamxpZ2lqbG9qbW5wea3H1Nzi5efm6erq
-6n19e3l7fHx7e3x9f4B/fn+AgHx6enl9enl3eHt7dXp5eXp7fHt7e3x7e3l6e3x7
-eXR3enx6enl5eXt9en58fXx9fnp7fX59fYCDg4GAfnyAhH98f4GAgoOBf4B9f3+B
-gIGAgoaGgn59e319foKAfoCAgH5/gICEhIB+enx6fXx8fX57fXt8e3t5dnV0dXR1
-dXZ3dnNzc3N0d3l4eXZ6eHh6enp8fXx7e39/gnx8fHx6eXh6eXp8ent+fX95d3d2
-ent3fHp5eXp9eXp3d3yCprSLf3t5eHp5dnZ3d3Z4enh4fHp8e3l3eHl3eXp5eXp5
-enp7fH18enp5eHh3e3l4d3d1eXt6dXd9eHV1eXZ2dnR0dXJ0dHFtcnJzcXd4dnZ0
-cnV0b3BycHBucG9wbW1vcXFycXFyc29qbG5ycHFwcHJsZV5bWFZYWl5la3F1dHJu
-bWxqamVkXl1dXmRjZmZocHRzdnx9goOCf4GAgoSGiYSChImKiIWCgH19e3d0bmxs
-ZmVoamtmZGhmZWRkaG1wb21sZ2ptcHF3e358enNsZmFgYGFgYWRjY2JjY2xzdXZ4
-dW5vbnFyc3B2d3l5en1+gYB9e3p4eXp+gYKDiYiJi4yLiIaDhISGhYSDhYWBem9p
-bGlmaGhub29ub3BvcXV4eXp3cm5ubGxvb3R3eX6Af315dXp2dHJubWxsa3B3ent9
-f4WHhoeEgHp0cG1qaWhoZ2JiZWRiZGNdXmFkY2ZmY2RmZmJhZGRjZmdnZmdkamZh
-ZWRlZGNlZWdiZWdiZGdnZ2loaGZnZmJjZmdmZmdmamZoZ2htZWRmZ2VjaWVjZGVl
-aGZkZmNjYGJkZGFhYWJjZmZoaWhmZ2VkZWdmaGdpZ2dpZmRjY2ZpZmZhY19jYmNi
-ZGJgYWJhX2BcYGFhYmRfYWJiYmJlYGBhYmJiZGJoZl9fYGBjZWJgXWBgYWJjYmJh
-YWFjYmNjZGFgYGBaWlxcW15iYGBbXV5fX19dXl1fYF5eXFxbXV1dXmBhX1xbV1VX
-WlhVVlpbWFdcXVpZWlhZWVxcWVpZW11dYGFiW1lcWlxcX19gXltcXFxdW1peXlxf
-YFxeYGJfX1pcX2BgYV1cXl1cXVpbXV5cXV1bWV1bXFtbWldcX11cXF5fXF5dXFxa
-WlxeXVxdXl9eW1taXVtaW1taXFxdXF1ZXFxcXl1fXVxeYWBdXF1gYGBbW15eW1te
-XWBgX11gW1tcW2JgYV5cXllbW11cWlpdXl5eX15gXlxdX1xfX15eX1xfXl9fX15c
-XF9dXl5cXVpbX2BgX11dX15eYWFhX19fYGBeX2BbXV9hX15gXmJgYWNfYWFgX15f
-X2BiX1dZW1tcX11jYmNiYV9fYF9fX2FkYGFfXl9iYmNjYWFfX2JgYGBjYWFgX2Fi
-ZWFhXV9gYGBfX19fYWJjYmNiYmNhX2BfYGBgYWJhYWNkYmRmZmZnZGViYWFfYF5d
-Xl1gYGJlZWRiYWFiZmVjZGJjY2RlZ2ZnaGdoampraWhlZ2RfX15eXVlHO1ZbYGZn
-amlqamtscHFucHCDs8fU3OHl5+jo6OrqfHp6fH19fHt6fHt9fH9/gH18e3d4enh7
-enp7fHx+gHt6fHt6eXp6end3en19fXl9f3t7fX99fn19e3p8fX9/fnt+e3p8fX9+
-fX18e3l+f4ODgYKChIKCgoB8fX2Bf4B/fX2AgYSDhIOAfIB9fn5+gIF/fX5/f39+
-fYCCfoF9e3t9fnt8e3p6eHh5d3Z2dnZ1dHV1dnd3d3V0dXh4eXh7d3t7e3l6enp8
-fH6Af4CBe3p7enp5en18gX59fnx4fH99fXt5eH59eXyBfHx8fYCjs4x8e3l3d3Z2
-eHl3d3N0dHt7enx5dnZ1eHp4eHl+e3t7en16eHp6eHh4eXx5e316eXl6eHp3dXd4
-dnR1dXV0c3N0c3N0dXd1cW9wcXJucHFxcHRyc3Fwb25wbnBvbG1tbW9vb3Bzc3Nt
-cm5vcnFvbGlpYlpYVlhbYmpscHJvaWZnZWJiXl1eX19fYWJiZm5zd3l7fn5/fn18
-e3x8fYWFhYOEh4N/fnl2d3VzbWhlY2RmY2FjZWRlYGBeYWlvbmxsa2hscXNvcXh6
-eXdybWdkX2FhYV9hXlxZWlteYGZraGNgXWVucXZ2eHV5fH6Cg4SHhISCf4B9gH97
-fH9/g4qLi4qIhoaJio2JhoF+eHdxamVvZWRiYGBjamxtcnJtbm5wdHV2dHN0dnp6
-fHx7d3h4dHV2dXJta2xoYmRobHFzdXp9gYaIiYiHg3x2cnNvbWhpZWJjY2dnY2Fi
-YGFiY2ViZGJkY2BiY2VkZGRkZWZlZmRkZmhkZ2VjYWJkZmNjZGhmaWhnZ2djZmZq
-aGZgZWRjaGZkaGVmY2FfZWJjZGhmZmVnZmVmZ2VmYmNkY2NiYmNmaGdlZmtoZmJm
-ZWNobGloZmdkY2NkZmdoZ2hlZGRmYmBhZGJiZWJlX2BgZWRiYV9hYGJhYV9lZ2Nh
-Y2RlYmNlZ2RgYWNgYWJhYmZjYGRiYmJiXl9iY2RnYV9fX2BbXF9fXl1dX15dWV1e
-XV5cYF5fYV9cWl1dXV1cX15dXFlbWVlaXlxbWVteXltbVVZYWltbXFpXV1ldXFxe
-X15dXl1aXF9iXl9fXVhbX1xdX11cXV1dXlpaXFxbX1tcXV5dXF1fX1taXFtbXl9g
-YF5hXFlaX11aXVxfX19fYWFfW1taW1xaXF1cXF1fXV5cW15fYF5bWlxdYFtcXFpc
-X2RhY2BdXl5gX19eYWBeYF1dXF1fXltdXFtbXFpbXVxdXl9fXltbWFxdXVxbW1pc
-XlxbXFpbXF1dXl1aXGBhYF5cXV5hX19hYF5fYF9dXF5hYF5fYF9cXV5eYmNjZGBf
-X2BgXV9gYGFhX19gYmFdXV9gYWFfX2FhYGFjXlhVWVleYGFhYGBhYV9gX2BgYV9h
-YmBdX11eYWBgYGFhYGBkYl9eXWBiYmFkY2FhYV9gYmBhY2RhYF5gZWRiYV9fYGFf
-Y2JhYWJjY2RoaGdkZWNhYmFhYGJkZGBhYGJiYmRhYF9jYGFiY2NkZGNjZ2RkZWVm
-aWdsampqa2lnaGBeXFxfYFlTW1xiamlkaGhpbGdlaWtub4a0x9Tc4eTl5+fo6el8
-fH1+fH99fnp4fHl5fH+Af3p4ent/f3+Aend7fHt7fn59e3t9e3t8enZ4en18fYB+
-fnt2d3p5e3l4eHl7fH9+f3t+fn19f315e3p6eX6BgHx+fYB/gYCCf4KBfoCBg4GB
-gX1/f3+Afn99gIGAfn1+gH59fn9+f4KAgH+BgX17fH59fHt6eXl5eXp3dXNzdnZz
-dHZ3dnZ2dnh2eXx6fn12d3x8d3t8ent8e3+Bf36AgH19fXp6en19eXh3eX18fn1+
-d3h6e3l3e3x7eHp9eXd6cXBzeHV1d3d0dnl4eXl4d3p6eXl5dXR3eHx9fn6AeHp9
-enl6fHt5eHh6e3p7eXp7e3h4d3d4eHZ1dHV2c3Rzc3Z1dnV0c3BxcHBwc3Ryc3Fw
-cXBwcHFwcXJvbXBxb3Fxc3Fwb25ucnBtbnJxb29rZ2ReWVZTUlZia3BsZ2NgYV9b
-WldXWFpeYWJgX2lucHR2en6BfXx8en54eX6AgYSFg359fXp2c21ubWpmaGVnZWJn
-ZmRnZmRfYWNpbmtpZ2lrbXJzdXV0c3FuamdlX2BhXlxaXVtYWVlXWFxeYFxWVlle
-Y2hvc3h5d3V5e3t7fH1+f4CDhYWCgoWDgYKGi42QkpOTjpKMhoOBfn12dnNsaWVj
-Y2FfYGBgamxqa2loam5wcHR1d3h3enp7dHRycHFycG1qaWhpZ2ZhX2htcHF3enp9
-g4eKhoSCgXp1cWptbGpqaGRfYGJlY2FfX2FhYWNiYGBhYF9gYmRjY2NiY2FiZmdm
-ZGZlZGJiYGVmZ2ZiZmZmZmdpZWJlZWRkZGdkYWJmaG1mZGRkY2NjY2JhY2dkZmdn
-YmBjY2JkYWNlYmNnaGdnZ2VjaGdmY2RlZmdnaGdlZWdnaWZmZ2VoZWdlYWFlZGFg
-Y2NiYmFgYGJiX11eYWNlZ2RfYWFjY2NhY2ZiYWRoZmJgX2JhYGBfYGBeYGFiXV5d
-XmBfX2FgYFxZXF5bXVxaW2BhYmBgXV1eXVxbXV5eXlpaXl5gX1xcXFtXV1peW1xa
-YWFbWVtZWlteXFlaWFpZXF1YWF1cXl5aWl1eXl9dW1xeXV5eXFlaXV1fXVxcWlpb
-XFtbW1xdXFxdXl1bWFlYWlpdXV1eX2FfXl1dXl9eX11eXVtcYF1cXl5fWl1cXFxa
-XV9dXVxdXVxdYGBdYV5fXlxdXVpWWFtgX15dW11fYGBfXFpdYV9eX15dXFldXFxc
-XFtcX1tbXlxaW1tcWVdaXFxcXFxcXltcXVlaWlthX19dXFxcXV9cXV5cXl9eXl5i
-Y15eYF9eX19fYGFjYFxgYGNiYmBiYF9eX15eX2BhY2FgXmJgYWVnYmFgX2JiY2Bh
-YV5gYVxcXmBgYWJjY2FjYmFjYmJgXWBhX11dX2FiYWJiX2FlZWJlY2FfYV9gYGBi
-ZWNkY2FkZWNhX2FgYGBgXl5hZGFiZGRiYmJjZGVkYWNkZGRkZGRmZWJjY2RlY2Ji
-YmVjY2RiYWNiZGRkY2NlaGVjZ2ZjY2JhZmZpaWloaWprZl5YWFtfXVxeX2ZoamZo
-aGlpbWlpaGxwhK/J1Nvg5Obn6Ojp6X6AfX97fHx9e397eXl8fH59f3x9fH59fHp6
-fYKAfX2AgXt6ent8ent4d3h7e359fXx7e3x8fX19fXp4fH17fH19f3x9foCAfXl5
-en1/eHx+gX2Afnx6fXt/gIeCgYGCgoOCg4F+f3+ChoKBg4R/f399f318fX5+f4KB
-gYJ/f36Bfnx5d3l2dnl7enl2dXl2d3l3eHd2dXBvdXt5eHt7fn16fHt6fHt9fX6A
-fX9/fnt+fXt+fnl4eXx8fYB/fHt8enqBfHp5eXp5eXl6dnVzcW5ubnZ1cnBydHV5
-end2d3h6e3p9e3l7fHl6fYB8fHp4eXh3en59fXx6eXt+fX98gHx6eXt3eXRydXR1
-dnl6c3F0dnd2c3NxdXRzc3N1d3Rtbm9ucnBvcXBwb3Jzc3BwbW5wdHFycXJzcG9w
-cW1ubGlkX1pWUlpbY2drbGpjXVlbWVlZWVhWWF1gXV5jbG9zcHN3eXd0c3Bydnt9
-gYKBgYB8enh3dG5vbWlpZGNgYWJjZGVmY19hYV9gY2FiZmVlaW1vcnNzc3Z1bmVj
-Y2NgX11ZVFJUU1BUU1NUV1paWFldXGFpbG1ub2poamptcnRydXt8foOCgoGIioeH
-i42Oj46NjYyGhIKAfXt4cnV2eHlybm9ubGVhYmVoZ2BdW11hanJ2eHd6eHl8f3x4
-enV1enZxbWhjYWZraWhobG5wb3NzeX+DhYaFhYeGf3t1bmhoZ2hlZGBgYGNhYWBh
-Y2BhYF5jYWJjYGBjZGVmY2NiYmRmZWViYmRlY2ZjZGFiaGppZWZlZWVkZ2hoZWRj
-ZmdlYmNmaWVlY2FjYmNjZGZlZWViY2JhYWZlZGRjZGVkZWNkY2FjZWViZmlpaGdm
-Y2FiY2VlZmdraGJnZ2dnZF9fZGRgX2JjZGZlZWNhYF5cXmFiZGRiYmNjZGFhY2Fi
-YmBiZGVkZGFhX15dX11dXmBfX15gYWBdXGFiYGBeXFpeYGNhX1xcXl9dXF9fX15d
-WltcXV1dXl9ZWVxdX1xdW1tbXF5aW1tdXVxYW15aW1xbV1dYV1lbXF5fXFxcWlpY
-WlxcXltbX11cXFlZWV9gX2BdW15bW1tbWlpcXl1cWllaWVxcWVteX15fXVxcX19b
-XFxdXl5cYV5bW1xdYGFdXF1bXFtdWl9gXlxdW1tbXlxcXF9fYWBdXVxdXltaW1xd
-X15cWltdXl1cYWBfXl5bW1tYXVtcWlpZW1xcWlpZW1lbXV1dWVVYXFtaW11eXV5f
-X11dXV5fXV5cW15aXFxcXlxeXWFfXV1dXl9fXl1dXVxdYV5eX2JgX15fYmBgYF1f
-Xl9gYWNiYl9fXF1eYGNjY2RhYGRhYmBfX19dYGJfXmBhYmJiYWFiZl9fYGFgYWJi
-YF1fYGFhYWBgYWRjYWJgXl5hY2FgYmJgX19hYmJhYmBfYWJkYV9fXWBjYmRiYF9g
-YF9gYmJfYF9hYWFiYmBjYmVlYmJhY2RgYWFjZWFhZGFfZWRjY2RiYmJiZWRmZmho
-Z2hpamhqbGtmYl5bWWFdXl5hZWdmaGhqaWdlZWRpZ3B+ssjU2+Dj5efn6OnqgX59
-e3Z6fIF/ent7enh8fHt+g4F7enp5ent+foGAfXt7fXp8eHh2eHp5eHh9fHx+fHx8
-fXx7eHp7end4fHx9fX19fn18e3p8fHd7e3t7enx+fXx/fXx9e31/fn+CgX+AgYKD
-hIuCg4SDhYOBgn9/gX9/f4F+f39/f4N9fn59f359fHp4dnV3eXt6e3t3dnV0eXp3
-d3l2dXV3dHd8fXp6fXt+fH1/gH9/gn18gYB7e3x9fn18fH18fH2Bf358e3p7dn5+
-enl6e3d1fHh4d3R0dXZ3dXV0dXZ4eXl7eXp8fHx8en16enp7ent7fHt7e3p5dXd8
-fXt8f31+fHt7eXl7e3t5eHd4d3Z1cXVzdXV2dnR3fXVycHNycW9ydnV4cXBvbW5v
-cG9vb3FvbXFxdXFzb29wcXBybm5ucXFwc3JraF9YU1RUWF5maGloZF1bXV1eW1ZT
-VFVZXWFiY2htbmpsbm9ua2dnbXJ6g4F/g357dnJycnVyc3Fzb21qY2BeXWJkYV9e
-Xl1gYVxZWFlYXmNnbG9xdnh1c21raWdiX2BfWVhTUU1RTE1MTU9OUlJXWl5iYWBk
-ZmRiYmNjaGZsb29ydnp6fn18fH+BhYuOk46HhYaFgn9/fXx4bWZnanN1d3BwcnFt
-aWNjZmRfW1dXXGFqb25xbnRydnRze4WDgXt7cm9rZ2RjZ2lnZmdrbWxrbXF4fYCA
-f4SHhoaEf3dwbG1raGlmZWFgZWNmZ2VmX2BhYWBgY2VjYmNlY2RiYGJjYWNkYmNi
-Y2RjZGZnZmNmZ2djZGVlZmVmYmBkaGlnaGZlZWJjZWZkZWNmY2JiZWdiY2dkY15g
-ZWVlY15fYGNiYWJhY2ViZ2dlY2ZkZ2dlY2NjZWZnaWptbWdqZWVkZWpiX19jYmNl
-ZGZiY15cXWFfYmJjY2RmZmNiX11iY2JiYmBgX2FgYGNkY2FfYl5hYF9eX2BgYWVg
-Xl5eYF5fZGNgX2BgX11cW1tcXllYWVteXVxeXVxeXFxdXV5aWFtbW1xbW1tZWlxb
-WlhXWFtbWFhYWlpXVldbW1dYW1xeXVxZW1pcXF1eYVxaWlxeXV1cXl9hXlxcXFlb
-XWBcWVpcWFhaWFteX15fYGBcW1tcXV1dXl1cXVxaW1pbW1xfY11eW2BeX19fX2Be
-W1xcXF1fYGFgYF1dW1xaW1pcXV5bXF5bXV5fX11aW1xcXV1eXWFdXV5dXVtaW11b
-YGFhX19dXVxaXFxdXltbWVtcXF1cYFxbWl5gXl5eXWFiXmBgYF9fXV1dXl5bXlxd
-Xl9fXVxcW11gX19hX2FdXl5gY2NiYVxdXF5dXl5dW1laX15iYWFhZmdjY2RjY2Bh
-X2FkZF9fYGNhYmFgYmFhYV9eYGFfYGNkZWNgXFxdXl9fXl1gYV9pYWFdYGJhY2Jh
-YmRnZWNgYGNjY2BgX19fXWFiYGBiY2FfXmBfYmFhYWBiYmNgX2BjYGBjZGJhX19f
-YWNjYGJgY2RiYmVoZGJhY2RlZWVlZGZnaGZnamdqaWlhXVxdX2JfYmZlaG1samdn
-bGdjaWpsbISyydPa4OPl5+np6up9fnp7e3h8f31+fnl4eXl9f3x/g36AfoB/gX6A
-gX18fHt6e3t6enh7e3d5e3t7fYJ/gYN9e3h8enl3enp6foKAfnx9fXt5fHx7fH19
-fn9+e32BgICCgH+Bf31/hIOBgIGEgYCBfoKAgoKEhoaCf4GCf4B/fn18fH18fHt5
-en18enh5ent4eHd3d3V1dXV0dXR1eHh3dnZ5eHZ2dnd5enx+fHx8fH9+e358e3l6
-eX1+eHp4e3t7eXt8f35/enh9fXt6e3x7d3h7fX96enx6d3V2dnl4d3p6eH57eHd8
-fHx7e31+enp5ent+fH6AfHx+fnp5eXp6fHp+e3x8fHh5dnl5enh4eXZ1dXl1dXh0
-dHl3eXZwbnB2c29wcnNwcXRzcG1ta2xvcXJycm9vbnFyc3JwcnFxcHFwa2twbG1t
-amZiW1ZWUVVaY2RlYl5aXVtYWlpWVVRVWV9dXmFmamhoaGxqaWNgZWpyd31/enl9
-eXdzcHF2dnJ0b2xuaGFjY2BhZWJfYGJhX1xZWVdVVlVXX2VtcXN0cGpkZWVjY2Be
-XFlaWV5WVFFPTUtLSU1PVVZXWVpZXF9bW1xdZWZoamhscnV1eHl6eXp9fX6ChIOF
-hYOCgYWGiIZ8dXNxb3BvcHFxeHl4d3Vua2dnZGRjYF1eYmdnZl9cXmNkaGhscnZ4
-dnRwbGhpaWZmY2VjY2RpbGxrcHZ8goJ/foWLj4uFfHVxb29taWdnZWRiZWRjYmNj
-X19gY2FiY2ZkZGJhY2JhZGZjYWNkYWBhZWNlZ2ZpZmRlZWRjZ2lpaGdlZGNkZWVk
-Y2dnZWZlZGVjYl9nYmVmYl9iZWRiYWRlZWNjYF1gYmZhYmNmYmBhYmNiY2NjZGNj
-YmFkZmZoZmlsZWRnZWdmZmVkZGBlYGNjZWJiZWNhYmBgYWVkZWRiXVxeW1leX19g
-Xl9hZmRjY2RjYmJjZF9gYGBiYF5fYGNeXl5cW15fX19gXl9fX15jYFxYWVxbYGBe
-W1pcX15cX1xcX1hZXF1bXlxcWlhYWlhYV1haWVdYX1tcWlxbWFVaWFlZXl1eYWJe
-W1pdXF5eXVxdXlxaW1pcXmFfYF5bWltcXVxaWFpZWVpeYWBdXV5fXWBcWFxeXVld
-XlxeX1xdXlpbW19eY15cW1xbXVxcXVxcW1tdXV5dXV5eXFxbWltbXV1eX11eW1tb
-WltdXl1dWVxcWVpbXV9fX1xaWltcYWFfXmNgXl1eXV1bXGBfXVtfXWBdW1xaW15d
-XV9dXV9eXl5iYF1dXV9iY2RiZWNdXl5eYWBeXV1eXl9fYV5fYF9bWlpaXmBgX1td
-XV5cXl1bW15gZGdlZWNiY2FgYmFkZGFiYWFiX15dX2BeX2NjYWBgYWFgX19hY2Jk
-Y2BgYGFhXl5hX11fY2FfYGNjZWNhZWJiZGVlZWRhYWBfYl5fXV9gYWJhYGFiYGFh
-YGJgYGBiYWNmaGViYGFiYmFlZGRfYmRiZGJiYmBfY2BiZ2ljY2JiYWRlZWZkZmdm
-Z2dkZmdmZ2RgYF9gYGRnZmRiaGVia2xramdqZ2xylrjJ0tre4uTm6Ofp6Xx9enh8
-fn5+goF/fnx6e4CCfnl+fn97fX19fn+AgX97fH1+e3l6eHl6eXd3eHp5en1+f357
-d3p7en18fXt9f398gX6Afn19fICCgYCBgICAf4CAf4F+gn+Cg4KBgoOAf4CBfn97
-f4CDgYCBgoCBgICCg4N+e3x9fX1/fnh4e3t6fHt9e3x6d3h4d3V0dHR1eHZ3d3d2
-eH94eXp7dXd5enl7fX17e3t+f318e32AfXx+fHt+f3p3e3x+e3p6eXh4eHl4eHt6
-eHp6fHt6e3V3d3N2d3l6eXt9e3t+e3t7enh3d3h4eX5+en18fX5+e3t7e3x/fHp4
-eHh9fHp3dXh4dnZ4enl7eXV2dnV1dXV0dHZyb3FzcG9zd3V0cnFycHBvbXFtbG9w
-cHBycHBwcHJycG1xcG5sbGxqaWltcG5qY11YVlRSVVpiYVtZVVVXWlhZVFZXV1pb
-XFtfZ2hpZ2doZWNeXWFnb3B3fnd8gHt2c29ucnVzbWtoaWhmamhlZ2diZGRlZGFe
-W1lZV1dUU1VZZmxuZ2RhYGJlY2BeXVlZW1tdXltWVlVRUlBPT09PUVZTVVlVV1da
-W1pXW11fXmNpbHBzdnVzdnl/fHp9f359f3+AgIKDhYN/foSDfXp1eoCBf3t3dG1s
-amludHVxa2hnYVxZVFRUUlVSVVlZXGJkZGhoaWdkY2JhY2JkaG5xb3B0dXd4enp9
-ho2QkI2GfndwbmpmZ2VjYWRlYGJfX2BfYGFlZmRkZGZkYmJiYmBjYWJjYmBkZGNj
-ZGRlZ2poZmNjYmNmZmhpamdnaWhjYWJiZGVlZWVmZ2hlaGZjZGZiYWVjYmNmZmFg
-YWBiZGRkaWhlZGRmZ2NjZGVjY2JhYmRoaGVlZGRmZmRmaGRlY2JiZ2dmZmRjZWVn
-aGZkYmJiYWFkY2RjY19gXl5eXl9hX19gYmVhZGFdXGBgX2NjYGBgYWNhYF9dXFtc
-XVtbXl9gW2BhX2BcX19eXVxfYWFeXVtbXV9gYV5cXV5dYFtbWlpaWFhYWlhaV1hV
-V1pbV1hhWlpaXVpXV1haXFtbXFpYWltdXl1bWl1fYWFeXVpYWVlaXV1bXFxbWVpc
-WVxdWldcXmFgX19fX19cXF1hXF1bXlleXF5hXF9dWlpdYV9cXF1ZWVhXV1lbXF1c
-XV1bXl5eXl1gXVtcW11gYF5fXVxcXFpbWVlcXl5dYFtZXF1gYl9eXVxgX15fX19f
-XV5fXl5gXl9aXV9cXl9dXl5dWlxeXl9fX1xYXV1fXmFfYFxcXGBiYGNiZGFbWlta
-XWBgXl9hXl9bXl5fXltbXF9eYGJgXl9eXV1bXmJlZWFfX2JlZGRlZmRkY2RoZmNh
-YWFiYGFfYF9gYGBfXF1fYmBfYWBgZ2ZlYWFjYmNgXl5hXl5iY2JhYV9hYF9eYGJl
-Y2BiYmFiXmBfYWBgYGJgYGNfYV9hYWBhYmBhYmBiYWNjZGFfYGVlY2RlZGhkZGBi
-YmJgYWRjYWNkZ2ZkZ2ZoZ2llY2doZWZnZmZpampnZWZlZWJiY2hma2lmaGpnaGxr
-bW1zanCgusnT29/i5Obp6OrqfX9/gYGAfXyEg394eX17fXt8fXx7e318fH5+fX16
-e3p8f319fHt5eHh6eHh2d3V2enp7eXx6enh6e3yAenp7fYJ7fn6BgH59fYGAfICA
-g4F/f4F9f4GDhIKAgICBgIGCgn59fX98gH9/foB/foGCgYGBf4F/fX9+gYCBenh7
-enl8fX58e315enh2dXNzc3V4eHZ1d3p6e3p8e3p+eHp3dnh8enx8fH57enp5fXp7
-fXx+gYF6enp8fHt6eHl7ent8end2eXd4eXx7e3p9f313eHh6d3d5e3p7ent+e3l3
-e3t4dnV2fHx5e358fH19e39+fXt+fH97eXl7eHV1eHd3eHp5eHl6eHp5dXV3dnd2
-dXR0c3JydHBzdXVxc3FwcHFwbW5ub21sbmxtbm1xcW5xcnJxb2poaWtrbnBrZWFg
-WVRUVVpcX19iWlJPVVhYV1RTVVVbXF9dZGZpZmhmY19eW1teZWxxdnNwdXx9dXBv
-bnV4cG1qZ2hqbGhkYWBkZmZoZmdpbWpnYV9dWFZWW2BkZmRkYWBdXl9gXVlXVlRX
-V1dYWlpXWVdSUlRWVlRVVFJQUlFQU1RZW2BaW1tWXF9hZGdkZWhuc3V1c3h2d3dz
-en59gIGDhIOIhIJ+d3V0c25qZ2VjX19hZW5wdG9oZWJiYGBhYVpZWFdVVVhYVlZc
-YGBfYmNjZ2lpZ2RrcHBzenl7eHZ3enx+hIySlJGNgnRubGloaWhnaGNnYF5fYWBf
-X15gZWdnZWRiYmRhY2JiYmFhZWRhZGRjYmFlZWZkYGJlZ2VlaGZmZmdmZmRgZGZm
-Z2ZiY2VlYmRiY2RhYGFmYmFlZWRjYmJjZGRoZmZmaWlqZWNkYmJhY2JjZWZnZGJm
-aGVlYGNiZGNjY2RlZGRiY2RkZWZlYWBgYF9hYWFiYV9fY2NkZGBeXl9fYGBfYWJj
-YmBgX1xdX2FiYmBgYGFfX2BhX2BhYmBcXFxfWVteXF5gYV1eW15dXV9dW15eXVxb
-XF1fX11aWltdXltbVldWWlpaWlpaV1VVWFdYV1laWFlbXVxYWFdbWFdZV1xaWFxa
-W1paW2BfYF9dXVtZXFtfXV9dXFtbWFddXVlYWltdXV1dXlteXVtcXVxbW11dXFxd
-XV1eXmBcW15dXFtaW1tYW1tdWlpcX11fXVtcYGFeXV5dXFxbXF1fX15fXF1cZF5d
-XF9fYmNgX11aXl1bXF5fYF1eXFxdYF1cXl9gXV9cW1taXFxeXl5eX11cW1paXFxd
-XFlaX19fX2BgYGJgXV9hYV9fX2BcXF5cXV9fXV9eW1xiYF5fYVtbXl5fXl9dXV1c
-XF1eXV5fXl5fXmBgYGJiZmViYWJiYWJiY2NjZWBeXmFfX19fX2FiYV1bX2BiYWFh
-YmNiY19fX19fXl1eYGFhYmJiYGBeX19gYmJiX19iYV9gYGNiXmFhX2JfX19eYGNj
-Y2VnY2BhYmFhYGJiX2FkYl5hZmRiYmBhYWJiYGJlZWVkZ2prbGhpZWZoZWVlZmhp
-am1sa2hpZWRhYmZoZmdmaWloZmZnZ2hpaWtsbIO2ydPb3uLk5ujo6el7fH+AgHx8
-eX2Ffn+KgoN8fHl7fnt7fXt6fIKEfHx5en19fX54eHZ5eXl6eXp6end1dnR4fH99
-fXp6fH17ent7fH5+gICBf359fn99gYJ9e3x8enZ3eX19f4KDgoF/fn6DgX98hIWA
-gH9+fYCBfn6AgYF/goKAfn1/gH99gH54d31/e3x6enh5dnVzdXV0cXBydXl0dHZ3
-dnd3dnd2eXl6d3p4enx7e3x7ent7e3p/fnx8e359fXp5enl6e3x6fHt3e3p3eHp6
-fX1+fX57enp8e3l8e314eHx9eXh3eXp6eHZ3dXh2eX17fHt4e3t6fH16eHZ4enl5
-eXx6eXl5e3p6eXl7end5dnd4c3N5d3Jyc3Z6eHhzd3J1dHRzcXFxcHBubmtsbHBw
-bW5vcG9tbXBweXBxa2hqbG91a2hnYFtXVVZZYl9dWlZTT1JUWFlYVVRXWVxeY2hp
-bGhkYV5aVlVZXGJqbWxsbXV6fnx3cnF0c3FsaGptbG1oZmtraWppaGhsbWttb21q
-ZGBcW11gY2RhYFxaWVZWVVVSUlJVVFVWWFpdX15cXmBcW1paXF5aV1NSUVJUVVZY
-XV5eX2BeXF1eY2JkZ2lqZ2tra2xqam5zeX1+foCFjYmJhYR/enRuaGpvcG1saGhs
-bXF0dHF0c3JxcnRxbGxsaGZoaGhjYF1eX2FhZGdqa21oamxvc3Z5enx6dHVzdnl+
-ho2Tl5KKgHVta2pqa2hkYWBhYGJiZmlhXmBgYWNkZWlkX19jZGJhY2NiY2RkZWVi
-ZGRjZmZkZGJkZmRkYmRnaWVoZWZkZmNlZWRkYmRjYmRfYGRhYVxgYWNkZGJkZWVl
-aWVlaGlnZmVnZWVkZGRmZWVnZmZjYmJkZWRjZWVmZmZnY2NiY2RgY2VmZmRhX19g
-X2BhYWJgY2FjYmNkYmFgYmFdYGFgYV5fYGNjZF9iX2FeXF1dX2BhYmFfY2ViYWFf
-XFtdXl5dXV5dX11cWl1eXFxeXlxdXltbW11eXFxaXl1bX11fXV5aW1hZWFtbWFNT
-VFVXWFdYWFhbWlpXWllbV1lZWVlcWlpcWVpfYV1iXmFjZF9hXFxdXVpaWllcXlxb
-WVpeXFlVV1lYWV1ZWlhcXVtaWlxcXFxbW19dXmBdXV1dXFpZX1tdXlxaWlxdYWBd
-XV5bWVxeXlxdW1teXFtcW1xaXF9eXFtcXFtbXVxcXV1eW11cXGNgXFxdWltcWlxZ
-Wl9gXF1bXF5dYFxcW1xfX1tdW1tcXF9jZF9eXmBcXV5fX1xbXl5hYWBfX15bXF1e
-YF5gXl9fXl9gYF9fXlxcXl1fX1xZWl1kZWJfX15gYGBeXl9fX2JhYmZjX2BeYGJh
-Y2JkZGBhYmJjYmJhXmBgYF1eYF1eX2FhXl9hX15gYF9fX2FgYWFiYF5cX15gYmBg
-YGBhYmFjYmBgYmNgXF5gYF5fYGJgYmNfXl9hYmFhXmBjX15hXmBfYWBgY2NhXWJh
-XV1iYmViYWJlZ2hlZGVmZWNjZGhmZmVnaWlpbG1sZ2ZlZ2dnZ2JmaGhlZ2tsamlm
-Y2dqkbjK09rf4uTm5+jp6Xh6eHZ1f399gn+Cg4N/enp6eXl7enp8fnx+foR+f3x7
-f3x7fX1+e319fXt9gX9+fnl4d3p4fHt+fn5+fHp8fX15e319f4GAfX6AgH5/fnx6
-eXx9fIGFgoKCgoKAgYCBfoJ9gIGEgoJ/fXx+f4GCgICAf4GHg359fX1/fX5+fHd6
-fHt5enp3cnN2d3R2dXRzcXR0cnZ4eHh4dXd1c3V4fHp2e3p4enh7enp4d3h7fHl7
-fHx6eXt9fYF9eXl3fHt6eXh4e3t8fHt6fn19fHt8e319fHt7eHiCgHt6eXl4eX18
-eXl3dXd7fHt7dHZ7eXp7eXx6fHx7eXd5eHl6fXl8ent8fHp2dnh4dXR0dXl5dnN1
-end0dnV0dXRydXNycW5tbGxtbW5tbnBxb29wcW5ubm9sbW1pZmdsbWtmZGBdWFVV
-VlhgZF1aWFVWV1VWV1dXV1hcYGhpamhnZ2JdWFZXWFlhaGppZWlvdnZzdHN0dHRv
-bnFycm5pamxrbG1samprbnJ2d3JxbmplY1pbWlpcX1pYWFlTUE9QT1BRUVVXWFlb
-WlpdXF1gYl9dXGBhYmJfXFxZWVhaWV1dX2RoaGViZGZjYWFkaWhjYWVnaWtmZW1z
-dnd4e3+Eh4mJjYZ+eXp6eHp/fHl3cHJ3ent5enx7fIKAhIJ/enh3eXV5dXBsaWhk
-ZGhpaWRkam9xb25rb3BvcXFub3N5eXh7hpCWk4+FfXRvamlnaGdjY2FcX2JgY2Rh
-YF9fYWVlZWVnZGZhY2FhYWBgYWFiZmRhYGNkZmZlZ2RlZGVkZWJiYmNmZmFhZWNh
-ZGRqYV9jYmRiX15hYGBiY2RkYWJkZmJlZGZlZWZmZGVnZmVoZWVlZ2RiYGNjY2Fj
-ZGVkZWRkY2RkZGJlZGRkYmNiY2FiYmFhYGNjYmJlYmBhX19gYGFgYGRhYGFiYmJh
-YGBhYWZiYl9fX2NiYF9eYGBhYGRjYVxcXFxhYl1gXl1gXlxbXV1aXV9fX15dXF1e
-XV1dW19fXVxdXl9dXl5cW1lYXFpZV1VXVVRXWVlaWltbWFlaW1peW1tZXV1cW1lb
-XFtdXVxeXWBhXlxdW1xcW1tbWl5dXF5cWVtcW1lZW1tbW1paWVpZWVtdYFxbW1xe
-X2JfYWBhYF5gYF1eW11dW1xaWl1eW1pbWllcW1pdXV9eX19cXVtbXF1bW19gYGFe
-WltdYGFeXFtcWlxdXl1aWlxhXFxcXV1bXF1aXV1cXV1dXVxcW1xeXV1fXVhaXF5f
-YGBfXl9cXF9fXVxaXF1cXl5dX1xcXl9fXl5fXl9fYGJiX2BeX19dXVxfXVxeX2Nf
-YGVlZWRiXl9gYGFhYWNjY19hXl5eXmFgYWNkZ2ZkY19gY2FgYGBfX19fXl5fYF9e
-XV5eX2FiYmFjYWFiY19eXWNhY2BfYWBfX2BiY2JjYF9eYWBdXl5dXmBgXl1gX2Fh
-XltfYV1hX2JfX2BdYV9dXmFhYV9fYmRlX2JnY2JiZGVjY2NgY2VmZmVlY2ZnZ2lo
-aWppZmZmZ2hnZ2hoZ2RiZ2dlZWRoZ2dnZ2+ju8jS2d7h5Obn6ejpeHd6fn15f3p5
-fYCGf3h+f317enx9fX2Af3x7eXp6e35+fHx7fIB8fH18fnx9e3x6eXl8eHh4eXt8
-gH9+eXx6fn2Aent7f36Bf3+Cg357fH6Af4N+gYJ+goGBgH5/gIF8e398fn58fnyA
-f4GCg3+AgH9/goGBgX+Afn9+eXp9fnt7e3x6eHt3dHV3e3l3eHx3dHBxdXh1dHR3
-dXR2dHh5d3t7fn55d3p4eHx7eXp5fHt9eXl5enl5fX99fX14eHx7d3V1en59fHp6
-enx8ent5eXh5d3t6e3x5dnh6e3p8enx8eHh6eHd3d3Z3eXt8enx5en19fnp5enh1
-dnd8ent4eXt5eXd3dnZ3dnl4d3t7e3l1d3d3dnR0cXd1dnV1cm5zdXJxbW1ub3Fw
-bnNucHJ0cWxramppaGtta2ZjYl1bWFVWWlxdWVhXV1pbXVlYV1xeZGZkYF9gYmNi
-XVZTVldcYmZkX2BlZ2xzcHBxc3Nyb3F1dHFrbm9wc25xbG5wdHh3enZzcG9rZGNh
-Y11fYF5bWlpWWVZRUVBSUlJRVllcW1dYXF9dXmJhZWJiY2JjZ2dmaGdnZmViYmFg
-ZGhmZGdpZGJcXV1aWlxcZGZnaGlucHRzcnF4foaKhomHhX+Cg4ODf3l4eX17f4J+
-fHl+gX16foWHhYiHiYuIhYeDfHh0c3FtcHJwcHRvb3BraGVlaGlsbGprcHN2dHd+
-ipKWk4aGfnRwaWVkZmNiYGZoY2RfY2RhXWBjZmRjXl1hYWBfXV5gYGBfXmBeYWBi
-YWRoaWVkZWJgZGRkY2JlZWRkZmNkZmRlZWVmY2JiYWBhYWNlZWRjY2NoY2JiZmVk
-ZmZkZGZmZGNmZ2VlZ2hmZWVjZWVkZmZiYWNnZGViY2VkX2FiX2RmZGFiYGRkYmJj
-Yl9iZWNjYmNkY2RjYWBjZGFhYGBfXmRoaGBgY2RkY19gYmFgYGBgYGBeXmBiY15e
-X11fXV1bXV5dXVxcXVlZV1lbW15eXlxbWltYWldbXl1fXVtZW1tZWltXWV1YWFda
-WFlaWVtaW11aXFhbXV9fX1pZV1pbXFZYW1tZXVpZWFpZXFxeXV9cW1pdXl1cXWBg
-XFhYWVdaXFpcW11dW1lZWFxbWlxcXV1gX2BhX2BgX1xcYF1eXV9cW1paWVhaXVxa
-XF9cXGBhYWBeW1tcXF5dXFxaXFxfX2BfXVxdX15ZW1xdYWJeXVtaWltaXV5bYV9b
-W1xcXF1eXVhYWVtcXFxcV1dbWltdXmBgW1pdX11fYF5fX15dXVpaXWBjYGBfXV1e
-XmFhYGNjYV9eYGNhYV9dX11gYGBhY2NgXWRiYGNjYV9hYWJjYmJjYGFhYWNhYmNj
-YWRiY2JfYGFjY2VjZGFfX11dXGFiYl9fX2JhY2FiY2BgYWJiYGBhXmFjYF9fYF9g
-XmFjYmJiYWJjZGViYWFhX2BeXVpgYGBgXV1fYGBgX2BfX19hXmBgX2FjYmJjYWVn
-ZWNjYWFiY2JiZ2hmZGhnZ2dnZ2lpZ2hmZmlmZWVjZWVmZmdoZ2VkZWdoZ2Nkamhs
-cqm7yNLa3+Ll5ebo6emCfXR4gX19gHx+fn58gHx8enx+fX5+f399eXh4eXl5enp+
-eHt8fnp4eHh8enp5enp3eXd2d3l3d3p4e3x9fXp7fnx8e358e359fH19eHh+gHt7
-f35/fn1+fH9/fX+Ag4B/fHx+fn5/fn+Af35/f39+foKAgYGCfH9/f32Afnt9fHt6
-eHp2dnt1dHp7dnh6e3t0dHV0dnl3dXV0dXl6eH1+fX18e3t6enx7enp7fHx6eXp1
-eXh3ent9gIB9eHt6eHV4end5eHh9d3V2enl6eXl5fH58dnV5d3Z2d3p4eXd3eXx7
-eXx5dnh3dnh3eHt9enh7en16fHp7ent8fnh1eXd4d3Z3enh4eHp7enh5dnd3d3Jy
-dXZ0c3JzdnZ0dXFxeHpyb3JxbnBucG9tcXJwc3JubGhmZ2lrbWttZ2JeW1hSVVdZ
-WFlZWFlYWFlaWVpeY2VmYl9ZXGBiZF1VVldYXWFhZGJfW2BqcG5vcnV2b25vcnFy
-cHJ0dHF1eHl3eX17fHp3c3Nxa2ZmaWlnZGJgYGBdV1dVWFpYVVRWV1hWVlleXlxg
-Y2Zpamttbm1tZ2JiYmZkYmNiYV5eX2RkYGFmZ2RfXFhZVlVTV19kYmNmaWlucnRz
-dnd7foOIiYmLj42Kgnx8fIOLiYB7d3Z9gYODg4F/hISAhIeNjIuKi4uLjIeAeHp7
-e3t5dHFzcWlnZ2lqbGdkZWVqbnBydXyCio2SkIyEfndva2llY2RnZWJhYWJiYGBj
-YF9lZWFgYGJiYF9dXl5gY2NqaGFiZWdmZmZlaGZlY2RhZGRmZmVkZGRpaGJoZ2hl
-ZmZkZGJkZ2BhZmVjY2JhYmJlYmNkZmVkZGJiZ2ZpaWZnZWVnZ2dmZmdpaGloZ2dm
-Z2ZnZWJiZWNgX2JlZGBgYmFjZGFjYmFjYl9gZGJfYWVmY2VkY2BfY2NiYGBkY19f
-YmJjYGFgX2BgXl1gYF9fYGBhX2BfX19cXF1fXVpdXFtbXF5bW1dXV1hcXmBcW1tb
-WldbWlxdXF1cV1VZW1hZWFdbXFpaWlteWlpZWl1XWlxbW1haWVpYWFtYWVdZXFpZ
-WFtaWVtaW19dW11bWllbXFxdXVxbYGBcW1paWVlcW1laXV5cW11cXVpbW1xaXmBf
-XlxcXFteX11eXV1cXF9cXV1bXFxeXF5eXl5bXl5eW1taWlteX19cX15bW11dXlxa
-W1xbW1xaXF1fXVxeXmFcW1xdXVxcX11eXV1eXFtZWVlXWFtdW1pcWVdZWl1eXVpc
-XFtdX1xcX2BfYFxdW11cXV9hYF9gYWNhYGJiXl5gXV5eYGNhYF5dX2FkYlxeYmRj
-YGJkZGNhY2FgYGNjY2RjYmljY2JjY2JhYmBhYF5fX19iZWBgYWBeYGJjYF5gYF5h
-X19hZWRjYF5fZGVgYmFgYWBiYmBcXl9gXl5gYGBkYmFhYmFgX19hYF5hYmFiXWFh
-X15dX19eXmBgYl9iYmBgY2ZlYWFhZGJgYWVjZGNlZmNmZWNkZmdmZ2ppaWppa2xp
-aGhoZmNjZWZqZ2ZnZ2dnZ2RlZmRiZmhuk7nJ09zf4+Pm5+fp6YB+gICCgX17en19
-foCCfXx+fn+Cfnt6eX1/eXp8fHt6e3p5enx9fHh2eHp8fX58fHx9fnx6fnt5e3l4
-eXp5enZ1fXx7eXx7en1/f3t5enp/e3t9hIJ+fH17f4CBf3+DgYCAfX5/fnx/gIB9
-fX6Afn16f39+goGBgYSEfnp5e318enl4eXl4d3l2d3x5dHh2dXh1c3d3eHZ1dnl5
-d3t6e3x8eXx7fX18fX17e3l6enp8eHh4enx9fHx/f4B+fX96end6fXp4eHh4enZ6
-fnp5eXd5enp6d3l5eHZ1dXp5d3Z3enp9fX98fXh4eXl3d3l5eXp9enl6enp4e3h3
-cnZ4d3Z3eHd2enl3enl9e3t6eXl4dXRzdXV7d3JxcHJ0dnJvc3V0dHFyb3BwcW9u
-bm9wcG1saWhoZ2hoZmZhYFtXVFlWVFVWV1hYVllaV1ldX2VqZV5aVFVYXFtZVlhZ
-W19lZGNgXmFkaWhsbnF0d2xra29vZmJqdHJ0dHZ6foOAfXt6dXN3dnJsbXBua2ho
-ZGFbWFVZUlJUVlhXUFFSWFhdW1tgYWdrcXV5dnd0bmtoZmVgYF9fWldXWlpeXmBi
-YmRgZGZjY2BgYmdiZ2VgWFhiaWx0dnh6e3Z4goWKjImKh4SHh4yMiIJ+d3d3fISH
-h4SGh4SHh42KjY+Tko+TlpmYl5KPjpCSjoaBfXpybmtpYmNmZGVoa2xscnF1e4CD
-jJWTj4yEfHNvaGlnZGZoZWNjY2RkY2JjYWBkZmVkZGBeY2NjYmFjYWJiY2VpZ2Vj
-YmJjY2RjZmVlaGpmY2ZkZWhqZWJkZ2llZWdpZ2VjY2RiYmJkY2NkZ2VhX2FhYWRk
-ZWVmaWZpa2toZWNiZWZmZmhmZmdnaGZkZGNkY2VjZmhgXmVkZmVhZGdmYmRjYV5e
-Xl9iZGJjZGBgY2VnZWBgYGBiZGJgX2BfYmBfYWJhX19gX11dXVxeX2BgX2JjYGBc
-Xl9cXFtcXV5eW15dW2BiX1xaXV1eXV1eZF5cWVtcW1lZWFlcWllXWVtaWFlbWlxd
-WltZW1pYWVtZXV9dXV1aXV5bWVpcW1paW1pbXl9cXV1dW1pZWV1bW1xbXV1cXFpZ
-W1laWV1dW1tcXl9cXF9aWFpbWVxbW1xcXFtbXl5bXVtcXF5fX2FgYF9gYF5eYl9f
-XV9fXVxeXV5cXVlbXF1ZWFlcXFpfX1xZW1xbXF1bXF5dXVxeXF1dW1xbXF1dWl5d
-XVxaWllZWVpcW1tdXl1dW1lcWlpeXV5fX19dXFxbW1xdXFpcXWNgXV5dXVpcYGFh
-YmJgYGBgWVtdXGBfYF5hYmFgYWBeYWJgYGJiYmFiY2FiYWBhYWRjY2NgYWRjY2Fg
-ZGNfYWJiZGFiYF5fYl9fYWBdXV9dYWJiY2NjZGJeX2BgZGNiYWJgYGBgX2BgX19g
-Xl5dX19bW19gYGBhYWFkZWFjX11fYF9eXl9eYmNhYmJjXVxgY2NgYmdlZWhmZGVj
-ZGZhYmJkYmRmZWRkZGZlaWtsaWtqa2lmaWlnaWhoZmZnZmNiY2ZpZGRlZ2ZoZm6K
-tsrU3OHj5ufo6erqfoKCgYCAenp9fHx/fXt8e3p7fX99eXt8e3l7fXh6enx9eHh6
-eHt9fHx6eXh5ent8fX+Ae358eXx8fHl8e317d3d/g357en17fnt+gIB9eHt7enp7
-e3yAfoKCgX+BgICBgYOCgn9+fX9/fHp6fH19fIF/enx9foGDgoJ8enp7fHt6ent5
-enh4eXt3end4dnNydnh3eXl3dXR1eHt8eHh4eHh5fH+CgX17fH1/fH18e3l4enp7
-e3l+f4CAg4B8f3t6d3t8e3t3enh2dnp6e3p2eHZ1dnl5eHl4eHp6e3h5d3Z0eXt7
-fH55dnh3eHZ5d3p4fHh0dXp7d3V3dnR0eHZ1dXV2eHl3end3eH58enp5dnZ4c3Jv
-cHV0b29wb3Bxc3ZwdHJxc3JycnJwbnBzcG5tbG1rZ2VnZWRlZmBdXFhVWVZYVFZX
-VlRTV1daXV5hX19cWFdZWFVWWVlZW11fYWJjYmFjZ2loaW1ybWhmZWpoaF9hanBw
-c3p8e3l4enp8eHFvcG9xcHFxc3FtbGpmXldYV1VVVFNST01OUVZZWVpaXWBhZGpr
-bW9raGNkZWZiXVxaWVlYWVtbWFtZWFleX19hZGJkZ2hsbGpnY15eZG93eHt+goSE
-gHt+g4mFiImGiomPj4yCe36AgIB+gIGEhIWFg4WGh42MjpGWlJqbnJ6enp6fnZ6c
-mZSQiYF4cG1pZ2lqbG1samxvb3B2e4OHjpOSjYqDfnNuaGdoZGVjY2RlZmhmZmRi
-Y2NkZ2lmZGVgYmNkZGRmZGNhZGVmZWRgY2RjZWRkZ2RlZWZlZWhmaGtmZmVjY2Nk
-ZGNjYWFjYmFhYF9iYV9gY2FgZWZlZWdpZWZjYmRlaGhnY2ZjYWVlYmNlY2ZlZ2tr
-aGNiYmVkZGRhY2VjY2ZlZmNiZGJhZGNgXWJhYmRkYmBjY2RkY2FhYmFhY2RnZmNh
-YWJdXl9fXl1eXl5jY2FfXmBhYWFgXFxbXl9dYGFiYFxcXGFgY2FfXFpZXV5dXF9g
-XV1bWltdXV5eWlhaWFhYW1tdW11bXV1bWFlbW11ZWVxcXmBfXVpbXl5bXFtaWVxc
-Xl1cW1tWXFxcW1pbXl9cW11gXlpbX2FdW11cWlxZXVxfXV5iYltZWl5fW11eYV9e
-XFxdX15eX15eXmBfYGJgX15cXF1dXF1dXF5eX15dXVpbW1tcWl5cXV1ZWlxdW1hZ
-Xl9gXV1dXF1fXFpZW11bW15cW1pZWVtaWVtbWlxeXlxbXFxfXV5bXFhXW1xdW1ta
-XV5eXlxbW11dXmFcX2BfXV1cXF1dXV9gYmRhXV1bXGBdXFteXmFiZGRhXllfYV9f
-YmJfXV1fYWJhX11gY2FjZmZjX2BhYmBdX19jZmZiZWJhY2FfYGNhX2BeXl5iYmNl
-YWJfXWBgXl9gY2FfYmRiXmBjYmNfX2BiYWBgXV1dXl1gYWJiYmFjY2NiYV9cXl5g
-Y2BfYWBhYmFfYGFgXl5gY2dlY2NiYl9kYWJiZWNkZ2dnZmZpZ2RkZmdoZmZmZ2lm
-aGhnaGRkZWZlZGNiY2dmZmZlZWRnao22y9fe4+Xn6Ojq6+p8e4SCfn5+eXl6fH17
-ent9e3t5dnl8fX57eHt4dnd5d3l5eHd3eHp7e3h2dnZ5d3l7fXp7e3t7e3t7e3x7
-fn17en1+gYB/fX19eHd8gHx5enx7enx9fYCEgX5+foCCgoCAg4SGhYB/fHx/gIR/
-fH6AgYB+fHp9fn6BfXx+fHp7eXh5e3x6eHp5eXh3dnR2dXd4eHd4e3d0dnN2d3d2
-eXl5enh5e31+fH99fn5+f357e3t8e31+fnx7eoCAf357enl2dnp6eHl5e3p2d3d5
-eXd5d3Z2dHd7enx+e3d2eHh6e3h0enp+gHp2eHx6eXx9e3Z3dnp9eXp7eXt3dHZ2
-dHN2e3h5e3d3eHd4enl3dXd2dnh4d3Vzb25vbm9vcXRzc3RycHFycXJvb3NzcXNw
-bWpoaGhoZ2dnZGNgYl9dW1pXVlVWWFdUUlNVWFheYGBbV1VVVVVSVlxdXV1gYmFd
-XFxjZW5sa21wb2ljXF9hYWBfZWlvcHF1d3V2dnd3eHZwbm9sbm9vcnJub21mYFlV
-VVVTVFFOTk5MUlRUVVdYWFtgW1lXWV1dXllTVVZYWlpYVFJRVltcWVlYVFZUUlFO
-UVRXWV1jaGlra2pqZWVrbWtwbm91d36Afn6ChoiMiYaGjpGPiIKCf4KEgIGAfoKD
-hIeGiYuMj5CMj5KXl52enp6fo6Ojo56dnpuYk4yIgHh0cW5saWppamppaGx0fIKH
-i46Mi4aAenRrbmZjY2FjY2NlY2VrZ2VlZWRlY2RlY2pkaGVkY2VlZmRoZ2RjYWJj
-ZmZlY2NmZ2VjZ2ZnZ2hpZmdnZ2NhYGFhYmNgXmBhYWNhYVtgY2JjZWVkZ2draGVh
-ZWhkY2Fna2dnZWlrZ2RlZGRmZ2lpaGdmZGFgYGJnaGpjZGZmY2RjYWFeXmRjYWFj
-XmBhYmNlY11bXmBeYGJhXl9fZWZjY2JjZWBfYGFfXl5hYWNjZGBeXVxeXF5gXl5e
-XFxeXmBeXlxbWV5dX15bW1xfXV1fYFtZWllaW1pdX19eWltdW1lZXFxcWl9fWlpY
-XFtcXVpbWVlZXVtbXlxbXllaWldUWFtbWVhaW1lZV11eXF9eYWBaWl9cXl5gYmFf
-XFtZWltbXF5eYF5dXF9cXl1fXV5dXl5bV1hdXV5gX2BfX11eXVxcYFxaXVxdXGBe
-XFtfYF5bWllZWlpdXVtfXl5bW1xbXVxaX2FfXl9cW1xaWVpdYF9eXF5gX11YWltb
-XV9iY15aWlteYFxdXF1dW1tZWFtdWltaW19bWlxZWFpdYF1fXFldX2FhX19gXmBf
-YWFgYWFiY2RhYGFhXmBjZWRhZWFgYF1fYWFiZF9gXl9hY2JiZGVlY2VkYWJiYV9d
-X2JlZWVkYmBiYV5fYGRjYl9eX2FhYmJgYWJkYmFgYGBfZGJgYmFhYWJjYmRkYWFg
-YV9bXl1bXVxaXF5eYF5eX19gYGBgYmFhZGRjYmNhYWFiYmFiYF9gZGNiZmhkYmNk
-ZWRkZWZjZGZlZGhmZWlqZ2VjY2RmaGpnZmdpZmhmaGdnaGZmZ2ZhYmJkZWVpkrjK
-19/k5+jq6uzr63t6fHp4eHl4eHp7e3h5enl4d3p6eXt8enx7enx5d3Z2d3R3eXh7
-ent7e3d4eX58ent5fnp5e3p5e318fXx8e3x9fH1/gHx6eXt5e3x7fHt5e3t7e39+
-f3+BgoKAgH5+gYGBgYOEgH99fH6BgIGBf399fXt+fXyAf3x+foB7e3t6enp5eXh6
-eXd0d3RzdHV3dnh3dHV0dXN1eHd3eHR2eHh6eHh7e3p8fn9/fnx+fnt/fXyAfnx7
-e3t6e3x8enh6eXx4dnh6ent7enh3e3d5eXV4eXZ5fn16fHt2dnl3enl7fnt6eHh3
-eX96e3d4eXx7enh7fH59en18enl4eHd1d3l5eHl5d3x8eXd0cXN2c3V3dnR1dnRy
-c3Bubm9ycHNvcW1ubmxrbG5vcXBwcW5tbGdhYWNmZmJjYV9hYWBfXVhVWFdWVVNV
-VlVXV1xbWFJTVVZVVlZTVlhbXmReXFtgZmFkbW9ua2djYl5gYmFfYmdtcXJ0c3Fy
-dHN0dnRwbnBtbm5ucXZ0c25pamRfW1ZQTE1NTlBRUFJQTk9NTlFYV1VVUlFQVVNR
-T09OUlRVVlFRUVZWWl5bV1RVVFJQTEtITFJaWldZXmNvcW5oYmJgYl5gXV1jcH2C
-hIiMjI2Pj4yPjomEfXx8fX1/foB9e3l+hYeKjY+Qk5eWlZqWlZaanKKgn5+foKCi
-oJ6alpmVj4uDdm5lZWVqaWNlaXF0eX+DiYmJhYR+enBpZWRhYmNkZGViZGpoY2Nl
-YWJjY2ZoZWdkZWRkZGZmamZmZmFiY2NhZGZmZmZoamhkY2RlZmdmZ2plY2RkY2Ji
-YmBhYWRjYmRjYl9hZWNiZGJgY2RlZGVoaWpoY2dpaWZgYmdkZWZlZGZkZ2hpZ2Vl
-ZGRjZ2VnZmZpaGdpZmJgYmNfYl9cXV5hYGFgYmBfW2BeYGBgYWJeYGFjZmRmYmNl
-ZGFeYGFeX2FhY2FgY15cXFpbXV1gY2FgYF5aX2BeXl1dW1xdXF9fYV9dXl5eYl1b
-XFdYVVdcXVxbWVpbW11bWlhdWllaXFpaV1laWFlZW1paWlhbXVpZWFdWV1xbW1tb
-Wl1aXF1cW1pcXF5fXV1cXlxaW1xcWl5fXlxdXF1cXltfW1lcX11fYWFfWlpbXF5b
-WltdXV9dXmBfXVpbXFxgX19eW1lbWltbW15wZ11bW1taXFxaXFtbXlpZXWFgXV1e
-XmFhXlxZWltYW1xcXF5dVl5gXltbXF1cW11eXV5bWltdX15eXlpaWllfXFpZWVtd
-XF9gXmFgXF1bXF5dXF1fXmBeXl9fYmFfX15fXmFgYGJfX19fX2FiYWFhX1xdYF5e
-YmRiZGFhYWFhYGFkZGJhYmRjY2JhYV9hYmJlYmFgYl9hYmVjYF1cXV1gX15iX19e
-ZGVjYmFhYWBeYGFfYWNiX19fYV5gYWBhZGVhYWJiX2NiYF5eXV9gYGFfX2JfX2Fi
-X2BhYGBiZGNgYGNgZGJiZGVjZ2ZkZWlmZmhlZWdkZmRlY2RjbGlkZWZlYmNramlm
-aGdsaGdiZ2hnZ2lkY2RhZWdmZmqatsvX4OPn6evr7O3ten59eHd4eXl6eXp9enh7
-e3l8fHx6eXp6e3x7eHl7eHR5eXh4eHd5dnl5d3d2e3t7eXd7e3p4eXh8fH1+gHt7
-gH1+fX96fX56e3h3enp7fnp8fnx9foB8fIGDgoF8e4GAgX5+gYJ/fn9/foGAfoCC
-fX56e32Cfnx/fnt9fn17enl6end2eHd3dXFwdHd1eXd2dXV0dXZ1d3V3dHJ3enZ2
-d3h3dHl8e3uAfn9+fHx8fHp4e319fXt+e3h5enp6eXl5fHx5eHp9enp5eXh4eXh7
-eXh6eXl7e4B8enZ2d3d3eX1+fXh4e3l6fH18e396eXt7e3p/gH56d3Z7eXh1dXp8
-e3l5dnl7fHh1d3ZydHV2dnd1dHR3enh1dHRvcW9wb3Bvc3NvbW5ubnBvcG9wcWxq
-aGlkYWBhYGJgYGJfXVtZWFdWWFtXV1dXWF5hW1ZRVFNTVVpaWltYWlxkY2FgX19g
-ZGprZmdlYF1dXl1eYmtsbG9paWloZWlrcHBvZmNnbW9xdHl7fXl0cGplZF9bVE9H
-Sk1OUFZVVFFOUE1NTE1SUVJSVFVUVFNTV1lYWFVWVlpZWVlhY11dXlxbV1dXV11i
-ZmNdWFpgZWhxcXBrZmNhYmVna3FzeX6DiIqQjJGRjYWBfn17d3h2eHyAg4WFg4WH
-iI2QkJaYmJmZmpiXmZmXmJaXl5idoaKloZ6bn6KempWKemtobGhkYWRpaWtxeoGF
-iIqHh4F6dHNtY2FiYWdjYWFhZmhnY2JlZGNkY2RmY2JjZGhlaWtoampoZmZlZGJm
-aGVgY2RmZGRmZmVnZmhpZGNjYmJhYGFkZWVmY2ViYWFmYF9fX2JkYWFgYGdnZmhn
-a2hmZWRnZWVoZGZpaGdlY2ZjYmZnZmZmZWRlaGZoZmVmaWRkZWVlY2NhYGBiYGNi
-YWNfYGFjYmFfY2BhYWRjYWBlZGNkZWBjYGBfXl5gYmBfXl9cXV1eW1xcXmBfYV5h
-YmdgXltfXWFgX1tdXVxcWltfXl1cXlpXVVZaX1xaW1lZWFxhX11YVVRXV1dZV1lX
-WVtaW1hbW11aXFpaXFpcXVtZWllZWVtbXFtbWV5eWVpbW11eXlpZWVxdXF1fYF9c
-XF1eXVtbXFlbW11dX11cX11bWllaWVpYXWBeXFpeXl5gYF1cWl1fXFpZW11bXl5c
-WVxcXV1bW1tZWVxcXVhZWlxdXlxeWVxdXVlZXFtaW11eXVpaW1taXF5dXFtgYGNg
-XVxaW1laXV1cXl1eXlpZW2BdXl5cWl1eX2BeYmNdXVpZWlxfXl9cWl1bXl9dXmFh
-Yl9hZGBZW1xdXmFdXl5fYl9cX2JdXFxeYGJkZmJiYGBgYmJgYWFiYmFiY2FhX19i
-YWBfYF5gX2BjZWRiZF9fXFtjYGBgYWRkY2NkY2BeXmFjZGNhXlxdYGFeXl5iXV1j
-YF1cX2FhYGBfXmBjYGBjZGJfYWFeX15gX2FiY2JhYGBhYmJkYWFhYGNhY2JiaGVm
-aGZpaGZmZ2JjZGNiZ2hlZ2pnZWdsamZoZ2loZ2pmZWZpaWRjYmJlZWNlaZS6zNbf
-4+jp6+7u7/B7eXp8fHl4eXh6end2c3l8foF+gHt7eXp8e3p4eH15dXl5eHh2d3l4
-dXd4eHd7fnx3dnd7fHl5eHp9fHl6fX15fH5/foF9fXp6eXp6ent+g357fH19fX6C
-g4F/goGCg35+f35+gX98gIKBf318f36AgH56fYCDgX+Af32Afn9+fnt5d3d4dnp5
-dnJzdXZ1dnh1dnd5eHZyc3Z5d3h4eXl8fHh5eHx7e32Egn98ent7e3p6enyAfXx7
-e3l5e3d1d3Z6f3x8ent8eHd4eHZ5fnh4eHh5e3h7enl4eXh3eYB/e3t5eHh7enp8
-fHl7fHp6fHt7fHx+fXl7eHd4enh1d3p5e3p6fHt5eHl5d3V1dXV4eHl0dHR0d3Z2
-dXVzcnBwb3FwcnBxcW5ucW9ub25vb2tmZ2RfXVteYWBhYF9cWlNXWlpaV1RUV1ZY
-W2NcW1lYWFZWVlleXV5gYGFgYV9fY2RlaGVcW1xeX19eYGRnam1qZmNfXFpbXmRp
-aWNiZGlrcXJ2fHx9eHNwamBbW1RRU01OUE9QUlZVVE9OTklISU1SV1teXVpbXFxf
-YF1fYWJlZmVnaWpra2lsbGtoZWZma21oY2Zqa25wcXZ7fH51b3J0eYKGi42NiomI
-h4aDgoF6eHV1dnh3dnd8gIOFh4eFh4eLj5KVmJqYnJ2enaGhnJaVl5aalpaam5+j
-oZ2en56ZlZSLgHpxaWVlZ2dpaW13goiKjYuJg3t1c2xoY2RhYWFiYGJnZ2NgZWRm
-ZGFgZGdjYGRkZGdoamppZ2dmZmRlaGhqZmdnZ2VkYmJjZWRoa2hlZGRiYmNkY2Vm
-Z2RkZmhjYGJkY2RkYGBiZmNhY2lmY2VmZ2VoZ2dmZWVoaGZoamZkZWNgY2lmZ2Jf
-YmRiYGFhYGJhY2VmZWNkZWNlY2JgYl9eX2BiYmFgY2JhYWBiZ2VkYmFgYmNjZWJg
-YGBlYV9dXFxeXlxbWllZXV1fXl9eXV5eYGFgXVxdX11dXVxcWltdXVtcWlpfYVtX
-VVlZV1haWFlZWVtfYF9fXlZWV1dWVFZWWFtbW1hZWlpbWVpgXl9fW1tbW11dYF9e
-XFxcXV9fXVxfXl9fWldZXF5cX19fXl5bXVtbW1ldXVtbXFpaWltZXF1gYV5bWVtd
-YGJcW1xeXV1eX19dW1xdXFxcWlxaWl5cXmBgYF1bV1paW1xdWldWWltcYV9gXl1b
-XVtbX11eXVtZWlhcXVpbXVxeW1lYW15fX1xcWVtdXFxcX1tYXV9YW1xbW15cWVxd
-XV9gYV9aXFtZXFxeXmBeX15cXVxdXmBdXl9gYGFdXF9hYGNhYWFeX2FiYWBgXl9h
-YmFlZmRkYmNkYWFhYGFhYWFjYWFfXF9gXVxhYF9iXmBiZmNjX2BeYWBgYV9gYV9l
-Z2JgY2FgYWJgYmBhX2BhXV5hYWNnZl5eYGJhX2FoZGBiX15dYWBiY2BeY2FgYGFi
-Y2RiYWJhZGNgX19gYmBiY2JfYGFiY2VlaGpqZ2JjZWNjZWZmZ2RmaGpoaGhpaGVk
-ZGdpZmVmZmVlZmJhZGJiYWJojrjM1+Dk6Ort7e3v73p4en1/fnx7fX59e3p5e3l7
-e3t6ent8gX57f3t4d3V3eXx8dnd8e3d4d3Z3eXt7eXh3d3p7eXt6eXx+fXx4eHZ6
-fH57fX57en6Bfn6Cg4GDgX98fHt+foB+fn16fX+CfX59g4CAf357foF+fX1+f36D
-gn+Cg4GDgX+AfXx8en59enh1eHh2d3d0c3NzdHd5e3ZydHR3eHVzcnZ7fXh8eHp+
-fX17eHh3d3l3e3x+f355fHx7e398e3p7eXp3eHd5eXJ8fHl7en57fHl5dnd3eXV2
-eXp6eHl1d3Z2dnZ5fH57eHl6end3fHt8e3x7enx6ent6eH19fX18e3p6eHd7fHh4
-e3t6eHh5enx9eHR1dnZ1dHR0dHJ0c3JzcXN1cnFzcXFwbm9xcW9vb25tbm9sZ2hl
-ZWFdXl1dX11cWVlYXVleWlVWVlNVWl5dW1hYWVdXWFxZXWBeYGZqYV5dWVpdYF5d
-WlVYWFlZV1lfY2ZjYV1dYV9cV1RWWl9lYWRkZmprcnV4enhzbWdkYVlWWlVSVFVU
-U1ZWVlJRUVJOS0tQU1NZX1tbXl9iZmloZmZsbm9tcnV2cnFtb3B1dndzc3RzcW5u
-bW5xcnJ2eXd3dHh+iouNkZGTkoqEg4CCfHZ1dHJvcHJ1dHd3en6DgoOBgYaIh4uM
-jZCSlJiboaKkp6OenJ2dnJ6ho5+XmJ2empqbmZaUkI+MhoBza2lnZWZpcXh9h4uP
-ioqIgoB5dWxpYWFfYGFgYGFiYmJjZGVgYGJmZ2RkZWRnZ2doaGhnZ2dmZGNlZmpo
-ZGZmYmFhZWZlZWZmY2JkZmJiYmJkZmZkZWdnZ2RjYWdsamVgY2RlYWFgYF1hZWdm
-ZGdpaWppaGZlZGdpZmVmZGRkZGVnZF9dYGBgYWNlZWNhY2JhYGBjY2FgYWRlYmFh
-ZmZmY2VhYV5jYGFhZGZgYGJiY2JgYWFhYF9gYmJeXF1dXF5ZWltdYF9gYmBgXl9d
-X1tbWlhYW1lcWlxeWllaXFhbWlpaWVpWVlpdW1xbW1hYW1xcW1laV1VWWFtaWVhX
-W1pYV1daWlxZW1pcXVxbXVpbXF1cXFpdXGBgXl9fXFpaWl1fXVlZXGNfXFhcXl5a
-WVpYXF1bXV9bW1xbW15eX2BhY1xeX2FcXmFgW1xeYF9dXl5aWlxZWFtZWVlaXF9e
-YV9dWllbXV5bW1tbWlpbWllcWV1cWlZVWFxdWltfWVpdX1xaWllbX2BcW1paX2Fi
-X11cXFtaWVxZXF1aXGBbWVpaXV1cXGFbXF5fX15dW1pbWlpcXl9cWlleXF5eXF5c
-XV1gYF9fYGNhYF5eXmBeX19iYV9dYGBgYmFiZGJjYVpfYGNiYGJfX19fYGFfXF1f
-YWJjYV9fX2BiY2BfXmBiYGJiYGFhXmJkY2RjZWNgXmBgX2NhYWFeYGFhYmFfX19l
-YmJhYmFkXl9eYF9iY19fYmBhYl9dYGNhX2BlY2ZiXF9hYl5fYGFjY2BfX19iY2Rn
-ZmdlZmVmZWlmZmNjYmFjZWdjY2ZkZmRmZWdjZmhlZGNkY2VmY2FiZWyZtcvY3+To
-6uzv7u/xfnx6en2Cf4GEg4F/fHt7e3t8e3t6eXp8f359fHl3dnx5en5+e3x8enl5
-fX5/fnx9d3l3eHp4eHh5eXl5fHx5eXt4d3l8fXx8fX2AfoN/gIB+f4CAfYCAgHx9
-f3x8ent/f3+AgYGBf356fX58f4CAgoGEhYSBgoB9fH58e3t8enp3dnV1dnV3dnZ6
-eXh6eXd2c3NzdnV2dXZ8eXt+f3p7e3p9fHp6f4B6enl6e3l+gH99fn1+e3p6fHp8
-e3t5enp8e3t8en6Be3+Af3t4eHp6fHt7e3l5eHR0d3Z2d3Z4e313en9/en17e3t5
-fX18fHt6eHh5eHp9fXx8e3t5eXx8gH9/eXh9fHd6e3p5eXVxcnVzc3R2dnR0c3Bw
-b3Bvbm5xb3BxcWxubG9sbW1vbmtmYWFiYF1dXl1bXVxaVVlbXVxZWVVWWldbWVxZ
-W1pWWFdaW2BgX2NlZGBdWlhXWlZXV1hXV1dVVVVaX2NiX1taWFhYWVlXUVdZXl9c
-YGVraWlscXR0cWliYFxeXFdTUk9TWVlYWFZTVFhXVFNUVFVVVlpfY2FiY2ZnaGtr
-a3JzdHR2dnl5b2tqbnFxc3R2d3h4fHx7fHx6eXdya2htd4SNkJCQjouBfXx+fHh3
-dXR0dHR0c3N1dnmAgH+AgoaEh4eGhoWFiY2Qlp2foqWmp6alp6Sio6KhnZyYmpua
-mpqZl5aTj46Jhn91amtubWxvdHqAioyJiYiFgXlzbmpmZGJgXmJiYmFiY2JgYmRk
-ZWFjY2JfZGRmZWVkZmNiZWRkZGVmaGhjYWBhY2ZmZWZlZmRiX19gYmNhZWRjZmVj
-Y2JjY2dlZGNiY2VkYmRkZmBiY2NjY2RmaWdoZmRmZmRiYmNmZGdlZWViaGhmYWJj
-YmFlYmJiY2NgYGRiYV9gX2FkXV9jYmJjZGBgYWVmY2BhYGRjYmJfX2FkYV9fYmBe
-Xl1dXWBgXl9eXl9aWVteW1tdX19gXF9cW1xeXFtYWlpaXV9eXFtdXlpcXlpeW1dW
-WFhZWV1aWlpbXFlXWFhaXlpaW1pWWFlYW1lcWlpbW1lXW1xaW1paWlxbW11dW15b
-WVtcXV1bW1pZWl5cXl1aWVtaXl5fYWBdW15cXFxaX1pcXF5fXl1bXV1ZXl9hXl9c
-YGJeXF5eWltaXF5cW1xaW1xcWlpdX2JfX15cW1taWltaWllaWlpaW11eWlhYWlhZ
-X15dXVxaWFtdWllZWVtcXF5eXV1cYF1eYF5bWlpaWFhaXl5dYF1cW15dXVxbXV5d
-XF9gX15cWltbW1tcXlpdW1lZXF1dXF1ZW1xfX19eX11cXl1eXWBdXF5gY2FfYGFh
-YGFjZGRhXl5cXmFgX2BdXFxcXV1dXV9iYGJiY2BgYGFgX19fZGRiY2BfXl1hYWNk
-ZGRhYWJgYmNiYmBeX2FhYmNhYmBhZWRiYGBgYWBeX19hX2BgYmNiYWJgYWJgYWFh
-YGJjZGJhYGFgY2RiYmFlY19hYF5fYmJjZGdmZmJjZ2poaGdoZ2ZkZGRlamdmZmVk
-ZGRmZWRjY2FiYmNiZGVobYu1zNff5Ofq7O7u7/B8fXp7eoCEgn9/fX5+e3t3eHt7
-e3t+fXp8foB6enl8gHt9fHt8e3p6eXd6eXl4dnh2fHx6eHl2eXdzdnp6e3t8fHh3
-en+AfXt5enx/foB8fnl+fX99f4OBf35+gYB9fIF/g4GCfn6BgYOEgX+AgX+Af399
-f35/fHx/fHt+fn9+e3t6end1d3Nzdnp4d3N0dnV1cW9xdHh4fH15eHZ2eXt5eHp4
-dXh4dnd6fn1+fnp9f3t7f3t7fX58e3x6eXt6e36Df3l4eXx6eXt8e3l3enp8fXp7
-eXd7e3l4d3Z4eHl5e3l4eXt8e3t5eXl6eXh5eXt5e3x6e318e3l7d3p8f357ent6
-e314eHd4fHt7dXh2dXRwc3RzdHR2cHRyb29tbm9vbm9sbGtub25vbG1taGNfXl1d
-XFxdXl5fXVhZWllYV1ZXWFdYWFZVVltaWlhcXFlfYGJkY2JeW1hYWVZTUVVXV1lV
-VlVYWFpdXVhbWFZYWFdUU1VcXFtZWV1jZmNlaGxucGxmX15cYGBdVFNUWVpcXF5f
-XWFgYmBgXV9bVlpdYF5fYV9iYWFmam1xcnNubW5wdHRubGhoaWtxdXN1dnp9gIKD
-g4WFgn14e3p8gouLkJKRiIOAfXp0b3ZzcnV5eHZ1eXt/gX17eHd7gISDjJKMjIuN
-jZCUlZycoaSmqaqpq6uqpKOinZ6ampeWmpuYlJSVlI+JhoV6dXNwcnF1eH2Dh46K
-ioqHgHlwbGlmZGNjYmBgYF1fYmBjZmVlZGdlZWdjYmBiYGRkZGRlaGdoZmdnaGhl
-Z2VjYl9kYmJjY2BhY19hYmFiYV5jYGFjYWBhYWJjY2JiYWJkY2RmZWNnZmVjZGRl
-ZGJhY2RnZmdmZWVmZ2ZmZWNlZmZqZWJkY2djZWRkYmJiYmNfXV5jZWJjYl9gYWBh
-YGFgXl9kYmJiX11cX2FgX19dW1pbX15dXlxcXl5dXF5eXGBgX11dXV1dXF9fYWBd
-XF1ZWlpcXFxbWVtcXl9dXFleX15eXVdXV1dYW1xZWFlYVlZVVFVXWFpXWVlZWVlb
-XFtdXFtbXVpWWVtcX15cWF1eWlldXl9fYVxbWl5cXFxbXF5dXV5eX11cW1xeYF1Z
-W1xcXltdXVxhXl5cW2BbXFpaXl5cWltfYF1cXFxcXVxaW1xcWVtbWlpdWlpcXV1e
-XV9bW1laWVdVWllbXltcWFpaXlxaXFtdXVxcWlxcXFtZXV5bXV1cXF1fXV9bW11h
-YFxbWVtaXF9fXV9cW11gXFxfYWBdXV9fXmBeX2FfXl9eX15dXVpaXl9dX2BgXFpb
-XFxcXl9dXF5dXl5eXV1fX19hYmBfYF9hYGBgYWBiXl5fXmFgYWFhYGBeXmRgYV9f
-Y2RiYGBdXl9iYWNiYmFeXWBgYGFgYF9fYWFlY2FfYGFhYWBgXl5eYGJiYmJiY2Je
-XmBgX2BfYF1fX15hX19jZGJiZWFfYGFiYWNgZGRdYWJhY2RkX2FhYmJhYmFiYmRj
-Y2dlZWRmaWVmZmhnaGZkZWdoaWlpamVjZWJkZWNkZGNjYmZmZmRrirjM19/k5+rr
-7e7v8HyAe3t7e318fHt8e3p9eXh1d3l7enp8e3p9fXt8e3l2enp4eHd3eHV0dHh5
-fXp2dnl6fHx+eXt+enN1eHl5eXx9fXl6e3x/enl7fIB9fHx7fH+BfX+Bf32AfX1/
-gIOAf3+Af397fX2BgoGBf3x+goB/fn1+fnx+fn59eHx+hIJ9fHx4d3h5d3h1dHZ1
-cXJ2c3R5dnRxcXR1eHV1dXFydXd5fHx9fHh6eXh5e3p6eXl6eXp8e3p8fHx7fXZz
-eHp6fnt7enl6e313eX58eHp7e3t6fHt4eHp6e3p2eHZ2dXV3fHp4eHp6fH57e3p1
-eXt7dnh7e3d0d3h5eXV2d3Z7dnh2eHh+e3d4enp6enp8eHNxc3NzcnFzc3NwdHJv
-cnJxb25wbXFzcW5ucHNybWhlYl9cWFdaWVpeYGFdXVpYV1dYV1VSVFdXVFZYW2Fb
-XVhaW11gZmVhXFtWVVNSUFBVVldXVFRVWV1ZW1RUVldWVVdaXVxaXVxaVVdaYGRn
-Z2dna2lnYVpZWl5gXllYW1lZWF5iZWdtbmtraWpoZmVlaGdlYGJjYmNjaGtwdm1l
-Yl1dYWZxdXZwamVmanR2d3l7fX6Cf4OCgYWEhIWDgIKHjI+OjIN9fHhycnFzdHJ0
-eXt+f4OEg4SHh4WCgH+ChouMjY+PkI+Pj4+Tl5yhpaaoqqivrbKvqKOgoJ+fnZeV
-lpaUlpiWlZOMiIR7dm9oanR1eXx/hISKjIuEe3RsbGdmZmRjX19eYmFfYWVpaGNm
-ZGhramVlYWFhYmNkYl9lZ2hlYmRlZmdlZGVkZGFfX2BjY2RfX2JmZF5iZGNhYmFi
-YmBiXmJgYV9fYWJkY2JkY2NlZGVnZWVkYmZmZWZmaGdnZ2ZjY2RlZGNna2tpZWZk
-ZWZiYGFgX2NeXmFgYmJiYGFgX2NgYWBeYGBgYGBiYF5eYmBdW1xdW1xfXVxdXVxe
-XmJfX2FdXmBfXV5cW1xeXl1cW1tcXWRfWlxcW1ldWlxZXV1cW2BfYV9hX1xaW1ld
-WVtZWFlYV1taVlhZV1ZWWldXW1lkXlxdX15cWVtcW1ZXV1xhXV9aXV5cXFxjX11b
-XVlYWFlbXFxcXWFfXV1eXVtaW1hdX1tcWlpdXFtdXVxcXF5cXF5cW1pfXF5fW1xe
-XV1dXVxcWlpcXVtaW11bW1taWl1dW1taWVxbWltbWFhZWlpbXFpaV1RZXGBdX1tc
-Xl1bX2BeW1lYXFtaWltbXmBeWlpbXF9fWVpaW1xbW1peW1xdXF1bW1teXFtbXF1e
-YGBjY2NfYWFeXFpcWl5eX15fXV9dW1teXVpdXlxeX2BgYV5eYF5fYWBhYWBiYl9h
-X11eYWJgX15iYGFgYGFgYWViX15eW11fXGBgYF9fXl9fYGFfXl5fX2BhX2BhYl5e
-YWJiY2FgX2BhYWFfYV9iZF9eXl1gYWBgYl5eYmJfXl9gX2BfX19gY2NjX19hX2Bi
-ZGNiYmBeYGJkZmJjY2RkY2NiYWJiZWdkZGRlZWVkbGdpZ2ZnY2RmZGVlaWloZWRl
-ZWhmYV5iYWNlZ2doZ2iEs8zW3+Tn6ezt7u/veH18fX5+ent9e3p5eXp9f3h3eHx6
-e3l7en18eXx7enp5eHV1dnR1dHd5eHx5fHt4ent5eHh0dnp5d3Z5fHt6e3t+fnx6
-ent6e3l7fIB7e3t/g4OAgH99e3t9fHx+f4CBgH98fHx6e31+fX+BfHp+fX5+fnuA
-f3x9e3x8e36AfHp6fHt5d3R1eHd2dnZ2dnJ0cnN0cXJzc3Z2dXZ2dnVxc3Z3eHl7
-fXx7eXt8eHl4eHZ2en5/e3Z5fnt7enh7f397e3x5fHp3eHZ1d3d6eHl6enp5enp3
-eXh6eHd5dnhydnd6eXp2d3l6enp4eXp2dnl8eXd5d3d3d3V3dXV2dnp7d3d1dXZ4
-eXl4eXl4eXl4dnZ0cnJzcnJ0cnRxb3FzdHJwb25wcnNwb3Byb25pamdiXV1dWVdX
-V1tdX1tZV1RXWVhYVlRWWFZZWltcXFhXWFlfYWJfY2FcWVRUUlBPU1ZWVlVSVldV
-UlBUVFdUUlJYWV1eY2JgXFhTV19kaWhoaGFhX1pYW1hbXF1cXVpaWmJnZWVqcXF1
-fXx3dXV2dnZ1bmpsbm9xbm5taWVgWlZXWVlcZW95dm5oZ2hub3F1dXZ6f3+Ae3l5
-gIKCg4WLjI2MioeBend1dXZ0gnp4e318eoSHiouMjpCRkpOTk4+LioqMkZKSkIyJ
-jZCVmJ2io6SoqKysrqyopaKhn56em5WQj4yOkJKWlY6Gg4OAd3NvdHJyc3N1f4iM
-joZ/e3pwamVlY2JiX2BiYWJmZWVjZGBhZWZiY2VhYWNhX2FhZmZmYWNmZmZmYmRk
-Z2poZGBgY2FgYmBfYGBmZ2dkYmRgYl5fYmFlZWNiZWNkYWVmZWZmZWRnY2NkZGdn
-aGZlZ2dramVkZmZmZmVkYWRoamZkZ2diY2NkZGJfX19iYWRmYWJlYWBhYF9eYGNh
-YmBhYV9fW1tdW1tcXV9eX15fXVxeXV1fXl1dWVpiYmBjXV1fXlpbWlpaXFtaWFpc
-XlxbWVlaWlxgXFxfXl9kX1xeXV5cXFxZWFdYWltaXFhYWFtZWFdaXFtYWVpbW1tb
-XV5eX19hXlxaXF1fYlpaXF5cWFtdX15cX1lZXV1hXl5dXV1eW11eXVxbW1xbXFxa
-Wl5eXlxZWllbW11bXVxbWltfXlxdXl9gYV9bW11dXV1cX2FbXVxbWlxcWllaW1xb
-XFteXVxcWVhZWFxeWllZWVhaXlxcXVteXlpcYV1YW1xbWVdYV1VbXV9gXltdWFpd
-XV1eXlpbXl5dXFtaWllcW1tcW1taXF5cXV5eXVtbWlxeXVxbWlpaWl9fXVteXl5e
-YV1eYF1fX2BhYmFhX2FhYmFgX2BkZGNhX19iYGJgYl9gX19eXmFgW1heXl1bYF9d
-YGJhZmFhXmBlZmJhYWFfYmNkYWBhX2BhYmNgYGJfXV9jYmBfYF9gYWFfXl9hX2Nj
-YGVjX15cXV1cXl1fYF9gYmJhX19eXV5dYGJiYV9fXmJlY2RjZWRjZGJkYmNjZGRh
-Y2loZmlpZmZlZ2hnamVkZWdoZGRoaGhrZmZnZWJjYGFkZWZkaIixytbf5Ojq7O7v
-7+96eXl6fH17fH96eHp7eHp5d3h3fHp5e3l5enx7fXx6fHx2c3Z0dnZ1eXl1eHh5
-dnd4ent8d3NxdHV2dHV5e3h2dXh6dXl6ent8eXd6fX18fH18eXt5eXx7eXx+f4GC
-f36AgX59e3x8fHp8fXyAfXp8fHx8fIF/fX19f39/gH1+e3x5eHp7fHx4eXd1dnV2
-dHNzdHZzcnV0dXZ2d3d4dXFyc3Z6e3p4eX17enp1d3h3end3eXd2end6eXx6fHl7
-e3t4dHd2d3p2dHZ1dXd4eXp5eXl6eHh4eHh2dXZ4eHZ3eXh4dnd3eXt8e3t8e3l4
-eHx1dXl4eXh4dnRzdHl3d3Z2dnV0dXN1eX16fHp5eHh2dHRxcnJvbW5xcHBvbnFw
-b21xc3FxcW5rbG5sa2hmY15fXVtXWVdXWVlYWFVZXl9aWlhZWldYV15cWldVVVJW
-Wl1eXlxZVVVUWFhXVlZbX1pTU1BTU09OTVBTWFZXWFleYWFiYFxeXV5iZWtuamFg
-X11eXF9dWltbWldXWl9naGZmbHV4fX+Cf399f4B/gX96eXp6enlxaF9dXFlZVlle
-ZGdtdHh2c25xc3Ryb3B0dHl8fHZyc3V6en2CiImHgYB+fnp2cnh8gH9/fn6CgoaH
-iYiIi4uMj42MkJWYlJiYmZmUlpWXk5CQkpSVlZSYmJ2mo6WsrqupqqOhnZ2gnZSP
-jo2MjIyNjIiFh4V/f351b25wbmx1gImMioN+eXJqZGVjYF9gYmRlZGdiZmNiZGNn
-ZWNhYF9fXmFiYmZoZGJiYmNlZWhmY2dpZmVkZmdjZWJiZWJeYGJkZGNhXlxeYmRi
-ZWRiY2VmZGRhYWRmZmVlZGRkYmRiYGFkY2RoaWhoaGdmZWVmZ2RjZ2dmZmVnZWdm
-Y2FgY2FhYmFjY2RhXWBiY2NiXl5hYWBfX11gXWFkY2NjYGJgYmJjYl5eXV9hXVxf
-YGFeWVtfYmFhXl1eXl1cXVxeXVtcW11eXV5cXl1eX2FcW15cXF5cXF1fX2BdWlpc
-W1pZWVZUWV1bXFxeXVpaWFpbWVZbW1xcXF9hXV9eWlpcX1xdXlpdYFxeWltcXV1c
-YFlbWVpbX15dXFpcW11cXVpbWVhdXFxZWlxZW15dX15cXVtaW15cW15fW1lZXmBg
-X2BhYF5cXl1eXF1cXV1dXFxbWlxbW1xdXFxZWlxcW1xcXVtcWlxbW1lcXVxeXF1d
-X15bXFtbWldaXFtbXVteXFxeW1pdXl5eW19fW1ZYXF5cXVhYWV1bW11cW1tYWV9d
-X11cXF5fX1xdWVlaWVpdYF9gYmBdW15eXl9jYFxgYGBgXmRiX19fXl9gYl9iYmJh
-YWNjZmJiYl9fXVxeX2BgY15dYGNgXV1eXl1gYWVjXl5eZGhmYmFiYmViYl5fYGBf
-X15eX19gYGBfX15fXmFjY2VjYWFkYWFgX19hXl9cXV5eYF1gYV9gYWFgXmFgXWBk
-YmFlY2RkYGFhYmRjZGNjZGRjYmJjYmRjZmloamhoaGdmaGlnZ2ZmZGRkZWVlZWdq
-amZkZmZkZmNkZWJoirDK1t7i5+nr7e7v73h7fnh5fX15e3t5eXt6eXd3eHl5end3
-e3d5e3x7eHh3dnd2dnZ0dXZzdnd0dnt7enh3dXV2eHNydHd3eHl4d3d5eXp2eHV5
-eXl4eHp7fn58fXp7fXt5en9+f319e39+f36AgX9/fX18fX19fnt8e3x6eXp5eXp6
-fH58fH18f3x9f4B/e3l2dnd4eHZydHZzdXN0dXl3dXJ1dnd2d3R5eXV0d3Z5ent5
-eXt9ent6fnt/enl6eHh7e3Vydnh5enp4eXp8e3Z3dXV1dXl4dnZ6eXp5eXt5d3d3
-d3Z5ent3dXV6fHt6eHl5eXh8fXx7e3l7eHF0dHd4dXZ3d3R1eHd6eXZ2d3Z2d3h5
-eHd5d3d0c3RzdHJycm9vcHFzcW5qamxucG9wcG9ubmttbGlmZGBfXltZWllYXF1a
-V1dYWV1dXFxXWllaWltYWVxaV1JRV15iX1xZWFVXWltaWFdWWGBYVVRRT1JOTVBT
-UlBOUVRYW1paXV9gW1paX2Rpa2JfXWBjYWFkYV9fYFhWWl9iZWVoaW1xeXt9goOE
-hI6Hg4F/fHx6e3VyaWFYV1pdYV1eYGVoa25vcHZydHR5dG9tb25vcHN1dHR1d3Z6
-f3+BfHh4e3p4eHd3eX5+goGAfX2AhYWGiIuKi4mEh4mHi5KPl5ydm5idoKKknpuS
-j46PjpGSkJKcoqWqrKqmoqKhn5mZmJSXk5CMiIaEiIiNjIiAfnx1b2xqZ2p4foOF
-h4N8dG5sZWJhYGBgYmRlZGJhYWJjYl9iYWFjY2JgYWVmZ2ViYGVlZGRlY2VkYmNk
-ZGZnZWZmZ2NjYGBiZWRhXmFnZWFhYWNjXV9hYWRlZGFgYmJlZWZlZmZnZWVjZmVk
-ZWFkYmVlZWZlZGVlZWVlY2NlZWRmaWNjYWFjZGBiYmNhYGBeYWFhYmBgXl9eXWBi
-X2FgYmVkZGJdX2BiYmFjZWNhYGJeW19gXl1dXV5dXVxdXV1dXVxcWVlbXV9cXFtd
-X19cXF9gX15bXVxaWFhbXVtbXVtZXF5aWVlaVldYXF5eYF1ZWFlYV1laWltbW11d
-XF1aWVtbWVxeWVhfXFtdXV9bWFdaXFxeYV1bXl9cXl1cW1tdXV1gXVtdXFteW11c
-WVtaX15fW1xcXV1eXF1fXV9eXF1dX2BfXl1eXl1fXV1eXl5fX2BfXl1cXF9bXV1e
-W1xcX11bW1xeXlxaWl1cYVxaW1paXV5eWVxcXl5aW1pbXFxfXVxaW1paWlxbW1xd
-WVxcXlxgXF5gX1tYWltbXV9bW1laXV5eXF1eXV5hXltbXVtZWlteXVxeYF1gYWFe
-WltdWltcX2FgYGBjZGRhY2NhYWJiY2BeX2JjYV5eXl1hYWBhYGNiYF1bX1tbW1xd
-YGJhY2JlY2BgY2BhYV9fYGJfYmJiX19gXV9hYWFiYWBhYF5gX2JgYGBfXl5dXlxd
-YGVjYV9dX15fXl9gXV1fYWNkXl5dX2FjZmdlZGRhYWBiY2JiZGRjZGRjYGJjZGNk
-ZmhoamhnY2JkZ2pnaGdmZGVmZWNjZGZnaWppZ2VlY2VgYWWIssrV3eLn6+vt7u/v
-d3l5e3l4eXd5eHh5ent7end1ent5eHh2e3p4enh8dnl1eXh1d3t4dnVzeHh3eXl7
-dnZ0c3Z5dXZ2d3Z1dHV4d3V1d3Z3ent8d3Z3enp7ent6fH1+fn9+en19end6e3t9
-e3+AgoB+f4KBgH9+fXx7e3t5e3h5enp4eXt6enl6enl7ent5d3VvdXV2cnJydHJu
-bnJzc3VvcXV2dnN2d3R3eXV0eXp6enh3dnh4eXp7eXx+eXp4eXh+end3dHZ6enl1
-eXx7eHh2eHZ2fHp3dnZ3dXV1d3Z2eXh3eHl6eXhydnd7e3l2eHh4dXZ2d3Z2eHh1
-e3l2c3JxcHN2eHZ2d3Z0dHV4d3l2eHd2cnR0dnZzc3NzdHJxcG9ub3JwbG1sa2xv
-bWprbG5wbWxpZmNgX1pcWVhZWlleXFxZWFdaXl9gXV1aWFlcXlxaVFRUV1xdXlpa
-VVZRV11cWVpbWFldW1hWWFdZU1JWUU9MT09TV1RVWF5hYlxaW15kZmJdXF5maWtr
-a2VfX2BdX15jYmFobm1ydXZ4e317fXp9f32AgX14dnt3c2dgXmBiYl9eWl5eXGNs
-bXB2e393dnNtbXF1dW5pbnZ7fX+AfHx9eXh7end3d3d3dnZ3eX1/foOAe32Ah4eO
-k5GTi4qIh4qQk5OSk5eVlZmfpamspZ2XlJSUlJeXlZibnaGlqamkoqCemJSWmZeU
-lpWQi4SCg4aPi4eCe3lyamliZXB5goeJh4J7cmtlY2JjZWVjYmFjZGNkY2JiYmFj
-ZGZkY2dkYmZlZWZoZ2hmZmNkYl9hZmhnZmVjZGRnZmNkYWNkY2VlZGZlZWNhY2Vk
-ZmNjY2FlaGpnaGZoamllZ2tmZWRjZGdmY2NjX2JhY2JjZWVmZ2NiYmJhZWNkYWNm
-ZmJkYmNiYWBfYWVgYmViYl9fXV9hYWVhYGJiZGNgXV9iYWFkZ2djY15dYGRgXV1e
-YGJfXWBhXVxfX15eXV1dXFpbW1pbWl9gW1haW1xeXlxZW1xcXVlaXFxeXV1bWVhY
-VllcWltZXFlZW1lXVldZWFtaWVlaWllbXGBdXF1dXVteXF9gXVxbXl1dXFlYWFpc
-X11dX1pbXl5fXFhZWFxeX15cWllbXV1dXltcXFxeXV5gXFxdX1xbXmJgXV5eXl5a
-W1xcXV5fY15hYF9cXV1dW1taXF1eXVtbWl1cXVpYW1tcXF5eX1xdXV1bXVtaXF1b
-WVtdXl5bV1ZaXV5eXlpaWltcWFlbW1pYWFtfX2BeXl1dXl1cXFtbW1xaWltYWVpb
-W11eXF9eXGBhXF5cWllbW1peXl5eX15fYF9eWlpbX2FhYWFfYGJfYGFkYV1eXl9i
-YGJiYFpdX2FhY2VjY2FhYF9hYV9gYF1fZGFiZGRjZGFhY2FhYmFfX2RjZmVgYV9g
-X19gYF9fYF9jX19gX2BgYWBcXGFfYWJiYl9gY19gX15fYGBhXl1fYGBcXmBhX19h
-YWNjYWFgYGFhYmFgYWNjYmVlY2NkZWZlZWZmZmZlZGZnaWhpamhmZmZlZGVlZ2Vk
-YmJhYmJgYGJhYoC0ydPc4+Xo6+3u7vB1d318fHp1d3x6eXh8e3l4eXh3dXR0d3Z4
-dnh5dnt2cnV6eHd2eHl4eXd4eXp6enl1dnR4d3p3c3Nyc3R1d3R2dXN1eXd4d3d5
-eHp6enV3eXp7fX6DgX59e3h2ent8fHp8fHx+foB9e319fXl3eHt5eXl6fH18e3t3
-eXl4eHh1eHh3c3N1d3p1dHFzd3RzdHJzcXBxc3lzcnV3dHR0c3V1cnJzdXl5end2
-dHV5fXd3eXh7fHx4d3d4dnF1dXR1dnV2dnR1eHh6e3l4d3d4eHV2dnR2enZ1dnh1
-cnd3eXt1d3d5enp4dXd4dHV4dXZ1c3R4dnZ1dXNzdnt6eXh3d3Z5dHR4eXd4dnRz
-dHV1c3Nzc3J0dXVzcXBwcW5ub3Jwbm9tcGxqbW5uam1pZmFdXltZVlhYWltfXVhY
-WVhcXl5eXllYWl9iX11aU1ZaWFlYVVJSU1lhYGFdX1xcWVVVWVxfZWFZV1BRSU5O
-UFROTlJWWVpZV1phYmNgXVpdX2RqbXFrYVtdXV5fX15kaWxsbnJwcnR0dnl9ent7
-fHx8eHd6e25lY2FjZWFZUVFVV1xZXGNqcHV5f397eHR1dXhzcXJwbHR2e3x9e3d0
-eH16eXd4dHh8eHd3eXh7e3t7fYKDhoySlZGSk5aZmJSUlpOSkpOTk5adpKakop+e
-np2gn56bnJqbm6OrqaOin5eSjZGUl5eVk4+Mh4SCgIGEiYyIgXttYmBjbXmEjI2L
-iHxybGdhYWRjYV5fX2JjYF5iZWRkZmRjZWBjZGNmY2RmZGVnaGhlZWNjYWNfYmdm
-ZWZiY2VmZ2RjY2VnZmZjY2VkZGZoZ2RlZWRjY2VmZ2VjZWdnZmZlZGdlZmZjZmRk
-ZGJjZGRjY2NjZmhlYmJiZWZjYWFiYWNiYWBhYl9iZWNiY2NhYWBfX19jYV9hYWRk
-ZmdjZGRiXV5fY2VjY2JhYWFfYmBgXV9gX19gYWJfYWJgYF5eX11aXl5gX15gXl5e
-XVtaXFxbWVlWWFhZWVhbYV1dWlVWWFhYWFtbW1xbXFpcYF5bWVpaWVpdWVlYWV1c
-XV1eYF1dY2BiY2VfXFxcXVxYWl5eXVteXl1eWlpdW11eXFtcXFhYW1pcW1paW11g
-XVxZWVtdXmBfX2BeX19cX2FgXV1bXV1dW1xdXFxdXlxeWllZW1tZWlxbWlxfX1pb
-XV1dW11dXVpZWVxbXFtcXl1bWl1cWlteXV9gYGJdYV1aXFpdXFtbWVlYWFhZXV5Z
-WltcWVxdXl9gX1xaWlxbXFxfXFlZVldaW1xbXF5kYmBeXV1dXV5dXltbXF9gY2Jh
-X19dXF1eXl5iYmJgYmRgYmVjYGBjYWFkYV9eYWJfX2RkYl9hYV9dYF5cXmBhZWNh
-YmNkY2RhYWJkX2JiZGBfYmFjYWNhYmFgYmFhX2BeXl9hYF9dXl5fXl1fYF9iYFxg
-ZGFjYWJhYWFfYF9eXWFfXl9fX19fYmVkY2BfX2BhYWNjYmBhY2RkZmZkYWJjZWRl
-ZWJiZmVmaGlqaWhnY2VjZmloZWNkYmRlYWFfYWFhZmNoe6/J1N3i5ujr7Ozu7nR5
-eHl7end2d3p3dnl2dXd3eHd2dXd4d3Z6d3R5dnd0dHx6enl1dHZ4eXl4eXd4c3V0
-dnd3eXl4dXFycnJwcnRycnFzdHV3e3p8e3l5enp4eXx9fXx6e317eHZ7en18fXx/
-fHx6e3p5e3x6d3l4e3x6e3t9fX98enh5eXp6eHZ4eHd0dXBxb3ZycXJwcm9vbnFx
-cW9weXVyc3Nwb3Nzc3Z4dHV1dnZ4d3R1dXZ4eXd5eXl5d3N1dHN0dXR0dXh2dnVz
-c3N3dnp4dXV3d3Z3dXh7d3V0cnFxdHJyc3Z4d3d3d3p7enh2d3h5d3Z2dXN0dXd4
-dXZ1dHNzdnh2dnd3eHZwdHZ4dXZzdXV0dHV4d3dzcXF2cnFyb3BwcXFtbW1tbW9u
-bWloamlpaWhlYl9aV1dYW1xbVlhWVVVYWV5eXV1cXFtcXl1aW1xaWlpZV1NSU1Vb
-YV9dWVxhYWNaV1tfYGRnYlZSVE9KTE5PTk1NT1NYV1pcWlxdW1tgYmdobnV1cmde
-XF9gYmRhZGZnZ2ZramtqbHF2dXd2e315eHZ1dW5jXlxaWVVWVlJMTlBTVFhdYmVk
-aG58hIJ9eXt4eXZ1eHZ3dHZzcHNycXN1c35+eXh1d3x5gHt6ent7e35/gIOIiIyO
-j5SYnqehnpybnZqZmp+bnp6eo6aioaGgoqWhoJybmJuenJycoJ+im5OSkpGLjJCP
-jIyIh4R/fIGGjImGgHVtZWRqeIOKjIyKhntzbGZjYmVlYmBfY2RiYGBgXl9iY2Nk
-ZmVkZ2hmZGRjY2RlZWJkZWJjYGBlZ2RnZGRjYWFhY2NgYGBlYmZlZGJkaWpkY2Nj
-ZWloYmBhYWdpaWZlZWVkZmlpZWJhZWRkZWZnZmVnZGVkZGFgY2RlZGNiYV9gYWJi
-YGNkYmJgZGRiYmBgYl1eXmNgYWNiYGBhYmdmZ2RhXl1fYV1gX15gYF9fWl5gX19g
-XF5iY2RjYmNgXFtcW1xeX2BeX15bXFxgXVpaWFxdXV1bWFpYVllbXFpaWFpaWVlZ
-WlpXWFlZWFlbW1xcXVxcXFtXWF5cW15dXl5dW1tbWlteXVxfXV5eXV5dXmBhYVta
-XFxeWVpaW1paW1xaV1daXVxbXFtcXFxfXl1bXVxcX2BgXV9eXFtbX2FcXFtdX2Bf
-X2BfXF1eXF5bW1taXVtdXVtdXFpeXFlbYF5bXF5bWlpZXF9cWlxcXF5eXVxcW1tc
-Xl5fYF5cXmBcW1pYWFhYV1pZWV1cXV1bXVxbW1tcYV9dXF9bXFpbWl1eXFhZWVxb
-W15fXF9jYV1cX19dYGBfYF5fYF5gYF9eXV9dWlpeYF9hYWBhYWBgX2FiYGNjYGBh
-Xl9lYWRhYV9iYWFgYWJgX15hX2FlYmRiYF9gY2BhYmFgYGBhY2NeXl1fYWFhXl5f
-X19hYmFhYWJhXl5fYV5fYF1eX15eXmBiZGFjYWRkZGRfXmBiYGRlZWJjYGBfYF9g
-YmFfYWFkZmRjY2RkZGNlZGRlZWZmZ2RkZGZlaGVlZ2ZmZ2ViY2RkZ2hlZWVlYmFf
-YWJiZWNkZXCIscnW3eLn6Ors7O/ueHh7end3d3t4dnh5d3R2enl4dXJzdnl7dXd3
-dnZ2eXp2d3d3dnVwcnR3d3R1dnV1c3d5d3l1d3d1fHdwcHBzc3VwcXFycnN4eXl2
-cnR6enp5fXx3d3l8eHl5f3d3eHt6eXp8fHt5eXl7eHZ6fnt8fXp4eHl+fH16fHx5
-dnh3c3d0dXRzcnZwbXFycXNydHZzeHFyc3V1dHFydXJ1c3VydXR1dXN1c3Rxdnt4
-dnZ2dHRyc3Fwc3R0dHRyd3l5eXh0cnFydnZ3dXV0c3F0dHZ0dHV1cnBucHRzcnR2
-d3d2eXh1dnZ2c3Ryc3Z2dHR0dXVydHRxcXN2dHN2dXR0dHZ0dHNxcXF0dHV2dnF0
-c3N0dXNwb29ubGtrbXFubGxucG1qbG5tcG1rcGdmZGViX1lZWVtdXVhXVlRSUlda
-WVtbWltcW15fXVtaXFxYV1hXVFBVVltdXVlYX19cVltdY2RkX1pVTk9RS0xMTE1N
-TVFVWVlbWFlUVlVZXmRna254fG9mYV1eYmVpZ2RiXl5eXV9eYmVqbW5ubGxzeXVz
-b2tkYFpXVlVVVVJTVFZWWFZWXGBcXV5iZnN+gYF/e3h2dXd4entzcG5vbG1wb29y
-foF2eHp5d3Z7fHl9e318gYOEiIuKi42SlZidnaGnpaamo6Kmp6eho6KgoKSnpqan
-paChnp2dm52ZlpeZmZmWl5SSkIyMiYWGhIOGhoaDf3x8gn9/enZxZ3F3f4KKjo6M
-hnpzamRiY2JjY2NkZ2hnZWZiYWNlZmNma2ZjYmNmZWNiY19jZ2JjZWZoY2RkZWRl
-Y2JiYmRiZWNhYmJkZGVlZGVmZGRgYWNlY2RjYGNmZmNjZWVkZGRpaGhmZ2dnZ2Vn
-ZWhqZmdmaGRjYmBkZWRlZWNhX2NkYmVkY2JiYmFhYWJiYmBhYWJiYmNkY2FhYWFg
-XmFiY2BeXWNeXmJgYGFgX19bW15eX1tcXFxdYGJhX19fX2FgX11eXVpZWFpbXF5d
-XFpcYV5cX19eXFlbW2BeWV1fXVxbWlRaWVZXV1haW1taW1xbXFtXVlVZWl1eXV9f
-XV5cWVtjX15bXF1eX2BeYF9fYGFdXFpbXFxcVlpbXl5bWltcXF1cXV1dXVxYWlxh
-X15cX11dYWFfXF9fXlxfXl5bXl1gXmBiYF9aXFpdXFteXFpaXFpcXFxcW1teXltd
-Xl1eW1dYWVpbXFxdYGBgYF5aWl5fXltaXV1dXVtXXF1bXF1cWVZZWlhZWVpcWlpb
-W1tbXFpaXF9cX11aXF1fXV1eXl1cWVxeXV1fYF5cXVtbWlxeYGBhXl9dXV5gX2Bf
-YF9fXWJiYF5gYmBfYWJhYWJhYWFfYGNmYmJjZGZjYF9dXF5hX2BiYWFgX2JiY2Ji
-YmBfY2JlYmRgYGBfYmBeXWBhX15fY2FiX2FiZF9fX2FjYmFmZGJlX15fX2FhXWFi
-YmBgYmNiYWJhXmBjXmBiZmNiYWFfX2BiY2FgYWJhZGVhYWNjZGNiZmlpaGZkY2Nl
-YmNnZmZnaGhmaGpnZmVmZmdmZmVlZmZhZWJhYWNkdKe2ydbe4ubo6uvs7+96eXZ1
-c3VzdHNydHRzc3JwcXV3cnFxd3ZzcXV4dnR3ent4end2cnBzc3Rzd3d0c3NydHl3
-d3p5dHdyb3JxcnJ0dXV2dXBwb3N3eXl2dnp7fX13dnVzeHd5enV8dnV3d3h3d3d4
-eXp6ent7e4B8eHl5dXV2enV2d3l9e3l6eXd3dnJzcXNyd3dycXFxcW9wcXJ2dnV3
-dW5sa25xeXh2cW5udHZ0cnR1cnR1dnt0dHNxcnFzdHNxcnN0cHR0eXdzcHNzdHJ2
-d3V0dXh2d3VxdHVzb3BwcW91dnd0d3d0c3V3d3N2dXV2d3NvdHh4dnh2dHVycHN4
-dXV0dnFydHVycXNzdXFvcXh2dXNydHV0c3FxcnBvb25saWtucW5vcGxrbGlpZmdp
-am1naGhnZGFhX1tdXltaW1pYV1NRWFpbWFlaXWBiX11bXFtcWldSUlBQU1leWldT
-WGFdWFhVWl9fXFhWUUtLSUhJT1FTT05RUVZXWVVYVVRZXGBjaHB0dHNrZmJmaGlp
-aWdiXVlaWFdXWFdaXGBlZ2ZnbHJwaWZmYF5fW1haW1RUVFRZXV9gX2BfXWRiY2Rr
-c32AgYB9eXV4eHh3c3Bqamxub3BxcHR7gHd5enx/fn58fHx6e32DiI2RlI+Rk5WZ
-lJOWmp2lpqirq66rqKajoqKjo6apqquhnZyeoJ2gn5mYlZKUlpWRlJSYlI6GhIWA
-gIaGiImEf3p2d3h9enp0cnR6gIiOjoyJf3ZvZmRhYmJhZGVlY2NnbGRjY2VpZWNj
-ZWNhYWJjZ2RiYmBjYmBiZmdmZWJlY2NhYWJjZGRlZGhjYmNiZmpoYV9iZGVgYGJk
-ZWdnZGVjYWNiY2hnZGZmaGlkZmZkZGhoZ2dlZGJjZWdnZ2VlZGVkZGZhX2BgYmVh
-X2FkZGJgX2BhYGBjY2NjZGZiX2FhYVxfYF5jY2FdXVxdYGBfYmNfYGReXV5eXV1e
-YV5cW15cW2BfXl5dXl9gXV1dW1pWXF1dXF1eW1xdXF5eW1pYW15bXVxcWl1bW1xa
-WlpaV1laXVxbW1xdW1laWFlWWl5aXFxcW1tcX15bX1xdW1xfXV1eYGBdXFxhY15b
-XF5cW1xeYF9dW11cXVxfX11bWl1eXFpdX15dXF1dXFtdYGFbXVxcXFtbXl1cXFpZ
-WFhaWlxZWVtcXVpbXF1eXVlaWVpZWltcW1xcXFpaWFtcW1xeXFtbXF1cXFxdXVpd
-XFtcWVVTWltbXl1cWl1aWlpXV1hZWlpbWltcXFxdXl1cX1taXFxbXV1dXVxcWlte
-Xl5bXF1eX11dWlpcYF9gXF1eXl9gYGNiYGBiYWBgYF9eYGJhXl5eX15eX11hX2Jg
-YmNjY2JjXl5eX19hYWNhYmBeX19hYmJfYmNfY2JkYmFhYWFhYl1fX2FiYWBdY2Vi
-ZGNjYF1cWlpfYGJhYmJjY2FiYGBhYGBeXV9eYWJhZGFhYWFgYmViYGFgYV5fY2Jl
-ZGZhYGJfZGVjYWNlaGlmZmVmZmRgY2FjZGZlZGZkZGdnZmViZWdnZWRlZGZmaGVj
-ZWRlY2NsobrK1t7j5+jr6+zt7nd1cnJxc3VzdnRzcnJycXVxdHR0cXFxcnN0c3V1
-dXF2eHZ0dXRxcXR1cnJyc3RxcHFyd3ZycXN2dnZxb2ptcHBxc3NzcnRycXJxd3V1
-eHl9enx5eXl2d3d3d3Z1dXh3c3R2dXR5enl7eHh9e3p4end3d3d3enR2dXZ7enp3
-dXdxcXNzcHJ0d3Rxb3Nwa2xubW5udHRwbXByb3FzdHBvbHBzbnBycXB0dnh3dHBx
-c3NzdXJyc3RzdHJxdHV1cnNydnl2dnNzcnJ0dHR2dXFycnNycXJ1dnh0cnd5dXN2
-cnF1dHR3d3ZydHVxdHVycXNzc25xc3R0c3dxcW9wcnFwcnF1dnR1dXd1cXF0c3Vz
-cXFwcXJubnN2bWpsa21saGlpaGxtbWlpa2ppaGVhX11ZWltcWlpZWFRRUlRTV1hb
-XV1gYV9fXlpXWVpZU09NTVFTU1VTV1dZWlVXV1xcWVZTTEZISUhFSU1QVFFQUVNZ
-WFZUUlVZWlpgZGVrcnRwaWNnb3FuaWhhX1tYWFpZU1NVVFhdZWhpa21xc25qZmFe
-XFxdXlxbV1hdWlpeZWhoaGdpa21qbHB0d3l9enx6fH19fHt5dW5ucXFxcHBycnFz
-eX+AgYCDg4GCf35/f4OJkJOTl5mZl5iYmJubnKGgoaOoqqijq6elpKKiqKuoop+k
-oqGgnZuenJaQkZiYlpaRkJSVjIeCgoB7foCBhISBfndzdXd7enh1d3p8hYqKjoqF
-fHZvaWZnZGNkYl9eX2x0ZGRhYWNkY2VlZmNiYGRjZWVhY2BfX2JmaWhmZmNkYWRl
-YWNkZmRoZWNfYGBiZWJkY15eYmNiZWJjZGNnZmZjYmJjZWlmZGVlZWRmZmZkZGZm
-ZGRkYWNlZmppaGVkZWNiYV9hYGRhY2BiY2NiYmVhX2JiYmNjYmRkYl1gYWNiX11d
-YmVlY2BdXF5eYGFfYWFiYGFcXF5eYF1cXV1eX19eXlxbW1xdXVdaXVxbWltcXlxd
-XFxcXFpaW1lYWVleXFxgYFtbW11dWllbX15bWllZW1pbWVlaV1haWFpaWVpaV11d
-X2BdXFtaW1pbXmBfXVpcXl1eXlxbXF1cXl9dWVxaXFxaW2BcXl5cXF1eX1tbXF5e
-YF9cXFpbXl9cXV5bWlxbXVxcXVxfXVpYWVhbWVpXWltYXl9fXFtYWFlZWVpXVVhZ
-WVpdX19jXFhZWFdaXV1WWFtaWlpeXFpaXVxfWVpcXFxbWmBfW1lYV1haWltbWllg
-X15gXl1bXF1cW1tdXVpZWVhYXV1cXV5fW1xcX11dXFxdXV1eXl5dWltbXWBhYmRk
-ZGJhX19gYV9fYGJfXVhPU19iYF5fXmJjYmFkZWFfYGBgX2BfYGBgZGJgYWFkYmFf
-X2JiYmNhX19fY2JgYGBfYV1iYWBfYWBgXlxhYWJeXl9gYF9hYGJiYV5dXl9fXl5f
-YWJeYGFhYWBjY2NjY2FgYWFfXlxgZGRkZWRiZGJgZWJiYmRlZWRjZGVkZGNkZmVl
-ZGNkZGNlZWVoa2hnZmdlZWRoaWlpaGViYWFgYWmcvczX3+Pm6evs7e3ueHV2dHFy
-c3V1dHJxcXNzcHFyb3V0dXZxdHRzdXN0dnJycnRybnFxcnRycHJvcnNtcXR0c3Ry
-cHN2d3Nxcm5tbnFzcnNydHNycnFwd3l1eHt7enh6eHh3dXZ0dHV6dXJ4dXd3d3h4
-dnd4enp7eHZ0d3R4dXV2d3d1dXd4dnZ0c3Fwb3N0cW9wcG5vcm9tbG9yb21ubm5u
-b3Z5dXBxdHB1dnJzdHZ2c3BwdHZ1cHR1dHZ4dHV0c3JwcXVxdXV1c3NzdXZ0b29y
-cXRycnB0cnRxcG9udXRwb3J0dHR3c3B1cm9vc3R0dHZxb3FubW9yb29wbnBzdnV0
-c3BxcHBzc3JucHB2dnl2c3JzdHRzc3NzcHRwbm9ub3Nxb21wbmtraWlqbG1taGho
-a2pnYmJfXl1dWVdYVlhWVFNVVldaXWJjY2BhYF9dW1pZV1FNS05QUE5PTlFRUVVY
-VlRWWlhRTUxIREREQUlKTk9PTFFXV1hYVlVVWFpdX2FmZ21vbHBzdHNvbmhlY1xZ
-V1dUUE9OT1BXZGZnZ2hqbG9vamdeW1tWWl5gX15gYmNcWl5mZmlsb3J3dnNydXdz
-dnN6fX+ChoB+d3VvcXBybWtsbG5oa2pxdnyBhIeKioqKiYmKiYqNj5GSlpiamZqd
-naGko6SlpaKjpqqzrKqqp6SioaKfnqGhpKGdnJaUlJWVkZGTlJGLi4yJhYOGg4R/
-eXt+gX19gXt2cnV1e3x6e3yDiIyQjoiEfXZpZGJmZ2RgZWVgXF5fXF5hYWJiYmFj
-ZmFkY2JhY2VkY2FgY2RlZmdmY2JiY2ZoY2ZkZGFiY2VkY2BfZWVmaGdmZWVjZGJh
-ZWdoZmVkZmZnZmZkYmBhYGNiY2JhY2ZjZGVlZ2loZmdkY2NlZ2NkY2ViY2JjYmRi
-X19fYGVmYWFiZGFfYmBhYF1eX2JjYF9gYF9iZGJgYF1gXmFfYGFhXl9iXV5gY2Bf
-X2FcWlpcXF5ZW1tZVVhYWVpaWlpeW1paWFhZW1xaWVZVVFxcW1hcWlxcXFtaWVhY
-WFlXW1ZWWlhXVldaWl9bWlZaWllfW15gYFxcXVpbXVlYWV5fXV1dW11cX11bWFta
-W1tcW1xYWlteXl5eXFtZWVlaXV5dXVxbWVtdXFxfX15cXV5eXV1bXWBiXl5eXVxa
-XFlZWltcV1tdYGBdXVxdXF1bWlxZWFhZWl5eYV5aXlxdW1hbW11aXV1bWl1bWVpa
-V1pgXFlXWllaW1haWlteXFtbXFtaXVxbW1tcXF1fXl1aWVtdX11dYF1aWVtcXl1a
-XFtaW1tcXVpaXFpaX2FgXVxcXF5eX2BgX15fXmBgXl9dX19aSENIW2FkY2FiYmRm
-ZGNjYWBfXl9gXl9gYV5fYGBfX2JfX2FgYF5fX15fYmJgY2BiYV9gYmBgX2JiXl9f
-YmBgYmRiYGBiYV9iY2JfXV5dXmFgY11fXV5eYWFiYGFjZWFeYGFgYF9dXmBjY2Jl
-ZGJiYGJiYWJhYmNlYmRlZWVkYmdnZWVkaWVlY2RmZWVpaGdmZ2VlZGVkaGhoZF9i
-ZmNjbaW8zdbf4+fp6+vs7e13e3hzdXJ0cnNub3N0cXFycm5tcHFzc3JycnN0eHNw
-cG1ucHBvb3FxcnZ3c3JucHFxc3FzdnNvcnFzd3J4dG9ycHBwb3JwdXRzc3J0eXZ2
-d3p3dnV1dnp2dXJzd3d5eHd3eXZ1d3h0dXJ1dXR8eHd1d3Vzc3V4d3Z0d3d3dHRy
-cnJvcXRycm9wbnBucG1ubHByb29xdXNwbnJycm9xcXNxdHV0cnJvcnNyc3J1c3N2
-c3N3d3JzdHN0cnBvbnB0cnR3d3Rxc3RycnFxb3NzdG5ubW1vc3Bycm9wcHFvbm1u
-cm9wdHd2dXFxb21ub3JycnFwdXd0c3R1dndycW1wcnBxcG9vcXFwc3dzcHFydHRw
-cXBvbXBrbHBvbWtoZmhrbWxtbG5samZpZmJgYWVhXlpZVldXWFdSU1VXXWFpbWpl
-Yl1aXFlZWVZTTU5RVVJRTk9TUlVYU1FSUFBOS0lDRkdGR0hFREdHTFBUVldYWFVT
-Vl1gYmBgZGlsa2twdHdwbGpmY1xYWl1VUE5MT09TV19kY2RjZmdubWZgXVRSVltd
-YmFgYGJjYV1gYGZoam52cnBzd3l6dHBuc3p+fn19enVzcG5sbWxmZWdraGlrbG1s
-cniAhIeMjI2KjpSVlI6LjZKRlJueoaGfoJ+ip6qqpKalp6isrK2uq6imo6GemJqe
-op2dm5WSkpSQkI6PiIaIhYKBg4eHh397e3l5enx6eHl3c3Z3eXt7foSLkJKMiYaB
-enJoZWVkZGJhZGFfYGBcX2FhYmFgYWJjZGNjYV9jZGVlY2BcY2VlZWZtZGZnZmZm
-Z2dkY2FiY2VkY2BiZmZpZWZmZ2ZjYWRkaGdpZ2VkY2RoZWVkZWJhY2NiZmVjZ2tm
-ZmdkaGZjZWRjZmRnZWVkYl5dYGNkZmFgYF9gYGFgYWRiYlxdXV5iY19dXV9hYV9e
-X2FkY2FeX2BeXV9gXWFgYF5hXl5hYmBgXVtbWVlbWlxcW1pbW1pbXFpaW1lZW1dX
-VltcXlxYWVxcWVxeWl1dW1paWVZXWFtaWlpXV1dYXFlaWVtdXV5bXVlbWltcX15d
-W15bWVZXWVlaXF1cW1taXFxcXVxfWllfX11eXl1bW1pfX11dXFxaWVpaXV1fWVtb
-WVtcW11bW1tZW1tZWVtdXl1eYV1cXl9dWVtaXV5eXl1dXF1cXVtcW1lbW1lXWFdY
-XFxdXVldWltcXVtcX11cXV1ZWVlaXF9eXVxeXl5aWlhXV1VYWltbWVtZWlxbWFhZ
-W1tfYF1eXmBdXVlbXV1gYF1WWFxaXF1cW1lbXVtbXmBfXV9gYl9dXV1eXV9dYV5e
-XmFhYV9hXlhbWkdARVJfYmRjY2VkYmRiYWFiZmFeX19gYF1dX2BeXVxfYF5dYF9g
-XmJhYWFiY2JhYmJiX2FdXlxdXV9fX15gX11fYmFhYGFiYF5fYV1bXl5gYmNiYV9f
-X15gYF5gYWBdXmFhYmBjY2JgYWBiYmNjYWRlZGFhZGJiY2RiZGZoaGZlZmRkaGVl
-ZWNmY2NmZ2ZnZ2ZlZWVkYWFpaWNlZWRiYWRrmLvM2N/j5+rq7O3t7XV2c3FzdHV1
-cnJtb3BwdHNzdHBwcHBxcnFxcG9xcXFycGxsb3J0dHJxcnNwcG9vbm1sbm9wb29u
-b3JwcXVyc3Bzc3Fxb2xxc3NydXV1dXd4d3V0dnZzdXRzc3h0cXV2eHh0dHR2dXNz
-dHJ1dXV2eXh1dnRzc3R4dnd2dHVzcnNycHJ1cnNwbnBybmxycXBwbmttbnF1eXRy
-bW1rbnFybm5zdXVwcnJvb29udHZ2dHRvbnN0c3FvcXF0dXFtbG5wdXFydnZ1dXd0
-dXJwc3JucXFxc3Byd3Rwb29tbW9ub25wb2xvcXRzc3JxbWtwcHRydHFycXNzdXd1
-d3N0cXBxcXNxcHBydHFvb25ub3Bwc3hzcW9tbW1ra25saGppbGtqa25qZmVpaGdj
-Y2JgXl9aVVBTWVhWU1BRVFheaWxtaWNgXlxZWVlXVE9QVVZYVlFRVVlYVlJOTklL
-S0lHQ0FBQ0NFRUZEQ0ZJTlNaWltXWV1fZWZmZ2NnaWhrbW1vZ2VlYl1hXFhgcVpM
-SktNVVtfYGVgYWNnbWpoZGFcXFtcYGFgXl5eZF9lZGhpaWpwc3BqaW1yd3Z3dHJ4
-e3p1c3Z2c3Bua2hmY2VoZWRkZ2VmaGZpbXR3foeLiYyQkpSWk5ORkZSVmZqfoaOk
-oqGipaqpqqmopqamqq6tramppaCcmZibnZyWlpKSkIqMiYaFhomHg4GCg4KDgHx2
-cnV6fXt3enRvcnV2d3Z8hIyUl5SSjYWAe3FmZGRjZWRlYWJfX2BiYWRhYWRiYmRl
-YF5iZmBjY2RkYmNhYmFhY2VkZGNjZ2dpZGRmZWRmY2JiYWBgZGdlZGZlaGViZ2ho
-Z2hjYmNkZGVlZ2dlY2JkYmVlZWViYmdjYWNiZmpjY2JlZ2JgY2ViYF5eYGFfX19g
-XV9gXWFiX19gXlpeXl9fXVpcXFxgYWBiZGBgXV5dX19fXVtfYF9fXl1eYF9fXF1d
-X19dX15cXVpaXV1cXV5gXl5cWVxZW1taW1tYWFpaWVdZW11dXGBcWlhXV1pYWFpa
-WVlZWl1cWVxaWFhcXltdW15fXl1bWlpeX15dXFlbXFxdW1xeXl5bWlpcXFtaXl5c
-X2NgXlxcXV5eX11bXl1dXFpaXV5dXV5bWVxeW1haW11aXVxcW1xbXV1bXFteWVxc
-WlxbWl9cW1lbXFxbWFtZWlpaWVpZVlhXWVtcWllZW1xaW1tdXVteYFxXW1pfYF1a
-WltfXl5bWVpdW1heW1tcXVlZW1tcXFpeXVxeXl5eYGBcW11dXGBgXF1cXFpbXVxZ
-XFpdXFxeYF5gXl9eXV5gYF9eXWBhYWJfYWFfXmFfYllKRkNOXWBkY2BiY2NjY19f
-YGRkZWNhZWFeXV5eX2FgYF5fYWBhYWFpbGNhYWBdXmFeYWJgYGBgXl5dYWFiY2Je
-YWBgX2JjXl5fXF5gYGBeYF5fX19hXl9hYGBgYF9fYGFeYGBhY2VkY2NiYl9fY2Nj
-Y2NjZGRjZGNjZWhnZWZnaWloaWhoaWdmZmVjZGVsaGdlY2RlY2NhYmVnamRmY2Ri
-YmyQu8zW3uPn6Ovt7O3ucHJwb21qbW9wcnJsb3Fvb2xucnVxcHByb2xsbmxvcnNx
-cmxzcXBzcnJwcG5wb25tbnBsbW1wcXR0c3ZzcXFxb29vcXN0cXBxc3V0c3N0dHNy
-cHF0dXN0dXd3eHR3dHRzdnN1dHN1dHNzcnFxc3V0d3VydXJxcHR3dXRycHFxcHBx
-c3JxcXBsamtra25wbHBwcXJxcHJybmxvc3Nubm5wb3FwbW9wcnJvbW5ydHNyc3Jw
-b3R0dXZ2c3FvcHFxbnF0dXZzcXN1dHRydnNtanB0cm9vcW5wcXJxbG1vcnR1cnJx
-bmxrbnRycXNzcnJzdHJxd3NwcHJzdXN0cG5tbm9zc3JxcXByc3BwcG5vbnJzcm9u
-bm9wbG1qbW9saWdoaGdoZW1saGdlY2NgYGFbWVRTWFhWV1dYVFZWWWBmaWtqaGVk
-YFxdWldVVFdaWFZWWl5eWVROSUlIRkhJS0hDQUE8P0RAQURFR0hLUldaXl9fY2Vk
-ZWJgY2VmaGxsZmFfW1hbXF1aVWBlVExMUFZbXF5hZGRiZGZoZ2doZ2RpbGZjYmRn
-Z19fYmVsbG1tcXNvbm1rbW9ucHd5fH19e3ZvbWxub2tlYV5iZWNgXF1cWVpcXmNn
-Z21yeICFioyOk5eXl5eXmJiZnJ+iqKitr6ynp6mrqqeqq6unqaqopailo6CZmJSR
-kI+Qk5aSi4J/fXx/g4SKiIWCg4OBfnt1c3l6eX15eHVydHRzdnh+ipWcmZKNiYJ9
-d29oZF9gYGFiY2VkY2VjYWNiYmVkX15eYWFgYWNjZmlnZGFgX2NjY2NmZWRkZGZn
-ZGVmZ2ZoYWVlY2NiY2RlZGFfYmVkZmNkZWVkY2ZmZWdmZmVjYmZmaGZkZGloZ2Fg
-YWFiZWZnZmZjY2BiY2hlYmBhYmBgZGFeXl5fYWRkYWFiY2RkZGBcXFxcYGBgZGFf
-XF1eXl9hYl9eYF1fX2JiX15dYWBfXFpbXF5hYWBfWlldYF5gXlxZWlteXWBeXVxa
-WlhWWVpXWllaXWBeWFhbVlhZWldWVVpcXGFdW1laW1taW1tfXV9dYWBcWlpZWllY
-XmNfW15cW11cW15hXl1aWVlbW1tcXF1bW1tdW1taW11aW19fXVteW1tdX19fXl1a
-W1xbW1taW11dX11dXFlYV1pZW1pZW1tcWlhaXF1cXFlZV1dZWFhaWllaWlxfW1tc
-WlpaV1haWlpZWVlaXVtdWltdXFlZW11bWVxaXF1eXl5bXGFeXF1aWVlbW11fXFxc
-WVxZWV1eY2FgYGNfXmBfW1pbXFtbXGFdW1pcXFxdWlldXFteXl1cYGNgYGJjYl9e
-YGJjYWFeTUtMWF5hZWJhX15gYWBgYGRkX2FlY2FhYGBeXF1eX2FgXmFiYWJiYl5e
-Y2RgW15dXlxeY2JfXWFjYmJfX2FkYWJhYmFgXl5fX2FjYF5gYF9iX11fXV1fXl1e
-X15dXWFiYV1dXmBfYmRhYWFiYmFjY2JjYmJkZ2dnY2BjZmhmYWNkZmdlZ2ZnaWNj
-ZmdnZ2dlZmRkZWZlY2JiY2RlaWZiYWRmc528zNbe4ubo6uzs7u1ubW5sb3Bub291
-c3FubHFyb3BwbnFua2xta2tsbmtucW9xbWtvbnByb2xucG5ubnBwc25rbm9ucXRw
-dHVzcG5ta21wb3Frbm9xcnVxcnJwb29xcXJzd3d3dXZ1eHp4eXJ0dnR0cXJzd3V0
-dHRxcXV3dHN0dnR0dHVxcXB0cHJydXNxcHFvbm5taGtucHRxbWttbm5va2pvbnF1
-cW9ubnJta25xcXJvcG5tbXJ1dHNxcHJxcnNydHV0cG5wcXNzcG9xdHJvcXFvcG9w
-b29xdHJzc3Bsa2xvcG9vbXBwcm5tcHBrbW9ucnNxcXF0cXBwcXJ1cm9xcW9ycW5t
-bmxtcXJzcXFycXBzb3Bxb2xwb3JvcHFta25ycnBtbW5tbGdmZ2lqamlmZGBiYF9d
-XFtZWFZUVlZYVlRTVldaXF9iaGxtb2xnZmdiYF5hYmNhZWtpYFZPSUdKSEtLSUlJ
-SEdFRkZDRURHSEhLTVJQVVlbW1tdYl9bXFxfXGBlZF9ZWVxaWVtZVllVVFJQUVZY
-WFpfYWBfX2NmZmNkY2ludHFpZGZpZmdlZGVnbnh+c3Btb3JzcXBwbnJ1dnt+f4CA
-enRraGlqaGFgX19gYl9cW1lWU1VXWF1hZGptcXd8goaMkpWWl5ycm52dnZ2hpKas
-rKqpp6Wmp6ivsKurrKmopKalo52alI6IhoeMkJCMi396eXl+foCCg4KDhYiHfnt7
-d3d6eXl6enlycHJzd32IkJiXlY2JhX54bmlmZGJhYGFiaGlnZGVjZGdjY2FjZmFh
-ZmZlZWVmZGVkZGJjYWNkZWZkZmNjZWRkY2VlZmZiZWhjZGVkZWViY2ViYmFkZ2Vj
-ZGhnZ2RlZWRkY2RnZmhmZW5oamhiY2NjYmVjYmRmZGViZmZiZWZjY2JeYGJjY2Rk
-YF9eXV5hYWBjYWJhX2BhXV9hXl1eXmJlYV9gYF1dW19fXV5gXl1fYWJeXl1dXVta
-X15gYV1cXFpeXl1eXVpcXl9fXl5cXVxXVlpZWlpbWVdZWlxdY1pbWlpZWFlWWlhb
-W1hZWlpYWVdaXFxfXlpaXF5cXV9dXFpdXl9cWVpcXF5fXFxcXl5cXVxdXV5eYF1d
-XF1cWl5bWlheX11bXF1cW1xcX15dXl1bWltbW1xbXl1dX1tbXFxcWVpeW19bW15h
-YVxgYl5dWlpZW15cWFpaWVpXWllcXFxcWlpYVldYWVhYWVlYWllZWVtZWlxcW15f
-XlxbWlteYl9dXFxaXFxeXFtbWlxfXl5cW1pYWV1fX2BfXV1aWltdXl1ZWFlbWl1c
-WltbWlxdXltdXl9gW1xeYGJfYmVjY19eX15fWE5IUVxeYGFiYmJjYV1eYF9gYGBf
-YGNkYWBhYWRhYmBfX2BgYWBfX15eYV9fXl9dYF5gYV5fX2BjYGBhYWBeX15eXV5f
-Yl5dXFxeYmNfX2BiYmBdXF1fXV9eX2BeXV5fX2FhYVxcXl9eYV9dYF9iYWNhYGBj
-YWFjYmNiY2VlZmdmZWdlY2JkZGdnZWNmamlnZ2dlZ2hlaGZnZ2NiY2NmZWRlZ2du
-p7jM197i5ujr7Ozt7nBwbmxvbW1sbm1vbm1sbW9vb3BsamxpaG1qaGpsbWxsbWxr
-a21xcG9xbm90b2pqbm5xb25ub29xcHBub3Fwb3Btbm5tbW1tbnBxdHVydXJwcnJx
-cXJ1cnRyc3V6enV2b3FycXFwcXNwdnV1dHRwb29ydHJ2c3ZxcHFvbW5zdHVzcW5t
-b3Fvbm9vbWxubm9sbmpqa2hqbGtsa2xsamptbmxrcG9wcW9tb3FwcHJvbW1vcHRx
-b3BzcnV0cW5vcG9ubGtucnBubm9sa25vb3Fza2tubG1ubm9vcHFub3RybnBvbnJv
-bW5zcHBxbnJzcHV1dHR1cm5sbG5wa2xtamptcG9ucG9tbG5wcXBxb25ubG9ycXBo
-aW1wcG1tbmxsa2dqamtmY2dkZmZiYWFfXlhVUFBQUlVXV1dVVlhZXWFpb3Bxbmpu
-bmtsa3Bxa3BwbGFWS0pIRUhOUFJQTEdHRUhJTklFSElOS05UU1NVWFlXV1lZW1hb
-XFtbXF1ZWl1aW11fXFRSUFNRT1BRWF5hYl5fXWNmZ2ReXWNucHlzbmpnY2ZnaWZo
-am9vcHBxcnFxcXJydHZ2dHd1eXt+fnp4dW5pZmVhYF5gX11cXFhaWVZWV1VUVldd
-YWZpbXB0eH6IjpWYm56hpKWnpKWlp6eoqaaqrK2traiur62sq6+pp6imo56akoyG
-hYF+hY6QiYSAfHV1e36AfoCBhYSGhIJ9e3t7eX16dnJxc3R6foGEi5CQjouHfnhy
-a2dnZmRjZGNkZmJkZWVlZWNjYmBgYmJgYmVkZGRmZmRhYmJiZWRkZmVkZGJiY2Zn
-ZWVjZmdjZWdnZmVkZWZlZGFhY2FhZWVnZmRlYmFjZGRmZ2RjZWZmZ2ZlZWZoZGNk
-ZGZlZGJjZWNlYmJkY19jYWFfYWFjZWBfYGFgYWFiYV9iY2NjaGJhX2FdXmBhX19g
-X19fXl1eXV1eYl5dX2BbW1tbW1xbW1tdXV1bWlhYWVteX19dXF1cW1pYW1pbXFpY
-WVpbXFxZVldYXFxaWFtgXVtcWFpaWFlfX1laVlZWXFxdXltVV1ZZW1pdYF9fX15d
-XF9bW1xZWlteXFlcYWBeXF1eXl1bW1tcXF9fX1xbXl5cXF5cXFxbW1xcXFxeXVxb
-W1xcXV5dWltbWVpcXFtbWVtdXl5cXV5gXl1dWlpaWVlbWltbXF5eXFdaWFpZV1Zb
-W1hXWVhcW1laWl1cWltdW1xZW1tbWl5dXFxbWlhbXl9aXF5cXF9fXFpaWV1dXl1b
-Xl9cWltcXl5eXFhZXV5eXVpaXFxZWltcXVxdXl5cW11hXl1bXl1fYl9eX2JhYF1c
-WFNOTVhgYl9fXmBiYmJhYWFeX19fYF5eXF5hYWNiX2BgXl5gYmBeXF5eX2NiXl5e
-X2BhYV9iY2JeXmJhYWBfX2BdXl9hX15eXl1dX2BhYl5iY2BfYWBiX19gXmBfXV5h
-Xl5kXl5fXltaXl5eX15eX15hY2FgYGFiYWBjYGFjY2RmZ2loZ2RkY2RkZGdnZmZn
-ZWlpZ2dlY2ZlZ2ZlY2NjYGFmaGZnZWqSt8vW3uPm6Ons7e7vb21rbm1tbG1ycG9w
-cm1qbnFxbXBvbm5vb25sa2xubWxubmxra2prbXBwbnBwbGxsb21vcW1rbG5vb25u
-b25sb25sbWxoamxvcm9xcHBycnR1cm9xc3RxcXBvcHR3c3Nybm9xc3FvcnZ3c3V1
-c29vcHFzcW9wc3NwcG5tb3Z0cHJxc3Btbm9xbnBua2praGtsbWpubmtrbG5ybW1v
-bG5tbm1rcW9scnBycnBwbm9xbm5wc3JybHBzcm9vbnBubHFvb21tcnJvb3Bsa21w
-cXFwb25qbG1ub25tbG5wcG5ucW1tcXJybW5ubW9zcG9tb3BydG9xcG9ucG1zcG9y
-a29tb25sbWxsbW1tb3BwbG9ubm9ubWxpaWpvcW9saWpsaWlpamlpZ2dmZmNhYWBd
-VlNSU1ZUV1ZYVVNTV1dcXmFna21tb21ubmxxcXJwb2peVUxNSkdLTk9MSUpMSkJH
-TUtKRUVJTEtNUFFWWFZWUlRWWVdUVFVUU1ZYVVdaWl9dX15bVFJTUFFWVlNXXmNe
-WVldYWJeW1xfZm1ucXFubmtqaW5ycXBwcXBxcnR2cnFydHd4d3x1dH59fX18enh3
-cmtqZ2FeXV5cWFhZVldZWFJTVFNWVlhbYGFmZ2pvcHuBhY6VnZ2jpamoq6qopaej
-oqqusbKusLCztLKysqupqqelp6OclZCHfXp5foWLh4aAeXVzeXuAg4OBfHx/gYB/
-fnp3eHd2c3JvcnZ4eXx/g46NjYh/eHNtZ2dlZWJjZGNkaGRjYmFiYWBgYGFkaGJi
-ZGZnZWZjZWVjY2RiY2NjZmJiY2RjZGRkYWFmaGFiZWtpaGlkZWVkY15gY2JhZmNj
-Z2JgXWpkY2RiZGZgY2NlZ2RoaGdmZWRkZmZkYmJmZ2VkZGFjZGJjZWVhYGBeXmJj
-YWFhXmFfYF1gYGFjZGFfYWpfYV9hY19gYGFgX19dX1xeXl1dXl9bWlpcX1tdXV5b
-XFtcWllZW11dWlhZWllYWFtbXVtcWl1YV1xeXlpYWVpcXlxbXlpbXVpbV1VaWlpb
-W1xYW15cW1lbWFhcW1tcXl5cWVlaW1xcW11bXFxaWFlaWFxeXlxbWltdW1xcXV5d
-Xl9dXWBfXV5gX15cXVtcWllbWltcXlxcXVxcWllZW11gXltbYGFdXFtcXF1gYV5d
-XFlaXVpdXFxeXFxcXVhaWFhbWVhaWVhYWlpYWlpdXl5dWltcW1laXGBfYF5dXF1e
-W1hWWFlcXFxaWF1fXV5dXV1dXF1cW11hYFtcXV1gXV5dXl5dXFxdXFxdXl9ZWl1i
-Yl1eX2BeXV9hXVxcXV5gYFtcYF5dV1NTV1lhYWBfY2JiYWBfX2FkYmFeX2BgXF5g
-YF5gX2BhYWBgYWNfX2FfXlteYmVhYmJgYmBeX2FhY15eXV5eXmBiXmBfYV9gY2Fe
-X2BhYGBgYl1dYF5eX2JeXl9eYl9gY2VhYV9hX11cXl5gYWBgXV1cXWBgYmRkZGBh
-Y2NlY2RiYWNlZGZlZmVnZ2poZWhoZmRmaGhoaGhlY2FhYWRmYmFcXWRmY2ZlaYK0
-ytbe4+Xp6uvr7u1vcXFvcHBwbW5wbHBva2psbW5qbW9scW9tbW9vbmxuaWhpa2tp
-a2xqa2pra21wcG5vbm1ucGxra2ttbXBubW1raWtvbm1tcXBubG5vcG1ub3FxcXJ1
-dHR1b3B0c3Nxc3NycnRzcW9wcnBtcHN0cnRwcXFzdHFwbm5rb3BvcHF0c3BvcG9u
-bmxtcWxnZmdoaWpoaWpta2trbW5xcHFtbWpubG1vcHdwbnBzcG5tcG5vbW5ucHJ1
-cHFycHBwcXBub25ucG9tbW9vbm9vcW5ucnJyb2xtbm1sa2xvcXJvcnBsbnBxdG5y
-bHBxcHFubW5ubG5wcW5zc3RvcnFxb3BvbWtsbm1vbW1tbmxtbmpqbm5sbWttbm9u
-bnF0cWppa2xqbGlnZ2xubGdmZGBeW1lXU1ZZVldWU1NRUlRYWVxgXF1gZWVoaGdk
-ZWlrbWxlWlNOSEdNT1JTVE5LS0hJR0hNS0hHSEVHTExOTk9TVlVXW1dUUlRUUlJT
-U1ZYV1pcXGJjW1pWVlhXVFRWXmBfXFxeXF1bWltcXmJnamdpaG1zbm1tcXN1d3d1
-cGttb29wcXN2c3h4e3x3dnd5eHd2dnRybWdkYl9bWVhaX15fWlhTUU9PTlFRVlZZ
-W11eX2RpbXV4gIiPmKKkp6yysa6rqaajo6Soqq+wsK+0t7Ssp6Sio6SnqKWel46H
-fnd1dnqDgYKCe3ZwdHd8gXx/f3t7fYJ+e3h2d3h0c25rb3Bye36JiYuIg4B7d29o
-ZmRkZGJhYGBiZGRfZGNhX2NiY2ZmY2NgYGNiYWZlZGJiY2NjZGBhZGVlYmBgYWNk
-YmVmZWVkZmVlZmhoaGRhYmFiZGNmZGRjZWRpaGZmZ2RmaGZlZWZjZGFgZWZnZGRj
-YGNkZWNkY2RlZGRlYWNmZWJeXWFkZWVjYWFhYWFhYWBgYF9jYVxfYmFgYV5fXWBg
-YGJhXmNhX19dX15eYF5eW1tdXV5eYGBdXl5cXV5fXF1aXVxaV1pXWFlaW1xbWldY
-WVtYXFtWWFlbXl1dXGBcXl1ZWFlbWVZaWlhYXFlYXVxfW1tbXFlbXFtcWFZXW1xb
-Xl5dW1xaWltcWVxbW1xcYV1cXl1fX15fXl1cXlxfX2BgXV1cWlpbXF9fYGBeXltZ
-W1lbXF1bWVlcXl1dX19dW1laXF5fX1xcWltaWlpbW1paXF1bW1paWFdaXlpbV1dY
-XFpaW1laWlxaWllZXFpeX11eXlteXFtbWllfXVtaWlxcXFxcW15dXV1bW1ZaXFta
-W1xdXWBhXl9gXl1bWVtZW1lbX19fXl1fX15gX15cXFleXVxfX15eY2FdWVlcYGBd
-X15iY2JhYWJgX19hYWFjYmNeX2JjYWNjYF1cX15eX15dXV5fYF5cXWNiY2JhYWFh
-YmRhX19hX2BfXV5hYGBkZWJgX2JeX2FiX1xdXmBgXlxgYF9fXFxcXlxdXF5fYWJg
-Xl1fYGJhYl9fYGBgXWFkYV9hY2RlZ2FiZGdmZGRfYmRlZWpnZ2RkZmpqaGpoaGhk
-ZmZnaGdlY2VkZGZkZGhkYGBjZWNigrTK1d3i5ejp6uzs7Gpsb2txcnFtbW1sam1v
-bmttbW9wb25sbGtsb29vamdsa2xraWpqbG1sbGlqa29samprcG9yb25tbW1sbGtr
-bHBvbHByenBwc3Fva2xucG5wcG9xb29ucXFubG9wbnRycHJycnFwbXBxcHRybnZ4
-cnVxcG9ycXFvb3BxcG9vbm5vcHFwbmxsamtqaWppamlpaWdpamloaW5rbG1ubGxt
-bWtubnF1c3FxbG5wbG1ucXFtbGtsbXBwbm5wcnZ0cXRzcHFxb25tb21ubGxtamxu
-cHBwbm9ta2lqbW1xdW5ucG5wcHBwa2xtbW5vbmxrbW9wcW1ramxub3Fvb3Nwbm1t
-bmxsa2pubm1ta2ttcXBtbW9sa2tqa21ra2xubWpqbGpraGdmaGtraGdkZGJeWVVX
-WlhWVFZUUVNSVVZXV1leYF1aXV9eWlhYXGFiXFVTTUxKS05TV1hWVE5LS0lMT01I
-R0VFSEhMUFBQUVRTUlNVV1VTT09QT1BTVFhcW11hYV5VVFdYVlZVVVxhXl9hX11f
-XVlaWl1bWFhcY2Zrc3Jzb2xtcHNzdXRuaGdobW94d3h4d3l7e3p2dnh2dHBsa25p
-ZmNiYV1aWFlbW1lXVFNQU1BSU1BSVlVVVldVW2FkZ2xwd4GJkZihq6yqrayrrKqs
-p6WlqK2rrq+ysqympaKgnp2hp6egmJeOh4B5c3N1eoCFgnpyb3B1eXt5cnFwdXd7
-gHt8eXRxbGttbG1zfH+FioeEf3t2cmlkYmNgYWFhYGFhY2dhYV9eYWRiYGRiYWBg
-ZWVoaGlqaGhkYGRkYmRlY2FgZGJiZGllZGRlZ2llZWJkZWRjYV9kZGFgYmZpZ19j
-aGhnZGdmZmZkZGNiYmJlZGZnZGZpZmJjZGRnZGRmZWZlYmNkZGJfX2BeXmNjYmJd
-XGJiY2BeXGFjZGljX2NhYF5gY2BeYF5gYWNiYl9fXl1cW1pdXl9gX1xZWVhZXl9c
-Xl5iYV9fW11bXlxbWllZW1xdVltaW1hXVlxdXGBZWVhaW1xbX1pcX2BeWVlaXlpe
-XVdcWlpcXV1fXFpYWltbWVpXW1xbXVteXl1bW1xaWVpbWlhXW15dXV5eXlxbW1xc
-XV1eXFteXV5fXVxbWltaW11dYWBcW1lbXl5eXVtdWl1cX15dW1tdXF1cYVlZWFld
-XV1aWlxaWFhaWVxdXFxcWlpaWltaWllXWVlaWV5bXFxYV1tZXF5cW1paW1tdXmJd
-WltfYl9cXl1cW1pZWVhbXFlZXV1eW1xaXFxcXV5fXV1bWFpeX19dXF1bXltcXV9f
-X15eZGFgXl5dXF1cXlxcXVpdYWBiYmBiXl1fYGFjYmJiYGFgYF9cYmFgYGJjZGVh
-X15dYV9eXl9cXF5fY2FiYGNjY2JiY2NmYmBdX11cXV5iYGJhYmJiYF5fY15dYV5e
-YF5dXl5eXWFhYV5dWlpcXV5eXmBeYGBgX2BhYWFhX19hY2FiXmBhYmFiZWZmZ2Ri
-YmNjYmJfYmVlZ2poaWdkZmpraGdlZ2hmZWRkZmVjaGZkY2JiZGRiYmRmZWp8tcrW
-3ePm6Orr6+3tbnFrbW5ra2ppaGtrbm1sbWxycXBvb21sa21xbGpqamxsb25saWtq
-a25ybmttbWxqam1ubWxubGpra2trbm1wcHJycm90dXNscG9vbW1rbG9zbnFzcm5s
-bm9vbm9ucHNzdXJvcHBwbm1wcHJ0cXJzcW9ycnJ1cnJxcHFxb2xtcG9vcG5tb2tr
-amxnaGhpaWhnaGloZ2ZoaGxpaWhoa25sbW5wbnNybm5tb3Jva21tcG9taGxrbW1q
-bHBvc3JzcnNzcXJvbG5ubGxtbm5xcG1ubm9sbGtpbG1ub25tbm5vbm9wcG1ramlp
-amxtbm9wbW5samdmZ2hrbm1rb3FxcW1ubW1ucWtrbGxsa21wb25wcm9tbGtpampo
-Z2ZvbGlnaWlobWpmaGZnZ2plX1tXVlVVVlhUVFJQUVRTUFFWVlpYXF9dW1xZWVxd
-W1hWU1BLSklJTVJXXFpYT0pIS0xPUU1HRUZMUE9QVFZYVFFOUFJVWFNNTE1NUVZa
-XmBkYmBZVldXWFhUVFVbXl5hZV9ZVldVU1RUWVRVWWBkbGlsbG9xb2xwdHR1cGxr
-aGVraW1zdXl8e317d3t6eXZybmtsaGhnZF5cW1lVWFlVVVRTVFVRUlNPUlBPT05O
-UU9SVldbXWRpcXmCiZCYn6emqK2trq+urqmmpqesqauur66ppKOdnp6fop+enJeT
-jIOAe3d1dXp9foF8endzd3RvbGxvdHmDgX58eHFsbm5rbXJ0eoaIi4mDe3Zxbmlk
-Y2FgYGBfXl5eYWNiYGRiYmJiYWJgYmVnaWVmZ2RlZ2diZmdlZGVmZmRkY19jY2Zl
-Y2NkY2NhYGBhYWBhYmRjZGRkZGJkZGZmZGZlZ2lmZGRjZGNkZWVjZ2dlZmZlaGVl
-Y2FfXmJjZGRhY2NhX2JhY2FgYmFgYV9hYWFgYWBfXVxhZ2FdXV5fYGBiZGNeYF9f
-Xl5aYGJgYGBeW1xaWVxeXV5bXFpbW1pbXVxdXltaV1dYWVlZWlpbXFlUXV1eXFla
-WFpaWlpbWllYWlxXXV1bXFxcXVxZWVZYWlpaWldcW1xbXFtbXF5dX15eXFtdXFxb
-W1xbXFpYWFtaXV5fYF9gYGBdXV1dWVpcW1tbW1xfXlxcXlxcW1pbXFxcW1xdXVxe
-Xl9eX19eWlxcXV9cW1pbXFlaV1laW15eXF1aXF1eXFhaW1xdW1xeW1tdXFpYWFhY
-W1pcXFpcWltaW11fXl1cWlpaXFxbXWBfWlhZW1tcXFpdXFtbX1tbW1xdXFpdYF5e
-W1xdWlxeXmJcXFxaXFxbXFxcW1teXFtcXF1dXGBgXl5cWllYWVxdXl5gYWJiYF1i
-X11dYWJhX2FhX2FhYWJhY2FeX19hYmBeXF1gYV9eX19eYV5fY2JfX2FjZGFhYGBh
-XmBeYV9eX15gXl5eX2BgYF9jYWFfYGBgX1xaW15fX2FfXl5gYF5eX15eXmFfX2Bg
-YWFjYl9gZGFgY2BgYWFhYWNjZGZmZWRkY2JjZGNgYmNkZmhqamdkZWhkZWNhZmdm
-ZmNnZWZmZGRlZGFgY2RgYGBkbny0zNbe4ubo6evr7e1tbm5saWlsbWloa29saG5t
-bW5pbGtucWtpbG5rbW1tbmprcHNqam9ubGxtbGxucG9sa2xramtqam1vb29ubWxv
-cHJxbm5zcW9yc25sb3F2bGxub3FwbnJycnFxbG5wb3JybnBvcXRwcHJzdHRxcXFw
-bG5zcnN1cnJzcm5ycHBwbnJvcG1raGppaWlqaGppaWlqaGZobWpnZmZlZ2lqbm1t
-bGpsbGdrbG1rcnBxb2trcXJvbGtsbm1vbGtwc3NzdHFucXFub3BwbnNyc3Rva21t
-bW1ubGxtbW9vb21vbm1tbG9vb21ubGtrbG5wbmtqaWhqa21oaG1wcHBrampubW5s
-amppbWxrbWxqa2tsb25ta2ppa2tpZ2VoamZqZ2ZnZmllZGdpampsaGRcWlhXV1hV
-V1RTUlBSUU9RUlZZVFZVVldYXmJiWlhXU1NRTk5LSVBRVllbWVNQTU5NU1JRUE5K
-TVJUVVRSUFBPUFlXVFNPUVFNTU5UV1xkZ2NgXVtWVVZWVlNWXF9fYWNgWFZTU1NT
-Uk9VUmBkaGpscnFzdXFxb3BycnRzcW9ubGVmZmlsdHV3e31+e3x7eHZxbGppZ2hj
-Yl5fYFpVVFJRUlJRUlBOTU9SUE5RUFBLTU1RVllcW15mbHd9gomPmaCrsK6tra2u
-rKmrq6ekqqeoqKWloZ6bmJibmZ2en56Yj4qEf355d3V1d3p9fHx6dHV2c3J0dXl/
-fnp5dG9ta29vcXJ4fYKGiYV9dXBrZmNjZGNhYWJiYGFjYGJlZGJlY2NiYmNkY2Zo
-ZWNmZWVkZmtnZmZmYmFjYGRmYmVnYWFiZ2diYV9hZWRkZGNmZWFnamJjYmVnZmdo
-ZmVjZGZjZGRjZWdmYmRnZmVlaGZmZGRiY2NiYmJjYWJlYmFiY2BhX2BfX11dYV5e
-X2JjYFxeYF9nXlpcYGFhX19gYWFgXl9hXl1dYGNjZGFeXFxbXFxdXV5bWFZaW11e
-XFtYXFtbWVxdXVdaWFpYWVlbXl1bWldUVlhYWFtdXl5YWllbWllbW1ldXVpcVlpf
-XVxZWllYWFpbW1pZXFtbW11bWltdXl9dXF5dW1pWWFlbXF5eXlxfW1xcX15cXFxc
-XFxZW11eXl1dW1tbWlhZV1ldX11cXF1fYV9gXl9dXVxdXl9aWlxbWVhaWlpaXFpZ
-W1tcXl1cXFtaWltcWVxbWVlZV1lZWVZZW1xbXVtdW1taW1xcW1lbWFhbW1lZW1xa
-XVxcW1tbW1xeXVxdXV5bXVxcWl5fXl5gXV1fXl5eYGFiX1tZXFxaXFxaV1pcXFxc
-WlpcX19dW11dXV1eYF9dX19dYWFjZGFfX2BjYWFfYF5dYGFgX15fYGFiYF9gX2Fe
-XmFhX2BgX2JgXWFkYl9dXl5fX2BfXl5fX2FfXl1dXV1eX11eXl5gXmBjYGFhYWBd
-X2BeXl1fXl9eYWRkZWFgYGJgZGVhYV5eYF9dYGFeXV1eX2BfZGViZGFhY2RiZWNh
-YWRjY2NmZ2VpZ2loZ2dlZGhmZmZjY2RkZmVlZmZjY2RiY2NgX2VjYWRwiLbM1d3j
-5ujp6+zs7WxqamttcHBwbG5wbGtra25sbGhqb2tobWlpaGtsbWxtaGhrbWxrbmpq
-bG1tbW5ubGxtbWtoaWttbm1sa2hqbm1vcG9ucnFwbm9ubWtwcm5tbm9xcnBvcHFz
-cXRvcHFycHBycXFwcHR1dndxcnJzdHNyd3Z0cXFydHJxc3NxcW9wcHBtampra2tq
-amlpamdnampsbGxoa2toaWlobGxscHRyb2xpa2xubmxuc3RvcHJwb21ra2xsbG5v
-bWtucW9ydHFwbG1vb29tb29xbW9vb3BvcG9tb3Byb25ubWpqbHBucG5ycm5ra2xr
-a2twcGtqaWtvbGpqa21sbG9tcG9ram1vbnJyb21rZ2dpbWtrbWxsaWtsa2pua2lq
-X11cXVxdXFtcXV9dW19hX2FgYGFeXl9gYWFgXmBgYWVgX15gYGBfYmFgX15eXFpc
-XF5cX2RfXV5cXFxfXl1eYF5gXV1dZWJfXl5fXl5dX19gYV9iX15eYGNjZGJiYmNj
-YmNfXV1eYWJjZmVjYmVlZGVmZGZlZ2ZkZGFlYWZtaGdkYWVmY2ZiYmJfYWNfXl9f
-XWZyqsjU3eDk5+nq6evrdnR2dXNzc3R1dXZycXN1dXV1dHZ3eHR2c3J0dXd1dnd2
-dnJydHR4end3eHZ5eHdzdHZ3eHp7enl4eXV2dnd5eXZ3d3d1dXZ6e3t6d3h4enh4
-eXh1dnp6en18fXl6fHx8ent6eXx/e3x5fH17e316eXuCe3h5e3x5d3h5eXt6enZ5
-eHd4dnNvbnNzc3VycHFxcnZ3dHV0cnNycnV0cnJ0cHRzdnd9e3p4d3d0dnh0dHl3
-dnd7end5dnV1d3d2dHV2c3J0dXh3c3V1dnZ3dHR1c3NzcnJ2dXRycnFxcG9wcXJw
-cXVxb29vbnBxcG1wbW1xcXFvcG1sbm5ub3FucG5wb293dHBxb25tbG5sbm9vb25r
-bWptamtraWxta2lpZmJeXFVQTlBRVFdYWVhbYF1dYWJfX15aW15gX2FiYmVkYGJn
-aWlnXVZYWFdbYGdvbWxpY2BkY2VkY2doam1pamlkZGVjX1pZWl5kZ2VkWlJQVldX
-WFpbWllcXVtWU1dVU1BRUVVaV1tdYGJkY2JcXF5hYGFiZWNkY2hsbG5ta2xvbGtw
-cHV3c3V0dnZ2eHR3dHR2enh3dHV0dXV5eHp9fn58enp+gIKEhYWDgoKAg4SFg4OF
-hoqJiYmJi46PjYyLiYuOi4uKjI6Mi4uLjZCQkJSVlJaanZmXlZOPj4+TlpWZl5aX
-lZaUk5ORkZGSlpeanJqXkpCQkJSUkpSTk5OXmJqanp6dnqGenp+gpKOjoqSjoKOj
-pKKeoqKem5yVlI2CeXJsaWRgZGJlZWNkYmJjY2VnZ2RhY2NhYmJgYGBgY2NiZGVq
-Z2RhY2hmZmVjYmFmZWRjY2RjY2JjZGNfZGZmZWRkYWBfYmNiZGViYWBiZGFlY2Vl
-Y2hnZ2dlYF9hYmRgXl5fYWFgXmBhYWFhX19hY15cXl5dXV1eYlxdYmNhYGFhX1xW
-VVdaXF9cXl1fXVtcX2BdWFtdYFtcXV5bWVlYW1xZWVhZWVpZWltdWldZW1hYWFxa
-XFtZV1tbWlpcXVtZWVtcYl1aW19bWVlcXFpaW1taWltfXV1aWFxbWVdZWl1bWlpd
-Xl5YWl1dXWBeWVhbWl5cXV1aWlhZWlpYWV5dXFhZWV1bXF1dXFxcXltYWVxcW1tc
-W1tbW15cXV1dXFtcWlxZW1hbWVpbXFtZWltbW15bXFtdWlpcXVtbWVpcXFpZWVtZ
-XFhYWldZXV1bWlxfW1tcXltbWllVV1tfXVpZWFhXVVRYWVpaWlpcXl1aWlpbXVxb
-XFtcXV1dXFxcW1pbXVtYW1pZWl1aWlldXFpaWVlaWl1ZXF5dXl5dX1xbWl9iX1td
-XF1gYF1dXl9fXV9dXlxcXF1dXV5dW15iYGFcXV5gYGBeX15gX2BgYF9gX2FiYWBf
-Xl5iYF5eYGJhX19gXVxeX19gXlxeW1tdXV5bXFxfXl9gYV5eWlpcXVxcXVxfYWFh
-X2FgXV1cX15eXmNhX2BhZWZmY2FgZGNjYl9fX2BgY2JmY2VkZGVhYmVkY2VnaGhm
-Y2VmZmdnZWNlZ2FiYGBgYGFgYF5gX2JfYXGnxtPc4OXm6Onq6+x2eXd4dnl4d3V0
-c3JxcXJ0c3BzdXNzc3VzdHJyc3V1d3t8d3Nxc3V7eHx7eHl3dHJ0eHh4d3h6eHV1
-c3V3d3l4dnZ4eXl4dXZ7d3Z2eHl7eXd0dXZ4dnx4eHp7eHl6enh4eHl6e3x7fHp6
-enp9fHl2eXx7e3h8fH56d3h6fn19eXp3end2dnJ0dXNycnF0dHN0dHJ2dnN1dXNy
-dnV1c3R0cndzcnh4fHl4dnRzc3V1dXh4d3h2eHh2dHh3dXVzdHl2dnR4eHl4eHd3
-dHNyc3Vxc3Z4dXZ0c3RwcXBvbm9xc3NydHN1cG1ucXR1cXFwb3Bwb25xbG51cW9w
-dHNxcXFub3FubnBwbWtvbnBubWtubnBubm5tb2xucW9tbm1sZmFdVVFVVVJUVVpY
-V1peZV9hYl5dWl5bX19gX2NmZWJiZWZoY19aWVhaX2Bjamxua2hlYWVlaGdmZWts
-aWxua2lkZmNeXVxbYGRkZmBZVFlVWV9eXFVXXWNfV1NUWFZWU1BTV1lfYWJjZGFh
-YGJhXmJgXmRnZ2hnaWdqbGxuc3RydnhzcXZ0cXJzcnJzdXR4dHN1dXV0dHJzeHl3
-eXp8fXx6e3t8fX6Bf4GDhYWBgYCAgoWHhoqGhIeGjIyNkJaSkpOPj42MjpGOjY+P
-j4+RlpmYlpeem5uYl5aYm5qampuVlpeZmJeWlJWXlpSXmZ2dnJ6ZlJSXlZOVlZWY
-nJyenJubnp6gp6OkpKOioaOjpKGgoqSko56gn5+dnJuWjoZ+dG1pY15hY2FgYmJj
-ZGJjZGNlYWJjZ15fYWBgZGJjZGNjZ2NiYF1gYWNjYmFgY2JhYGBhY2JhYmNjY2Rm
-ZmRjZWZlYmFiYV5gY2JkZGdmZmdnZGZkYmNhX2FgYGFhX2FgX2JiYGJeX2FeYGFg
-YWJiX11dXmBeXV1dXV5dX2VoY2JfXV1aWVhZWl5gW15aXl5cWVlZWltfXV1dXlta
-WlpbWVxZWFlaXltcXmBcWFtcWVtaWFpcXFxfXF9dW19eXl1cW1pdX11bXV1eXV1b
-WldXWlxaWVhZV1leWldYWlVXWVpYWVpcXF1bWltcWV1bWltaWVtdX1xZXltaXFxc
-W1xdXF1eXl5eXlpXXl1aXVxbWVtaWlxZWV9eXVtbXFlcXFtbW1lVWlpdX11aWlte
-XV9gX19eXl5eWltcXlxaWFlZWFlZWVpaWFlYWllbXFxeX11dXFpYVlZXV1VTVFVW
-VlZaWllaWlhaWFpcWlhZWFxdW1tcXFxcXV5fW1xbWVlbWFpZXF9bWl1aW1xfXV1e
-XV1dWllbWl1cXltcXF1aW11dXF5cXV1gXl5jXVpdXV9gXl5dX11gX19hYF5fXV1d
-XV5eX11eX1xcYGBfXl9fXl9fXl5gXVtiZGBfXmNdXF5fXl5eX19dXV1eW19fYF9c
-XF1cW15eXFtgX15eW1tdXFtaXl5fX2BgYV9iX1xeYWFiYGBiYWFhYWFjYmJiYGNj
-YmJhYmJkZGRnY2JkZGVnZmZnZ2hmZmZmZmVmZWVkZGRlYGBiYWBfXmJkZWBfYWRg
-cKrG0tvg4+fp6urr63d2dnd4eHZ4d3h1dHN3dXNzcXFycXN0c3N2cnBzdXZ5eHd2
-dnd0dHl3eHZ1dHZ0dnZ3d3V1dnZ2eHl4d3h5eHp4ent6eXp6eHZ1eHl5eHh4eXZ4
-dXZ2eHp8fXt4dnd4eHh4eXh7fXt8f3x7fH17fH5+fnx6e3t9foB9e3d2enx4dnd3
-eHp2dHJyc3R0eHN1dHF1c3Jyb25zcnVycnRzd3d1dXd3c3d3dXR1cnJ0dnV2dHZ6
-eHR3dnV2c3F2dXJ0dXV4enV3eHV2e3h3d3V4d3N1d3p1cnNyc3FxcnZycnNzcHFy
-c3RycXJwcXNycXJvbnFycG5wb29tb3Bxb29wcXBwcG1ucHBycW9wbW1vb29sbm5u
-bWtrbW9vbmxubGxnX1xZVVRWWllaWVteYF9iX15eXl5aWl1eXF9kYmZlYmBlaWRk
-XllcXmBiY2hsb29pZmRhYmZoaWhramlrbHFsbGtpZmFcW2BhY2FcXFxZXFxjYlxV
-WVRbXlxYWl1bWVlbXltcXGNjZmhpY11dX2JhY21pZ2pqaGxtb29ta3BycnV1cnJw
-cXJydHJzd3Nzc3FucHJ3e3h4eXx6enuAgHx+gH1+fX59eX1/gH18hISDhYGGioeK
-i4uKiIiLjYuPk5STkJGRj46QkpOTk5GRlZSYnJybmJqan5qanZ+hoZ+bmpubmp2Y
-mZmWlZiYmZuhpJ+fnJ2ZmZuXmJeUmZucnp+fn5+hoKCfoKOko6OlpaOjop+doKCh
-oqKfn6OenZeQg3hyb2plZWFfYV5hY2FhYWNhYGJlY2JiXWBeYWNhY2NhYGBiY2Be
-X19eY2VjYWNkZWRjYV9hY2VmY2RjaGdoZmNkYmRlZ2JhY2JkZGNlZWVjZWdlZGhl
-Y2RkY2FhYl9gYWBfYmBhYWBhYWJkYmJhYV5eXV5gYV9dXV1cW1xdXmFhYWBfX15c
-XF1hYGBfX11dXV9bXF5aWltcXlpdW1xZW1taWltcXlpYWFhZWFtcXV1eX1pXWllZ
-XVxeW1xcXFtcXF9fW1lZW1dcX2BbW1xbW1pZW1lYWVlaWVpZW1laWVlYWl5aWl1a
-WVxaXFxcWVtZWlpaW1xcXFtbXl9cW1tbXVxcXWBfXlpaXlxZWVhaW1lYWFtbXFta
-WVpbWVxaW1xeXFtcWlpaWl1dXFtXWFpbXVpaWVpZWVthW1tcW1xYWFdZV1ZZVlZZ
-WVpbXFtbXV9eXV1eWVhfXFlZXlhWVldbXVtbXVxaW1dXW1xdWV1cXVxaV1hYWl1e
-X2BfXFtdWVpcVl1ZWlhaWlteXVxbXFpbWltcWltcXl5dXFlbX1xcXmBeX1xcX11d
-XVxdXV1cYF5cYV9eX19fXVtdXV9cYGNgYF5eYV5eXVteXl5bX11cXV5fYWBdXl5g
-YV9cXl1bXV5dXl9hYF1dXl9fXl9gX15gXl9dXV9fYGJfXl9gWllZXFxaW1xcW1tb
-XVxeXF5eX19cXFxfXmFgYGFiX2BiYGNmZ2ZjZ2RmZGRkY2NnZ2RoZWNkZ2ZlZmZl
-ZmJjYmRkY2JiZmRjYmFgX2BjZGJgX2R1rsbS29/k5+np6uzsd3VzdnZ3dnZ2dHR0
-d4B5d3d1b3FzdHV1dHZzdXV3d3d1dnd4d3V4d3Rzdnd1d3V2eXl7enh2dHZ4enp3
-d3l5eHh8eXl4eHd6eHh5ent8eHZ5f3t3d3R1enh6fHx6eHl3d3h4e316e3x9fHp9
-fXp7fHp7gX97fHp7gIB+fXp6dHZ5d3d4dnd3d3RydXV1c3N0dXFvcXJ3c29xcW1x
-dXZ3dXJzdXR0d3p4dHZ4eXR0dnNzdHh5dHV2d3h3dXV6enZ1dXd3dnZ7fHl1dXN0
-dXR1dnRyc3NycHJvc3J0c3R1c3Fwcm9xcXJwcnNycnBwcXJxb29wcXBubm5xcXBx
-b3FzcnJybWtub3Fwbm1sbG9wbm9tbWxrbGtsbm1rbWxta2ZgXllaVldbWlhdX15e
-XV1cXlpcW1lXXGBeZGRnYGBhY2RjZWdlZWBgZWVrbW5va2lnZGJmam1pampqbGxq
-bWxscGxlXlleY2VlX2FfXWBjZGFcWFRWW11gWlpZXVxaXF9fYWRmYFxgY2BaWWBm
-Z2ZjaGxoa2xua2xvb25vcXN0dm91dXJxbm9zdnZ1c3BybnF0d3l7fn57en6Bfn+A
-gYB/gYB+fn59fH2BeneGiIqIh4SHhoKJiomMiIaKjI2LkJKPkZOVlpSWl5eYmZmY
-k5ibnZ2cmp2bmp2fn5+foZ+io5+doKKioKKhn6OhnJ+gn52eoKKdmpqbmJmgoKik
-pJ+doqGhoaKgoZ+jo6Oio6CgoZ+goaKlop6enZyamZKFdm9taWJmZ2JlZ2VgX2Bh
-X19kZWVjYWNlX2BgY2JmZGJgW19iYmJgX2BcX2NjY2VmYmFfXV9eYGNlY2RiY2Vm
-ZGBfZWZmY2BgZWFmZWVmZmVjYmVlZmVlZWJfX2BfYmVjYl9fYmBgXl5hXl9gYWBf
-Xl5fXV9gYl9cXFpYW1pdYV9gXWFdX15cXlxcX11cX15cXVxaXV9cWl1cWlxbWlxe
-XFtZWVpZWFlbWVhXWlxbWVhaWllZW1hYV1hbWFdZW1tbXFxeW1paXVtbWVlbW1xc
-WlhcXV9cWVdaWlZXWlpbWVlZXF9hW1pbXl1cW1lYWVxdW1xcW1lcWlpaW1tbWltY
-WFxcXFtZWV1cXltZW11dXVxcWlpbWV9cXV1ZW1xbW1paWFlZWVtbXl9bWVxaW1xb
-W19ZWFpZXVlZWVhcW1pZWlhaXFtjX11fXFxeW1laW1xcW11aWl1cXFxZXVtaWl5g
-XFpeW1dWWFhcW1tcXl5cXFlYWVpcW1tdXltbXFxcXl1bXV1cXl5cXWBiX2BfXF9h
-X11cW1lbW1paXV1cX15fYF5dW1tdXV1dXmFeXl1dXVxdXl9hYGFeXmBeXGJhYV9e
-YWBfX2BgX2BfYV9fYF5dX2FiX2BfW11gYGBcYV9eXF5eYGJhX11gYF9cXl5fXlxf
-W1xaW1teYF9gYF9eXFxcXFteYV1gXF1cW1xdX15XXF1eYWBeXmBgYF5fYV9hY2Jk
-ZWVnZmdpZWVkZGNmZmVoamhmZ2ZpaWZmZmRkYmNhX2JiZWNjY2BeYWFgYmNjZXWv
-xtLb4OTm6Onq6+x0dHNyc3ZzcnJwc3R1fHZ4eHRxcHFzcnJzdHRxcHZ4dXZ5eXd2
-dnV2d3R4enl2dnZ3eHd3dnZ1dXRydHd1eXl6eHl4d3p2eHt6eXp7fHt4dnd4enp9
-e3h6fXx9fXt7fHd5d3l6ent+e3t6fH1+eXl2enx7e318e3t4eXx+fXp4e3x4d3h1
-dnd2dXRzcnN2dHZ0cnFvb291dnZ1dHVzdHd1enx0dHd1dnd4eXh4dXd3dnV0dnl1
-dHd3enh7eXp6d3Z2d3l5eH18eXh4dnd5dnVzdHNvdHRydXZzcHJzdXJvcnR1cnR3
-c3Fycm9vbW9yc3FybmtwcHByb3JwcHV0cnJzcG5vcHBvcG1ubm5ta21vb29wb29w
-bmxsbG1ra21qZ2RgW1VZWVtaXV1dXF1fXV9cW1lYWV5dX2JnZmJgXV1dYWVpZWJk
-ZGRpbG9wcHBubWpnZ2Zpa2pobHFramdobGtrZWFdWWBoa2dkZGJhZGRjXVpYWF1c
-W15cXVtYWl5fX19kZ2ViYWNeW1lgZGViaGttbXNyc29wcXNubW1tbXFxcXNzcG9w
-c3RydXZ2b2xucnV2eHl5enx6fH19gYCDgIKDgoOCg399fH2ChomKiouLiYeGhoiH
-io6PjoyLjY+Pl5mXmJqXm5ycmpqal5mamZqfoKOhnpyampycm52eoKGko6Smqaug
-oqGjoaOjoaGfoKGiop+empqcnZ+jpqSnpKCio6WioaKfn6Cio6GhoqSkpKakpqai
-oZ+bmZqbmI2BdG5oY2FfYWRnZ19hYl9fYmNjY2NeYWViY2NmbGVlYmFjYmJfYmNf
-Xl5iY2NjZGhmZWViYmFfYGFhY2RjZGJhZGFlZGRiY2VlZ2ZmZWJjZGVkZGNhZGNh
-YWBeYGBgX19fX15dXltfYF5hYWFgX11eXl5iYWBjYF5fXF1ZYWRhXl9jX19iZGFd
-XVtcXV5fYV5dXVpdXmBgX1xeX19ZWVldWlpbXVlbWVlbXFpcYF9eWlpbWVlaWFta
-XF1cWVZbWlxbXFpZWVxdWVlbX1xaWVlbXl1aWFtZWFZVWFZWVlpWWFdcWFlaW1tc
-XlpYWVhXWlxdXFpbXlteX1xfWllbWlpYWVhZV1laW1pcXF1bXGBfXlxcWlpcYFxc
-X11cXF1dW1pZW1pbW1pXW1tdXltcW1lbW1lXVlhWVFZWVlhZWlxdX19iWlxcXF5Z
-WVtZW1paWVdZWVhWWlpeW1dXWFtcXFlZWVpZWFhZWVhcXlxgXl9fWFhaXltdWllW
-WVpaXVteXlxZXFtcXl5eXFxcXV9eXF5eW1peWlpbXVtaWltcWlpaXV1dXFxcXF5c
-W15gYF5eXV5bXF9eXF9gYWFeX1xcXF5hZGBgX2JfX2JhX15gYV9hYmFeXl5fXlxg
-YFxbX11cX19aWlpcXl9cXGBgXVteXmFfXVtbXl5fXl1eYGFiYF1bXGBiXVtcXl5f
-X15dYGRfX19hYWBeXmBeX2FgZ2NiYmJkY2JkZmdoZmNjYmNmaWpoZmZmZGdnaWRk
-ZmRmY2VhYmFhY2NhYmJdXl5bW15kdKjH0tvg5Obo6urr63NzcnFwcW9wdHV2dXFu
-bm9ucXNycnJub3J1eHR0dXl3dnd4enh2dnh5e3p5fHl6fXh3dXN1c3VzeHV0dHR4
-eXp4eXd3eHx5d3p7fHl8fnx8ent5enx9fXp7fHt9fnl3eHh5enp6enx8e3t8fH16
-e3p7enl5eXt4enx2d3p4e3l4eHp3eHZ1dnZ6eHJxcHN0cXFydHBxdHR0dXR1d3d4
-dnh5d3l3dnV4d3d2dnd4dXd3eHh2dnZ2dXd2d3h1eHh3eHh6eHt4eHd5dXV4eHd4
-d3Z2dXV0dHJ1dXJycnZzdHBvcXBwcXNybm5xcXFwb3Jwb29tbWpucnRzb3Bwbm9u
-b25vbm9vb3Jxb21ra29vbW5ubm9ycG5sbG5uam1rbGlpZmRdWFVUWFteXl1bW1lZ
-Xl1WVlVZWlteYWJmZWNiYWJkY2JiYWRlZWlvdHRwbWxta2lsbWloaWdqbG9ra2tr
-aGZiYGJnZ2dpZmdmZGJiXl9eV1phYl5bWF1eXmBiX1xdZW1sZGdmZGJfYGRlZmxv
-b21uc3V1dHV0cHBubm1oaW1tc3Vub21tbmxscXJwbnBzdXZ2fXx8f4GEgICBhYOA
-f4CEhoWGgoF/g4KFiYqKi4uLioiLjY2Pk5SVlJGRlJqcmZaUlpiZnp2cmpubm56d
-nZ6hoqCfnqCioaGjpKShoqOrr7CrqaagoaamqqimoqeppKSjpKKhoaKfoKOgoaKj
-o6KipKSjoqajn6CgoKCmo6SkpqiopKKioZ6cm5yZj4F5cW9qaGVmY2NgZmVhYWFh
-XlxfX2JkaGhkZWVnbGdjY2NhX11eYWFhYF5gZGJkYWFkYmVhYmNiZmVhYmBjZmRj
-ZWZlZmhnZmZmZGNlYGFkYmBgY2JiYGJhYmNkYl9fXl9gX15eXl1fX19fX2BfYV5e
-YV9fYGBdXFxeYmFhYV5gXl9fXl9gYWBfXVteYGFiYWBcXFxdXl9fXV1cXlxbWVtc
-XV5eXVtaWlpcYV1bXFtbWFpZWllcW1xbWFhYV1hbWllZW1xaXlpbWltbXFxbW1pc
-XFtbWl1cWlxZWVhYWFZYXFldXl5fXFtaW11bWlpcWlxcXl9fW1xdXFlcWlxaWVpX
-WFlaW1xbW1pZWFxcXltYXF1bWlpaWV1ZXF1eXFxZWlxZWltaXFpZWFpYW1tbXV1c
-WVpYWFdWWVxZVldWWFpbW1xaWFpYWVtaW1xYV1lZWlVSVVhZWlxYWVtfXVlXV1dZ
-V1hXVlZXWFhYWltaV1dXWFpZWVpaWVpaXFxaXFldXl1cWFhaXF1dW11dXF1dW1xc
-XlxdXF9dW1xaW1xcXVtbWllYV1lbW11cXV1cXF5eWl5dXWFhYV9dXl9jXV1eW19f
-X2FcXl9fXV5fXF1gX19dXl5dXl9gXl5gYGBiYGBeXF9aWVxfYF9fYV1fXFtdW1xc
-XF1fYF9eYF9dXV5cXFlbXVxeXFxhYF5eXF1eYF5dX15cXWFhX19fYWFkZmZmZWRg
-Y2RkZmRkY2ZlZ2hra2hnZmVkZGVmZmBjZmhkYl5fYV5gYGJjYWBdXV1bWmF3ocTS
-2+Dk5ujp6uvsdHd5dXVzcW5ubm1ub25ta2xvcXZ1cG9xb3V3eXd0dnVydXR3eHh4
-dXV0d3p5enl2c3d3dXN0cXN5dnd4dXV4eXh2d3h4ent4eXp6e39+f318e3h6eXh4
-d3h6fHh4eHV2dXl5eHd6eX2AfHx6dnp7fXp5d3V3eH16fnt4d3x5eXZ4d3h4eXh3
-dHV0dndzcXJxcHR0cnN1cnBxcXV1dnZ4eXR1dXd4dXd4d3Z0dXZ0dHd3e3l0d3Z5
-dnRzc3R1d3t4eXp7eHh1dHl6eHV4dXVzdXV1dHV2dHNzcnRybnN0cHFvcnFvbW1w
-cnBzc3JycnFxcnJycm9xcm9vcG9wcnFycWtvb29tbnBtcHBvbm9va2lpam5vbW1w
-bWptaWpqamViYFpWWFpYW1lbXltcXF5cXllZW15fXmFjZmVjY2JjZGRjYWFiZmht
-cXN0dHFub25vbWtnZmdoaWhoZ2llZWhtZV9ib3NuaWZnZWNhZF9eW19fZmNfXFpd
-YGZmZmNYXGNta2VnaWhmZmZlYWZrcXFtbHBzcm9ycXBsa2tpZWRna29tbHBubGpo
-aGtvdHVxcHJzd3l7ent7eHx9en2BhHt9goWGhIWGhIOFhomGioyJh4SIiYuMj4+S
-l5eVlpianZual5qbnJ6hn6Gfn52goKCgo6ShoaWkpamnpaeopqipqa2ur6+vrrCr
-qKysqqqoqauppaWkoqOlpqOjpKOhoaKjpaSnpKOjp6akoaOkpqKioqOnqaOkpaai
-n56enJeNgnlzbGhoZmRjYWFlYV5fYWJjY19gY2BkZmdkZWRlZWZgYmJgY19kZGNj
-YGBlZmBiY2JhY2VjZWZnaWdjYmNjY2RjYmNjZmZhY2NiY2JmY2FiYmBgYV1hY2Vk
-ZmViYV9fYGJkYWBhYmFfXVpeYWFgXlxeX15gXl9fX11fYmJcXV9dXGNiX11eYGBg
-X1tdYF5cWlpbXFpcXF5fX1xZXF1bWVleXl9cXF5cWltZW1lZW1xZVllZWVpbW11d
-W1hcWldZWlpcXFxeWFtaWVZcXFpcW11cXF1bYF5bXFxYV1lYWVhYWFlbW1tbWFlY
-WlpYWVhZW1xbXVxaWltbXFxYWFpcWllZW1pZWllZW1paW1taW1paWlxfXFhcW1xe
-Xl5bXF1cXF1dXV1dX1xZV1taWlpdXVpaWFhZWV5eW1paWltXV1laWldYWFZZWl1c
-WlpZV1pZWltWWFlYWFxaWl1bWVRXWFhYXFxZVllXW1pZWFpeWFlaWFhVWVxdW15e
-XVxcWFtfW1hZWVZbXVpZWltaWl5cW1pgXF5eXFlZWlpbW1pcX11eXFlYW2BfW1pa
-W1tcWlxaWl1eX2JfWlldXF1eXV9eXl9dX15eYV9dXF1eXl5eXF1eX2JhYGFgXmBg
-X11eXF1fYWFfXV5gYGJeXlxbXWJgXV5dXV9hX19dXl5gXl9gYF1cYWBfXV1eX2Ff
-XVpbXlxdX15hYmJjYWBkY2FiYmRkZGZkYmJkZWRmZmhlZ2hoaGllZ2ZnamhoZmRj
-ZWVoZWNkYmBjX2FiYWJiYGBbYHSdwtDa4OTn6Onq6+t1d3d3dHNwb25sbHBycm9w
-cXNzdHR1cnFvcnF2dHVxc3V1c3d3eHV0c3d2dnl5eXp5dHR1dnh5e3h5eXl3dXV1
-d3Z1eHl4eHh5e3t6fnt9gH18e3l3dnl8ent+e3Z2eHh3d3d2eHh7eX54eXp3d3t6
-eHd4dnV5e3t7fHp7enp5eXZ2dnh5d3ZzcXFzc3FxdXV1cHFyc3N1c3F0dHV1dnh1
-d3d4d3V2d3h3eHd2dHR3dnd1d3d1eHd2c3N2dXV3d3l7eXl3dXV5eHh2dHV0dXZ4
-dXNyc3NvcHBwdHFvcnN0c3JvcG9ub3BwcXBzdHNvcHBzdHV0c29uc29tb3BxcXBv
-a21sbG1vcG9vcnJtaWprbGtrb25tbGxsa2tvbGloZmFdXV1YWl1cWlpcWlxcXVxa
-V1lbYWVmZ2dkZGJhYWBjYWBfYWhpaGtxdW9wb29wcW9saGhnZGNjZGVnaGtqZmZo
-Z2hxc3FwbWRkYWBgYl9eYGRmYVtbYGloZ2lnZ19fa3FqaWhpaWdqaWRlaGpub3Bt
-bnF0dm1sa2ttaGhmaWlrcWtvcW9ubW5tb3R2eG9qanJ3d3l6e3x+fn59gH58f4GC
-hISFhIeFhoeJiYiNjIqCg4uLi42OkI+SlpSVl5WOmpqYmZqcnqKfoJuaoaGeoKem
-paSkpqimqKejqKyrrK2vqa+xsK+urq+vrq2pqqyop6eoqKSipaakp6ajo6WloaKh
-paSjpKOjo6enqKqmoaGhpKWlpKShoaChnZ+emJCEfXVwamdmZmRjYmRkZWJjYmBl
-ZWhnZGFkZ2VlZ2VjZGNjY2NmYmFiYGBiZWNjYGJgYWNkYmNjYWRmaGdmZWdjYV9d
-XmFjZWRiZWFkZGZjYGBjYWBgYGBfYWNkYWJeXWNhXl9iYWBgX2BdXF1gYF9cWl1c
-XV5fYF9gX15iYV1bXGBiX2BhXl5dX15dXmFeXFpeW1xZWVZaXl1eXFpbW1xgXFtc
-YF9bW1pZWFpbXFtbXl9dXVtZWVpXV11gYFxcWVteXVxcXV5dW1tYV1pYWlxcXFla
-W1tcXFtcW1ZXWVpbWVhZWltdXF5bW1lbWV1aWVtbW15dW1ZWWl5dWlpYW1pbXFxc
-XFpbW1pdW11bWVtbW1pcW1teX1xbWlZYW1pZWFdZXV1cWVhcXVxdXVlaW1xbW1ta
-WVhYW1xbXV1fXVpWWVlaX1pYW1tXWFldXF9dWmBbXFtYV1hZWltZVlNVWFdcXlhb
-XF5aXFlaW1xbXFxdWltZWVtaWFhbWlxaWVldW1xdYF1dXFhZXVxaXFtZWVtcYF5b
-W1xcWlxfXVlWWFlaXFpYXmJgXV1bW1xcWllbWVtaWVxcXV1dX1xcXF1cXF5eX15d
-XlxfX15eXV9fYF1dXFtcX2NkYF5cXmBfXWRgXV5gYGBfX2BhYF9fX2NkX2BfXV1d
-XV5eW11bXFtfXV5gYF5gX2BdXV9eXl9hYl1eX1tfXl5hZGFkZGJkYmRgYGFhY2Nk
-ZGVoZ2ViZWRjYmRqampsa2ZoZ2tpZWZfXV9fYmJkZWZkY2NiYV9gX1xfcp7C0dvg
-4+bo6err6nJydHh3eXd0cXBzc3NxdXZzcnZ1dHRzdHV0dXV2cXBxcnV0dHRycnR1
-d3t0d3Z4e3d4eXN1fXx9enp7d3d2dnZ2d3h3eXh6enh4eXp6enp6fHx8eXh5e3x8
-fHx8enl5eXV3eHl4eHl4d3h4eHl4eXp6enl8e3l4eXp5eXd5dXZ3dnd5eXp2dHNy
-bnBycHFxc3FvbXJycnJydHN0dnh4eXd5fHp5e3d1enp7fHp5dnNydHV1d3V1dXZz
-dXNydHZ2eHd1d3NzdXl2eHd2dHV2dnZxcXRzcnFxcnFwcXl0c3N0cG9ub3Fwc3Bu
-c3JzcW9ucnJzc3NycXJxcm9sa29ubnFyampqbnJucnRvbm5ra2xub3BubGpqbWxq
-a21uaWVlZF1dXlxaXl1eXlxeYF5aWVlaW2BjZmxoZmRjYGBiY2hlYmNmZWlsbXB0
-b2ltcnJwcGtpaGNgYWJkaGlsaWxpZmlpZmprbGtnZGFhYGZkYWNkZmhjYmNmaWdo
-aW1uaWtua2poaWVnZWdmaGdma2lsbm9wbm1ta25sbGZoZ2ppaW5vcG9ycnNwc3J0
-d3dza2lscnR1eHZ6fX+AfYCCgXx9goeHhoeIiImIiImIiomMjIaKioyOjYqMjpGW
-mJOQkJWamZmZnJydnqChoKKfn6Gio6eoq6elqKmtr66uqquusq6trrCtrrGwsa+s
-qamoq6mqqKmpq6Slqaikp6mqqaiko6GlpaKhoqamqa2vramopqalpaejo6SkpKOg
-nqCZkoyDfHVuaWZkZWRkZGVkZGNiYGFiYmJmYGRlZmNkYmRiY2VkY2RlZ2ZoZ2Vj
-Y2JhYGFkZmReX2NiZGVmZmVhYmBkZ15iYGFhZmdlY2RmZmVmY19eYl9hYF5cX2hk
-YmVmY2FdXGNhYWFhYmBhXl5eX15cXF1dXl1bXl9hYFxeXV9gXV1dYF5hX11bXF1h
-X11cX15cWltdXV9cW1tdW1pdYF9aVlhbXV5cXl1bW1xaW1pbXFxgXVxbWlhQWV9f
-XFtbXF1fYF5dW1taWFpcW1lcWVhZW15cW1taWldYWlhYW1pYWVlbXFtaWlpaW1dc
-XV9dW1pcW1tbXFpcXFtbWlxaXF1dW1paXFxaXF1cW1taWl5dXFtZWVxcXFxZWFla
-WlhWVllcXltcWltcXF5eXV1cXFtbWlpYWFxbWllaWFdZW1dYWVtcX2JeWlhbXF5d
-W1pcXFxcXFpWV1pZW1dXWVlZXFxdXVpaXVlXWltaW1xbW1tcXVxbWVhbXV1cW2Fd
-WVxcXFpcXFxbXFxfX1xcW1pcW1pfXmFcWFteW1tdX1xYWFhXV1peXl1dX1tdW1ld
-X15dXV1dX1tcXFxeX15eXV5eX1xbYV9dXV1dYF9eXmFfX1xeX19eYmRiYF1dXFla
-XFxfXl1cXV9fYF5jX2FeYWFdX19fXV1eYGBgXV5cXWBhX11dW19dXltcXWFgX2Bf
-X2BiY19fYmNfY2RjYGFgX2JiYF5fZGRjYmVkY2dlZGNlamVnZ2pnaWZmaGtoZWNg
-YmJjYmRiYmNgYGBfW11gYV1tosLR2uDj5ujp6uzrc3Vzc3Z1dXhycnNzc3R0cnV1
-dXh0dHVzdXl1cnFzdHR2cnFzdXZ2dHR1dnV0dHR3e3d2dnR4eXt3dnd3enl6eXp5
-enh3eXx6fH5+e3d3ent9e3t6e319e318e3p+f3x2eHh6eXd8e3h5eHl3eXl7e3h5
-fH5+fHt6eXp4e3p6fHd1dXZ7eXl2dHN0cnFwbm9tbm9ycnR3dXJxdnRzdXd1dHZ2
-dnd1dHV6enp2eHh0dXd0dnZ2dHR0d3h0c3V5dHZ1dXN1c3N1dXV0d3h4dXZ1dHR0
-dnV1dHNxcW50dnVycXJzdXBsbnRydHNwcXZ2c3Bwa2tsbXByc3Fwb3Fxb25wbmlq
-a25rbW1ucHBub2tsbW9wb3Bub29ra2xvcG1saGViYF5dXF1fYl9eW15eYF5ZW11f
-Y2lqbmxpY15dXmBiYmRmaGRkZ21zdW9ram5vcW5raWlnZ2NkZGpra2xrZ2Vla2xq
-cXNyamRgXF5iZWRhYGdpZWdpbW1naGxpaGtubWhnZWRkZ2ZoaWpoZ2pmY2Vob3Jy
-b21ra2hkYGRoamxubW9wcHFxcG9zdHZ1c25qbG5yd3d4eXd3en2Bg4B9e32AhYiN
-jo2OioqLjIuHiYmNjo6Mi42Lio6QjpGQkZWcmpeZmpmZmJmbnaGhoqGjpKamqauq
-qaepqqqvs66qq62srKerrq2usa+yraqrq62pqKutqa+zsKurqqinp6eop6alqaup
-pqSjqqump6qrqKWppqempKSkpaKioaCdnJiTjYR2bmxqZGJhZWdjYmJjX2JlZGNf
-X2JjYmNiZGNkYWRiZGJkZWFjZmdpZGJjY2RiX2JiZmReYF5gZGNlYmJhY2RoaWhm
-ZGNjYmRlYmBkY2JhZWZkYV9dYGJkYWJiZGVkYWNlYlxgXl1cYmBeX1xdYGBdWFtb
-XV5fX19gX15eW15fXlxeX2BgXWBdXl5bXF1cXV1bXVxeW1pZW15eW1dYXV5cXF1a
-WltbW1pbXFpbWFpdXV1bWltaWVpYW1xdX1pbWlteXV5bW11aWVlbXV1bWVpcXVlZ
-W1pZW1xZWllaWltaWVlaXFpbWlpXWl5ZWl1cWVtcWFpbWldZXFxdXFtdXF1ZW1la
-WVpaWVldWVtbWltaWVhYWFpcW1xbWlhaWlhYWVxdW1paW1pcXVpZV1hZWVtcXlxa
-XVpaXFtXV1dYWVpbXVlaW11dXltcW1tcV1paWltXV1daWFdWV11hXVxbWltcW1la
-WVxaWVpbXFpZXVtYWVlXWFpdXVxfXFxZXFpbWlpcWlxeYFxdYl5cWVlcW1laXVxf
-XV1gXFpaW1xYWFlZXWBfX11bWltdW1xdXFxdXV1aWltdX11eXWBhYV9gXVtfX15g
-XV1eYF9eXl5eXV5hYmBeYWJhYF5eXltbXF5fXmBhX15fYV9dX15fYGNjXWFdW11f
-XWBiX19eXl9hXl9eXl1fY19cXFxfX2NrYmNhYGFjZmRiYl5gX2NkYWBhYV5fY2Jj
-ZmdkZWNmZ2ZqaWdraWtnYWBlaGdmZmRkY2VkYmJiYmFiX19hYF9hYmqfw9HZ3uPm
-5+nq6+x0dXRydnR1eHh2cnNxcXN3dXNydXV3eHZ1dXZ1dXR2dXd5dHN0dXR1d3Ny
-dXR1dnl5eHV3enZ4eHp1dHh2dnp1eHh5eHh3dXZ2eXt6fnh4e318eXp5eXp4eHh5
-eXt4eHd3enp7e3x6eXl5eHZ4e3p6d3x6e318f3x4d3d2enp7eXV1dXd3eHdydXR2
-b3BwbG5xb29ubW10dXNycnJzdXJzc3N0d3R2dHh2dnV2dXN1eXV0dnR1d3p5c3t3
-dnV1eHh5dnR0dHZ3eHZ1dnl1dXZ1dXZzc3VycXNwcHN2dHFwcHF2c3V1cW9zdHFy
-cXFxc3RxbXJycG1ucXBucXNxcm9ub25sbm1scXFxb25vbm1sbm1pa2xtbW9tcG9u
-bm5taWRhX2JdXV9mZV9bW1xZWFhaX2RlamxuaWReXVxeX2FhYGRkYWBoc3JwcGpr
-bG9wcXFvcG1nZmZmaWlnZmNjZmhnbXBzcnFtaGBfYWdlYGBmbGpoam1sbmlpa2ps
-bG1taWNgYGNnaGxramxtaGZla25yc25tbmdhX2JmaG5yc29vb29wcnJwb3J1dXJt
-aWxuc3R2eXl2dXd2fICCgH18e3+BhomNjY6MiYmJiIiLjIyNj42Ji42Ljo+PkJGR
-mZqXmpqam5iWmJmWnqKipaemqKqprKupq62prK6wq62trKytqKyvr7O0s66rqKal
-qaysrK2trbWzq6qnpqioqaqqqKmkp6inqaurqaSnp6irqaipqaKlpqWkoaCfn6Ce
-mJaOh3hybGVjYmFlZGNjY2JjZWhnYmJfYmZfX2JlYGJkZGZoZWJiYmJiYGBhYWRk
-ZGBfYGBfZGZlaWJjYmNlZGZnZmhmZGRkY2BiZGNlYmBeX2NjYmBiX2BhZmNfX2Fh
-YmNgYWJgX1xaXmFhY19eYF9cXmFdWllbXF1cXl5dXV9eX15eXFdcW1xcWF5fXVxd
-XF1fYV1ZWFlZWl1dXF5cXFpcXF1ZW11dX15aXVtaWVxcX19cXmBcWldVWFVXWFpb
-XFtbWlteX19bXF5ZW1ldW1xbXVxaXVxbXFlaWVhZV1pcWllZWllZXFlZWllaWVtb
-XV1aWVlZWVldXVtaXV5dXFpaWlxbXFxfXFpbWVpXWVpbWVlYWVVYWVlZWlpbWFhX
-WldZWltbXFdXWlpbWVlbWVhXV1hbW1xaWltaW1dZWFlbXF5aWl1bXVlZWlteXllb
-WVhZXV9dW1lbWFdXWF1cXV1eW1taWlpaWFhZWVlaWVpYWVdXW1xdW1xdXl5bXFta
-XFtdXFpcYGBcWltfXlxbWltdW1tdXV1cW15fXFpbXFdYWl5fXlpbWVxbW1xdW11c
-Xl1cW1pZXF5gYV9cXl5fX2BfXl9jYmFeXF1dXV1dXV9eYGBdXV1eX2BeXF1dXFxc
-XFxeXF1dXl1fXFtfX11cXVxcXWBdXF9iYF9gYGBeX19gXFxfXl5gXl5dYmNhYHFj
-X2BiYGFhYWFfYWJmZGNlX19iX19hYmRjYmJjZGRjZWZnZ2RnaWhnZGdmZmppZmNf
-ZWJhX19fYWFhYGBdXl9eaqTE0trf4+Xn6err63N0dnVzdXd2d3ZycXBwcHJ6d3V0
-eHl5end1d3N1c3R4eXh2d3J2d3Z1cnN1eHZ3dnZ1d3l1dnN1dnR0dXNxc3Z8fHx8
-e3l8eHh6dnh6eHZ5eoB9e317f3t6eXh6fHl4enl6eHl6d3l4e3d3d3p7eHp7e3x7
-e3p9fHh2eXh5fHl5eHh3c3B2eHZ0c3Rxc3FycnFtb3FvcHRzcXFyc3NydHR0d3d2
-dnh4d3RzcnZ1dXR3dXN1dnV1dnFydHd2cnV4dHR1eHd1eHd5eXR1eHd1dXR3dnV0
-c3RycXFxcnJ0cXFubG5xcXFxcXFzcnNvb29xc3F0cnJxb21zcHJwcnBtbnBwb29s
-b25wcG9ubnBxb21sbG1tb3BtbW5vbWxwbmloZ2NiYWBfYGVjXV1ZXFtZWVddYGls
-b2pkYmBeYGFeX2RhYV9fY2hvb21rbm1ubm5ucHJxbWxoZ2VkaGdnZ2JgYmVmamtr
-a2lnZmRpZ2BfZGhta2ptbW5sam5rbW1vcG5oZF5gZGlxcm1qaWtqaGtwdXVvbWtp
-ZmVkaG5xcm9tcG9sa29xcnd7d3RwbmtsbG5udnl9eHNyd3d7fX+Df317gICFhYqM
-iouLiYiKiYuLkpGRkY6KjpGQlJOWlJWXl5qZmZeZl5WZnKGho5+ho6mrraurqKqo
-rK2yrKyqqq2srKeurK6wsrKuq62sra2qra+vrrGwsLGrp62urKurqqqrqqiopqap
-rqakpqepqaupp6Wmpqejo6ShoJydnJyalY+HenFrZmdmZGNjY2FjYmFhZGBhY2Vl
-Y2JkYWNhYmZoaGZkZGZiZWNiX2FjZWNfXVxdYGFkZ2dnYWFhY2dlZWRlZWNjYmRl
-YmNjY2JhYmJjZWJlY2BhY2JhYGBhYF9gYGNgYWZiY2FeYWNiXVxgYGFgX15eX11e
-YWJiYmJjYV5eX15cXGFhXl1dXFxaWFxcXFtbXFxaXF9hXl1cWl9eXFlbXVpaYF9f
-WlteXVtbXV5fXVlYXWBeXV1bXVtbW15ZW1pXWVtaW15ZWFlYXFpaW11cW1tbWVlY
-VldbW1lbWlpZWVlaW1xeW1pcWVlcXVxbXFxZWVlYWFpaWl1gXF1eXFtaWFtYWlpc
-Xl1ZWVpZXFpaWlleWllaWVhZW1xcXVtbWFZYWVlZV1paWVxaWlxbWFdaWVpbXVxf
-WVVdXltbW1laXFpZW1xbWVlYWFlbWVtaW1pXV1haWVdaVldVVlhZW1pcWVxdXltY
-WVlaV1dZV1dWV1tbXl9fW1xhX11eXVxbXFpcXl9gXltdWlpeXl1cWVtbW1lZW15a
-WlxdWltbWVlcXmBeX1pcWlpXVVhbXFpcX2FfXWBgYV5ZXF5fYGBgXV5fYF9cXF5g
-Yl1cXGBhYF5dXl5eW15fXmJhX11jXlxcX19gX19dXlxeYF5fX11cWl1dYGFgYGJg
-XV9gYGBfX19gXmBfX2BgXV5eYWBfamZdWl1fZWJfY2BhZGJjYWNfX2JjYmZnYWNi
-ZGRiY2NjZmZlZ2ZpZmZpZ2dmZWZmY2ZiYWJhYF5dX2BeX2JfYGNxpMPP2t/j5ufp
-6uvrdHV0dXd3dnV2eXZzc3Vyc3V2dXZ3eXl3dnd1d3Ryb3N1eHh2dHR2dnZzdHJ1
-dnd3dXZ3eHZ0dnZzdnZyc3R1eHh3eXt4ent5e3h1dnl4dXh8eX18fXt8fXt5eX1+
-f4R8fHt7fHh4enl7e3h4eXd3eXp8e3l5enx7eHt7ent5eXh5d3Z1c3Z1dnZ0c3N2
-c3R2cm9vb3FxcHByc3Ryb3B2eHd2dHV2eHl2c3V6eHh6eHh3c3N0c3V3dXNwdXl7
-fHd4dXV2eHd4d3Z4d3V0dnh1cnN1dXVzc3JzcXB0cnJycm5wcnRvcXJyc3BwcHFy
-cXB0c3NxcXFtbW9ycHJvbW5ubm5wb21tbmxxb3FvbmtpZmxtb2xra25sbW9rbWxr
-amdlYl9gX2JjZWFdWVlcW1taW2BjZmppZGJhYGBfXV5gZGhjY2JkaW1tbWtrbW5v
-cnR1dHJ0cm1oZmhnZWNiXl1dYmNnampoaGJpbGhjYWJiaGhjaWxsb2tsbGxtbm9r
-amllZGZsb3Bta2RoamxvcnV2cW5raWdkZGduc3RxcXBwb2xtcXV2c3Rxc21ramxw
-cXF1d3Vzc3F2e319fX5+fYKAhYiJiouLiIaIiYyNjYuRkY6PkJeYl5SSkpOWl5eY
-m5ybnZ+enqKmpKKhoqOnrKyrqaiqq6mssK6sq6usrq6urq6rsLa0sa+xs7ezs62x
-sqytr62urqypq6qqra+tqaWkp6aoqKempKanp6ispaOlpqKhpaWmo6Ogn6SgnpqY
-lIZ4cmloZmdmYmBgXl9kYmNjYWNhX19hZGdmZWFiYWVkZ2ZjZGZlZWFgYGNhYWBj
-Yl5eXl5iY2ZmZGFeYmJkZGhlZWFhYWJkY2JhX2BhY2BiYWRmZWBgYGFiY2BhZGFi
-Y2FiZGFgYWBgYGBiXVpdYV1eYGFhZGFiYV9dYWBdYGBeWVtaXFxdXl5eXF1bW11Z
-XF9dWlpbXl1cW1tbXF9eXV1cW15dXlpbW15ZWVpbWVpcXl5cXFtcXV5eWFhZW1tY
-V1laWlxaWVdXWFhZW11bXF1bWFxbW1lYVllWWFtaW1dXWFlZWVlYWlhWWFlXV1dY
-WVhYVlhXWFxgXVxdXl9eW1hYWltYW1xZWFxZW1xYWlpaWlxbXFhaXV1cXFtZWVlY
-WVpaWlpbW1dXWllbWltbWllbWVlaW11bXlpcXFtfXFxYW15XWFtfWVpYWVlaWllY
-WllWV1hYWVdYVldVWllXV1lXW19fXltWWVpZWFpaWVtbXF1eX11bW1paXmFfW1td
-W1tcXV5eXV1eXVxeXmJfWlpcV1ldXFxcXV1fXVpZWVtbXV1cW15cW1dWWVxbXl1a
-X19dXl5eXFhbW19eXV5eXF9jX15fXl9gXVxbXFtaW15dXmBgX15fYGFiXl1fYF9h
-YWFgX19dXl1fXVtgYmBfXVteX15bXF9fYGFgX2BfYGBhX11gX19cW2BdXmBiY2Bf
-X2BeYWNiY2JhYV5eYGNjZWRjYWVgYWFmY2RlZ2RmZWRqaWdoaGdlZmRlZmRjZGJi
-Xl5eYF5eX15hYmNiX3SnwtDa3+Tm6Onq6ux4dnN0dXRydXN1c25vcXV1d3Z0dHVz
-c3R0dXV3dnRvcnV3dHV2d3JycnJ0c3R2c3J1dXR1d3d1d3R0dHZ0cnN1dXZ4d3h4
-enp5d3Z3d3Z4fHp5eHp5fH9+e3l+e3x9gnx8f3x8fHh5eHh5eHp2dnd7fXt5eXl1
-d3t5eXp6d3h3dnZ3c3R0dnVzcXJzdXN0dXR0dHN2cXFwcXFucXFycHF3d3V3cnV1
-dXRzdHZ6eXd3eHV0cXBzdXl5d3Z6e3h1dHV3e3l1dHJ3dXV1dXNydXRwcXR3dXVx
-cW9yc3JycXFydXRybWxudXV1cXBzcnJzcnRxcnNxcG5sbW91cnNzcW5tbXB0cWts
-b21ubmxrbW5ua2loZmlsa21wbnBva2pnZV9eX19gYmBhW1phWltcXF5jY2RlYmNd
-XWNmYWFgYmRlZWdmZmZqcG5qam9wb21ucHJzc3JtbGlnZWhkYGJhXV5gY2pvbWZg
-YmhmZWZmZWZkZmptbWtpbW1rbnJzbmtobWttcG1raWRkaGptcHZ6eXNvbG1tamxt
-c3Jxcm9vc3NxcW9yc3VtbW1sam5vb25wc3V2dnt4d3R4e3uAf3x+gYSAhYaIiIaG
-hYeMjI6Mio6Tk5GTk5qZlZWWlpSVl5WanJyhoKKeoKSmpqSlqaipp6mmqKqsqqqu
-tLOxrq2xsa6ur7GzsbKzr7Cys7O2t7SvsLCtrK2tsKyrqKiprayqp6emqaqppKSk
-pqWjpqanp6impKWlpaSgoaKop6Cdm5ePhXpwZ2VlZGFkY2JgYWJiYmNlYmFgZWJi
-YmNkYmVgYWZiYmRmZWZjXmFjYWVjYmJiX19jZGBiZGNiaWVjZWVnZ2VmZ2NeXmBj
-Y2RiYmFhYV9iZWdkYWFfX19dXmJlZGRkYWBfX2BcXmBfXV1dWl1eY19cXWBfX19e
-Xl9fX2BeYGJfX15dW1xeX19eXFxYV1hZXF1eXF5dYF1eXF1fXl1fX19cWlxfYVxa
-WllaW1lVV1dbXV1eXV5eW1hVVFRbW1tbV1dZW1pZWFZZW11bWVhbW15cXFxcV1ha
-V1lYWVhaWltZV1dXWVlZWlxZWFVWV1hZWFlaWVdYWl9eWllbXF9bWVlbXV1YWlxb
-W1lYVVpXVVpbWVhYVlleX1tYWFhcWl9aXVxbW11WVVdYWltZWVxbWVdXVVlZXVpc
-XVtaW1tcWVlZWlZXXm5pWVpdXFtbW1lZW11aWlxYWF5YVlNWWVlZWFpaW1xcXV1Y
-WVlaWVlZWltZXVxbXVtbW1teXlxeXVxcXFtbW11fXV1dXl1aWVhaWFplYFxcXVlb
-XF1bWllXWFhZXl9fW1xcW1lcWltcXVtbXV5cXVxbXlxdX2JjYF9gXV1eYGBgXV5g
-X1paWV1cW15fXV1fX2BfX19eXWBiX15eX2FfXl9fYl1eXV1hYGFdXF9gXlxbXmBh
-Xl5cXl1eXl9eXVxcX19gYF5fX2BgYmNeYGBgYmJjYVxeYGBgZGViYWNiY2NkY2Nl
-ZWVkaGhoaWppaGpqamloaWdoZWRiYWFhXl9gYWNjYmFfYWBhdq7E0dnf5Obo6err
-7Hd3c3Ryc3JzcXV2cXRycXN0dHRycnNzcnJxcXJ0eXlzcXRzdHR2dnV1dnZ1dnV0
-dHR0cXRzc3V1d3RzdHt4d3R5d3h1dnd5eXl3ent5eXp7e3h3eXt8e3p9fnx+e3x7
-fHt6eHt9e3l5eHp4eHh3en16fHx3dXZ8fnl3eXl3dXZ3eXZ1dHR0c3V2dHN1dXR2
-dnRzdnJ2d3RwcHJyc3ZycXJzdXZ1dHVxdHh2dXV3eXN2dXd1dXZzdXR3fHd7fnp3
-d3d0cXN2d3JzdHZ1c3ZzdHh5d3d2dXR3dXR4dG5tb3J0dHVzb3Fyc3N0c3Jyc3Nw
-b3FucXNvcHFycXBxcHV0cW9tcXNua21ubWxvbXBsbHNua2pubW5sbm9ycW1qaWVj
-YmFfYF5eX15cXFxaXV5gYWNiYmBeXmBkZGZlYWFfYmFgX2Jka2tramlrbm1qa2ps
-cHN3c25nYWJlYl5aW1xgYWFpbHBqY2RjY2JlZGdxbGhsbmxsb2lqamxzdHBramtr
-b29sb2ZiYmltcnh3eXhwbXJxb2xrbnBvcnZ3dXJub3Jwbm9ub2toaG1vanB0c3J0
-dXiAe3p3dnl7e3x8fYWHhoWDgIKGiIyIiIaLjYyMi5GXlJWXmZiWmJmalJOUl5qb
-nJugn5ufo6WkoaGmqqqqqKiqrKuqrK6tr7Cura+xsbKytLOwr6+urK2wrrK2tLO2
-tK6rrq2rrKmqpqmsqaurq6WoqKako6alpqOjpKaop6Wnp6WjpKKjp6mjoZ+dm5OM
-e3FoZmNhYWRjYWNiYWNgYGFhXl9eXV5fYWVlZGNmY2BhZGRmY2BiYGNiYWJiYmJh
-Yl9gYmNkX1xhYF9iY2ZlZWNlZGNfYGFjY2RhZGBhY2NhYF9hY2RiY2JjYmBfXl9g
-XmRdYmBiYFtXWVtfYWBgXl5cXl1cX1teYGBfXl1dXl9fXltdXF5dXVxbX2FdWlhZ
-WlteW1tcXF5cWlpaWl9cXFpbXF5dXFlbX2FeW1pZWVVYWlxfWVhZWVxaWllbW1tb
-WFlbW1hZW11cXVpaXVpaWldYXFpcXVtZWVlYV1hZW11bWFhZW1tcXFlYWFlXWlhb
-Wl1cXFlYWlpaWllbXFxdW1pZW1lYWltcW11cWltXWFpYV1tcXF1dWldYWFlYWl5d
-WltYWlpbXltYWltdXVpaW1xaWFhbX1xZWVpaW11bWFpbWFdYW19dWVtdXVxdXlxa
-XVxaX15eX1xbXVpbXFxbXFpaWVpbWllYWVtcXVpaX11bWltXWlpaXlxZWlpbWVZb
-WFhaXVxdW1pbXF5dWVpZWV1cX1taWVlZWVpaXFhXVldaW1tbXFtaXV1aWFxcWVhc
-W1tYW11fYl1dYWFhYWBfXF5fX2BiXV5dX2BfX15bW15fX15bW15eXl9gXl9dXlxc
-XF9gYGJgX19eYF9gXV1eXFxaXV5cXV1fX2BgX2BiXl9fX15dYF9gYWJfXFxfY2Je
-XF1gYF9eXF9gYGFlYmNhYGNkZGNmY2dmZWNlZGdpZ2Zna2xpZ2ZmZGRlZWNfX2Nm
-YmJhYF5fXV9gYmN3rMbR2uDj4+fo6uvrc3Z1cXFvb3B0d3d3c3Fxc3N0c3NwcXBy
-cnFzdHN3d3R0bXV4eXx3dHR3d3Z3c3Vzcnd2cnVzdXZ2dnV2eHt4end5dnd2cnR6
-fH15d3l6fHl5d3h6fHd3eHV4eXp4e318fnh8eHp7fnp5eXp7d3d5enp5eHp4e3p3
-eXh6eXZ3d3h6dHRwc3N0dHV2d3Z2dXNydXd0cnNxcnVycHJydHN0c3d1eXd2dXZ1
-eXVycnV7eXV3eHd1dXNydnJ1d3h5fHp6d3d2d3Z5c3d5eHd1dnd2dnl6dnh4dnV2
-c3JycXBvcnFzdXV0dnFzcXFzcXNxcHFycHFvb25vdHBwcHBxc3BvbnBxbW5sbG5x
-bm5ra2xrbW1vbW5ta3BxcW5samppaWdlY2BhYmJhX11gXmBjY2RkYmBcZF5cY2Rj
-YmBhX2BfYV9dYmdpamlra21tbWhobXN4eHVxbmZiYGFfWFhZXF9fYWVma2heXF9g
-YGJjZ2hoaGpucG1saWluc3VxaWhra2lqbGloaGZpbXBydXZycG9wdHNwbnJvbnJx
-dHRybmttcXV1cG5wbmxqbm1tcHNzdHNydHl+gX1+fH18e3+DgoaKiYeFhISBiIyN
-iYuQjY+OkJSSk5GUmJaXl5OUk5KWlpeYmpqenp+fnqGhpKOmp6moqqyrqaipra+t
-rbCysq+0sbCusLGys7Gtrq6tr7SxrrCxsK2rqqutq6qsraipqqimpqakp6Who6Oj
-o6SlqKqppKeopKWlqKako6KfnZ2Zlo+Ed21nZGBfYGFgY2FiYV9hYWFmZF9fYGJh
-YGNiY2FhYWJhYmBgYF5iZGRiYWJkY2NiZGVlYV1dXFthYmFjZGVkZWFhYWRiZmJi
-Y2RgXmBeXF5fYV9fX19fYGNiYl9hX15cXl5fZGFfXltdWl5bW2BeXF5dW11cWlxb
-W1pbXl5eX2JeXlteXFxeWltcXV5eXl5gXltcWVlZWlxeXF9eXV5eX15eXGBgW1xf
-XVpdXldbW1paWVlbWFthXVxcWFZaW11cV1hYWVdZWFtcXVtcXVxbWldXWlldXFta
-WltbWFZXWVtaW1tdXFpbW1dYWFdVWVxcXF1bWllZWFVYV1lZVlhYWFpZWlpZWVlb
-WVdYW15ZWlpbWllZXFxbXVlXWFtbWlpZWlpaXFpcWVhYW1tZWVlZWlhXV1VXWFla
-WFtcXFtdWllaXFpXWFxaWVlaWVxfXV1dW1pbX1xaXFpZWVlXWVhYWVhaW1tfXV1Z
-WlpaW1pbXF9cW1dWV1pYWFlYW11eYF5eWFhbWl1aW11eXl1bXlxbWlhbWFdYWVlX
-WltfXVtaWllXWFpaW11eXV5eXFteXF9fX15cXV5jYmBfX19eXFtcXV5dX15cX2Bf
-Yl1gXlxdXVtdWlxcXF1fYWBgX19dXF1dXF5cXmBeX19iYWJdX15dXl9fXF1dXVlc
-XmBiYF9gXl5hXlxcXV9iYGBfYF9eYF5dXl9hYV5eX2FgXV5lZ2NgYWNlZGhoZGVn
-ZWRoaWpramVoaWtoZmVkZGJhYWFiYWNkZGRhYGJeXl5fZ3CqxtLb4OTh5uno6ut2
-dHRycnBvcnNydXd7dHJzc3R1dnNxcHJzc3NzcXBxdnh4d3V3d3V1cnV3dHV5dnN0
-dHVzcnJzdXV3dXV3dXZ4eHV1dXV3d3l6d3h3d3h2dnd3eXp7eXh3dnZ5end5eHl4
-enl5eHh3fHt6fn14eXp7e3x5eXl5eHp0bnl2dHd2dXR0dXJxcnJydHN0d3VxcHN0
-c3J2c3JzcnRycnBxdXZ2dXh3dHd2eHh1d3h2c3h4eHd3d3V1eXd2dXN3eHl6fXd4
-d3h2dnR1d3l6eXt3dHR2dXZzdXV0dnV1dXRzdXNxcHBvcG9wcW9wcXRzcnNwcXFu
-b29ub3Bvb250cXBucG9ucnNxb3FvcHBwbWtqbG5va2xramxxcG5sa2tramlpamhh
-YGJiY2BiYmBlZ2NlY2NiX1xdWVpgYWFgYF5dXmFhYF5jaWZjYGNpbGxoaGduc3h4
-c21lYWNhX1tdWVpdYGBiZGVfWlhZXmFhYWBnbG5wbWlra2dobXd4dnFtbGloaGpn
-YmVoam9wc3Nydnh2eHx4cmtwd3d5cm9zcW9vb3JvcnN0c29vb2prb3NzcHN0cnFz
-d3p7f36BgX9+hIOChoeIhoeEg4CGiYuNjY6Qj46Pk5aWk5OUlJWVmJ2XlpaXnZqb
-mZubn6Cio6GioaClpqqur6ysq6mrq62usri1srKxrq2ysrOys7Ourq6urquoqquq
-qaqqqamnqrGrqKamp6mpqqenqqeioKSmpqemqKelpaKjpKOlpKWkoZqampiUjIR3
-bGhkY2FfXWJkX15iYF1iYWRlZWJfYGNlZ2VjZWJiY2JiYmFgXmFlZGVjY2FjYGNi
-X2VjZWhjYmFiZGJiZGNkY2VkZ2JiZGBhYWJiYmRhX19eYWBiYmBfYWNlYWFgX2Fi
-ZGJfXVtgZWFiXF9gW1teYGFfXFtcXltYWF9fX11hYV9fWlxcXl9dXVxcX1xdW1xc
-XFteW1tZWlxeXl9dXF9fXV1cX2JhXFteXFtZWFtdXltYWl1fXFpbXl1cXFpYWVha
-VFlYW1lZWVtbWlhcXFxcX1xbW1xbW11aWltZWFdYWVpbWllXV1pcXFlaW11eXVdX
-WVZYW1paWFdXWVlYWlhYWFlcWllZWFhYWVdYWVdXWVhWVVlZWlhbW1tbWlxYVlpd
-XFtaWVdcXVpXWFhYW1lZWlhWV1tYWlpbWlhWWVpbXVtaWFdWWFZZWllbXF5cW1xc
-WVdZWFlaW1tYVFRXVlVXWFpeXFtYWVdZWllbXVtZW1lZXFpdXFpeXFxdWl9dWF5b
-WFpaWlxcXVpcXFxbW1pZU1VVVlhaWltbW15cWlleWllZWltgXFxYWFtYXV1dW1xa
-XFtgYWJfX15fYF1bW15eX19dXF9eYF9fYl1dW15dYGBeXlxeX19hXl1fYF5cXV5e
-XF5dX19eX11dXl9dWltcXV5dXVxcX19gX19fYGNiYF9cXV5eYV9iXlxgX19cYmJh
-YF1eXWBfX11dX15gYWNiYGNiYmNiZGRkZWRhZWdla2poaGhmaGVjZGFhYmNjYWFg
-YWJkY2BiYWBkbJ/G1Nzf4+Po6unr63Z1dXVycXFvc3N4d3R2dnZ2dnR0dHZ0dXV3
-eHl2c3BxdHVydHV1dHR0dHRyc3J1dnNxc3Z0cnR0dnd4d3l2dnd4dXR2dXl7eXd5
-fHx6eHl3d3t8e3l4eHZ5enp7eHd4eHh5fXp6eXZ4e3t9enh5eHZ4enl4eXx9dm5y
-dnZ3dnh1d3p0c3J0cnFzbnFybW5wcXJ0dHNwcnVwcXN0cW91dXV0dHV1dnZ5fX56
-dHZ2c3d5dnV6eHV0dXh7eXd5eHd3d3Z2d3V3dHR2eXp3d3V4eXV1dXNycnJ1dnV0
-c3R0cG5zc3JxbmtvcXFwc3Jxc3R0cW9wc3Bxb29tcXVzcnBxb21vcHNzcXBwb3Jz
-bmttcHFvbmxtcW9vbG5tbG1sa2toZ2ViYWBhYF9iYWNoaWhpYl5eW19eW1pZW1td
-Xl5gYWJgY2NjYWRiZGZtaWhpanBzd3Z0bmZjYGNeWl1kYmJjYWVnYFpTWFpfZ2Rf
-ZWpsbWtpa2pqaG9ydnl1cXBva2dqamtqZWJqcXZ1d3l4dnx/fHZ2e313dnFybm1s
-bG5yc3NxbXJycG9sa2xuc3N0dnh1dXV4d3l6fIB/f4CDhoSEhomIiIqOi4uLjIqN
-jo2Pj5GRkpSUlZWVlJSXm5qWkpWXm5ydnp6foKGjoqOjpKWoqquvrKmrqKqwr62z
-uLm0sLOysLGwsK+ys7OxsrGuqqumqaytqqamqqmnp6ioqaqppqWopKWnpKSjoqeq
-q6moqKmoo6KepaSio6Sim5yWl5eNgnVsZmNiY2VgYGBhZmFiZmJiY2NjX15jaGdi
-YWJiZGNhYWJmZGZmYmNjY2ZkZGFhY2JgYWJhYmJiX2FjYWFfXmBlZWRiY2BlZGBh
-ZGFkYmNhYGRlYWBgYmRiYl9gX1xbYWJgYV5gYmNgZGFjYF9fYGBfX19eX2BhYmFk
-X15gXl1eYV5dXWBfX2BcXVxbW11cW1pbW1xZWFtdWlpbW1peYF1cXV5gXl5cXVtc
-WlpcWV1dXl1dX1xcXFtfXFhaW1pcWVlcWVpbXltcWlpaX1tYWltbXF1aWFpZWFla
-WFlZXFpbXFdWV1dYWlxbXFtbW1lYW1tYW1laW11dXFtaWFhbWFpZWVhXWFpYV1dZ
-V1hZWltaWFhbV1dZWVpbXFtZW1pZWllaXF1aWllcYF1aW1pYWFpbWFpZWllaW1xa
-WVxZXFpaWVtZWltdXlZZWltcW1pZXFxaVldZWlpcWlpYWVhVVVdaXFtcWFZWVldZ
-V1dWXFxbXFtaXVxbXFxeX1lYWFtbW11bWV5hXV1ZW1lbXVxZWFdXWFlcWVpZWlpe
-Xl5bW15dXFtdW19cWVpaVVpbXFtXWllbXV5eWltdXF9hYF5bWVtcX2FdXWBeYGFh
-Xl5gYV5eXl5dXWBiYV1dX15gXV5dX1xcXV5fX1pZWlxdXF1eW1pcXl5fXlxbW2Be
-W1peYF5dXF1fXl1dX2FiXl9iYWJhZGJhXl5hY15fX19dXWBlZWFjY2hmYWRnY2Rh
-Y2ZhZmprbGppaW5nZ2RlZGRiYmJhY2FeYF5gYWNjYmFtncbT2+Dj5ejq6uvrdXBx
-dXZ1c3R0dHV3dXJxcXN2dHN0d3Z4dnV1eHd2d3N0dXVzc3V4dnl1dXN0cXJ4d3Ry
-dnV2dHZ3d3t9eHZ2dnR5d3h5eXt3d3p5eXl5enl8eXx7d3V4dnZ4d3h3d3l7eXl3
-eXh0dXl5dnd4d3h5eXh4eHl5eXl0cHR0dXZ1eHh6enh4dXRyc3FxcG9wcHFwcHZx
-cnJ0cXNycXV0cXN1dHV4d3Z1dnd2e3x7d3p4d3d7dnp7dnRydXZ1dnd4c3Z4eXp5
-dnZ4dnZzdnh4eXl1dXVzc3JxcXFyd3dydHNwbXB0dHR1dXBvcW90cG9zc3FwcHFy
-c3Z0cXRxcXF0cm1vb3Nvc3V1c3Rxb29ucXBvbWtrbW1tam5scG1raWtpaWZmYWFj
-YWBiZWZoaGlmZGVjXVtXVlVWVlRYWFldWl5eXV1cXmJhY2JjZ2pmZWNqbnVzc3Bs
-aWNjYFxdZWRlYWBkZWNgWFNaZGNhX2Vpa2xnZWlsbnFub3JycnFsbXJta29zcmtk
-Z21xcHJ2dnV3enV3eHl2dXFwbnBua21ra25xcXB0cG9ucHJ0cnF0dHRzdHR0c3N3
-d3h6f398fYCDhoWGiouHio2Ojo+JioqKi4yPkJCSmJSTk5SUkpSXmZyblpiXm52e
-nZ2eoaGlo6Onq6qtqqinpamtqK6xsrKztbe2trSzs7Gwrq+ztLe0squrrqupp6im
-qaurra6tqaWnqainqKaopKKnqqipqqutqairrKmmoqOkpaWjnqCgnZ6ZlIyEeG9l
-Z2RmZWJgZGNgYWJnZWZlZWZkY2VkZGJiZGNkYWFiY2NmZ2NiYGRlZV9hZGFkZ2Nf
-X11dXmJjY2FgY2RgX15hZGZjYmJjZGBiY2FhYmVlZGJiYmBfY2FgYGJgXVxgYWJg
-YVxeYWFiZWJkYmNiYmFfXVxgX2BgYGBfX2FfXl9gXFpaWltaXmBfXVtbWlpeYV9e
-WlpcWlpaWllbXltaXV5cWVteXlxXVlZaXmBdXF1bXFpcWlldXVxcW1tbX1xaW11b
-W1xeXVxbW11cXFpYWl1aWlxcW1paWVlcW1tZWFdZWlpcWllZWFhaXVlXWlhbWlhX
-WltbW1taXFlcXFtbXFxfWllXV1paWlhVV1ldWVpZXFpdXVxaWltbXl1aXFtYWVpb
-W1tbWl1fXF9bV1dUWFtYV1lVVVhcWlhaWltcWllYV1lYWlpbXFpbWFhZXFxZWlla
-WlhaXV5bWFlYW1laW1tdW1haWVhWVFpdVlZVWVxbXV5aV11cXF1dWFtbW11dX1la
-W1paWVdXWltcXFlXW1paWlpbWVpaXFtbWVtdXl5fW1tbYVxdXl1gX15eX11cXFtd
-YV5eWV9gXl1eW1pcYFxfYGBeXF5kYmJeXV5dXl1eYmBgXl1cXF5gYF9kYV5bXVxe
-X15dXV9bW1paXF5bXl1dXl9gXV1dXV1cXV1cXV1dW1tdWltdYGJfX2FhYmJiYl9g
-X2BfXV5dX2FhYV9hY2NjZWJjZWRjZWdoaGVkZmhoaGhoZmVlZWZlZmNfYF5gYmRf
-Y15eX19cX2qbxdHb3+Tn5+np6+t1cXJzdnl0c3V0dnNzdHh1dHh4d3R0dHJ1c3Ry
-eHt6fXd1dHVzdnZ8d3R0dXp3d3h4c3J1d3h2eHt4eHh3dXNydHh4dnx9eHh4enp2
-dXd4enx4eXd4eHp5dnV3eHh7eHp+f3x8enh3eXp7eHV1dnl2eHl5eHp3dXN0c3V0
-dHd4eXl2enp4dnVzdXFycHFxb3BxcW5vcG9vcG9tc3Z3dnBydnh4d3h2dHN0dXV1
-d3h0d3l5eHV0dXR4dXZ4eHd3eHh6fHh6enx5dnV3eHh4d3Z1cXJ0dXV0dHNydXN0
-c3NzcHBzc3VzcnFxb3FycnBzcnFwcHN1dnRzdHJ0cHBzcnV1cG5vcXJucG9zcXJ0
-cW5tcG9ub29tbG9tbW1tbWxpZmRlZGNiYGFkamppaG9rZmRfWllTVFNQU1ZZVVVV
-VllbWlxcXWFhYmJkZ2JlZ2xwdnZzbWhoYWBmZ2VmaGVgYGNkYVxXXmFlaGRkZWpn
-ZGNjaGprbm5vbW1tcXFva2drbWpqaWdvbmtvcXJ0dHZ0eXZzdXVybm1ubm1sbG90
-dHBwdXVwcXV1dXd1c3JzdnV0cnN2dHZ3ent+fn6Af4GDgIOHiYiFh4uNjIuNiYmP
-jo6PkpSVlZOSlZKRk5efnZ6gmpiVl5ycnJ6fn6OmpKWmqqilo6Glp6urraqwsa6u
-sLi0sLWzs7Cvs7Gxs7SxrK6srKqopKWlqaqsr6ypp6OhpaSmpKWlqamtrq2rqqao
-qqipqKSjqKmop6OfoJ+goJ2XkYd4bWZkZWNfX2FfXmFkZ2ZmZ2RjYmRiZGFjYGJk
-Y2NhZGJjZmVlZF9gYWFiZGNhZWNkZGNdXV1fYWNgYmJgYWBgXV1gYWFlYl9fZGRl
-Y2NjZWVjZGJgYGNfX2JiYl9gYl9hYGFfYGBdYmJhYmBhX2RjYWFhZGNjX19eX15f
-YGBfX1tXWVxgXF1cXVxaW1lYXFxcWVtcW1ldWlxcW1laWltbWlxcYV9ZW1pYWVle
-XmBeXV5cWlpaW1xdW1xeXVxfXVpaW1tfYF9bW1lbWlpdXFxZWFhdWlpbXFpZWl1Z
-WlpXWltaW1tbW1dWWFlYWVhWV1pbXFtWVlZaWVhWWVtaXF5bWllZVlhYWVtbWVhW
-V1hZWVxaWltbWVtaXFtbXVtbXFpZXVxaXV1bXFtaWltcXVtbWltYVlVWVldYV1xc
-WV1bXF5ZWVpYWVlbW1lbWVlXXWBcWltYVlhaWVpbXFpbW1pZW1xcXF9cV1laWl9c
-WVpXV11eWFZVV1haWlhaWlpZWl1bWlZYWVpYWldWWFpcWltfXl5cWltaVlleW1ta
-XmBgX1xdXVlZXFxdX11cW1pdX2BeXV1gYF9dWlxeXVxbXl1fYF9dXFtdYF9dX15e
-YF9gX15gYV5dX15dXF9dXV9dXFxcWlpZWFpcXlpcX11fXVlbXV1fXlxcXltaXFpZ
-WltbXVtZXFxbWl1eX2BhYF1eYV5cXlxcX2FgXl1gY2RkYGFkaGZhY2JfYWRoZmdn
-ZmRlamdsamhoaGVkY2NhY2BgXlxdYmBhYV5cW1xeZp3E0Nrg5Obn6urq6nV2dHN1
-dHJydHNxcnF1d3l4eHh4dnNxcnVydnl7fXh1d3Nyc3Jyc3V2dnZ0dXl4dnV1dXd0
-d3VxdHd3dnh5d3Z1dXZ5enl3eHh5eHV3eXx9fHp5e3p6e3p2dXh3d3l6fHx7fHh5
-eHl5eHh7eHZ0dnZ3dnZ7fHp2dXN0dXRycnZ2dHV2dHl3dnJ1dXN0c3Jybm5xc3Jw
-b2pscHBvb29xdnBzdHd4dnt4dHV6enl5eHdzdHp1dHh2eHd1eHh3eHl7eHh5dnt9
-eXp4dXd6dXV4eHh2c3JzdXZ2dnV4dXRwc3R1cXFzdHJyc3Fzc3Bzc3Rzb3FydHJy
-cHFxcnJycHByc3Bxcm5vc3FubW1vbnFvcG9wcG9ub29ua21vcW9xbGVqZ2hlYmBg
-YmdpbGxrbGlpZV5cWFZSUlBRUFRWVFBTV1pYXF9gXWJhYWViYWVqbXFzdXJua2pq
-ZWhraGdjZmViY2JfWVpfY2NmZmVlZmZgYGJmZWloZ2tqb3N1dnFramxqbWdnbXNy
-c3FvcHNzcXJycXJ2dnVva25ubGxxc3Vyb25ucHBxdHV2dHV0cXJ0eHZ2ent4eH1+
-fnp+goKBf35+foGEhoqKiomLjY6OkZCOjo6RlJOUl5WSlpWWlpeYmp2XlZOWmZ2b
-m52en6Ohpaiopaempqepqa6urqqsqqqxsrCwsbGysrCtrKyvrKysqaaop6WkqK2v
-qKiopaahoKGmp6mprKmmqamsqKenqqqnoqKkpauqpqOjpaWhm56amJmUinxuZ2Fj
-ZmFiYWRjZGFiZGVjYmJmZWNiYmNgX19fX2BiY2FdY2RkY2NiYGBjYmRhYWRiYmFh
-X15jZWNjYmBiYGFnY2NiYF9gX2BlZWRkYmFgX2JkZmVlY2VjZWJiYGBfYGBfXV1f
-YV9iYWJfYF9fYF1gYGJfX2BhYF9gX19iYGBcWVxbXV1dYF1cW1pdXl5aXFtaWl5d
-XFlYWltaWltaWVhbXFxeX11dW15dX2BfXlxbW1laWllZW1pZW1pbWlpbWl1fW11d
-X19eXV1cW1tcWllXWFhYWl1bWVpYWVpYVFZWVVdbWFpXVlpWVlZYV1dYWlxcWlhZ
-WVtaW1paW1xeW1paWFxcWVlZW1xdXFxcWVdcW1tdXFxdXVxcW1pbW1xeXFxcXl1f
-Xl5dV1dUVllZWltaW1tYWllZW1ldWltZWVxZWFtcXF9cW1xbWVpbXmJeXFpcWlla
-W1dWV1laWVhYWVhZWFdbXVxfX1xbWllZV1dXWFlYWFpbXVhXXF1YWlpVWFtdW1lb
-XVpaW1dYW1tZV1pbW1tYV1laWVlbXVpaXV1cXVxaW1xbXFxcXFxcXFpeXmBdXl9d
-Xl9cW1lbXGBeW1tbXVtaW11cXV9dXV5bXFxbXV9eX11dXl1cWmFeXGBeWlxdW1xZ
-WVtcXFxcX11dXVtbWVxeYF1dXl5eXFtcXF5eXF1cXVtZWF1bXl9cXF5fXV9eXV5h
-YGFhXV5hZGNiYWFkZWJjZWJiYmVlYl5gY2RoaGZlZ2ZkY2VjZGJiYF9fYF5iZF5f
-X1pZWltinsTQ2eDj5ujp6uvsdXZ2dHR0dHVycnF1eHZ0cnN0dXV2dnd4eHd5eXl4
-d3R1dXN2dXVydHJ0c3FzdnV2dnZ0dHZ1dXV2d3d7eXp4d3d2dnd3dnh4dnZ1dXl7
-enp9fnx9eHh7e3p7eHp4enh1fH18fHx8e3t7e3p6eXl4dnJ1dXh6e3l5eXZ1dHRz
-dXR3d3l4dnZ1dnl2dHN2dHVycW5tcXNxb3R1c3RzcHBva3NxdHd7enp5dnZ2d3l4
-d3d2d3Z5eXl5eHd3dXh2eXd2dXNydXZ3eXh2eHZ4dnd6eXh5dnV5dHFzdXd1dnRy
-c3V0c3N0dHR2d3RzdXNzcnJydXN0dW9wcnBwcG9xb25vcnBxb21ub25tbG9tbG9v
-bm5ubXFwbmxtbm1sbG1tbmlnZWVkZ2dqa2tyc3JuaGhhXV1WVE9QT1FPUE1PUVJW
-WVlbXmBiXmBhYF9hZ2hnaWpqam1ubGpsaGVnZWZoZ2ZiYl5cX2FhXl9lbmpnYWFj
-YmJjYmdoaGtwc3BtaWhsa2tsaGlub25wc3JxcG9vbm9xenl1bGxwbnFweHd6dnJv
-cGxrbmtucXN0cG1ycHFydHt/gYCAe3t3en18f4CAgoeCgIKFhYSEiYiPkZGPj4+S
-kZCQlJiXlJeZmpmbm5udnp2dnp+dnZ6foJ6en6Ggo6Wko6Woqaanq6yqq66urbC0
-s7K0tay0r7Cvp6inqaqopKaqqKmurKqlpqOppqGkqqqoqqqpqqioqKikp6qrqKmn
-paimp6OjpKOjoaKamZiXl5KKf29qZ2ZlZmZpZWNjYGFhZWZkZmVhYGBfXWJgYmNj
-YmNlY2VfX2NmZmVnYmNlZGNkYmJmYWRnY2VlY2JeYGJmYWBmY2FhYWJhX2BjYmFk
-Y2JhY2FfY2NiYmNiYF9iX2BgYGFgXV5dYV9gXl5iYF5dXl9gX1tfY2RhXF5eXl5d
-W1teXlxaW19gYV5eXl5eXV9bXVtZW1tcWVhbXVxdXF9bXlxdX1xcXFtdXWBeW1xf
-XFpaXFxaXF1cWVtcW1taW1tdXl1eXFpaWltdXV5cXV1bXV9cWFpYWVtbXFlYV1hZ
-WVNXV1ZZW1laWVtZVlVZWVtbW1xdX1tcW1pdW1tcWVhZWltcWlxYVlhYVVhaXFhY
-W15aWVVXW1xbWlpcWldZXGBeX15dXl1dWVpZWFdWWlxZWVdXWFpcW1hWWFhYVlhZ
-WVpdX1xcX1xdXF9cWltbXF5bWVdaXFxbXVtdXFpbWFlXV1haW1lcXGBfXl1aXlta
-V1dYWVlcXV1aWFlWW1xcXltdW1taWltbWVhaX15dWldYV1hZXF5bW1xbXFlcXlpY
-XF1ZWFpZXF1dXV1dXFxZWVpcXl5fX11cXl9cWlxcXF5eW1tfYFtdXlxdXVxeYl5d
-Xl9gXVpeYGFeXFtaXF5fXF1cXFtbXFpaW1xcXFldW1teYFxbXFxeXlxbX2FhX2Bi
-Yl5eYGBfX11cX11eX19fXl5hXF9gYmNhYGFhX2BgYWJkZmNjZGNiYmJmZ2dnY2Rj
-ZWZoaGhoZmRnZWRkYl9iYF5eYGJfX15eX1teX2Kaw9DZ3uLl6Orq6+x1eXl4d3Z3
-dHR3eHl3c3JycnJxc3R2d3t6eXl0d3h6fXR1d3V3dHd2dXJ0dHNyc3Rzd3V2d3V2
-fHl1dnh5e4B6eHl3dXZ3dXd2dnd3eX57eHt/f3p5eHh4enp8eHt4d3l7en1+fH18
-fHp4eXx5fXt5eXd2dXd6e3h3eHZ4eXZzdHZ2dnRzc3Z8eXl1dXR1dHBxcW9tbm53
-eHV1dHZ0dXV1dnZ4eHp9fX13dXdzd3d0d3VzeXp5eXh5eHN3eHh2d3d2dXl2dnRz
-c3N0dHl0cnZ3eHZ0dXNycXN1dHR1d3d3dnR1dW9wcm5wcW9yc3R0cXJ2c3F0dXNy
-cnJwb3Nwb25vb29vcW9sb3BubW1tb3NwcXBvb25vbm5tamltb2tqamppZWNkZWdo
-bG9xb21sZmJbWVNOTUpQU1BOTlRVWlhbW1xcX2BeW1lbXWFkY2RkYmFna2dkZGds
-bGllZ2hnY2FiYmVlYVxeY2loZ2RiZ2hsZmVkZ2ZpbnFtaGdnZmVqcG1vbWppcHRx
-cm5oamtsb3JxcGxtb3Bscnp6e3Z1dHJzcXFvcHN1cW9tbnJ0dHd5en+DgIB+ent+
-fXl7foCBgoOHiIWHhYmLiY2KiY2OjI2PkJWYlpaUkpaWmpmenpuXnaGio6WgoaGi
-n5+fnZ+hoaGjp6eoqampq6qqrK6sr62ytLKvqqmpqaqoqKquq6qnqKurp6WlpaWl
-pqappaeqrKypqqemqKmoqKirqKmkpKyspqilpKSmpaCgoZ2al5SXk4x8bmhmY2Zm
-ZmZjYmBgXmBjZWRmZ2VhYWFjY2BfXmJmZWRjY2FgY2RlZWZmZWJlZGRlYV9gZ2Vk
-ZWVlZWJiZGNjYGJhYmJjY2FgYV5dZGJhYF5eXV1jX11eXmFkYmNgYGNjYWRhYF5d
-XmFgYGFhYGJhYF5dXl5fXVxcWVlbXV1bXGFgXV9dX19hYlxZW15gXVxdXlxcXFxa
-WVdaXFxdW1ldXV9gXltcX2BhXlxZWl1dV1ZZXF1eYFxbXV1fXVxaWlhaW1pcW1pb
-WVtZXFtfXl9cXFpbW11cW11cXV1aWFhXXFhWVVZaWFlbWlhYWVpZWFhYW11aWVpZ
-XFxaWVlZWFldXF1eXVxZV1dWVlhYWFlaWVZaWVdVWFhXWFhZWVtaW1lZXFxcXFpa
-WlpYWVdbWltYVVdWVlhbWVtaWllZWVtYXF1bW1haXVtbW1tbXFlbXFpXWVpZWlpe
-XV5eXFhaWltbWllcXFxeXFpcXF1bWllZWFdVWFlaWllYV1pYW1lcX1xeXVlXWVhZ
-WVtcX1xbWlpaW11eXFtbWVpcYF1dW1tbXV5dXF1bW1tdXV5dXFxbWlxdXmFeW1lc
-XltfXl1fYF9aXF9fXl9gXV5cX2BhYV9gYGBgX1tcYF1dXWBgXFxcW11eX1xcXFte
-Xl1eXVlbXFxbW11ZXFpcW11dX2FhX1xfYGBdYmRlY2BcXF1hX2BfYGJhYGNiY2Ff
-YWJgYWJgZWNiZGNkY2ZnZGVkZGRmZmdkZWVkZGdnZWNiZGRiYmFhXmBcXlxeXltd
-YGFhaKTF0Nne4+Xo6Ovr6nZ4e3d3d3d0c3J1dnRycXFzcW9xcnFzdHd1dXR1dnh5
-enJxbnJ4dHd3d3l4dHV0dnZ0cnJ0dHd3eXl3d3Z4e3p6enp3enl3dnl4enx5e3t8
-fn58ent8fXl3fHl6eXl3eXV0d3l4eHZ5dnZ8fXx5ent6enh3eHp5eHp4eXl0d3Z2
-cXRxdHR2dnV3dHR1dXV1d3dvbWxrbXJ0dXR0dHV5fHl4enl7e3x5eHl1dHJ0dXl5
-dnR1dnh2eHd2dXZycnR0dXV0c3NxcXJzcnV3dnR0c3J1dnh5d3V0cXNycnJzdHN0
-dHF2dnN0c3VxcnBxdHFzcnVycXNyc3NycXJzcHFxcHFub3J1cXN0c3JwcG5tbG5w
-cG9vbmttbGttbmxubXBsa2ZlZWFiZWhqaGlra2lnYltVUlJQS0xLTlFRUVddWlpX
-WltbW1tbW11hY2FjYmVnaGlpZ2ZnamlqaWlnZmVkYmRmamhjYWJjaGVmZmdoaWxo
-aGlobXRzcW1nZ2lqa25ubXFvam1ybWprZGNoaWptb2tscHBwcHNvdXZ0dXV3dnJx
-cXFzcG5ub3B0dnd5fH17fHZ4eX17d3yEf4CBf4F/gIKEiISIiImLjIuIjI2NkI+M
-kJOSk5eamZ2bnZqXmJ2ioqKeoaKgnqCenJuio6GkpKanp6Omp6qpqKuqra2qra2p
-rKyqq6ioq6qpqKemqaypqqeopKaoqqWkpairqqqsq6ipp6alo6Wjoqeuq6mnpqWi
-oaOnp6iorKmkoJ2Yl5mUin11bWhjZGRiY2RiYF5iZWVnZ2VmY2JgY2JhXmJgY2Nh
-YmBhYGBjYGRlZGVnZWNkZWRnZ2dlZGRkY2JiZWJiYV9hY2FdX2FfX2JdXlteY2Nh
-YGJiYV1eXFxfYWJgXmFiXmBhYV5hX2FfXl9fYGFhYF9eX1xfXl9gYGJfXl5eXV9f
-YF1cXlpcX2FlXFxbXV9fXFtaXF1fYV1bWFtgX1tcXFtaXl9gXl5dXVxbW2FdWVtY
-VllcXVtcXWJgWVlYWVtcXFdeXVxaWVdZW1pZXFxdXVxaW1paWltcWVxZXF9fXllX
-X1xaWVlZWVlZWlpbWVxbWVlaW1paWVhWWF5cWVpZWFlXWFlXWlZYV1ZXVldWWFZX
-W11fYVpaVllZWVdZW1pbXFlWV1RWWlhWVlVYWVZaW1teW1dbWVhXWFpaWltYWlxb
-W1paWVpbYFpaWltcWltaW19cW1hXWltaXF1eX1tXVlhbWVlZWVpZXFpYWVtbW1lY
-V1VYV1lYWFhbXVtcWlhaW1tdXl9bW1pbWlxbXFpYV1hXWF1eXltZXFlYVVZXWllb
-Xl5cWlpbWFxeXl1fXl5dXFtdXV5fXl1dXVtbXmFhYWFfXV1fX2BeXVtdXl5dX11f
-X2JeW1pcXVpcXFxbW1xcW1tcXVtdXV1aWFxaXV1bW1taWltZWVdbXlxeXl9iXlxe
-X15hX2FjX1thYWBgY2BfYmNjY2RhYWNhYl5dXl9hYWNiYmNnZWVoZGNnZWRlaGRi
-YmFjZWdnZGZlY2NiYGBgYV5aW1lbXWFiX19mpcbR2t/j5ujo6uvrdXZ5dnd6eHZz
-cXJ1d3R0dHNwb3JydXN0eHh2eHh2ent3d3N7enV0dXV4dnNxcXJzdXZ1dHF0eHZ1
-dXN2e3t6end5d3d4fXt5ent6eHp8eXx6fHx9fnt6fXp6enx5dXt4d3Z0eHl5eHh4
-d3p9fXh4fXp4eXt5eHl6eH58eHp2eHh4cnN0c3V1cXN0dHN0dnV1cnJwcHN6cHBw
-dXR4dnd4eHd6d3Z6eHh3d3hzdXd3eHl5d3RycnNzc3V1dHR0dXRzdXl0c3RydHN1
-enh1dXl3dnV2d3Z2dnNydnVzdXNzdHZzdXN1dXN0dXZzcG9xcHN1dHR0cHFyc3Jy
-cnBycHBzb2xrcHRzcnFzcnFvbW1sbWtubm1tbW5sa2xsa2hpaWdpaWlkZWhlZ2Vm
-aW5pZGNgXVlUT01NTlBPUE5RU1ZUU1dbXltcXFxeX2BhX2JiY2dmZ2hlbG9xb21u
-bmljYmRkZGRlYmBiY2hlY2VpbWxpa2lmam5yc3Fua21wdHJxbnFycG1ra2loaWdn
-aWRlaGppaWhucnFvb3R3dXJzc3R3dndydHJubGxxcHJ2eHuAfHx9fHd8f3l4en2A
-f4ODhIB+goKDhIWGhYqKiYiJjJCOko+RkJCVm5mZnZuVlZWUnJ6fnZ+enZ2goKKd
-oKGjpKahpKalo6OnqKenq6urra+qqq2rrKqrrauqqKmrqKinpqioqa6qqKSgpKWi
-qKurqKesq6ikpqanpKOlpqipq6ioqaakpaSmpaeno6GgnJiZmZSKgHRqa2hmZGFh
-X19hY2BhY2NlY2BgYWNiYWBcXmhiYWJhYWBhYWJlY2VlZGJhYGBjYGFjYmJfYmFj
-Y2NiYl9hZ2VhXmBhYGJkZV9fYF1fYGJfX1xcXl1gX15gYF9eXl5gYGFdX19mYV5e
-Xl1eX11eX19eXl9fXV9gYGBdXFtbX19eYF9eYFlaXV5eYFpYWVpdWllcXV5eYF5b
-WVxgXFteXFtcXV9fXVxdXV1eXlxYWV1ZWlxeW1tgX11cWVpbWlpdWFdWWFxZWVlZ
-WlpeXV1fXFpbWllXV1hbWFxZXFxbWVpaXFpYWllbW1pYXVpbWlpaWllbWllXV1xZ
-WlpdW1tXVlZTU1ZZVldYVlVWVlhYWFZZWVleXVlXVlVVWllbWFRVVVdXW1xZWVdW
-WFtbXFlYWVtZW1tYWldWWVpZW1taWFlbWlVZWFhbW1tbXFtcWVlZXF1aWlhYVldW
-WVlYWFlWVldZW1tYVllaW1pXV1dWWFlaW1hZWVpaXFlbW1tbW1haWllYWVxfYF5c
-XF1eXlxcWlhYW15cWlpcXl1bXlpXWFtZXGBeW1paW1tZXV9eXF9bXF1dXmBeXl5g
-XVlZXV5fXlxeXlxdYV5fYV5cW19fX15cWltaW1pbYF5hXVpdX1peYF9dYltZW1te
-XV5eXVtdWVlXW1tbW1xcX11dX15dXl5fYGBiYV5iYV9gYWFgYmBfYGBfYV9hY11e
-XmBiYGJlZGNiZGNjZmVmZmVoZ2VkZ2VkZGdlZmhoZmRiY2FgYF9eXlxaXl1gYV9e
-YGiixtLa4OPm5+np6up4dHh1dHZ2eXVyc3V2dnNzc3ZzcHRydHl6enh3eHd2dXp3
-dnR3dXVzdnZzc3JxdXd2d3d2dHN1eXp2d3p6fHd4end2dnh3eXt5d3h3dnN1d3l7
-e3t5eXh5fXt6eHl3d3h1dXh4dnV5eHd3eXp6e3p5fHp5d3d5f313d3d5enl3dnJw
-dXR1dHd2dHJwbXFwcXR4dnZ3dHBxb29vcXR0b3J2c3JyeX14enZ1d3Z1dXd4eHVx
-c3BxcHJ1dXZ2dXZzcnFxc3R0dHV3dXZ3enV4eHt3dXR4d3x2dnZzc3V3d3ZzcXR3
-c3JzdnJxc3NxcHF0c3FxcnJzc3Nyc25tbG1rcG9tbW5wcHR0dHFubW5samxxc29v
-bmttcG9ubGxraGppZmpqamloZWhjZWhnZWNjZGNdV1NRTU1QTk9PU1NRUFJWWVxd
-XVphYF5eYWJkZWNkZGVpaGhrbnJyb2xpaWRiZ2hlY19dX11kZGJkY2RobG1sZmdt
-dHR2cWhncHh1c3V2c3Nvbm1rZ2hsbGlraWtrZ2hra290dHVxdHZ2d3l7eXZ4d3Zx
-cHBvc3RxcG94e3x8fHx6enx3eH9/gH+AgH5+fXyAiIKFhoiKi4aHi4qLj5GRlZKQ
-lZecmZeYlZWVlJabnZ2cnp2cnp6bmp6goaKkpqiloaKjpaipqaipqqmrrKqpq6eo
-p6ioqqqqqqikp6qqpqiqqKikoKGmqK+sqqqqq6mlpqWlp6mppKSnqaaop6mnp6qm
-paeoqKKhnqCfnpuYkoqAdW1nZWFhYGJhYWJgYmFhY19eXl9gYGJnZ29lYmBfYV9h
-YWBhYGNkY2RkZGJiX2FgZGVjY2NgZWFkZ2VkY19iZGVlZGRmX2NjYmJiX11gYmFf
-YGFgX11dYF5fX2FdXF9hYmFhY2BfXmFhYV1fXV1fYGFfXl5dW11dXV1fXV5eXl5h
-YV9fXl1cWlxdW1hXWV1bXFlcXVtdXl1dXl5cXlxbWlpZXF9dXVlZWlpZWFtcXFta
-W1xfYVxYXF5bWVhaWlpYWVpZXFtaWFldXFxeXVxdXFtbWFlaXVxZV1lbXFxZWldX
-WFpaWlhXWFZbXVpbWlpZWllcWVlZXFhXWVhYXFtWVFJUV1deVVVYVVVYW1tcWVdc
-XF1cXlpbWVZXWFhZV1ZYXFxdXVxaXlxaW1lYWFpbWVlaWltaWVtYWVhZWFlaW1ZY
-WltYWVlaWV5cW11dW1leXVxaWlhYVldVW1lYV1hXV1taWldXV1ZbXlxcWVZWV1dW
-VFVYWFtbWlhYV1dcXVxXWFxYVlpcXVxcXF1dX19eXVxZXFpcXlxeYF5fW1lWVlpb
-XV1cW1xbW1tdXFpbXV9eW11eX15cXV1gXV1eX2BcXl9fX19hYVxeXl1bXV1cXV1c
-X1xYW19fXVtbXF1bW1pdW11dXFxeYGBdXV5eXFxdXFtaWFpcW1tdXl1dXmBeXGBd
-YWFgYGJiYGFhX15hXl9fYGFhYGFlZGJgYF5gYmNkYmRjZWJiYWNmaGNjY2RmZGZo
-aGdnZGNlY2JjZF9hXVxdXV9eW15gXF1gbJzE0dnf4+bn6enr6nV1c3R2d3Z2end3
-dXJzc3R1dXV0c3V3dXp2eHFydHZyc3R4d3N1dnh3dXVzc3Jzc3BydHZ1dXV2dnZ4
-end4fHZ8e3l4eXd3d3t2dXd4eXp6d3d2eHd5enp8fXt8fHx6e3h4eXd2eHh7fX16
-eXl3dXh3enx6eXt4d317eHd4eHh5dXZ0dHV1dXRzcHJzb29xcm9ubnBzc3Fvc3By
-cXJ0cnNybnJzc3V4dXZ2dXNycnd2dnVxdnZzcnR4dnJ0d3d0dHN0dHh7e3d4eHh2
-dnZ4dnZ4eHh4d3R5d3Z3d3d2cW90dHR2dXJ0cnJzcnN2d3V0cnJyc3RxcnFvbW5t
-bnN0dnVxcXJwcXFubG9xcnFtbWxubm1ubmpqbW5qa2tnZ2dqaGhlaWVlY2NkZ2Rh
-X2FgXl1XUk9MTEtOUFFTVlNUV1pcXF5dX2JiYF1hYmViY2ZlZ2dlZmpucnFva2po
-Z2lsamVlZ2RiYmRiYmFjZmxua2dna3J4fXJraW1zd3Nxc3FxbGtwcW5oa25vc3Rz
-cG1pam9zdXdxcHB1eHZ3e3d5dHR2eHVyc3Z1c3BucHR2enx8fX18d3Z7fHd7gYeA
-fnt4eXyAgoaFhIeJjIuNhoeMkJSRkpWYmZeXmJaQkpOUlpWZnJqanKKhop6bnJ+c
-n6GipaKgoqOmqqempaWpp6mpp6alpKSjpKOhpqurqaqmqq6vqKakpKOjqKWlqaim
-pqmlpqWlqamqra6tp6inqaepqa2tqKanpKOhpZ+enZmblZOTkIJ3aWZkZmhmY2Vg
-ZGFeXmFfYF1gX15fYWNhY2RiZWFgY2JhXl9eX2FhX19jZGJgYWFiYmNlY2RoZWRj
-Y2JkYWFiZWVpZWNlZGNiYl9fXl1dXGFeXl1fX11gYV5fYl9gYV9hYmFkY19gYF9f
-XF1fYWBgYF5gXmBaW19iX15eXFteXF5hXV5dXV1gX1xZWVpZWlteW1pZW1lcXFxb
-W1lYW15gXVtcYGBeWltgX15bXlpcXlpaW19eXltbW1tcXV5cXmJeWltYWVdZXF1d
-X1xbXV5cX1taXFldW1tXWFpcW19cWlZWXV1ZVlZbWVhZWVlYWVhYWVlZW1ZXW1lW
-WVdXWFdUU1ZaV1lZWldYVlhYW1xcW1dWWFlfXVpbW1pXVlhXW15aXV1bW1pbXFta
-WVlaWFhXXFlaWVpbW1pbXFlXWFxaWVhXXFtbV1hcXFldYF1YW1tbXF1fXVhYV1RW
-WFhUVFdYXl1YWlZYV1ZaXV5eWVlbWlhaWFteW1tbWllXW1pbWFlZW1taWVlZV1ZZ
-WllbWltbWVpYWFpbW15dYV9cW1lZWVVYWFdXW1xaXmBeXFxbXF1dXl1eXlxdXlxb
-W1pcXl9eXFtZW1xfWlxbXV9eX15cX19dX15cXV1cW1paWlxeXFteXl9dXl9gX15c
-XF1cWl1dXF1dXF5eXF1cXF1cXmBdXmFfXFxcYWBgXmBeYV1fYGJgXl1gYGRjY2Jh
-YGFhYWBhYWJjYmFiY2ZnZ2hnZmdoZ2hnaWZlYmFmZWRjYV5fXmBhYl5dWltaXF5s
-msTR29/k5ujp6uzqdHd1cXJzc3d6eXd1dnR1c3Jyc3R0d3d2eXNxb3J3dXR2cnV2
-dHR2eXh1dnR3dXN0dHF0d3V0dXR0eXt5dXZ4dXd6enl6eHR3eXZ2dHd6eXl6dnJ1
-eHt+e3t/fnt/fnx9fnh3d3Z5eXp8fHl6eXl3c3d5e319d3d7e3x7d3p7eXh8eXh5
-dnR2dXNxcnN0cXBwc3BsbHB0dXZzbXJxcHJwbG1ua3J0c3R2d3d2dnV0dXd1eXlz
-cnZ3d3Z2dnZ1dHR1dXl6e3x6dnh2dnp1dHZ1dXV2eXV1cnV2eHZ1dXJzdXZ0c3d4
-dXRzcnFzdnZ1d3d2dnh1c3V1cW90dHFycnZxc21tbnBucHBvcG5ubG5wbWpsbGtq
-ampsbW5qa2xtZ2VkZmRjY2BiZGRnZGBgXl9eWlVRUFFQUVFRUFVZW1xZV1pZXV5f
-YFxgYmFjZWdlZ2ZhYGRmb3V1cGpsa2drbGpoZGxpZWRkX2BkZGZsbGloZGhydXJ0
-cWlsc3V3c3J1c2djaW9wbG1qbm5rcXhwa2xvc3Z2dG5rb3N1eH18dnF0d3V2cnJ0
-cnFwc3FxcHN3eXp8gX9+eXt+f3p8f3x7eXp8gYODhIKDjIuNj46Lio6QkJGSk5WY
-lpSWlI+OkpaVmZiYnZeYmJianp6em5yfnp6dm6ChpKWoqqehpqanqqypqqampqSi
-o6Kkpqemp6SmpqmooJ6gp6mooqGipKSkqKSlpqWioqOkqqupqqypqKyrqamopqWh
-n5ubnpqYmZyYkpGOhHhsZWJiYmJlZmViYl9eX19hYWFeYGFfYmBgYWdjXl9hX2Bg
-YWBgYGBkZWNkY2JfY2JiYWBhYmJlZWJhYGFmY2BeZGRjZGVkZGNhY2JdXFxjX19j
-Xl9fXl1eXmFiX2JhYF9gYWBiYF9iX19aW1xgX15eX15eX11cX15bXFxZW2BfX19e
-XVpbXF1cXl5cW1laXV5dW1hXXGBfXFpaXFhaXl1eX15eXFpdXl9fW1pcXV1ZW1dY
-W11cW1taXF1eXFlaX2NdWFdaXFhZXF1eX11eW1tbXFxbWltbW11eXVpbXl5dXFdZ
-WVtXVllYV1ZXWVhYWFlaWltaWFdXV1lZW1tZWlVWV1VXWFdYWllYVVdXV1lbXl5b
-WVlaWl1eW1haW19bV1lZW1pcXltaWltcW1paW1pbWVpaW1xcXFlbW1pZWlxeW1lc
-WlpaW1paWFhXWlpYWVhaXF1ZW1taWVdXWVdYWFdWV1dZWllbWVxdW1lYWVpbWFZW
-VVhbWVhXW11cXltbWVdYWVpXWFlWWFpYWltbWVxdWlhZWFZaWlxdXFxcXFtbWFtc
-W1pYW1hZXV1cWVlZWl1cX11bXV5fYF1bXWBfW11gXlpdW11gX11eX2BeXlxcXV1c
-XlxbWlpbXVtbWlpaW11fXF5eXVxfXVpbX19fX11eXmBiXl5eXFpbW1pbXV1cXl9e
-Xl5bXV5gYV9eYGFdX19gXl5eYWRiX19hX2BgYWBiYF9mY2RkZmRnZmZnaWpoZ2Zm
-Y2RkYmJiZWJhYGNjYF9fY2ReXVxdYX2bw9Hb3+Tn6Onq6+t2d3V1d3V4endzdHZ3
-d3d2dHNzd3V1eHZ4cG9zdHR0dXV0dHdzdHZ2eHd2dnd3dXR2dnd4eHNzd3Z4end2
-dnZ4d3d5eHh6e3l2dnh4dnh5eXp2enp7e3t9fXt6enx7fHl9fnl4eHp8e3x6fHt6
-fH12eXl5eXd9fXp5fHp5eXd5dXd9end2cnJxc3NxcnJxb25wdXNwcnR1dnFtcXN0
-dHZ5d3R0dHJxcXJzdHZ3eHR0d3Z1d3Rzdnh4dXh2dXV1enp3eHl5dnh6eHZ3dnh5
-dnV2dnZ0c3V1c3RycXBvcXR5dnNxcHZ5dnR0dXR2dXhzdHJ0c3NycXR0c3VxcHFy
-cG5vcnNzdG5ucnFvbmxubWxtbHBtbGtrbGxramptbW5taWhoZmRhZGZnZmRkYF9d
-YFtYVVNTUE5OUlZYWFlbX2JdXFpZWlxeYGBjY2NjZGZmZWRjZG5yc3Fwbm1sa2xv
-Z2RlZmZmZmBfX2BjZmtqZGJnbHNxbnFwb29zdnNvbmxmYWBnbW5ycmpramtua2tp
-bmtudXVybW1wdXd4e3p2dXV1dHFwcHF2eXl4dXR1d3l6ent9e32AfH17eXh5eHl5
-en6AgX+CgoCEiYuOkJWQiY6PkpSRkpOUlJWUlpCUl5aXmZeVmZiXmZiXmJmanqGh
-mZWdoaGko6ippKOkpKWqqqqop6aoqKekpaSjpKOkpKWmpKGjpKSkpKappKCgo6Wl
-paenqqWgpKajpqmpp6inqaWkpqinpKKfn56cm52gnJSSkY+IfXFqZGRkY2RjZWRj
-YGFgYl9kZWJhYWFhYmRlZWdkYmBiYmNhYWJhY2RmYGFfYGFhY19iYmBkYmJhYGBg
-YmhkYWVjYmJhYWNnZWVlZGVeW1xfZWNmYmJiYV1dYF9gXl5eX2BeXVxbXl1bWVte
-X15fXF1gX2BdXVxaXVtbW11aXl1cXFxdXFpaW1pZXlxcXVldXlxaWltZWlxaWV9c
-WFpcW1pbXV1cX15dXlxbXl1cWFtaWVhYWVxbW1tbW1pbXFlZXF1YV1hWV1daW1xb
-WltbW11cXFtdXVxeXmFfW15dXl1cXFpZWVtaW1hYWVZXWVhcWllbWFldXVxbW1td
-WllaWVpZWlpaXVhbV1hXWFdWWFpdXVtYV1hYWltdXWBdW1peWlpcXFlYWV1bWVpZ
-WVpaWVdYWlxcWl1eW1lZWltYWlpYXVxcXFpbW1taWVhcW1xeWltcXFhZWltbWVpb
-WlpaWl1XV1lZVlpZWVhZV1lZWVpbWlhVVllZWVdXWFpbXFhcXVpaWl1aWVtaW1tY
-WFhZVlhaWVlYWV1bXVxbXl9eXV1cWVpcWFhXV1pbXFpaWVxgZGBeXl5dX19fX11b
-XF1eW11eXV1eXltcW11dX11aW1tbW15cW1lcW1tcXFxaWltgW1tbXl1eXV5gW1lb
-YGJdXl5cXWBgXFlbW1lbYF9eXF5fYWBdXVxdXVpdX2JfW11dXmBfXl9gXmBgYF9g
-YGFfYWRiZWNgZGNkZGVlZGVlZWdmZ2ViZWRjY2RgYWJmYV9fX2BdX1tcYGJkh6nD
-0drg5Obo6enq63dyc3V4d3V0c3N0dXR1dHV2dXN0dnN0c3JxcnF0cnR1d3V0dXd5
-dXN2dHV0d3l3c3l5eXZ3eHVzdXR3eHh2eHt5e3t7e319enl4dHp5d3V4dXR0eHd3
-enp7fHp5e3t8e3l6d3l7e3t4enx8e3h6fX18f3x+gnt7enp9fHt5d3l7end2d3h3
-cnFvbnFxcXNycnB3dXRxcHFyc3R3dXN1dnZ2eXd0dHV0c3R1dXR0dXRzeXZ5eHd0
-d3h4dHR4e3d4eXp4dnh3dnl5eXd0dHd1dXl5dnd2eHh3dXJzdHZyc3V0cnJxc3Jz
-dnd2dndzc3Vzc3VzcXR0dHNzc3VvcHBzcHFycnRzcXdzb25tbm1tbWpra3BubG1u
-b25ra2xtbW5ramdlZWdpZmZnYmFhXl1dW1ZVVFFRUVRTVFdYV1hYWVhYWFlcYGBc
-X2JhY2VlZWdnY2FgbHFycHFubWhrbGlmZGRmbGtnZ2ZiZWZkZ2hoZGxvbmxvb2tr
-cXZ3b2poaGJhY21zb21qZGdtcG1tbGxvb3Vzb29xcnRxdnV0cG9wb21xc290d3t8
-e3x5dnR4e3x9fXt8en6AfX16fH15eHh7fX+AgoOEhoiGi42NkI+OjI6OkJKTlZSQ
-kZWWmJeal5SYmpiZm5ibmZmYnJ+goKKcm5yfoaCgo6Ogn6KioqSkp6imp6mpqKep
-qainqKWkp6SkoqSjoJ6fo6Wmp6ejpKOiqqWlpKWopqempqWkpaSkpKaopqWhpaek
-o6ahpKedlZOUlo+BcGpnZmVkYmFjZWVlYmJgaGVlZWhiYmNoaWVkYWNhYmNjY19e
-XWBgYGBhX2BhYl9eXmBiY2BiY2RiXmJjZWJjY2FhX2JjY19gYmJiYmVfYGFhZWVh
-YGBiXl5cXVteW11hYGBfXF9cWlxbXFxcWllZXF1eXGBfXVxaWlpcWltbWldaXFtb
-WFpZW1lbWllbWlpbXFlYXFxbWVlXV1tcW19eXV5cWlxdXlteXl5dXFtbXF5eXmBb
-WFtZXFlZWFdZWlpaWVtZWVVWWVlbXl9gYWBcXl5cW1pbXlxcWVpcXVxZWlpaWVdZ
-W1tbW1lZWlhYWVpZWFhcXVpaW1paWllaWVlcXFtYWFlaW1hdXFhZWFZaWltaV1hZ
-WVlZWVpaW1tZW1dXVlxaWVlXWVlZV1pXWVlXXFlZWFdXVVlaV1RVV1lZWlxaWlha
-WltZW1laWVdZW1pbV1hYWlpZWlleWltcWltZWlxYWFpbWFtYWFZVWF5aV1dZWFdY
-V1lcW1pZWFhWWVhZWVpZWVpbW1paWFtaW1xcW11bWVxdXF5bW15eXF5dW11bWVtd
-XlpbWVlaWlpcW15gXmJfXV5dXV5eXV1dWllaWVtgYV9fW1tdWl5fX11cWltbWlte
-XVpaX15dWlxeXVxcW1tcXV9eXWFhXVxZW1xgXFxfYl1cW1taWlxdXV5cXV1eXV1e
-Xl1bXFxdYmBfXV1dXl1eXl5gYWFkY2FiYmBhZGRjY2NkYl9jZWVjY2NlZGNjY2Vm
-ZmRkZmFeYF5eX1tdXl1eXV5hX2J8q8PR2uDi5ejp6evreHd4eHd2c3F1dHl1dXh4
-eHZ1dXN0dHR0cnR1d3Z2dXZ1dnZ3eHh0c3Z3d3Vzd3Nzc3Z2d3p7eXV0dHR6ent8
-d3d4dnd3eHp4eHZ7ent5eHV2dXV4dnd4eHp4enp5d3l8fHh2e3x8en1/fHd7eHd5
-fX18e3p8eXl3eHp5eH15eHt5dnZ5dXVzcnJxcnFycHJxb3BwcHFxd3RzdnNzdHV1
-dXZ4eXh5d3V1dXR2dHRzdHR2eHp3eXl5eXl1cnZ5d3h2enp5d3d3d3dzd3V1eHp1
-dHd2dXR0dnZ2dHZ2d3V1dXVycXNwcnJxdHZ0dHNxcXV2dXFxcnFxb3BwcXFwcHFy
-cnZ1cnNycnFxb29samtrbG1qbHJyb21rbm1rbGxrbWxraGVmZWVlZ2ZjYWNdW11e
-XFdYV1RTVlNVWFpaXFtZWVlYW1teW1tdX19iYmRnZWFdXWNpbW1ubWloamtqZmVl
-Zmhsam1qaWVoYmVmYmJjZ2ltcW5tbGxxdXFoZ2dnZmlucXBwamZna25ub21tbG91
-cW5qbnZ5fXVxbWtraWlpZ2tzdnZ1eHVzd3Z3e3t8fXx7fHh4fH17e3p9fn17enyA
-fX6CgYaIhYqNioqMi46Nio+RkJKUk5WWkpKVlpiXlZaXmJmdnZqampucnJ2eoaOk
-oqGgpqOfoZufoJ+ho6Ojoqaop6WnrayqqqKio6OjoaWko6KenZ6jpKqpp6impaSh
-o6Oipqilp6SkpailoqWjo6ehoqGipaanpKKfnp6bl5iWjYN0amdoZWJhYmNlZWJh
-ZWNhYmNhZGRkZWNfY2FeYGJjYWNhY2BhY2RgYWBgZGJgX19fYGBiYmFhYGdoYmFg
-YGFiYl9fY2RiY2FhYWJnZmRgYGNjYWFeXl9hXl1eYF9fW15bXF5eXFxeXV1aWl1f
-XV5dXmBfXV9fXVlbW1lYWVpcXVxZW1pbWVhdXVtaW1xfXlxbW1pbWFlcXF1aWFta
-XFxZW1pcW1pbXV1bW1xcXV1dWVleXl9eWWBfX1pcXlxaW1hZWFZXWFdXV1lZWl1d
-[... base64-encoded binary payload of the deleted file omitted ...]
-VlpfX2FhZGZqZmRiYWBhYF5aVlpZW1xbYGlkYmBeX15dW2JjYmJiX2FjZ21tbm1p
-ZGBhY1tWWV9dX19fW1lZXF5gXF5cWV1bXV9iZGdlY2RkZmhqaGhoamtsZ2twb29s
-amhtc3V1c3R2d3t9fH6ChoOGhomNi4iKi4yLi4mKjo6KioyPkJGSjY6VkpGTlJKP
-kZSWmpeUmJ2dm5qbn6KnpaKio6OipKSlpqekoaalpKampaWpqaanqaapqquop6Sg
-o6GjpaiwsbGpra2qqaakpaWlqaynp6SfoaGkpqemoqKkoaKlpZ6YmJeUh3ZtZV9f
-XmBhX2BeXF1fX2BiXVhZW11iY15fYmFhX1tbXGBfXGBcWl9fXl9haHZzYmJjYGBj
-ZF9iYGBfYGZlZGFdX2BfXV1fYmFgYWRiXlpaXF5gXVxcXl5fXFxfW19hX19gXFla
-Xl9hXltZWlxYWlpbWl1eXlxaW1taWVtbXFpZWlpZWFpbW1paWl1aW1lYVlhZW1td
-XFtaW1pYV15iWVpbWVlXVVNZXF1cXVhZW1dXV1dXWFdXVlVUU1VWWFhWVVFVV1VT
-VFlZV1laWlZTWFVWWFlbW1pZW1lYWFZXV1hYWVlXW11aWFhVVVhXWFZWV1hXV1lX
-V1dcWVVVVlhaV1dXVVRUVFNVUVVTWFdWWVNRV1VVVldWVlZRUlNXWFZYWFhWVVZX
-V1ZUVVVWWFhVVVVcWFlWWVhVVVRTVVJVUVFRU1ZTVVpVUlVTVFVWVVZZWFZVVlZV
-VVRVVVVWWVVVVlVXVVhVVldYWVlYV1dVVlheWltYVVJTUVVXVFZZWVpZVVdYWFhX
-WFdYW11eWlhYWlhcWllWWVlZWlpZWVtcWFlaW1dYWVdaWlpbW1xbXlxaXV1aXF5f
-WVlbXVxcW1xaW1hWW11cWFlcX19cWVtZXFlYWVdZWFtaXFpZWltWWFlYWlxcVlhZ
-WVlYWFlYWFpbW1lZV1lZV1dYWVhVWVdYVlZYWFRVWFlaW1paW1tcW1xcXl9fX1ta
-W1tfX1xcXF1gXV1eX15gYGBgZGRjYl9jYmFjZGRkY2FhYF9fYWJfYGFfYGFeX1tZ
-XF5jda/H09vg5Obo6evt7Hh3eHp7eXh6eHd7eXV4dnZzd3d1c3R2dnV2eHZ0c3R0
-d3V2dnd2d3l5eXh3eHh4dXZ2eHl5eXt6d3l7eHh4e3l4e318fHt7d3l3eXl6eXt8
-fX16dXV4fHt7eXt8f3x4eHh5ent4enx/fnt5e3x9e3l3eHp8e3t7fnx4dnR0c3Z1
-cnBzdnZ0dnZ3dHRzcnRycHBydXNybnJ0dXVxc3h3d3d0dHV5e3t5enh1dHV3dnR0
-eHh8fHh2dnRxcnNzc3Z4eXV0c3V0cnV4dXd4fYR6bnJ0dXR0dHNxc3Bvc3Zxb25w
-cHFwb29ycXFwcG9wcXJvcHJwbmtraWttbWtsb25xcm9tbG9vbGxoaWtsbGttcG9v
-bmtrbWhqbWtta2lpbGttaWdkYmFhYGNqZ2FZVVFQS1BWXGBkaGdoaGpoY11dWFRQ
-S0dCPD49PDs6OkBERUVJSlJTVFJRT1JUXWVmY2VpaGpnZF9lZ2JhYF1aWFtdYmBh
-ZGRjYV5cX11eX19lYmJeX2BiZGhucWtsZ2dfWVxdW1thY2FbWFpfXl1fYVtcXWFh
-YWZoaGdmZmNqbW1rZ2dvdG9scnJucHJtaW10dnV0dHh1fH+DhISEhoaIh4eJjI6N
-ioyOjIqOjY2Mi4mNj46PkJOSl5OTkI6SlpiZmJqWlp+enZudnaGioqChoqOkpKSl
-paako6eppKeko6KipqalpaOlpqmkoqGgpqOlpqurrqyqqaiopaajoaSlqKqqp6Sg
-oaOlo6KgpKGho6Cin56bkI6CeGpoZmRgYFxdYF5fXlteX19jX11cXmNiYF9dX19c
-XFxdXmFeXV1dYV5eXl1kcnFiYGBiY2JeXWFjYGJhYmBjYV9hXVxeXmBgX15fYGBi
-Y11gX2FfYF5dXlxaXV9jYF1fXl1dXV5dX19eXV9fXVlZXVtZWFlaWlpcYF1bXV1b
-XFpZWl1cWVpbWlpZWltdW15cWFpbXFpdXFxcXFtYWV1cXFxdWllXW1pcWlxaWVxd
-XFpaWFVWWVlYWFZVWFhYVlhXU1NWVlVVV1lWWVhWWFpYWVpaWFdXWFlbWVhWVFZV
-VldXWVhbWlZWVltcWlhaXVtWU1ZVVVZWVlZaV1RVWFdWVlZXVVZUWFVVVVZVVlhX
-VldWVFNXVlVVV1hYVVFUV1ZaV1JTVVZWV1hYWVlYVVVWVldaX1xYVldTU1VVV1RR
-UFBQUVVXWldVVFRYV1VVVlpWUlFXWVhVU1RZWFlWVVZVWlZXVFVWV1dZW1dYWVtb
-W1tZWVpYWVZVV1ZXVlhYVlZXVVZYVlVUVlhZWFtaWFxdWl1dXFpaXF5cWlhXV1dZ
-WlpbXFtfYF1dXV1dW1ldX1xcXFxfW1lcW1tdW1laXFtbXVpbWVdWWVpcW1taXFtY
-WVhZWVpaW1tYXl1cW1pbV1ZZXFpaWVtZWlhUVVdbXFtZWlhaWltZWVlbWVtdW1tZ
-V1dYVlZYW1lcW1laXVtaXV1dXF5eXFpcXFtcXlxeXFxhYF1aXV9iY2NhYmRkYWVo
-ZWNjY2BgYGJiYF5gXmBeYV9dXV1dX15aXF5zr8nT29/j5ujp6evrdHl5enl8e3p8
-enp6eHd5eHl3dXVzcnF1dHlzdnZ2d3d1dnR2dXd3eHt4dXZ2d3h1c3Z3eHx8enp5
-d3Z4eXh5fH16fX55d3l7fHp4eX59d3l3e3h3eXt8fH5/eXd4e3x5e3l4e3t8fH5+
-enh5enl3eXt6gH15e3t9d3d1eHl0dXZzdXl2dXZ6eXV2d3RycHFwb3JycHN0cHR0
-dXNxcXR3dnd2dnd4eHl5eHZ1dXZ5eXZ1dXR1dnJzdHNyc3V3dHd5cnBxdHRwcHV1
-c3GEh3lrd3FudHRycW9vcW90cndzcW9tbW9wcW1tb29vbm5ub3BvbW1tbGprbWtu
-bXBzb3BubWtsa2tqamxsbGtta2tsbW5ramtramppa21va25vbW5paGZfXmBjZGVn
-ZF5TUlJPUFZaYmdnZ2hvbGlnXl9YU1FKRkFAPj0/QEA9Pj9DR0lKSkxNUExQUlpd
-Y2dmZWhpZmpsbWdlZGRlXltaX2FkZGNhYWJkYmBhYF5gYWFeYGVeYF9iZ2psbW1r
-aGRcXlxbXl1eW2BeYFpaXl5fYlxgZWVjY2ltamJhZGxsbW9sbG5tcXBtbnFzdXJw
-cXR2eXV2fXx+gYGEh4aIiomHhYeIjI2MjIqIioeKiYyMjI2NkZOUk5aXl5OUj4yS
-l5eWl5aWmpqXmZqZnaCgoJ+gn56jpaShoaKeoaKko6CeoKGjoaCdnqGjpaOiop+f
-o6KmpainqaakoKOlpaSjoqWmpaOmpqWmpqKgo6Ogm52bn56dnZuYkod7bmdlZWJg
-X2BdXVxbXF5eXV1dXFxeXl1eXFxcWlxfXl1fYF5dW15fXl9eYF5jYF5gYGFeXF5f
-YWBdZGFgY2NkZF5gYGFgXl1dXV5fYFtfYWBhYWBfYGJdXV9fYWBdXV1fXF5fXWFd
-X2FhYF1bWlpcWVdaWl1YXFpfYlxdW2BcW1hZWFpbV1VZWlpYWVlaW11aXltYW1pa
-XV5cWVhXWFhfXllbWllbV1lbXFxZWVtZVlVVWFZVWVpdXFxYV1hUVlhZVVRUV1dW
-VFRWWFhXWVtWVFZVVl1aWVhZVFZVWFhYWFlXVVhaWlpZWVxZV1VVVFNTU1NUVllb
-WllaWFpaWFZWV1hYV1ZVVVhZVlVXWVhWVVhXV1VVV1hVV1VWVlVXWFhWV1VUV1VV
-VFNUVVdVWFZWVlVWV1NVVVVVV1dVU1FQU1VVV1RUVFJWWFdXV1hYV1dTVFVWV1hX
-V1hYWFlXVVNWV1VWVFVXWFlXWVlaWltXVVlaWVhYVFhWWFdXV1dUVFhWVllVV1dU
-VlVYV1pbW1pdXl1bWlpcXlxaWVtaWlxcWVxbW1lbW15cXV5aWVtcWFlYWVpeXltf
-X1tcYF1cW1pYWFlZWVdaWVhbXFxdWltfXl1bXF1eX11dXltcW1xaWVlZWF1bWVdZ
-WVhZWlhYWFpYXFxcWlpbWllZWVtbWl9bW1tcXlxaXV9dXl1YWFdXWlxcXFpdXlxc
-XFpcXl5fXFtdX2JfYmFgY2RjZWVhZWZlY2JjYV5dYmBjYWBeX1xdYl9fYV1eY2Bg
-XnawyNHZ3+Pl5+np6+p2d3N1d3p7e3t7e3l3dXV3eHZ3dnR0dnN1dnd5dnd4eHR0
-d3l2eHh5d3p5dnN0d3h3d3d3d3t4eXR3d3Z4e3h8e3x+enl+e3l4eHt6enl1eHp6
-enh6eHl6eXp9enh4ent6fHt8fX57dnl6fH15enp3en16fHp9fX1+d3Z4eXp9eXh3
-ent3eHh3d3d5dnNwc3JxcnR4c3F0dnZ1eHV1dHV7d3h1d3h2eXl2d3Z3d3d3c3F1
-dXNzcHRzdHVzdXR1dnR1eHdycnZ0cHJwbIR/gHeGc29zbnFycXZycXF2dXN0cW9u
-bXJxcG1ubnJxcG1ucG9vb25vbm1wb21vcnFwbm1ta2dpaWpta2xtbnFzcG5samts
-bGtraGpoaGhrbG9yc2xnY2NhYl9jZ2ZjXlxVUU9OVFpjZmVobG1saGZjXVhVTklB
-QDs8QUNbQj9GQkNERUdHTUxLSUpQWmBgZmptbGtkZGhramhoZ2ZgYmBiYWNkYmJk
-ZmZlY2JiX15fYWJiZWRkYmFiYmdra25uaGNeX1pXWVpaW15dX2NhYFhZX2NlYWNm
-Z2loZWVgZWltbGtqbGpucmxtcHBycXJ1dXl5dnh8fn+AhoeJjIyLi4iGiYiMi46L
-ioiHiYuLjJKTk5OUlZSPkpKRlpKTlJaQkJSampmXlpOZmZeYnJqYnaGen5+jn52e
-l5yjo6SgpKejqKijn5+gn5yam6GipJ6bo6Wjo6anp6SjoaOipaanpaajpaaopaSh
-oKCfoaClpKKgn6KgoJiUin56a2VhYV1cXWFhYWNhXVtdW15gYV5cWlhaXlxfYF5f
-X11dYF9gY2FgYWFfXFldX19fXl1cW1teYmFdXF5eXV5fY2JiYmJeXmBfX19fXmZk
-YWBcX2BdXFxgX19gYmBdXF9fXl1iX19gXmJfWVhaXV5cW1lcWVpaW1tdXVxeXFtb
-Xl1cVlZcWVhaWltZWltbWlpWVlxYWFlbW1laWFpbXFtaWVhYXFhTVldYWVtYWVZV
-WVhWVlZXWFhYVlhXVlhXVVVTVFJXV1dUV1hYWFdaWVpVU1VYWVhXWlhZWldWWVla
-WVZUVFVWWltXWVhXV1JTVlVUVlhaWVhaWllcV1hYWVpaWVdVUVdXVVdVVVZaWVZR
-UFVXVVNWVldTU1NUVFhVVlZVVlRWVVRSVFNYVlZXVlZaWVhUXFVSVFZaVlRSVVdX
-V1ZUVFRVU1NSUldXV1dXU1ZYV1ZVV1dYWFVXW1ZXVFFVV1VUVVNUVVVWVVlXV1dW
-VVZWVlheWlZYVllbWVdWV1lZWFhXWlRWWFpbWllbWllYWlxcW1daWFpaYF9eW1xc
-WVdYV1lXV1ZbXFxbWFRXWFhXWFpeXltaWllaWVlaYFxYWFlcWllcW1tWWFpbWVpZ
-W11dXF9eX2BcWVtcXF1ZWVdXVFRUVlRVV1dVV1pcWVdaWllXWFpZXF9cWFlbWlla
-WFpcXVtZXVlZXFxcWllaWVpaXFxeXFtcYWBfYF9eYV1cYmRgYF9gZGVhYmBhYWNj
-YWFgZGJfYGBgYWBeXlxcYGBbXF5cYmJkgLLG0tnf4+bo6enr63p+f3p2dnh4dnV3
-d3Z3dHR0cnV2d3d6dnR0eHd3d3h5end3fnx4f314enV1dXZ2dXl6enp7eXd3d3Z3
-end4eHp9fX19fHt8enp6gH18e3d3e3p5end4fHp5fX56enp6e3t7fH1+f3t5d3l6
-fH97enp7eXp4e3x4e3p9end5fHl6eXx4dnR1d3V2dHRvcXRzdXNzcXRycnV2dHB2
-dXN1cnR5eHp2dHF0dnl4dXN2eHp4d3d2dnR0dHJ4dnVydHl5e3l4eHp4dHJzcnBs
-enqIkJB1b29tcHF1dnh6c3FycnJucXBwb3Bxbm5ycXBtbnNvbm5sbHFwbm9ucG9u
-bG1vb29ubWpoa2prb2tub3BvbGlsa2hra2traWtqaGpqaGhqa2hmZGRgXmVmaGZk
-X1dSTlBWV15mampqbGxpZmBfWVNPRkNAP0JBPUNBQ0FBQUNCQUNGS0hJSU9XYGhs
-b3FvbWpoamxsamtpZmZfXmBiY2RjYmZoZ2VlZGFiYGNeX15hYV5eXmFmY2Fjam5s
-aGJeXFpaWV5gYV1dXl1dWltkYmRpaGRnaWhlZWZlZ2lsbGpram1ubG5xdHBycnh5
-e357e4GBgYGGiIeMi4iHh4SDhYqNjY6PkpOPjYyOjo2TlpeWk5KPi4uOkZSYmZeW
-lZmYmZeWlZaZmJiWlpudoJ+fnp6enKCenqCnpJ6do6WjoaSgm6CenJubn6Olnpya
-oqWioKGjnZ+ipKelp6eloKGop6mpqKOipKCgo6OkpqGhoJ2Zk5GLg3t1amNiYmBh
-X19jYGFeXFtZXF5fYGBeXl9dXF9fX11bXF9eXWFgYWBhYmFdXllZXF5fXlpZW1xd
-YGBhYWBiXl9gXmBeX2JiX2NfXV9fX2FjYFtgY15cX15fXV5dW1xfXmBfXV9gXFxc
-YF1cXVxYW1xZWVpaW11bWFhXWl1cXVtdXVteXVlaW1hYVldWVldYWFlYWldcW1xZ
-W1lbW1xdXFxbXFtdWVlaV1ZWV1pcW1taWFpZWVhXV1tYWVZWV1dXVFZVVllaVlta
-XFteWFlbWlhWVVZYV1ZVVlVTWFhYWFdXVldVVldZWVZXV1dYVlZYWVVWWFhWV1pb
-W1paWlpZWVhXVFlVVldYV1dUVVRVVFVUU1NVVlVXVVNRVFVUUlZYV1hZWllTVVZT
-VFVWWFhWVFZWV1hXV1VVU1RTU1VaWltVUlZUVllZVlZWU1JUUVRYVlZWVVVWWVhU
-VlRVVllXWVhVUVNTUVNUVFdVU1dXWldVU1RYWFdXV1ZZV1RYW1pYVlpaWFZWWFla
-XFlYVldXVllZWFlaW1tbWFpeW1tbW1pZWVlXWFpbXlpaWVhYWVpZWFdXWVtcWVlX
-WlhZW1daXFlVV1lbWFlbWF1WWFtaW1pZW1teWlxfbV1ZWlxbW1hYV1VVVVJUWFpd
-WVpZWlpcX1tbWl1eWVpbXVtaWVxaXVxcYF1bXF5bWVlbXl1dXmFgXF9iXFxeW1le
-X11dXV9fXl1gW15fYGBhYl9iYmJiYGBhYWFmZGJiX2BbWlxaWFpfXVpcW1xeXl9x
-psTQ2d/i5ebo6erreHd4enl5eHd0c3N0dnZ6d3ZzdHd4eHp7eXh4eXp5dnh4eXh4
-eHR8eHd3eHZ4d3hzeXl9eXl4eHh3eHh4dnh3enl7fX57enp7fH58fHx7fnp6d3d4
-enx7e31/fXx8d3d4dXt7e3t7e3t6eX57eHt9enl3eXx8eHl5d3l8enp2dXd2eXV0
-dHl1dnVzcXNxcW9vcnh1dHJ0dHN1eHd5c3R2eXd1dXZ1c3N1d3t7eXR1c3V2dXVz
-dXd3dXZzdHR2dnd4eHl2dnR0d3FzcnBnZouIeWlucXR0dHN2cnFzc3N2c3VzcXJx
-cXNxcHBxcHBxcHN2cW9ucXBxcG9ra2pubGtraWxubWxubm5tampqbG1qa2xsampr
-a2xxa2xsa2xtamhnaGpnZWFgYWVmZmRfV1JSU1JbXmJoamttamdlYVxZU1RMSEdB
-P0NERUNCP0RDQD9CSEdGRkdKT1pfZGtvb25sbGtta21raWdkZWFhZWVjY2dlZmRj
-ZWVmYmFjYmNiYF5dXl9eYWFeX2Jna2ptaGJgWldbXF1eYFtZXVxeXl5fYWNjZGNk
-aWxpZmZoamxvbGtmbm9tbHB0cnB1enl8fYCChYSBg4aJjY2Mi4qFhIOFjpGVlZCQ
-jY+MiIuPkZOVlJmTk4+PkJOVmJmenp6ak5KTlpmYkpWal5iYmJ+inp6dnp6dnKCc
-oqCcnqCgoKGenp6dnp+cmZicn52bmJygpqSkop+dn6OopqSkpKGZm56ip6imoaCi
-pKChoaGhoqGknZiWjoeCe3dvZmBbW1pbXF9hYWJjXl9dYGFeXmFgX11bXVxeW1xd
-Xl9cX2BfYF1gYWFfXVtZWV1cV1pZXF1eY2JhYmFeYWBjY2FgYmFhYmBeYWBfX2Bh
-ZGVkX11eXlpdXWFdW15dX1xcXV1cWVpbW1haXVhcXlxYW1xeXltfXFpZW1paWFti
-Xl1bWlhXVVVXW1taVVZYWlhZVlZZWFddW1pbXFlaXFtZW1xZW1ZWV1hcWlxeW1lV
-V1pYU1RYWlZXV1ZYVVVWVFNXWFdVWFlbWlpbWVlYV1VWV1JUU1VWWlVWVFVVVFVV
-WVZWVVZWVVZYVlhXWFhZVldWUlRXWVpbWVhZVVdXV1lcWlpZVlRSVFJUU1NVV1VT
-VVZVVVVVVVVTVldTV1dWV1dVVlZWWVlWVVNXWFlVVFRVUldTVlZVU1JRUVVXV1lZ
-VlNWWFpaVlRVVFdUU1NRU1RUVVdYV1dYWFZVWlZVVVVUV1lVVVNVWlhVVVhYWFZV
-VldYV1NTUlNUVVRWV1VWWFpaWVldWFlYW1hbW1dZV1dYW1dYVllYWVtcWFtaXVlY
-WVhXWVtaWllXV1hcXVtdXFpbW1lcXVlbW1hYW1pbW1lUV1paWllYW1lZWVhbXllZ
-WVlZWlhbVlhZWFtYWFhXWFhWVlNYWVtdWlhZWVhcWVdYWltcWlpaW1taWVtgXl5f
-XFtaWllZXF1fXFtdXGBhYV9aW1xbXVxeXl1gX19fWl1eY2NiYWBhYGFkZGZiYGBh
-YmNiYmJgX11YWlleWlpfYF1gX1xgYHOqxtDY3+Ll5+np7Ot2e3x8eXp4dnZ0eHd2
-d3h7d3Z1dnh7enl3dHh5fHt5eXd5eXt3dnR2eHZ3dnV5eXd1eHh6enh2dnd7fHx5
-d3V6eXp9fHt8e3t5eHh4eHd6e3h1dnZ4eXh4enl7enp5eHd3eHp7e3p7enx9e3x9
-e3x6enl5fHl4enp6e3t8eHZ4eHd0bnZ2dHN0dHNxcnNxcHR1dXJycG5wcHJvdXd0
-dnNzdHZ2cXV5d3d3eHl0cG9xbnF0dXR0dHd6dnV0d3V0d3V1dnZ0dHd4eXh0cnFs
-b3JibXF0b3F1dXN0d3JzdHN0c3JycnFxc3Nwbm5ucG9vcHBvbnBwcGpwbGxvbm5r
-amxpbGxwb29wcW5ucHRubW1vbm9tb2xrbW9vbWprbWlnaG1qaWdjZWlramlkZ2Re
-VE9OWFteX2RpaWpqZmRlY15ZU1FNSERDRUVBQUFGQ0BBREVFR0hGSExUX2Rnamxt
-bWlpa21qaWppaGdmaWdnZ2VkZWRhZGNnZ2ZmZGVjYmBiX15fXF1cWl9aX2JgZWlm
-ZWFbWFpcW15dXFtfX2VgYWRgX2RjYGBkaGhmY2Roam5ubGppbG5vcnFxcnJ1fH99
-gYGDhYaGhoiJiIqLiIeGh4yRlJWUkIqIioeFhoqTlJWanJ+YlJaamZiamZeZl5aS
-kpCVmJWSlZmXl5WYm56cnJuenZucmJmdnp6hnZ6fn6Cenp2ho52alJucnJ6anZ+f
-nqCfoKGioqSjpaOinp6dnJ+ho6Ghn6Ggo56joaChn52dmpaRh4F8d3FmW1teYV1e
-X2JjYmFhX15eX2BdYl9eX11aWlhbXl1bXV5dXl1cW11dX2FiX1hbXFxhX2JhYF9f
-YWJhYGNfYWZjYmBeXWJgYWBgX19gX2FgYV9fYV9eWlpaW1teXF5fYmFfXllZXFxa
-WFtcXl5aWltcX1tcYFxeW1tXWVlcWl9kYF1cWllZWFlZW1xaWlhZVldZW1taVlhY
-WFdaX15dW1tXXWFdXFtYVlVXWlpaWlxcW1lWVVJTVlRYWFdTVFVWUlNSVldVWVZX
-V1RTVVlYV1ZVVlZWVVhWVlJUU1JUV1hYWlhYV1VVVltYVFlaWVlZVFRXWlpYWFda
-WVlXVlRWU1NVVlhWWVRVWFZVV1VYW1VUWVVUUVFVV1dVXFlTU1VVV1RTVFVXVVNV
-VVdZWFZZV1ZTVVNWV1lUU1JTU1dVVVdVVFFWVldZV1dUUlJXVVRUVFdZXFZaWVpa
-WVVWVVZWVlRYWFtcWVxcXFlZWFdXWFdUWFZXV1dXWVhWVldXV1ZYV1laWltYVFRX
-V1lbW1VVV1hYWlldWlpaXFdXWFpfYFxZWFlcWVpXV1laW1laWVtYWVxaW1lbWlte
-WlhYXFpZXlxcXV5fXVpZW1xdYFpaXFlXWFhYVlhXWlpbVlhdWllYWlxZWVpaV1hZ
-V1dVVlpUVVdUVVpaXFlaWllZXVtbWlhZXFhbX15eXVtfW1laXVtaWVpaWltcXF5f
-YF9gX11cWlheYWRgYmBiZ2RkZGRkY2FiYGFgXl1dYV1eYF1eX19iYF5eXl1kd67G
-0dnd4+Xn6enr63h5eXt8e3p3dXZ4dnh4eHl4dHN3fHl4eH16eHl6eHh4dnd3d3Z5
-e3d2d3V1dXd4eXRzeXl3eHh7fHt+fX16eHh6fHx9f399fHp6fn16enl5dXd5e3l3
-dXh5fXt6eXl9f393ent8d3t8e3l5eXp8gH97fXh5d3h7e3p6eXd2d3p7eHR2dnV3
-dnR0dHJwcnN0dHJxc3BycHJuc3V1dndzc3JzdXZ1dnd3dHZ3eHV0dHFvc3F3e3h0
-dnV2eHZ0cnN0d3p2eHZ2dXVzc3RydHJya3Fwb3B0dHV3c3Jyc3RxcHNzcXBvcnJx
-c3JwbG1ucXBtbGpsbnJubGttb25saGptbm1qa21wb25ubm5wcG5sbm1tb21qaWhu
-bG9ta2xqZ2dpaG1samhlZGdlam5oYFlWUlNXXGBiYWRlaGlsZmJgWldZWVdTUktG
-SEdEQ0NAQkJDRUFDQ0ZNUlxiYmZpbnJwbWpqamxsamlpZmZkY2RjZWRlY2VlZ2hp
-aGRjZGJjYV5fYF9dXFpcXVxcW1tbYGhnZ2ReWVxgXV5iXl5hYWViYGFhX19eYGRj
-ZWNfY2VnbXBvZ2xqbHFub3J0dHN4eXp8foODh4mHiYiJiImIiYmLjo+OkY2LhYKC
-f36Ci5KXmJWbmpyco5qXl5eampmYl5eZmZeVlqGclpeXl5iYl5iYl5qamZ2bmZyh
-pKOioaOdnJygoaGioZ2Wmp6Zm5qfm5ycnZydoJ+goKGkoqCfoKCgnqCgoqOeoqGd
-nqGin6Cem5mWk4+MiHx2cWphXVxfYF9eYGJhYWBgXl9iY2RfY2FgX1xbWl5bXV5f
-X15eX1xcXV1gYmJfXVxcXGJjX2BeX15gYl9eYGJeXmJjYGBhXmFiYmRgYGNiYWBh
-ZGNhXVdaWFdeYF9gYF9fYVxZW1xbW1tcX2BfX15aWVtbWllXWVlZW15dWVpZWl5c
-XFhZWVZWWVlaWVlVWF5WVldXWVpaWVlaWFZaXFxbWlpbXFpYWFZVWVpaW1pdXF1b
-W1VUV1RSVFRVVlZVWVdaVVNWV1hYWFVVVFVUWFhZW1pbVFdWV1lZV1RRU1RYWlhX
-V1VYV1hXV1NRVVhYVVRZWFlZWVZWVlhZVFRTVlVWVlRUVVRVV1daWFdXU1RXVVJT
-UlNVVVVWVVZYVFJUU1JUVFVYVFNTVVdaWlZWVldYWFdVVVdWVVRVWFpYVlZXU1VW
-VFRWV1dZV1dXVFJWV1ZVV1ZWVldYV1ZVVFNRVFVYXVpVVVZXV1ZXVVdTVVdZWFZW
-UVJYVlZZV1dWVlVVVlZVWVpaVVhZVlJSU1laWlVVV1lbXlpWV1dWVlZVWFdbWVte
-W1laWlpWVFdYWVVZWVtbX11bWlpXV1VYWllYW1taW11cXV1dWltZWFpeXl1bWlpa
-WFRYWlteXVtcWl5bV1hXWltcWFZaXFpYVlZVV1dWV1ldW1lcWlhaWllYWlhYWltd
-XlxaX2JeXVxZWVxbWlxcWFpZWVxcXl5cXF1hYGBfXl5eXmFhX2FgYWJiYWJhYGVl
-Xl1hX15kX15dXmBfXl1cXlxfYGBsqMfS2uDj5ejp6uvrdnZ1dXZ2eXl2d3R3d3Z3
-eXp4eHd6d3Z2enp4enp9eHR2eXh4eXp5d3Z1eHZ2dXd2dXd3d3Z2e3l8eXl7e3x8
-fX1+fn9/fn1+fHp5eHp0d3Z3eHh6fHx8fXx4enx9f357fH19foB/enl8fXt7eHp9
-fXp7fHt3ent9d3Z3eHV4d3l8fXl3d3l1c3Nxc3Jxc3V0c3Z0cG9xcHNzdXV0c3V3
-dnJ0dnZ4dXZ3d3Z1dnV1cXJzc3N3eHZycnV1cXN4eXZ2dnh3dnd0c3BxcHB2dnVx
-c3Ryc3VzcnJzcXB0cnF1dnJzc3N0cXFvcXFwcW9va2trbm1xbnBwbm1tbG1ubnFu
-bGxtcWxsamhpbmxsa21sa2ppamxoa25ubGtqaWttbmtpaWlpZ2dlaWhoamxpXFdR
-UVJYX2NiZWltampraGZjYWFgX1pYVlJKSkZBQUJBQURCQ0ZHTVdaXl9jZ2ttbmxr
-aGhnbWlramdoYWVmZGVnaGhoaGZoaGlrZmFfYGdkZWVlY2JcW1xbWVZXV1VUW2Nq
-bGlkX11bXV9fYWBeY2FkX19cXWRkX19hZWdoZWlscXJvbm5wc25xdXZ3dnl1dHh9
-f4OGiIuHgoOGhIyLjIyPjpGQioWAf4GGiYuJj5GPj5OTmJqXlJiUlpKUl5iZmpub
-mpOUmZmVmJqZmZaUk5SVl5iXmJeXnKGoo6WioZyblpmZnJ2hoZ+eoZ2amqCin5ua
-mp2fnZ+doaaioqCfnJ6go6OjpaCfnZyamp6ioJuZl5WQjImDd25wbmhfYGBeXlxa
-Xl9hX15dX19fY2BeXV9hX19iXF1dXF9hYGFiY2FfYF9gXV9eXFpfX15eYl5dXl5h
-YF5fYWVlX2JhYGNjYGRkZGJeYWBgY19jY2FjX1tbXF1fX2BgXV5cX15eXV1bXF5e
-XlxdX11bWVdYWFdZXF9dXVtZW15ZW15fW1laWFhaXVhVVFlaWlhaXVxYV1haWldb
-WlhZWVtaW1pZWFlYW1tcW1taW1dUWFhZWFdXVlhYV1ZTVFVXV1lWWFlXVlNUVFdX
-V1VUVlpaWlpYWVhWWFhZVVZVVVZYV1hZWlhYVVlXV1laV1RTVFdXVVdYWFVUVlVT
-UlJUVldVU1dUVVVYVVpZV1RXVFRWVFFRU1dWVVZWVlRXV1VUVVVTUlNWVVVWVlVU
-VFVSVFVWVlZWU1FTV1RXWVZVU1JUVVZWVFVVVltVV1RRU1NVU1ZWVVdVVFZVV1dV
-VFZUVVdSVFRXWVZTU1ZWVVRUU1dXX1dWV15aWlhVVVZXU1hYVlZXW1tYWVtbW1ha
-V1dWV1ZaWVZZWldZWVhYVVJWVVdaW1xcWldaV1dWWFhbWFhdW1hZWlxZWlpYVVZa
-XFdYWVtbW1xcXVxaWFhZW1xcWVlcXlxaW1lXXFxcXFxcXVtaVVVYW1hXWVdaW1pb
-WlhYWVtZXl5aW1hZWllaWVhZWFdXY2NYW19cYFxYWl1dWVlcW1teXmFdXV1bXF1g
-XV1fXl5dWltgZGVjYGBgYV9gY2BjYWJhX19fX2JfXV5cWVpaWlpbX2BhZG2kx9Lb
-3+Pm6Onq6+t3eHd2dXV2eHt4d3Z4d3d2eX15enZ2e315end5enl3dXh4eXh2eHl4
-e3d3dXZ1dHp5eHd1dnp8end4e36Dgn+AfXp7e356fH58eXh6fX97eHd7fnx9e3x8
-ent6fHp5ent7e3x9gHt7e3Z6eXh5enp6fHx8e3t8ent6eHZ5fHh2d3V3dnp2dnd3
-d3d1d3V1dnZ0dnJzcnJ0dXR0dHZ1dXd7eHd4eXh2dXd2dXd3dnJxcnV1dXV0c3V2
-dHJ3dnV3d3V1d3p6d3V1cHBydXZxcHR1dXVycnFxcnNzdXFucXJzb2x2d3RycnBw
-cXNybmxsbm5ucG1tbXBwbW5ub2xsamlobWpra2lpaWtsbG1taWtqZ2xrb21rbGxp
-aGpqbWxtbW1ramhoZmVraGxubGdhVUxOUFRZX2Nlam1xcGtnZmZnY2NhZmBYVFFN
-RkREQj4+PkFBRU9ZYWdoZGVna2xsbmtnaWhoaG1qZ2ZkZWRlaGhqa2doZ2NoaGlo
-Z2VqaGtpa2lnY15cXVZYXFlWVlNVWmVtb21lX11dYGJhX2JiZ2RjYWBhYmdoaWht
-cG9raWtycnFtam9zdHR4eXl7dnJ0eXqAgYOFhoJ+gYSGjIqJjouHjI2GhYmJh4iJ
-joeLj4uKjpCPkZWTlJaTkZaWjZeXl5mXlJWWk5CTk5GRk5OXlpaXlJSWlZWYoaeo
-o6KemZSWl5mamJybnp+gn5+in52blpmfmJ2en5udoKKjnZ6goZ+doKGjnp2an56c
-n5uZmZmVk4+IiIF4dHFua2ZdX2BeXl5fX19gYFtaXGBhYFxdXV9iZWFhX15eXV1g
-YF5gYWFfW1xfX15cXl1iX15dXl5fZGFiYV9iY2BeYGBfX15fYWNjYWFhYV5hYF9f
-X2JhYWBdXV5fXV9eXVhfYWBfXVtbXVpcXFtYV1VVVlxbWFxcXllcX2NdV1hZWllb
-WlhbWVtYVVVXW1laW11cXFhZWFpcWVlWV1ZYWFhYWFhaWVhYWVpaWFlYVVRYVldZ
-WFlWWVhXVFRXWFZWWVtaVlJSVFhXVlZWV1ZWWFxcWVlYVVpaXFlXWVhTVVdYWFhY
-WFhZVldaWllXVFRUVlZVU1ZWV1dWVFNVVlVVVVNVVFhWWFlZWVtcWFZXWFdXV1ZS
-VVdXVVVUU1VUV1ZWWFdUV1hXVVVWV1dZVlVVVVdTUlZVWFhWVldUVFJRVlNWVVNU
-VlZWV1ZYWFxXVlVUV1ZXVVdVU1RWUlVVWFdXWFVUVFZWVFVTVFJUVFVXWFpYVlVW
-VFVXV1hXVVZXWVlXVlVYWFdWWFlWWFVWV1dYVlZWVlNVWFZVVlZWWFZXWFdWWVpd
-XFxdWllaXFtaWFdZWFdZXF5dWVpaV1ZaWlZZV1hYWlhYWlpaVlhYWFtZWFZXXFxd
-XFlXWlhZWlhZWFlcWVdYV1hZW2VkYWBcWV5iWVlYWFpXV1dZXFpaVlpaV1hcXllb
-XF5eXVhYXFteXV1dW1xcXV9cWFpbW11cXF5dXGBeXV1eX2RiZGRiX2BnZmRgYGVk
-Y2ViX2FfXl1bU1hbWlxdX2JgbKnH0drg4+Xn6unr63Z1dnZ1dnh4eHZ3dnZ0dHZ4
-d3p3eHl2dXZ1dXJ3eXd8e3p2enh3dnd4eHt6dXV1eXt6enp4eHl7e3x/gX9+f4B9
-e3p6eXl7fXx8e39+gH18e3V5fHx8e3l6fXl6end5eXl4enl4d3x4enl8fHx7e318
-e3l5eXx6eHh6eXt8e3t6d3h3d3h1eHd3eHd2cnJ1dnJyeHZ3dXNzcnV1c3V0dHZ2
-dXl4eHZ0dnZ2dnd3dXN0cnZ5dnh1c3d3eHh3dXd3eHV0dXVzcXFxcHBycm5tcXR0
-dXNxc3Nxc3Vyc3RycG9xcm5vcHBxb29zc3FubGttbmxwbWpsbHJwbmxubGpna25t
-amtpaGlqZ2lqaWdoZ2ttamxvbWtqa2tra2xtbGxubWpoZ2doZ2doamtsaGJaTk5P
-UVVbZGhpb3NxbWdoZWZqampoY2NfVlRQSUpFREI+QUJIV2NsbmtoZmZoaWhsa2pp
-aWxsbGppZ2dpaGZqa2lmamtpaWpobG9ramtqa2lqa2dhYF5bXFpeW1hXW1lbY2tq
-bm1pY2BeXl9eYGNlY2JeX15cY2poaWtsbm1qb3R2c3BtbnBzdXl6d3V0c3l7fH6C
-gH9/foGGi4uKiIaIhIaEhoaHjIqCfYCAfX+CgIWHi4yLjY+TkIuNkJWZlpKUkZOW
-kpKQkZKSkJKSlZqZl5iYmpeVlJOboKKenqCblJaZmpqZl5WSmJudnZucm5aSk5qY
-mJaZn6CdnZybm5+fn52gnp+bnZ6cnp6cl5KVlJOOiYSBeXlzcm5pZmNcW1xcW15f
-XWBfYWBeXFxdX15eXmJlY2BgYGFfW1tcX2FgXVpcW1teXl9eX2RhXmBfXlpeYWRk
-YGBfXl5hXWBhYV5gYF9iY2NhX2BgYmBcXmNgYF1aW11gX11eW1lXV1pcXV9ZW11a
-WlhZVldZW1xdXFxaXFhaVlheWVlZV1laWFZZWFhYVVhZWFZaWlxZWFpdXFhYWVpU
-VVVXWltZWVxZVVhYWFhXWFhXWVdXWVlaWldVVVdWVlVYWFpZWVhVVFZWWFdXVFNU
-UlNVU1VUWFpcVlZXV1lbWVVUVVZVVlZUVVRUV1hVVlhXWFtaWVVUVFVUVVZWVVZY
-VlRUU1RVVVZXWFhYVldXV1dWWVVTVFNVV1RWU1VWVVRXVlVTWFZZVlZUV1VWVVZY
-WFZXVlVSUFJUWVVXWldUVVRVVFJVVFdVU1VWV1daV1dYVFZYW1lYVldXVVRTV1dU
-V1ZVWVdVV1VVVFRUU1NXVlhXV1VXVlVVUlJUVVdTVFZYWVlaWVlaVllWVVZVWFpY
-WFdcW1tZWVlZWVtaXFtXVlNVWFVUV1laW1xdWlpdW1xWV1lYWFdYWFhcWFlbWVVV
-WFlYWFRVVVZaWlpXWFZZXFxXV1dYXF1aWVlcV1pYW1hWWFteXFpZV1hXWVpbWVxa
-WVpbWFZXVVdXVldWV1lZW1tcX11dXVtaXF9fWlxbWlleX1taW1taWl5cXFxcXVla
-W11fYGFkYWBhYWNhYmRjYmBiYWRiYmJgX19gYWJgYFpZWV1fXV9gYGFrpsDR2eDj
-5ufq6uvrdnV1eHV4d3Z5e3l2dHV3eHh2eHh5eXyBeHV2dXZ2eXl5fHp2eHp6eXx8
-fHx4eX98e3h5e3h5d3h6e3t7fnx/fXx7fXx7d3l5en56ent+fXx4e3t3eHl3eXl5
-eXl3cHJ5eXp6eXt8e3h6e3t7f319e3t8fHx6ent8ent7ent8ent5dnh3dXV1dHR0
-c3Z3cnRyb3NzcnV0dXZ1dHBxc3V2c3R3dnJxc3Fwc3JzdnZ4d3R0cnF0c3N3dnd3
-eHh3dnJ2eHd5dXRzcXRvcG9yb29ydHNyc3JwcXFycXJwcHJwbnFvcG9tb3BvbW1x
-dXFxbm9vbW1tcXBvcG9taWttbWxsamhpa2lqamlpbGppbm1sZ29ta2pqam1vb29w
-bWlpa29xbWlsa2poZGNlZ2tpZFtUTk1VW2FmbG1wcG9raGhoaGdra2ZmZGFhW1dS
-Tk1HSUNERExdZ21vbWxqa2hrbGlsbGlpbGxqaGZpam1rbWtoZmhqa2lra21rbGps
-a2tubG1samdlY2JgXFxcXVpaXF1fY2ZqcGtoZGBcW11fYWZkY2FiYWFiZWhkZmpp
-amxydXV0c3N0cnN0d3l5dXZ6en19fHx+fHx/gYOHjIyIgYOFgX57g4ODe3V2eHt+
-gX53dn2DgYGEjo2JjIyNj5KSlJOTlZORk5aXmZSRkpWTk5aWk5aWmpeWlpKWm5yd
-mpWTlZSanpuXmJSTl5SXmZeZlZOXmZiTlpabnJ+cmpmampqdnp6cm5qbm5yZmJOP
-kY6RjIeDfXp5d3hxbGZlYV9eXVxeXV5fXl9hYGBgW1dbXGBhYV9eXmFgYmJfXWBe
-W1xZWl1cXF1cW19hYF9iXmBeXFxcXVxbXWFhYGFfXWFiZGNkY2RkZGFgX2BhYF9d
-W15gXltaW11eXV5eWlxbW11cYGNhW1xdW1tcXFpYW11ZWFxbV1pcW15bW1tZWllb
-XFZTV1ZZWFpaWFpYV1pYWVlZWVdYVlhXVlRaXVtdWVZZWFlZVVdTVlhWWl1fXVpX
-WFtZWFdXWVVYWlhXWFZUVVdWV1hWVlZYWVZVU1ZaV1hXVlZZW1tYWFhTVlhXVlVa
-WFdYV1ZVU1ZXWlhWVlNRU1VWV1VXV1ZXVVVYVVlXVldYV1lWVlhWU1VVVFVZUlFS
-VlRVVlZWVldUUlVUV1VSVVVVVlVVV1hYVVJSVVFRVFJRU1JVU1JUVFRWU1FSV1NS
-UVRYV1ZWVVVYVlRVVlZYVVZXWVdXWVRSVVVXWVhaV1lYVldWVlZWVFNWVVNSUlJR
-UVJUWFhUVldZWVhXVVRWVllYWFhZWlpaWFdaXV5bXF1eW1hYWFhWW1lZV1hUWlpf
-W1pbWVhaWFhbVlhXWFhXWlhbXF5bV1VZW1paW1haWVhXWVxaWlpaXl1YWVxbWllb
-WFlcXFpZWVpZWlpYWFxaWllWVFVXXFtWVltaWFZYVlhXWFlZWFtaXVtZWltZVlla
-XWBeXFpdXlxaWlpZWFhbW1taWlpgX1xZXl9fY2FhYmJhYmFhYWJiX2NiYl9dXmFh
-YGFhX2FfXFlaW1xdX2FfYW2ewM/a3+Lk5+rq6ut5e3d2dnV2eHl6d3V2d3h1d3Z3
-d3p8fH17dnd4dnR3eHZ3eHd1ent6enl7fX17en19e3p5eHd3d3x6d3p6fH58fXx8
-enh3eXd4e3t7eXt5eHl6eXt6e3p6eXh8eHd6d3h5eXl4eXp6enl6e316eXd5ent8
-fXp5eHx5e3d2d3x6enl1dnd3dXV1c3NxdHNzcG5ucXFxcnJzdXR1dHR2dXZ4dnV4
-dnRycnR0c3RzdHV2d3Z2cnV2dXZ5dnZ6d3VzeHR4eHd4eXV4d3h1d3d2c3F0dHRx
-cHFub29wcXFyc25wcXFyb25ub3FubnFxb21ta29vbm9ucXBvbm5ta3BubGloa2pp
-amhoam1vbGxtb3FtaWlqaWxsbG9rbWxra2psbm1taWpva2tpaWVkampkXlhRUFNa
-XWNqa2xuc2xoa2tsaWppZmZnZ2dmXVVST0hGQ0RJU2NtbW5sbG1pa2trbW5qaWdo
-aGhraWppa2xsamxrbGpra2ttbWtpaWtta2tubGtvbmtqZmRjYWFdX2FhX2VkZWRj
-aG1qaGJmX11cYWJjY2NoaGRjZGRmaGpqaW12dHByb3BwbnBydXJyd3Z2eXd2cnN7
-e32AgoSFgYKHgoF8e3t6enl0eX19gIF3b3N8gX14fIGIioeFhoeLkJSUkJGTk4+U
-mZmTl5aYlZSXlZWUk5aYlpWVlJOWl5yckpKRkZOWmZeSk5ealpWWkpOWl5WYlI+S
-mZmZl5eYm5mXlpiamZubmp2cmZeWlpGLh4aEg355c29wcG5qZGVlYWJfYGFfYWBf
-XWBdXV1bWlpeX2ViYl9dXVpeX19gX11bXV5jXVpcXl9eXl5cXlxeXF1cX11eX2Bg
-ZWNgX2JiYWJhYmFiYWNkYmNgYWFgYF9dW1tdYF9eXl5ZXGBeX2JbWlxhXlxcWlpZ
-V1hVVllZXlhYWVhYWltaXl5bWltbXFxaV1paXl5cXV5cWllWV1RTUlVZWVdZV1pZ
-V1pXWFZZWlxeX1pZV1dXWVtWWVpYWVdYWVhUVlVXWVxVVFRWVlZUVFRWWVtaWVhX
-V1dXWFtXV1dWV1laWVlaWVdYV1hVUlZXWVhXVlZXWFhZVllXVVRTVVdXVVZXV1ZW
-WFhXVFZWVldYV1dYVlVWV1VYVVZYVlNWVFRTUldWVVRRVFVUVFNUTlVTUldXU1JT
-U1JVVFNUVVZWU1FRVlRVVFZVV1dVVVNVU1VYWVNTVFdVVVVUU1RVVldXVVVWVVZW
-WFRSU2BUVVVXVFRXWVdSVFVXVVdWU1VVVFZXWldYXFlYWFdUVlZVWFpbWVlaWVlc
-WldbXlxaWVpaX1tVVlpaXFxXV1hZW1peXVhXV1ZYWlpZV1lZWVRWVlhYWVhZW1la
-X11WVFRWVlhaWllXV1daWVhbWVpYWFhZWVxcWllYWV1bWFdYWlhdXFpYV1tbWlpY
-WVxbWVteXGFcW1xgXlpaVltaW1pVV1tbW11aWlpaXV5dW1hZWVhZXVlaW15gW11b
-W19fW11gX2BfYGBkZGRjYmFdXFtdYF5fXmBfXl5cXFxeXVldXV5fcaTC0dvg4+bo
-6ejr7Hd5enl1dXd4c3V7d3l6d3d4eHR2d3R3d3d5eHh3eHl6fnl5d3h5eXt9enx6
-e3l4d3x/fHp5eHt7fHh5fHp5fX18fHp7fHt5enl5eXp6eXl4eHl8eXt6e3x/enp5
-enx8eXp5eXd2d3d8fHt9enh6d3d3eXp6d3x8e3p5e3x6eHZ3d3Z3dHN2dXFycHJz
-c3B0dXFxcnJucXFzcnV1dXRycXN1dXZzc3Z1e3Z2dnd2c3V4dnN2cnh7d3Z1dHR3
-eHZ1d3Z2eHh5eHp5d3Z3dHR5dHR1c3NvcHFsbXByc3N1dXFwcnJvcHBwcnBub25s
-bnBxcXFwc3BwbWxqbG1sbG1saWxrcHBsa2tqbGxpa25sbGxra2tscGxtamxramdq
-a2lraWptbmxramhnaGZpamNgW1FQVVtfYWRrcnRvb3BvdGxqZmRkZmlmZmJhYFRO
-R0NARU1bZm5vb29sbGpqaGdpam9qaWdna21ta2tqaWxsa25rbm5saGtwa2dqb3Fv
-bHBycXFvaWlmZWZlZmFjZGVmZmRiX19jZmxta2ZYVlpbYWNnaWpmYWNmaGdla2dp
-cHN1dW5wdHRubnR2c3Z7fXt4enp1c3p7e39/gHx+g4aEgYCAgH+Cf3+EhXx5cmpp
-b3Z5eXR0fn98fn+Af4KJio+PkZSUlZSTlZWYm5mWl5iUlZWTkpGTk5GQkpORlpeT
-kpCPkJOQj5OUlpmXkZOUkpKUkpORkZKUl5eVl5aUlJWYnJ6foZycnpyZmJeYlY2I
-fHl8enRua2ZkY2FfYWRhYGFjXl1dXWBdXV9aXF1jYGFiYFxcXV9eXl9cXl9eXF5c
-Xl1dYWBfXmBeXl5cXVteX2JiYGBeXV9gYV9gYmBfYl9fYWBgYWNlZGRhX15dXFpb
-YF1dX15dXF1cXV9fXF5fXltdXlxZXFtcV1NbXFlcXFpZWFZXWlpZWltYV1laWlxb
-W1tbVFpaWVdXWFlZVVVaWFpbXFtaVllcWllYVllZW1lYW1xdXVlZWVhXV1lYV1hc
-XllUV1VXU1JWVVRUVVVVU1VWWFhYV1ZYWFpYWllVUlJUWFtaWlpZWFdWVlVVV1dU
-UlBTV1ZZV1ZZV1VRUlJVVlpZWVpWWFdYV1RVVlRUVFdXV1hUVVRWU1VVV1dXVlVT
-VFNTUFJRU1ZWVVNTVFdWVVNTVVZVVVhXVlJUVVZVVlhYV1ZUVVZWVVdXW1pYWldW
-VVVVV1dVVVVWV1laVFRVVldWVlRVWVNVV1ZTUVVWV1hbWFVWWFZWV1dZXlxWVVZY
-WFdYV1hYVlNTVVhWVVVUV1dbWllZWltaWFpXWlpYW1ZXW1tbVlhXV1lbW1lYWVZY
-WFhXVlhYV1lYW1hTVldWV1pbXFtYV1xaW11bWVZWVVdcWlpYWVlYW1tbWVhXV1hZ
-WVhYWllZV1daWlZZWllaW1ZVV1hZWVxeX1xaWVhaWltcWltaXFpaWVhaXFlbXF1a
-XV1aW1hYWVhZWl1cW1tbXFxdXV1eXltfXl5hYV9eXF1fYGNoZGFiYV1dXl1dXlta
-WVpaWlxcYGBdXF5gXWB1rcTO2uHj5ufp6uvsdnl7enp4d3Z1dHZ4d3d5ent2dnV0
-cm5xd3d6eXh4d3p6d3d4dnh4eXd5e3p7enh3eX5/fXp5e3d4eXx/e3t7e3h7fHx+
-fHt5enh2eHd6ent6eXd5enp7eHx7eXt6enp8fXl3eHh5eHt8foB8enp8enx7dHh6
-e316e3d3eXp6enZ2enZzdnh0cHFydnV1c3NzcnN1cnRzdnRycnFycnRzdHV1dHR0
-dXd4dnZ6dXNzdXZ1dXZzc3h5d3d0dXZ2dnR0dXd3d3Zzc3V3d3RxdHV4c3JycnFw
-cXJwcXNxb3FxdHRzcnRwbm9wcW9sbGtydnBycWxsbGttbWxvbW5wb2tqam5wb2pp
-bWxsa2lrbWtsbm1tcG5taWprbGpraW1ramlsb3BubGxoZWJkZGRnYltXUkxOVlph
-Z25yb21wcW9tbWhgX2BiZGJfXFhSUEpFQUJJWGVtcHVyc3Nsa2hpZmdoaWZlZ2Zu
-b21ubmxsbG1vbmtwc29ucGxsbnNxbnBxbWxwcG9saGlnZ2ReY2loZWdjYGFgYmNh
-YmdrbmZeWFhbYWRmY2VjZGhlZGZsa2tucnZ3bm12c2xudHZ4eXx+g4WBenZ3dXR1
-eXt7e3x9gomPi4iKiIJ/e3t9e3Z3bGNjamxudH17enx+gH59f4KGjIyNjo2TkI6T
-lpaXlZORkpOTi5CQjo+PjIuPkZKTlpKQjY2PkZGUlZWVl5OSk5GSkZSVkI+NkpST
-kI+SkpSTk5ibnpyXmJedm5mUlpSQi4R/eXl3cW1qa2ZeXlpbXmNgYWBdXl1cW1xc
-Xl1bXl9gXVxcXl9dXmRjX2BfXVtbWl5kX2FhYF9cXV5eX2BhXl5hZF9hYl5fXl9f
-YV9gXF9hYltfYWNgXmJhYV9dX2FhWlpdXWBeX19cW1xbXl5fXF9fXF5aXF9eXFtc
-XF1dXlxcXV5dWlhaXVtWWV1cXF1cXV9dXFpcXVlZWllbWldZWVdXWVpdXFpZV1hY
-WllXV1pZVFdaWlxZWVZVVVZUVFhZWFlYW1dWWFlXVVZWVlZUVVZVVVdXV1dTVVdV
-WVhTVVVSVlFQVldaWFhZVFRVVFdXVlZWVlVVV1dXWVVUVFRUU1dZWVhZVlVTUVZV
-VlRTVlRTVFRWVFdWV1NTU1JUV1dZV1hXVlRST1FUVlZTUlFUU1VRVVlUVVhZVlZW
-WFdWVlpYV1VWV1ZWV1hYWFlXV1VXVlRWVFRTUlRUWFdZWltWV1dWV1laV1VXXFdW
-VlJSU1VXWF1aWFZWV1ZXWltaXFhWWFNZW1hXV1RVVldXVlZWWllXVllbWFRYWVpY
-WVdYWFtaWldZW15bWVpbW1tcWltbW1ZZW1hYVVZUW1lYWVhXVldYWVlXWllZWFhZ
-WllWV1dZW1pZXFpbW1tZWVpcW1pbWlpYVVdaW1lYWVxdW1xaV1laWFdWV1xYVVlb
-W11aWVVXW1xdX15gX1tZWltbWlxZWllcWFdcXFlZWVpbW1xcXFxfXVtcW1xdXFxc
-YGBgYGBgX19jZGNlZWRmZGJiYmBgW1tcX15ZWlteXl9hW1pcY4KxxtLb3+Pn6Orp
-7Ot6eHd5eHp5eHl5eXh2dnh5end2dnV1dnZ5eHh5eHd4d3h2d3l5eHV3eHh8fHx5
-e3p3e3x8eHh3eXp8fHp7fHl5e3x9fXt5e3h6enl3dXh5e3t6eXZ1eXt8ent6d3l5
-d3d7fXp5fXp4eHl6eXt5eXl6eXd3ent5eHN1eHl4enh3dnZ6eHZ3dXNzc3R1cnB2
-dHF3dnR2dXFwcndyc3Ryc3V1dnR1d3h1dnl3d3l4dXZ1d3V5dnV0dXh6dXFwcnZ3
-eXZ1dHJ0dHZ1dHRzc3J2dXJxcXJwcnNycXFxcHN0dHR0c3JwcHFvcXFwb3Bubmxv
-b29vbGxubm1tcG9xbWxsdnNubW1sbXBvbmppbm1samtua2ttbmtubWxua2trbG9s
-a2tsbmxqaWlkYmNjZWdjX1pWU1RVW2Ntc3R0dnd3dHNtZWFeXV1eXl5YVVFJRUVC
-Q01ZZW50eXl4dHRvamtsaWdoamdnZ2htbW1vamprbW5vb3JzcnJua2tub3Fwbm9t
-bm1vbm9vbWhoZmdna2lpaGRfYF5eYl5gaXBuamZeW1tdZGhpamdmZmhnZGpubm1x
-dHRwb3JvbWxyc3FyfICChoR8d3d7fH1/g4GAgISFgIKEhYWAfXV1dnBsbGdgXmJo
-ZWhweXt3eX18e319f4aFioqGhoSLj4+TlJOQkI6OkJGOkY+QkI+OjpCQkpKXk4+K
-ioyNkZWTkpOTkpCOkJKSk5OMio6Tlo2Ljo+Qk5aYmpeYmZ2XlJWPjYqNioWDfnt6
-eHhxaGVnZ2NbWVtgXV1dXV1aXV1dXl9eWlxeX19fY15fYGFhXV9fXl5cXltbYGBb
-W2JgXV5eYGBgXF1gYmJfXWBeYV9hXGFiYF1cXWJjZ11hYmJgYmRiX2FgXWRfXWNh
-X1xdXF9aWlpcWltgXVxeWVxbWV1dWVlcXFpZWVtcXF1fXVxeYF1bWVtbWVxcXF1b
-XVpZXFlYV1paXF5dX1xZWFlcW1pXWllYWFlYWFhaWVlXVlVYXVtYV1dXV1RUU1VU
-VFRWVVlYV1lYWFRUVlVZV1lYU1VWVFZUVVVTVlhbVldVWFdXVVRUVFNUV1lVWFZX
-VlVUVFdWU1ZXWVdUUlZVWFZVVFhXVldZWVdVVFRUVldUUlVWVVZUU1RWVlZVVFNU
-VVNRUldUVFRRU1RUUlJSVlZXVVdXV1NWVllaV1dXVVpWVFVVUVVUVVdWVlVTUlFU
-U1RVUlJUVFRYVlZVVlhZVlhZVlRVVldWV1ZTVFVWV1ZaWlZUVlZVVVZZXFpYWlZa
-V1ZVV1lVUFFUV1VVV1hYWllaXFtWV1tYV1tZW1laW1lZXFhYWF9dWFlYWVpXXFxc
-WVdYWFlYWVdYV1lZW1lZW1hbWllYVlhZWFdZVVtaXF1bWVpbVlhaV1hYXFtaWVlZ
-WFhZWFlZWVxcWVVVV1hbVlhZWlpaV1dYWlpfXVpdW1taWFdbWlhXWFhYXF5dW1tZ
-WlxeWVdZWl5dW1xcWlldXmBdXlpaW15dYF5hYmBhYWRhXmNkZWZoZWNkZWNfX11e
-XmFgXFpaX2BeXGBjg7DH0tvg4+fn6enr6317enh5e3x7e3t4d3l9fHl5eXh6d3l6
-enl4dnh3dXN1d3d5eXl6eHh5enl6enl4enp4e3x+fHp5eXl5eHp9ent8e317enx8
-e3p6enl7eXh6fXx5ent7eHh4ent5eXl6eXp8e3x8enl5ent7eXp6e3t9enp5d3d5
-eXd2eHp6eHd1d3p5dnNzdHZzdXRwcnVzcnJ2dHFvcXBxc3RzcnR1dnZ0dXZ3eXl7
-eXp5eHp5end2dXh4dnV1dXd0cXJxdXZ4eHd3dXV2eXh3dXd3dnJxcHBwcHFxcXJy
-c3N0dXV0cnFydHRvbnFxcXFycHFycXFua25ta2tpbGpqaWtsbW5tcGxra2pub29w
-b2xtbW1ubWpra2xsaW1ubGltbm5qamttaGhoamhpZ2hlYWJiY2RjXVhTVFRbYmhy
-eHZ4d3l1cm5qZ2BbWFdWVE5MSUhEQkRHUV5ocHF2dnVzbWxxcWtramhoZ2traGxo
-bGppam5sbnFwcXFycHBvb21wa2tsbWtsbnF1cnBua2lpb2pmaGprZ2VgYWNjYF1h
-aW9vbmliYGFnaGloZmNkZmtpamtwcXR0bmpoa21vbGpvcGtvd32ChIaEgX1/f4B8
-eXx/enVzcmdlZ2ZjYF9dXl1fXGFkZGNoam90dXJ4eXR5fX18fYODgXx6f4eJjpGQ
-ko+Qi4yMjJCRlZWNjo6Ni4mOj5ORjYuKi46PkZGSk4+PkpGVk5KQj42KjI+Pj42O
-kJOWmZaXmZeVmZaTjomGgX15e3x7eXd5eXNsZWRjYFlZXFtdX11dX11cXV5fX2Bf
-XV1dXV1dXF9fYV9eW11dX11eXV9gYmFeXV1eYGRfXl5fX2BeXl5fYWBgYGNiXV9g
-YGJgYmNkYF9hYmJhZGJiYGBdYGJfYF1dXV1cWl5ZXF1dW2BgXlpaW1pbW1xbWlxc
-WFpeXF5dYGNhX2BdXFxaWlpYWFlbWVpWWF1bWllXW1xcXl5eXl1aWVdaWVhZWlpb
-WllYWVtZWFhZWlhXWVlXWFlaWldUUlFQUlRXV1hWV1VVVlRVVFZXV1hWWFVUVFNU
-VVZUV1hXV1JTVlRVU1JVVlpZWFxaWVhVV1lVV1ZXVlhXWVZUUlNSVFRVVFZUVFZW
-VlVUUlRTVFVWVFRZWFNUUlJVVVZZU1ZYVlZUVFVWVFVWV1VWVlRTVlZXVVVVVVhW
-WFdVVFdWWFVWWFRVV1VXVlZXV1VWWFhcWFZSUlRSVVVYVlJUVFZWWlhWVlZUU1VW
-V1dXWFdVV1ZWWFlVV1ZXV1ZZVVZVV1dXV1ZYWFlYV1VVWVdYV1ZWWFlaVlRWW1lb
-WllZWFlbWlpXW1pXWldbXFhXV1ZZWVdXWVdZWFxcWVhXV1daXFlXV1xbWFhZX1xZ
-WFZXV1hbXl1aWVhYV1lXV1lYWFtaW1paWldZV1NQWVhTU1VZWVdYWFlXWFxaV1xb
-WlpbWltVVlVYWFhaXFxcXl9cXFxcWl5fV1pZXWBdXF1bXF1fW1laXFpaW11dYF9d
-YWFlZGJhYGBiYmVlZmNkY2FiYmJhX15dYF5dW1xdYl1bXWR0qMjS2+Dk5ujp6uzr
-enp4enZ4e3l5d3h3dnZ3d3d5d3l8eXp4d3d4d3h0dXZ2dHl2e3t5eHh3enx7enp5
-dXl5en58fXl7fX17fX1+e3x7eHt5eHt6fHt5fX96eHZ3d3Z7e3h1dnd3d3h4d3h4
-d3p5fH5+fX5/enx6d3t+fXp9enh6e3t6e3x4eHp6d3d4eHh5eHh3c3JzcnNyb3Jy
-cXFwcnFycnFxcHF0dnd0dXZ2dXZ6fHt4eHl3dXh5ent+e3l5d3Zzc3R3eHVyc3d1
-c3Jzc3V2d3d1dHJ1dnNwcXNxcnNzdHVycnNybm9ubGxub3FzcXBvcnNwcHNvcG1r
-a2psamhoamprbW5ybnFwbWtrbm1wbG5vbm5tbm1tcWpsa2xsaGpqZmZpZmVoa2tr
-aWpoaWZoaWlmZGNlZF9dWVJOUlpia290dXZ3enZxcGtnYFpWUU5NSkZEQkJERk9Z
-Ym9xdXl7eXVwcHVyb2tpaWxsbmxqaWpoZ2hqa2tqbW1qbm1vbm9ubWtsbG9wbW5x
-dHV3dHBva2xta2dlZmdnZmBjY2NiXl5iZW1ycW5nYmJmYmRnZ2ppa21qaW5ydXNy
-Z2ZqbGtnZ2tqaGtudXyDiY+Rjoh/fYKGgnlubGxoXlhcXV5aW11jZF9dXWBiZWpu
-b25tbnNzdXp7eXh5fX95dnd9goSFioeJi4uOk5KOjomJjI2Mi4mLiYyMjouIiIqL
-kZONjY+Rj46Qk5KPjI6Ni4eGjY+PkJGQkJKPkZGUmJeSkYiFhYJ9dXNxc3Zxc3Nw
-bGZkYWBcV1ZXWl1dXF5bXVpbX1xdXFxYWFdcXVxaXV9gX15bWF9dXl9eX19eX11c
-X11fX11hYGBgXl5dXl1gYWBiZmNWW2RlYWBhY2NfYF1eYF9eXl5eYmNiYmNeXV1c
-W1pYWltbXFxaW1xcXVxbXFxdW1taW1xcWllaXl9eXV1aW1pbW1tbWlpZWlZXWFpZ
-WFlZW11ZWVdWVVpbW1tXV1hXV1hYWVlYWVdYXFpZV1ZVVldWV1hVW1pbWVZVVVNS
-VFZVWFdVVlZWVlVYVldYVFRVVVNSU1NVV1dWWVlXV1RUVlZWVVNVV1hYW1hWVVhW
-V1VTVVdVU1dWVFRTUVJWVFdVVlVVU1BUVFJUU1NRU1VTUldWVlVSVFVTVlVYVlZT
-VVZUUlZXWFVVVlhUU1NTVFNXVlVVU1VXV1ZRVVdVVVVVV1ZVVldYWVZVVVdZWVlY
-WFdUVVNWVlVVV1RYV1tYV1ZXV1ZXVlhXV1ZUUVRXWFlYXVlYV1lZWVdXWVhWVVZY
-VlZYWFdZVVdYVVZWVVRUV1teV1RXWVlZVVdXWVpZV1tbWFpYVVVZWFZVVVRUVFlX
-WFdZW1tUUVZaWllWV1dZWVhaWGF8aVlbWVtaW1laW1paWlpYWVpcXVxcXFhXWl1Z
-WVxZWVhaV1RVWFpXWV9cXFxYWVlcXVpaWVpcXVtbWlhXV1pcXFxeXlhaWVlZWFdc
-XV5fXl1dW1teX11eXlxbW1thYFxfX2BgYWFjYmBfYF5gY2RlZWJkZGRiYV9aWV9g
-YF5bWFpeX19dXmyoyNPb4OTm6Onq6+t1enh4dnZ4eXl6e3p4c3h2dnd3eHV0dnV2
-dnV1dnZ4enp5dXZ4ent4eXl5eHh6eHl6e3l6enx8fXt7ent5eXl9e3p5eXt7eXl8
-eXp6eXt6endzdnZ3d3V0dHR3d3d3eHt5eXd2dnl5enx9fXt6d3V8fXx7d3d5e3t8
-e3h2d3t6end3dnNzdHh2d3JwcnBvcG5vcXJzcW9vbnJ0dHRycnJxc3Z4dXV4eXl2
-dnh4eHl5eXl0dnZ1dXRvcnh3d3RzdHJwcnNzcXJ1dHZ2eHZ0c3BwcnNxcHJ0dXRz
-bXBwcGxxc3FycXNyc3RwcXFycG9tbmtwb3FxbGptb3Bvam5ycHBubWxtcXFrbWto
-bW9sbGpsbWhramhqaWhta2tramtta2tubGpsa2xqamhnZmdlYV1UUFBSV2FtcnZ6
-en96dnVzbWhhWlROS0tLRkJBQENFTlhjbG92eHh3dnFycW5ua2loaWtrbGtraGZn
-Z2hobGtoaWdra25vbHBwdWtrb3FycXNyc3N1eHRyb2xsaWlnZ2RjYmBiYGVgXmJi
-Ymhqbm9uZmNkaXFsa21samhlaGttbnBpZ2hsZmhtbGtmZWdrc3p8gIaMiYiIhod9
-eXNwc19XVltfXV1fXV1cWmJaVlpjam5uZ2pscHF2cHR0dnBwdHFwcXJ4e3x9goSE
-gYKIjImJh4WFhIeHh4WHiI2MiYmIhoeNjoqEi4yJhYWHj4yMioqJiYyLi42QjoyK
-j5KSkpKTlY6Jgn13dHNzdnt7dW9taGJhYF1dXl1bWVdcWl1dXFtcXVtaXVxbWllc
-XVhaW1lZWlpdXFtZXFtcXV1eW1ldXl1eYF5dXFxcWVpcWmBgXl9fXV1dXFhfYGBi
-YF5eYV9cXF5fX2FfX2FfYWZkYF1eXFxcW1hcWlxaWVdZX11bXFxbXWBeX2BeXV1b
-V1laWl1aXF1cXVxbXVtcW1tcXFpYWFlYV1hYWFhZWVhYWVxbWFlZXFdXW11aV1VX
-VlZYV1daW1taV1RXV1lZW1pZWlhXVVRWVFRXWFZXV1ZYWllYWFhVVlZWV1dUVFVX
-V1hYVlZSU1NVV1dYVlZWVldUV1ZRUVVVVFVVVVZYV1RVVFZUUVZXWVpYU1VVVVNT
-U1RTU1VSVFdUU1RWVldUV1lWVlRTVVJTVlhXU1JXWFlYWFdYVlRTU1ZXVVdXWFZX
-VlZWVVVVVVpYV1NVV1ZXVVlZWlhYVldYWFtZVlhXVFZYV1VYWFdYU1ZUVlVZWFta
-WFxbVlNVWVtbXFpYWFdVVlhVWVxaWFhTVFdYW1dYWVhYVlVYV1VWWllZWFtZWFta
-WFxZWFhaWVtaWFVSV1RXV1ZXWllaW1pYWVhZXFpdWltcXFlYV1laWltYX2xrWFtf
-XVhXWFtcWVlbXFlZWlhbXV1bWlpcXFxbWltZW1hYVlhZV1dcWlZVWFlcXVlZWVtZ
-WllaXFxaWFRWWV1dXlpZWlpaWVdYWFxdYF1bW1lcXFtaXV1dXV1dXWpeXV1eY2Jg
-ZWVlZmNjZGVgXmJiY2JjYmFhYF9eXVtbXV5cXltaXV9fbKjH0dvg5Obo6erq7HZ2
-dnd3eXV2eHx7eXl1d3h8enh4eHZ8enR3d3d3d3Z6fHt4eHt8eXt3dXh3eH58eHl8
-e3t7fHl5fHp7fHp3eHl4eXl5d3l5eHl5enp7eXl6eXd4dnd3eHNzdnZ2dnl2d3d3
-d3l5eXp6enx9gX55ent5eXh4eXl3eHp6eHd4enh7enl3eHN0cnR2cnFxd3VwcXFy
-cHRwcHFwb3J0c3JzcHBzdXd2dXV2d3h4dnZ1dXN2dXd2dXZ4dnN1dXZ4dnN0dHJ0
-eHR1dXJyc3J0c3J1dXN0cnBwc3V3dW10dHRzc3F1dHRycHNzc3FvbW5xbmxsbm1u
-bnRycG9ubGxsbnBwbmxtcG9sbmtna2psbG5ubGhpam1qamtsb25tbGtqam1ubm5w
-bm5tbWxpZ2ZjY2JjXldTUFFWYWhudnp/gnx4dnRvbGRcV1NRTUlJR0NBRUtSXGVp
-bnN1d3ZxbG5ubGlpaGlraWloa2ppbGpmZWRnam5sbWpoaG1rbW5vbG5wc3RzcXJ0
-dnp9eXRxbm5qbGhnZmNgY2NiYGZkZWhkZGltcnRybmlnbW1ra2tsaWdmbm9wb3Bv
-bmxlZWpsbGZjX2FobG1rbHR6fIB+d3JsYGFoZV1bYGNgXVdUVVRbW1lcYWFoa2xs
-bG1vcHJ2dXFua21qbG1sbG1wcnl6e3t/fn+FhIOCfn+Cg4aEhoWKh4WChYWFiYmH
-hoKGjoyGgoSHiouMiYWIjouJi46OjI6QkoyOjY+LhoF8d3NtbWtydXNuZ2FbW15d
-Xl1eYF5ZWVtbW11dXF5gXF1eXFdZW11cXFxcXltZXFtYWl5eXV1eXmBfYF9gYF9f
-XGBcX1xZWVteX19gX2BgX1xcYF9fX2BfY11cX1xdXl9eXV5eXV9gXl5eXV1hYV5d
-XFxdWlhWU1hfXVtaW1xdX19dW1laWllZWlxfXlxbW11aW11bXl1cW1lZWVVYWFla
-WVdZVVdcXVtXWFhXV1dYWFZVU1RWWVdZWFhYWVdXVlhXV1ZXWVlYWVlZWVdXVVJS
-U1dZVVVTVFZWVlZXV1VVVFRXVVVWV1hYVFZVVlRUVFJUV1pZV1dYVFRXVVhWV1VU
-VFRVWFZYVVRVWFVXWFpWWFdVVFhXVVVVVFVTVlZYV1hXVlVVVlhVVFRTVVNSVVZS
-VlZZWFVUV1ZWWFVVVFNVU1ZZWFlYWVlWU1JUVlRWV1ZUVldVV1ZVW1pZWllZWVla
-WFdXV1RTVFdTVFZVVVZWVFVXV1lWWFtZXFxYV1deZ1tbXVhTU1RVVlZYVVRVVVZW
-WVpaXFZUVlhWW1VWVlhXWVddW1xZWFZZWllbWlxaWVpXVVVYWFpYV1ZaW1taWltY
-VlZYWFZYV1dYWFlYWVpYWllbWFRYWltbW1tfXV1dW1xcXF1bWVpcWl5fWVlaWVlb
-W1pXVVhYWFpaV1ZZXFlbW1taV1dYWl1eWVtcXF1dXFhZWFdZWl1dW1xcWFdYW1xd
-WltbWVtaWVlYWF5aXF5haGJiXmBeYWFiYmNjZGNlZWRgXmFkY2JiYmBeYV5bW1pd
-XlxaXVpbXmR1qsTQ3ODk5+jp6evreXd2dnd3eHd2d3p8ent3dXh7enl9fHt4eHl6
-d3Z7fXl5eHh5dnt6eXl5dnp6enx+fnt8gH17e4F+e3x8end3eXt9eXl5eXl2dnl4
-eXp2eHt8end5d3h5eHd1d3l5enp8eHd0eXd3eHd3eHl8enp6eXd2eXh7e3p4eXh3
-eHp6eHt8eXd3dnVxcnFycHN0cnFvbm5ydHVxbXFycHJzc3N0dnZ3dXR1dHd2eHl6
-eHd2c3V3eHl3e3V1dXV4dnR1cXBwdnNydnRycXBzcnN2dnF0dG9ucW9xcnJubW1v
-b25vcnBzdXh0b3Byc3Nxc25rbWxtbW1ubW9sZ2ZsbWlrbnBubG1ybGxrbW1iX25q
-bW5wbWxqaWtsbGttbW5tbWtqaGxzc3Bwb21uamhlZGJiZGZiXldSU1JYZGl0eYCB
-g4R7dW9paWBcWlNPSkZCQUNFSlZjZWlrdXh1cm9qaWhpaGhnZ2llaGhqaGdpaWZo
-ZGZnam1tbWlqbXNwbW1wc3R1dnRydnV3ent4dHN0c25ubWxnZmRiY2JgX2JkZGdm
-ZWZpb3JxcGxtcnJrbGhra2dpcXFucW9ub2hmZWhmZWJjYmdqZmVgXWBiZF5dX1tY
-WVtfW1FRVlZRUUxNUF1eXl5maGppa2xoZGxzeHpwa2tqa2ZlZ2doampubnh4c3Z6
-eX6BfHp3eX6BgoOFhoiJhH+EgoKDiIeEhYmKjIWEhYaJjo2KgIOFgoeKh4uOjoyP
-i4eDgoB+d3Jwa2dpbmtoampkW11eX15iYl9eW11gYGFdW11cX19dXV5cXlpbWFpc
-XV5fYFxcWltYXF9eXV1fXlxgX15eXV5fYF5fXl1bW11hXl5dXGJfX15dYGBiZGRl
-Xl9fX2FgX11fYGJhX1teXmFhYGFhX1xcWltYWVlYWV5cXV1eX15dXV1cWFlYWFtW
-WVxbXVpbWFddXVpZWVtaWFdUVlhWV1hXWllYWFlbW1paWFlYVVRZV1dTV1VXVllZ
-WFlYV1VUU1laWllWWVdZV1RXWlVTU1RVVFVZVFdWWVVWV1lYV1VXV1dYWFRWVlVS
-U1VUVFRVVVVVWFZWV1RWVFVTVFZUVVVXV1ZWWFdVVVRSUlVWWFZZWFlXWFRRVFZW
-UlJUVVVUU1RWUlFSVVZUUlRVVlVYWFJTVVZVVlZVVVVUVVpXVVJUU1NVVVZUVFZY
-VlFUU1VVVlZXVldVVVZXV1VVVlZYV1dXV1dWV1lYV1VTVFhVVlVXVlZYVVNVWVtc
-X15aVlpbVlhYWFhWVVVYV1VUU1ZZWVdXW1tYVVRVVltaWldZWFpWWlZYWllZWVhZ
-WVdVWFZYWVxaWFhYWFhbWFtaWFhXVldXVVNWVlVXV1lYXVlVVlhYW1pYWVdbWVhZ
-W11fWltZXFxgXV5jX1xfXVxZWlpXV1lZWVZWWFpaWFlYWFZWV1hWV1VWVVhdXV1b
-W1xbXltZWFlZWlpcW1taXV5eW1tfXl9bXFxbXFxXVVlZW1ldXl5cXFlbX19gX2Bh
-XV9hY2JjY2NiYWBhYF9eXl1gX19dX11bX11dW2BfYXKrxdPb3+Pl6Ojr6+t3d3l4
-enh2dnZ4eXx9eHd3dnl5e3l6eXh6eXl6eHl8fHl4dnl5ent7fHt4d3l5eHl6enyA
-fn18fHt5enh5e3p5fX15e397eXh2dXh4eHp7fHx6d3h2eXl5enh7ent6enh6e3x9
-e3l6enl2d3h8gHt6eXp8enh5ent7e3h2c3h+eXh1dXZ2d3ZzdnFzdHNvcW9vcnR1
-dnpzcXNxc3Bvb3Fyc3RzdHR1c3l3eHh7eHl4dnl2dnh2d3Z3d3h6eHd3eHVzdnNz
-c3F0dnR3c3NzcnBwcXN0cnFwbm1ubW5wbm5wc3R1cnh2cXBxb29wbm5zcnBubW5t
-bW1saGhoZmRobW5ta3Jqa2trbWpqcG9sa2xubm1tb29saW1tbW9zcG9ubG5ubXBv
-bm1ramhhX15iZWRjW1RUVlhfZ3F2eoCDgn54dnNvaGJfXFRMSURBQ0VKVl9kZWtx
-dXNuamZlZmhnZ2ZlaGlmZGRmZmdkaWhlZ2dqa2traWhrbm5vcG9vcHF1dnV6enp4
-eHh1eHp2cnNtaWhoZGJkZGBgYF9iaGZlYmJlaW5yb2xtb25sa2prYmVna2trbGtq
-ZGBjY2RjYGBfXWFhWVNSUFFST09RWFJaUFBNRklKS05OT05QVllaX2Jqa2lpaGRe
-XmJmamdtbWdiXV5gZmtsamdpbGtoam5wcnN1c3F3fICBhISJjIyDf4SAgYSDgoOF
-iIyKg4ODhIWIiYN+goiKhYaLiImFgoCEgHx6dHJxa2ZnZWVlY2JkYFtbWV1eYGRg
-WFRXXGNjYl9aW11bXFtfX11fXV1hYV5fXV9cXV5eXFpdXl5dXV1bXmBgXl1eYF5c
-Xl5gX19dXV1fYWBhYmNjX2NfX19dX11fXmBfYF5ia2NeX15ZXFxeXl1eYGBhWltd
-XmBeZl5dXWNdXFxeXFpZWlxcWllWVlhXVldaXFxaWlhYVllaWVdZVldXV1VXVlZX
-WFlZVltbWlhZWVdbWFxaV1NUVVZbW1paWVdXV1ZZWVtaWltZV1ZUVFZaVlRTVFRX
-WVtZWFlWU1dUWFpYWFdVVldUVldWVFtUUVNUUlVVVlhUVFVVVVRXWVlUU1FUU1RX
-VlhWVVVVWFVWVVZXV1VWWltaWFdYWFVYVVdTUlRXVVJTVVhUVlZVVFVWWVhYWFRX
-V1ZWVVNUVFZWVldXVlNWVVZWVlVSU1VVV1NSV1RVV1hYWFZUU1NVV1NWWFhZVVVW
-VlRXWlxZWFhWUlRXVlZVVlhXWFdVUVVZW15aWFhdWlhXVlhbWFhZVldWVVZXW1xZ
-WFdXV1ZYWVxaWllcW1hbWllZWVlZW1tZWVdTVlhWW15eV1hcXFhWWFdZWlpVWFVT
-VFZaWllXWllYWFdYV1tZXl5eWlhZWldYWlpYWllXW1xbXl9dXFlVVVhXVlVXWVdX
-WFhXVldVVVZXVVRXWFdVU1ZYWmBhXFtZV1dYWltaW1paW1tbW1pdXVxaV1dUW2Bd
-X1taWlldXV1eXV1cXF5cX11bXF5dYF5hYGNiY2NgYmJjYGBhZGFhYF9iYGFfXFxY
-XFxfWlpfda/F0Njc4+Xn6Orq7Hh4enp6enh5eXd5enp4eXl3eXd5fn18e3p5eHp6
-enp5eXh5eXp6eXl8fX16eXd6fHp6fX59fX2Af3l6eHt5d3l7e3p4eXp7eXd5eHl6
-enp6eXh1d3h4enZ5eXl3d3p5dnt1eHt9fn1+f3p4en2Afnt6eXl+e3l6eHp9fHt5
-dnd3c3F3dXZ4dW9vcHN0dHNydHZ2dXh3eHVycnNxcm9xcXJycXN3dnRzeHd1d3V3
-dXd2dnN1dXR2eHR1d3h4eHl6eHVydnRydXh5d3h2dXFwc3F1dHZzbm5yc3Bvc3R1
-b25ydHJxc3NxcHBraWtsbWxqa21rbG1ubm1taGhma2xrbnJsbWxvb21qZ2VqbW5t
-bGxvbW5ub21tbWlraW1vbm5uaWtscGtsamppaGViX2FjZ2JeVVNVVVlfZnF6foSD
-f3p5dnBsbGhkX1pTRkVERk9OVV9mbHF2dW9qZ2ZmaGdnaWhnaGhoZ2RkY2ZmaWdm
-Z2doaWhoaGdrbG5vcHNzcnJ4e3l4enl4dnp8eXh4dXJua2lmZWRiY2BfYWNmampm
-ZGJgZm1wcXBxbGlpbm5qZ2NnZ2ZjYGJkX2JgXFtdW11ZVlNMSUdIS0pKSkpPTU9N
-S0lLSktHSUtKTlhZV1VZX2ZlYV5eX1xbXF5jYmRiXFpgamxvbWlpaWJgX19dYGFk
-aGdqaW52eX6Dg4WEgHx6fn59fn1+gomLi4eChIWHiYmJg32BfXuAh4iEgX18eXh7
-dm5qZWRkZGBhYGRkZGFeXlpYWlxdYWNfWFhcYGFhXFlaXFxdYVtZXFxdXl1fXl5e
-XF9dXmNgY2NeWVpdXl5eYV5fX2JgX15fYl9fYWBgX2NjYl9iY2JgX11gXV1hYV9h
-YWJjYF9hYl9cW1tdXFpdXl9fXl1dX11cX2BhYmFeXV1cWVhZX15bW11dXlZXVlVY
-WFpcY1lYVlRUWltVWVtVVllXWFZXWVhZV1ZWWlpZWVtYWFdXVlRYVVZVVlZWVVRW
-V1ZWW1tYV1dWVldXV1ZWVldWVlVVVFRTV1hbW1dXWlNVVFRUVFVVVVRTUlVVVFVU
-U1NTVFZUVFVRUlRUU1VVWVtYV1RUUlNUVFNWVlZUVFVUVFVVVlRVVVZVVllXV1pV
-VFRTU1VVV1RVVFRTU1ZUU1RWV1RXVFZZWFdUVFJRVVdUUlRVUVRXVlhVVFVUUlNX
-V1dWVFZWV1dWVFNRU1ZUVFhYWFhYV1dVVFVXVlVTVlJTUlRSU1ZUU1ZWVlZUV1dX
-WFdVVFhYV1VYWVxZV1haXFhYXVtZWFdYVVVVVldXVlhaWVtcXVtZVVZXWVpYWVha
-WFdYV1lYXFxaWVpbWlpVV1dXWV1aWVxaWVtaXVpcW1lcWFlaXFlaWlpVV1paXVdb
-XF1eXFhbXFpXV1lYVVRZWVdXWVxdVllcXl1aWFZbVVZWVVZWWFldWFdZXF1dWllX
-WVdZWVdZW1tbWlpdXF5cXFlYWF1bW19YWFpdYV9cW1paXWFfX15dXl5lXF1fXV1e
-X2JgY2JiYGFhX2BiYmBgXl5fYmBaWlpZW11bXmN5scTQ2N/j5efq6erreXp5enl5
-eXl5d3p5eHt8fHZ4enl6fn16eXh3eHh2dnh5eXh3dnl4fHt8fXt7e32Af358e3x8
-ent7fHp8fnt4enp5eHh4eHd5eXl7enp6enZ2dXZ5fHp1eXd4d3V2d3t7end3eXp6
-fXx7e3x7enx9fn15d3x/fXt8en18fnx4cnV1dXV0dnV4dG9ycnRycHJzdnR1c3Fw
-cnF0dHRzcXByc3J1dnR1d3d2dHN3d3Z7eHZzdnd4dnp9e3Z1dHZ2dnh3eHV3eXB0
-dnd3eXZ1cXJwc3V0c3FwdnN1cnBvc3J1dXVxbG5ycnJxcG5tb29tbnBvb21vbmxu
-bG1tamtsbGxsbGxraGltbW9vcW1vcGxtbWxwbWxub2xpaWlramxsa21sbGppbmxu
-bm5rZ2ZlZWVlZV5WVFFRV19la3F4e3x8eHd2eHRwcW9oX1dUTUxISk5UXGhqbnV2
-cGRjX2NoaGRjYGJgZmRkY2FiY2hlZWZka2pnZ2hoZGdoamxvcnJucnh2eXt7fHp8
-enp8enl3dnNwa2tpZ2ZmZGRnZ2dnZ2ViZGFgY2pxdHJuaWdpbGlkX2FmZGFbWF1f
-XVtZVlRSVFFQTEpJSEZDRUFGSkpERkdJSEZGSkpJSUxPVFNSUldZWldYWlVXWFZW
-V2BfXmNkZWdrZ2RhYGJgXFJPVFZaW1tfZGNnbG5yd3l4enx7dnJ3fH+Dg4WKkJCM
-ioaGg4aFh42Cen96e3l8ent6eXZybW1pZl9dWFhWVVhcXl9eW19eW1hbW1tdXl1a
-V1lcX15dX1pZWl5dXVxbXV1fX2FhYV9gX15eYF9hXl9eW11cXl9eYF1fX19fYV9h
-YWFhXmFeYGFjYGBhY2JfX1xbYWJgYWBiYl5dW11nYF1YWl1cXFxfX19eXGFhYV9d
-X19gYVxbWVtcWltbXl1dYGJgXllbWFhYWWB0W1lZWVdXV1hYVl1dWllXWVhYWFhW
-WFlaWllXWVlcWllVVlRSV1hUV1pYV1ZZWlpbWVVTV1dWVlVXV1ZWVldbXVhVVVdU
-VVhWVVNVWFVXVVJRVFVSUVJUU1VUV1ZWVlRVVlRXVlpXVFdWV1VWV1VVVlRTVFZT
-UVJTU1JSU1VUVVhWVlNVWFdWVFVVVFRVVFhVUlRXVFJTU1RUVlNTU1JTV1ZUUlFU
-VVNUUE9SU1FUVldXUVBSV1VUUlNSUlRYVlRWVVhXVFVTU1RRVVVVVFdWVldWWFhZ
-WFVTUVRVWldXVVJTVlVVV1dXVlZUVllYVVRUVVVXWVlbWFVYWVtaW1lXWFhXVlRU
-VldWVVhZWVdZV1ZYWFlWVlVYV1dVVldaW1lcWVpcW1pXVldaW1xYWFdZWFdXWVta
-XFpcXVtZWVdcWlhaWldaXFpcWlpcW1hbXFpbX1pbW1pZWlhZVllaWVlaWl1WV1da
-XmBbWllbWltaVlZYW1taW1dYW1pXWVpaW1dZV1laWlpaWltaWlpbWlpdXFpbXV5d
-XlldXV9dXFtbXV5gYl9cX2BdX1xeX11fYGJjY2NjYWJgYGFiYV5eX19cXV9cW1pb
-W11eZ3ywxdDY3eHk5+jp6ut6enx7eXl5e3h6d3h4eXh4eXp5enx5d3d4eHx9d3V3
-dnl5enp4eXl3eX1/fH17e35+fHp6enl5e3l6fH1+enx7eHp6e3p5fHx7eX15eXh1
-dnh3dnh4e3x6d3l5eHV1eHp7e3t4e3t8e3t9fX59e3p5fH97e3t4e318e318e3l4
-dXh5dXNzc3Z5d3JwcHBxb3BzdXVzdXJxcnFvcnRzcXFwcnV1dHd3d3Z1dnh4e3p5
-dnR4d3Z3dXh8e3hzdnZ5eHZ3e3p1dHNzcXFxcHJxcnh0dXV0dXZ5dnR0cXFvcnl1
-b29vcHBzdG9ubnBycHFzcXBtb2prbW9xb29wbXFwbm1qa2xrbGppb3Fxb29samxx
-cnJvbmprbGpsb3FsamttbnBubWdqbnJybW1tamZiYmRdV1lTTVJWW2FoanB0dXNy
-dnd3e3x5eXZtZV5cUk1PVFxhZW5xcXRya2FhX2FnZmJjYGNnZWJhY2ZkYWNjYmVn
-ZmRkamhqZmdob29wa25ydnd4dXZ5eXl4e3t8eXl3dnJsa25ra2dmYmhpZmdkZGRm
-Y2JhZW9ydXJuZmRmZ2RdV1tdX2BcWVxYUlRSUFJMS0tISEZBQEA/QUJDRktMSkZE
-REhPUFFST1BQTUxNT05PT1JSU1BQUFJUVllVWl1fXV1aW1lXWVdSSEhKT1VTVlhd
-YWRnaGtzenZ1dXBqb3R2e3yAh4qKjY2MjIiFhISCgX15eHNydHJ0cnNxaWNgX2Ff
-WFNQUFBTV1NYXVtaWVlbWlJQS05OT1BUWl5gX2FgYV1cXl5bXlpbXWJiYF5eX19c
-W11cWl9eXF9eXF1cWlxdXF5hX15fX11dX15dYWNgYGJkYmJgYl5gXVxaXGBhYF1e
-W11eXl1cW1lZXV9fXl9gX1xcXF5fX2JdW11fYF1eXV1eYF5eXV1cW11bXF1eWlhV
-VlZaWlheXVtbWlhdW1lYXFlZWVhXWVpdWVhYWVdWVFpeWlVXV1dWV1dWWVtXWFdY
-WVlYWVdUU1VTVVNUWVlaWFdXWVdVVlZVVVhYVlNVV1dYWVZWVVVUVlVVVlZYVlVW
-U1VWV1VTVVZVVldYV1ZVU1dXVVNTVFRVUFNTU1JUU1ZVVFVWV1dVVVdYWFZVV1VW
-WFhVU1hRVFFSVlZVVFJRUVBTVFRWVVRVVVBQU1RVVFJWVlhWV1NRVVVTUlJQUlJU
-VlVTUVVVVFRVVVdYVVZUVFZXVFVaXFlaV1lZV1ZWV1VVV1VVV1hZWldVVFVUV1ZZ
-VlVUVldWVlVWWFdYVlZaWFlbWFVTVVhaWVhZWFpZWVVWV1hYV1RUVlhYW1dXV1lZ
-WFhZWVlaXF1ZWVhZWVhZW1lYWVlYX2BZWVpaXFpdWltcVlhZWlpbXF1cXVpZWl5b
-XFpaW15cXFpaWlpZVVpcXFpUVFVXWldWWVhYWFpbW15ZWFlZW1pcWlhZWVlbWl1c
-XFhdX1pYWFhaWVtdXFtcWlxZWF1eXVxcXVxcXWJeXlxbXl9hXlxaXFxeX15fY2Fj
-YmBgZGhlZWViZGRhX1pdYV5dXl1bW1laXF1ndK7Fz9je4eTm6enq6nx9fHh4e3x7
-eXp2d3Z0eHl3enl4e3t7dnZ7eXp8fXh5eHx8e3p4eXt6eHl8fHh4e3t7eXl5fHl4
-e3t5fHt7fHl4d3t7enh3dXd5e3p4eXp6e3Z5eXl2eXp7dnZ4dXh7e3l9enh4eXh9
-fXt+f31+end5e3t9fX16eXh6e3p8e3l5eHR0dXV1c3V0c29xc3Fub3Z0dXJxcXFy
-cHFwc3FvcHFzcnd4enh3eHl2dnZ2d3d2eHd4dHJ5dHV4e3p4dXd2dnd1eHV3dHJy
-cnJ0cXNydnh1c3NzdXZ0c3BubmxvcHFzcnFubmxsb3N1cW9rcnBvcXBta2xrbW5s
-a25tbWtubGlqam5tbHRycXFraWlrb21tbm5ubWxpamttbm9vbmxub29ua21ubW9x
-bW1pYWBiX1xZVldWUFBWYGNobGtvb29wdnx+gH1/e3VwZl9UTkxQV15haW5xc3Fw
-Zl5fZmZkYGFjZWFeX2JjZ2VmZGNhZmVkZWpsbG1qa21wcG5wcnFwc3V7e3l4e31/
-f316eHl4dnh2cnJwb2xsa2ppamlhY2dlZmFgYWludXFta2VlX1pYVlRZWVtbWlVV
-T1FQTEpMSklIRkNFQT9APj5GT09IQkJFQkdISklKTE9NSkxNUk9OTEpHSUZLT05R
-U09MTE5RU1JTUlZRTktHR0RITk9QT1deX2JiZ2pwcXBpZWtxdHV2f4KGgYOIiouN
-iYeEgH55c3F1cG9zc25tbGphXlxZU1NOTk1KS01RUlRZV1ZVWFlXU05LTE9RVVxd
-WV5dXmBhXlxdXVxcX11cX15aXV9fXVtdXF5bXF1eWl1cW1xfXF9fXV5fXV1eXVxe
-W1xdX11fYWNfX2RiYF5eXmJiZGBdX11eXV1dXFtcW11cX2BdXF9eXlxbW19gXV1c
-WltaXF5dXF5eXlxaWlpaWWBeXF1eWFZWV1laXWBiXl5fXV5bWl1aWVlZW1pbXV5b
-WFdYWFdWVlZYXFxYWFZaXFdYWVtYV1dZWVZaWlhUU1VXVldWWVhYV1dYV1ZWVlhY
-U1dZWFhVWFlZWFVUVVdZV1lZV1haWlVVU1VUVVZXWFhWVFNTU1RYWFRUUlNVV1Za
-VVFVV1dUVVRWUlFTVl5UVVZVV1lVVFVUWFlWWFlWV1hXVlVUVlRUT1FWVVRUU1NU
-UlRXVlZVVVNUV1lWVVRTVldWU1ZTVlRUVlVYV1VUVldVVVZWV1lYWVVVVldXV1dW
-WVhVVldWVVVXV1hYVlhXV1dVVlRVVlhWVlNVVlVTVVhZWFdTVVhWU1NVWllWV1pc
-V1dWWltZWlZVWFlaWVVWWFhaW1pZWVtZV1haWFZWWVhYWFlaWV1bWVlYWFxfXllc
-WllWWFtbWVhYWltaWV9aWltZWFpbWltcW1lZV1laWlxeXFpZWFhbW1tXW1pZXFlX
-V1lZWltaWVhYWFxaWVpbW1taX1pZWltbW1pXVlVWWlpaW1taV1pbWFdaWFZcXF9f
-WVlbXV5dXFtZW19eXl5eXmFiY2FiY2NkYWJjY2RmZGNkYl9fXV5cXV1aWVlaXFpb
-W19yrcTQ2dzh5Ofo6evqfHl6ent7enx7enh3eHZ6enh3eHh2eXh7d3Z2d316eHt8
-fHx7eXp4enh5eXp5eXp6e3h5enh8fXt7fXp4e3p6fHp5d3l5d3Z2dHZ4eHh7e3t3
-dnh4eXt6eXh5eXt7eXh7e3t9fnp9fX18fH17fHh4enl7eHl5e3l8end6enl6eXl4
-eXZ5d3Vzc3Jzc3VycHVzc3Bwc3JubnNxcHJydXR0cXV1dnh7eXh2eHVzdXZ3end2
-c3Z0cnR3c3R0dHVzdHNxcXZ3e3l4dXBwdHJydHJ1dHF1dHNzc3BxdnJucG9vb3Jv
-cHFxc3BudXNxb21wbnJyc3JucXBtbm1taWppamxwbmpqaWxvb3BubnBsamxwbW1r
-bW5ua29wbW5tb3BvbW5ra25yb25xcXBva2VkY19gYmFbWVRTUVJWYWRtamptb3Jz
-eH5/f35+fXlvY1VLSUpQW2dvdXV0cW5hWFlgY2loZWVjX2JiX2FjZmlnZ2RlZmVo
-ZmdpbW5sbnBvcnFvbHFydXl3eH1/f35/e3p4dnZzdHV1cnBsb29xb2xqZ2ZmaGNf
-XVpdZGhtcXJxaGViWllVU1NUVVVSUU5NT09MTUtGRkhHRENDQ0JDREFGR0hGSERC
-Pz9BQUNIR0VGRUpLS0lGR0RISUpHREVGR0ZGRUpMS01LTkxNTk5KR0ZLSktOWVpZ
-XF1eX2ZrbWNeZ29zdnuBiYWDiYyQkIyIhoWEgXxydHFubW5tbGplYl1cX1pVU1JO
-TEtHSVBWWlRTVVtfXlxeWVZTU1NZW15gWlpbXV1dX19dXF9gXl1bXl5aWV1bWlxc
-XFxdXFpcXVxcXFxdXGFiXV1eX15fYF5bXlxgX19fYF9eXl5fXl5fX19gX2BeX15d
-X19dXl1eXFtfYF1bW1lYXV5eXl1eXlxdXVtaW11dYF5bXWBgXFpcXl9cWlldW1xc
-W1pZXlhZWllZWVtbXlxcWVlZWl1bXllZW1tdWllWVlhZWVtaWVlZV1hYWFdWWFta
-WVlaWFdXWFhWVFZYVlZVWFlZWFhVV1laV1haWVlZWFhVU1NVWVdXV1ZXVVlbWFZV
-VldTVldWWFhXVlRWWFVXWFNSVVVVVVtWVVVVVVZVVlRUVFZWW1ZVVVRTWFlXVVhX
-WVZTVFlWVVRVWFRUUlRTUlVVVlZXVlRUVVNRUlRVVlZXV1hZVlZYWFdVVVVTV1hX
-VlVVVVZWVlhWVVpaWFZXVlVXW1ZVU1VVV1dXV1lYWVpXVlhZWFZWVFVYWFhYWFpa
-WFdXWFlWVFZVVVRVV1dXVlRWV1RTVFhWWFhYWlpYV1ZVVVNXVldZWVZVVlZTVFVW
-V1hWVlZYV1hZWVtcXFhYV1VYWFlbWFhXWFdZXFhZW1pcXVlaWldYWlxbXF9dXFxc
-W1ZYWlxZXVpaXl9bWFpaXFtdWFlZVldYV1pZXFlVVVZXWVtZWVlYWFdYWFlYV1ZV
-V1pcW15bV1VXWFtZV1hZWVxcWlxcXFlXWVtbXGBhX1hbXltbXGBeYWJiY2JiY2Jh
-YmRlZGdjX2BiXmBeXF1dWltaXl1XWVpbYHOwxtDa3uLl5unp6up8eXh7e3p9fnt7
-fHx6e3l3eHd7fHx8e3l4eXh4d3p7e3p6ent8eHZ0dnZ0dnt3end0dnd1dnd6f3x6
-enh5en18fHp7e3p5dXd2eHZ5enp7dXR4dnZ2e3p4eXl6e3x5e3l3eXx8enl6eH1/
-e31+enp5fXp4eHh6e3t4enp7enl3eHl6eHh2dnNzcXFwcnRycnBycm9vb3NzcHBt
-b3FycnF1dHV0dHd1dXZ2dndzc3Z5d3Z0cXN1dnd3c3R3dnF1c3R2dXR3eHh3d3Rz
-cnVycG1vdHd3cnNyc25tcm5samxubW9xbm1vcHJzb3FycnFycG9wbW5qbG5sa2xp
-amhsa2traGpraWxtbm5sb3BvcWxvbWxtbW5scHBxcG5tbWttb2xtbG1vb3FzcnBv
-amhhX1xiZ2VdVlVQTlVZY2Rka2xsaW93eXx9fX19eXJmWk1JTlNdZ253eXhwaltY
-VVpjZGZjYl9hYWBfZmBjY2tqZ2lpZWNhY2hpaWxtb2xtcXVxcXBwcHN2fX98fXp5
-eHZ5d3Vwd3NxbWxucXZ1bWtraGZkXV1eX1xeY2prb3BxamZfWllVUk9SUEpLT05Q
-S01JSEdJRUVFREVDPz9DQEFDQj9AQkI/Pz4/QEJFQz1BRUREREVERkZHS0lHR0dF
-RkVEQUVLTU1NSklNTklHRUVGSVJRTk5UV1paXGNjYWNnbnJ2eoKHiYmQl5SRhoOC
-hoR9c3ZsZGJkYmJgXVdVWFdYWVlWTk1NSkhJTE5PU1hQTlJTUlphYWJdXVxeX2Rl
-X15fXl9hX1xcYV9fWllbXV5eWl5cW1xgXFxeV2BiXl9fXV5cYmJfXV9eXl9eX15f
-XVpbXl1bXV5cXV9hX11gYF9hXVpcXVxcXmFfXGFeXVxcXVxbXF1eX15bWlxbWV9c
-W1xcXF5bXVlcW1xcXVlXV1dcX1xfX1tfYFxYWVhZWllXWFtcXlpXVldaV1tYV1hY
-WllaWVhbWlpbV1hYV1lWU1dZVlpXWVpZWFtYWlZXVVNVVVlZWltZV1hXVVNRVFhX
-VVZXWlhYVltaVlVZU1NTV1VXV1dYVlJXVllXVlpYVFNVVVRUV1VVWVZXVVRVVVRV
-VFZVWFZXWlZWV1dXVVFTVVhXWVpcWldXVlZXVFVXVVVXVlVQU1RTU1ZWVlZUVFhV
-UlNUVldXV1RWWFdWU1VTVFVaU1lWV1dUUlZXVFVXVlZXWFdWW1tcXFpYWFZWWFlX
-WFdUVVtZVlhVVllXWFhWV1VUU1RSVVpcWldXWllVVVdVU1ZVU1dUVFdYWFhVUlVV
-VlVZWlhZWFhVWFZYW1haV1ZXWllXWVpbV1ZXWFZYWFpcXVpYV1ZXVVVVVFpZV1dV
-WllXWFxdWFtZV1laXFxbWlxcWlpZWVhYWVpaXFtZWF1dXV1ZWFtYWVhWVVVXVVZY
-WVlYVVhZWVlaXFtXXFlWWllZV1laWlpYWVlYXFpaWFlbXGBbWFhZWl1cXV1fX1xZ
-XF9gXlxdW1xcXV1aWl5eYl9fYGJgYmFfX2RmY2ZjYGJhX2BdXlpcXlxcWlhaXF1k
-dLDH0trf4uXn6ens63h3dnl6ent8fn+AgH16e3t5enh9fn19e3t7eHd3d3l4d3p4
-eXl4eHt8e3h1dHZ5eHh4e315dnV6fHt5d3h6fHt7fX58ent7fHl3e3x5eXl6d3R3
-dnV3eXt6fX58enl4eHl8e3t6e3p7e3p/e3h4eHh3d3Z5enh9enp5eHx9fXx4eXV2
-d3R1dnh4dXNzc3NxcnJ1c3FxdG9tb3BxcHJzc3N0dXRycnV0dXZ1dHJ0cnR2dnVz
-dXd4eHx5dXV0c3N0dXN0dXd4eXd3b3BwcnJ2d25ycnNwbnJxcnNwcXFycW9ybnBx
-cHFxcG1rbW9ydXBwcG9ybm5tbW5ubm1vcGhqbG9wbWhsbW1rbGptbWxta21wcm5t
-bnFwbW9wbG1saWtqa29ubnJvcG1wbmxpaGJfXmBiYmNaVFNVUVZZXl9kY2JmbnN2
-eXt+fn9+dG5dUE1OV19ncHd5dnRpW1RUWV5jX19iZGJeXWBhYWRlYmVramhqZmJj
-ZWlqa2hra2xvc3JtbW9ucHZ6fIB+fXx9fXp3eXV1dXNwcnFwc3V1bmtlY2djY2Rh
-X1xcYGVmanBwal5SUk5LTUlKS01NTU1LSkZHQ0RFRERDQ0Q/Pj9BQD48P0BGRUNA
-QENEQ0NDRUZERUBARElHRURER01NS0lHR0lNUU9PTk1KTUtJR0ZHRUpKTEpJT1VY
-V1dYWl1kY2ttcHd/gYaJioyQkYyGhYWEgXJnamNZWFldXFlTT01MT05PTkxKS0lJ
-SkhJT01PS0xOUFRTU1NXXGFoY2JjZF5cXl1bX19dXV9dXlxcXl1dXF1ZXFtbXF1b
-WVpaXlxdX11eX15gYF9fYGBeYGBfY19bWltYWVlaX11cXF9fYWFcXF1eYGFgX1td
-YmBeXFxbWltcXl1bW11cXVxaXlhZWV1eXmBeWVpcXFpaXF1bYF1YVFdZWFRZWFpb
-WlpbWVpZWllcW11dXVpXV1hXWVhaWlhZWFpcWlxbVVVWV1lZWFhZV1ZYWVdYV1hY
-WFhaWVhZXFpZXVxZWVZXVFNSVFJTVlhVVVhYVldXV1hXWFRTV1hWVFlVVVVYV1NW
-WFhVV1dVVldWVVFWWFdVVVRVU1RXVlZVWFNUVlNVVFRUVVdXWVNUVVhcXVtZWFdY
-WlhVWFZWV1VXVlVWVFZXVlVYVlZVVVdUU1VWU1VWV1RTVVVUV1RUV1hYV1VWUlRW
-VFVWV1dXVlRUWFdZVldXWFdWV1lYVlZWWVpWVVZXVlZUVldVVlhUVFVVVVdYWFVX
-V1haWVpXVVZWVVlZWFZUVVhYWlZXV1ZTWVZYWlhZWFZYW1hVV1haWVlZW1xcW1hW
-V1VWV1tbWFpdXFtZWVlYWldWVVZUWFdWWFdaW1xcWllaWlhVVVhcWVlZWlpaWVtc
-WVpZV1hWV1lbWVpdWllZWVhYWlpXV1hYV1hXWVlXWVpZWVlXWFVXWFZXWllbXllZ
-XV1VVldaWltaW1tbXF5eXFxaXF1fX1xdXV5eX19fXWJhX2BiYF5cYV9gYF9gX19g
-YF1gYGBeXlxeXlxeYGFdXVxeXVxeYWR3rsfR2t7j5efp6evqdnd6e3t8fHt7fH58
-fnx6fXt6eXt4e3x7eHl2enl4enp4dnd5fIF/gYF9enp6e317fH19fn15eXh8f3t7
-e3l6e3p7d3p/fH14eHt8e3h6eHp4eHh7gHx6eX1+fnt6eH1+fH97e319eHh5eHp8
-enp4d3h5d3l8e3x7e3x8d3N3enl5fXl5dnd3d3Z3dHJ0dnJzdHFwb25wcXJzcnBy
-cnJ0cnF1dnZ0c3RzcnZ4dnd3dHZ2dXV1d3Z4eXl4eHp4c3h3eXV4dXh5eHd0dXRy
-dndzeHNvcXNyb3FzcnRzc3BvcXRtb25vcHBycW9wc3Nycm5tb3FzcXFvbW9qa2xw
-c3Bvb3FvcG9tbmxta21xbGxsbm9zdW1tbW5vb21tbGppbW5tbm1vcG5sa25ta2po
-ZWBfX2FcYF1WVVdXWFlbXV9fYWJqb3d5fX+Ah395bmBWUVVeYmlwdHdycGZXTU1V
-WmBgY19dX1tbYV1gZGRgYWNmaGloZmNjZ2lpampvbW1scG9xcG1wdnp6eXp8fX17
-d3d2dnRxcnF1dnJwc3BtbGpnaGpramRmZVxbXl5fZGxuYllSUE5OSEpKSEhISktG
-RUNDQj5BQT9BQUBBRkRAQj9DQkZJRT9ARUhIRkdJSktHRkJCQ0dKSE1RV15kZ2Bi
-ZFxYUExITE5LSEhFRUdJSUZISk9RUFdWVVVZXWBqbXF5e4OFh4mGiYySjoeDhIFz
-YlteVlVQT1JTVE5MTFBSUFBQTE5KSUhHRktMTUtLS09VU1BTV1VYXWRnZV9dXF5c
-W1hWXl5eXFpYWVtcXl1eXmBjX1taW1xcWlxeYGBdXVxbXFxbXFtdXmBgYF1eXV9f
-W1tcX19eX19dX19gYWJgXmFhYGFhXmFgXV9hYF1dW1xcXl9cWlxfXVpcXFlaW1tb
-WVtdXV5cXFtdXVxdXFhXV1lZWVlbW1pZXFlbWlpbXFpbW1haWldXVllYWVhYWFhZ
-WVtbWllZWFdaVVRVV1dXWVpcW1dYWVdYWVxeWVlZWVlZWFZXVFdUVFRUVVFQU1NW
-VlZWVFZXWVhZWVVVUlNUUlBTWFlWVldUU1RUVVRPVFZUVlVWWFlYUVFSU1dYVFVT
-UlVUVlVTVVVSUldYVFZVVlhbXVxcW1pXVFVXVlNRVFZVVFRUVlVTU1ZWVlZUVFZV
-VFRXWFZYWFZWV1VVVldWVFdUU1NVVFVVVlhXVFRYV1ZUVVNSVFVWVVRXWVZWVlZU
-VFNVV1lYVVdXV1hWVlZXVVhXWVtZV1ZYVlZWWFZWVVZXVlhVVFVTVFZWWVVWWFhY
-WFhaXlxYWVhbXFdUV1dWV1ZXVlVXWVZVVllaWF1eWVlYWltbWltXW1hVVFZXWVpX
-WlxbWlhVWVhZW11cWlhaXFxcWFpaWVlaXFhaWFVVWltbXVtZW1taV1laWlhZWFlW
-V1VXW1hXWltbWl1cXFtZWFhYWVlbXFtaWVpaWllbXFxZXF1fWlhbWVpcXVxgXFta
-XGBfX11eX19hYmNiYF9gXl1fZF9hY2RjYF5gXV9dX11fW1xbXl9gX15aW1tcXXev
-xtDY3uLk5+np6ut9fn18fXx7enp3eHx7ent6eXd5enh3eXt4d3Z3ent5e3x+fHx9
-fnt9fn58fH5/e317fHt9fn59ent8fn59enh7enh5eHh3eHp9fHl5e3h2fH16d3V2
-fHl+fHx6en99fH2Af4F7eXt5eXh6eHR6e3l5eXh4ent6en17e3l3d3d3eXl4dHd2
-dHd3d3Z1d3Nyc3FxcnJ1dHB2c3Bwbm5xcnJzc29ydHZ0dXZ2en58fX16eHR1e3d4
-dnNydHV5eHp5d3h3dHR4d3t5dnZ1dnd1dHd1dXJycnR3d3JubXBucHFxcm9zcXFy
-cHNzcXNxbnBwbm5rbnJwcXJwb3Fsbm5wb3Bsa2xub21ramxycG1vbmtucHNwcG1s
-bm5tbGltamxubm1vbW5ub2prbW9sbGtrZWBhY2plYWJeYFxZW11fXF1hY2pvdnp+
-fn2CfXdwY1pVWmJjZ21zdnRtZFdRTk9ZXmRhX15bWltdXV1iY2BeYWJmZWZmY2Fl
-aGlnZ2ptaGZpaW50cHF1d3V4ent8eXt9enp6eHV0dHVzcnNybGxsbG1qa2VobGxp
-YltZWlhbZGhpZVxRT09QTUxMRUVIR0dDRENEQ0BBQUE+REFBQEFCQUBGQklJRkJC
-REtJSk9PTkxKSktOTVFfbXmCg390ZFlUTUtKSUJDRUdCQ0VCR0tNT05NTk1PUlBS
-UlJbY2pwdnh4gYWJiYmJiIyMi4eEfWxaUlJPT09NTExLR0tLUFBUVE9QTUxPS0pH
-R0ZMTUxPVVdYVldaWl5eXWBkY15cWllbW2BbXFpcXltbW1paXl5gX2BfYWFcWl5a
-XFpdXV9eXVtcW11bW1xgYV9jYl5fXFphYmJhYWFgY2FfXWBhYV9jYmFhYF9hXVxc
-XmBgY19fXWBeXltcWlxbXFpZW1pcW1tbXmhbWVxaV11eXV1bXl1bW1pZXFpYWVxb
-W1xaXVxeXVxZWFldXVhbXltcWVdXWFhaWVtYWFhYW1pXV1ZUVlVVWFpcWVdWVldY
-WFhZVlhaWVdYWFhYVlZUVFVXVFNTVlVWVFlZV1ZXWFtZWVdZWFhWVVZYV1lWVlVY
-VFNSU2BXVFZUVVdXWlhWU1NUVVNUU1JVVFRTVFVVU1NUVVlVVlFQVFZaWltZWVZT
-VFJUU1JSUlRUVldSUU1SU1NTUlJRUlNUVlZWVlZZW1ZTVVZXVlpZVlNSVFdWV1ZX
-VldYWldWVVRSUlVTVFJTUlNVVFdWUlNXU1JXWVZXVVRTVVVWV1NVU1RWWFVWWFlX
-VFZYWVlXW1tXWFhXV1pWV1dXW1lXWFhXVFhYW1pbW1lZWFRTVldXV1dTU1lcW1pY
-WlpZV1VXVlhXVFVYW1pZWltZVVVWVFRWV1VYWFlYV1haXFtaXV1dW1ldXVlXWV1d
-WlpfXlpYWFhaWFlaW1lbW1xZWFtkXlpXXVxbWVlaWFZXWltfXFpZWlpaWlpZWlpe
-YWBeX11aWVldWlxcWlpbW1xdXFpbVlpcWFxfXVtcYF9gYV9eXGJkYV5fYGBgYmVg
-XF1dX2FgXFpdXF9cXFtgXl1aW1tdc6rFz9jd4uXn6enq6nx+fHt9fHx6e3h5enl3
-eHp2eHl5eXh6e3p4d3d8fHt7f357fHx8fnp7en56enp5fYB+gH55eXh+fHp7e3t4
-end1d3l3eHh5eXx9e3l8e319e3h5dHZ3eXh7ent4eXZ6enp5dnh6eHl5ent9e3d7
-[... base64-encoded binary payload of a deleted data file; encoded contents elided ...]
-rKegmY+GgHp1b2lhXFtbX15bWllZXV5dWVRYWldTVVRVVVZTUFNVW1ZVVFhYVFRU
-VVVUV1dVWFVWVFRTV1hZV1ZVVVdWU1ZYVVNVWFdSUlNUU1RVVlRVVlNTUlNTU1BR
-UlJWWFdXVlRVVFVWVldaVlZTU1VTU1VUUlJVUVFUT1RWV1xcWlZUVlNWWVhUVVZW
-VVZYVlVUVVdZWVhYWVtbV1laW1pZVVdWVllYWFtbWFlWV11cWVdWWFtbXFlWV1dZ
-WltZW1lZXFhYWFhXWVtXV1lbXVtaWllZWFlZV1taWVtcWlhcWlpZWFlaWlhcWltZ
-WVpbW1taWFZXW1xbV1haW1xZXVxdW1pbV1pbX1tbW1xbWV5cYGBcWl5fXWBhYGBf
-Xl5gYV9fYGFiY1xfX1xdXV9gX1xZWVpZWlxbWmZ3scjT29/i5ejp6erqenx+fn17
-eHZ2eHp8e3p7dnd8fHt6e3t7e3x7eXh8gXp8eXl3enx+fX5+eXx5eX18eXh3eHZ2
-dHV4dXV3e3t8fn19e317fXp5e3l7fHl3en19e3x5eXt9fXx7enp7fXx8gH96fX98
-fH17fHx+fH16enp8fHx6enp7ent6fX59enR5fnl4end5eHR3c3d3dnV0eHh0d3Z2
-dHFzc3Fyc3J4eXt9fnx7e3t8fnx8enx7en5+eXl4e3t9fHl5fHh5eHl6eXh4eXt7
-d3R0d3Z3dHJzdnh4dHR0d3VzcXBva2xub29tbWxtbXBubG9ubGxtcXBub3BwcG1t
-bm1ubnFwc3BtbG1vdHFxdHFybWtoaGhoaWdjYmRlZGFhY2JgX11dXl5fX2BfYl9j
-Y2dnaGdnZmdoZmhmZWZoaWxnZmdpaW1qaGlrZmVoaGVmZGRfX1tXVVNSVE1HR0ZH
-RkRJSkxLTU5QU1BQUFFVVlVWWFtaXVpXWFldXlxZVlhZV1hXWVZSU1VUU1FUU1VV
-VVRVWVhWVldXWl1dXF1ZXF9hZGViYGFmZ2hoaGhpZ2ZmamViYWZrbWtsamVpbm1o
-ZGdsbGxvbW1xdHd5dHR0d3x+f4CBgoiJi4mJioiIgoF4bGlocH59gIWHh4GAg4F/
-f356eHd2c3BubWtwdHRvaWJgZWlnbG9xcHRxbWttcnR2e3p9hYmMk56hpampqamo
-o5+dlpGIhoaHh4SAfnt4dXJubG1pbGxqbXB1en17dHBnYWBcXFxfXlpbXltaXF5d
-XFpYW1xdXF1aW1tYWFdWWVlaV1ZXV1RYW15eXlxcW11ZWVpaW11aXV1cWl1cWFxf
-X15cWVpcXF5bXltaYGBfXF1fXF5gXF9hX11cW1xeXF9fX19dYGFgXl9cWl9eXlte
-ZGVoZ2xrb3Jzc3V8e3d3d3Z1enp1d3NyeHd9goyNlpeRj46Ql5eYl5mbmp6hoKOe
-oKakpqemq6inpqKlqqempqaor6irraWipKKfnZuYmJaVj4yMiIiJhoJ8eHl2dXJx
-cHV3eHl5en58gIWHhoiKio6Sl5mZlZWWmZyfnp+fnp2XlJWWlZCQlJufo6WnpKas
-sbS0t7e2sa+ytLW4uru5vcDAwL26tLCurKafl4+Ef3t1b2hiXl1cWlpaXV5dXFtZ
-WFdWUlJVVFVWVVRXUlBTVVRUVVVaWFZTUldWV1tXVlZVU1VVV1hYW1tYW1hYVlVY
-WFhWWVhWWFVSVVVXV1RTVlNVVlRTUVBYWVNVVFdVVllZWFVRUlJUVlNSVlVUVVVU
-U1VVUlFSVFZVV1pVVlVXVlVUWVxUVlRVV1ZXX1VWWVtZWltXWFVWWFdYWVhXVVZY
-VllaXFlXWVpZWFlaW1pXWVpcXFlYW1lXXV5cWlpYVVdZW1tYW1pbWlxbW1paWllY
-WFdZXF1YWlhbXFhZXFtbXVxbXFlZWVpZWVlbW1pZVVZeWVpZWlhXWFpbXVtcX11d
-XFpbXV1eX15fXV5fX2BdX2BfXl5gYF9gXV5iYV5eYWJhXlxbXl9hX15dXGBdX15b
-XVtcYHeuyNPa4OLl6Onq6+t7fX6Afnp9fnx8fnt5d3l5fHx8fXt5e3x7eHt8enl9
-gH99enl3fnp5enp6e3l7d3p5eHZ3dnV0c3h5enp4eHp7fH16eX18en16e3t9end7
-fn99fHl4enp9fn19eXyAgH58f398fH59fX19fnp9fnx7fHx8fHt5enp4ent+fHp5
-eHd6enp6fHh4eHNyb3FydHVzdXNycXV3dnV0c3R3d3l4d3p+fHp8fHp9fn59fH9/
-fn16eHV7e3t6fHp8enp5eXV5d3p4d3t4eHl3dHV0cnN1dXV1cXVyc3Jxb29sbnBv
-cG1wc29xcnJtbm1samdtcW9ub29wcG5sa2xwcnJvcXJucG5xcnJycHBxcGpoampn
-ZWdkYl9fX2BhYmJgYFtfYV1bXl9fYGBiYWZnZGdoZ2hmZ2hoaGtpaGlnY2drbGhm
-a2tpaGtqZ2djZ2ZgW1hYUlBMTElGREJFREZGR0pLTE9QTk9RVFVXVVZXVllYV1dX
-WVxbW1dVWVZVWFdXVVhUVFZPUFFRUVJZUVRWV1ZTVVZXXF5fXV1cXV9hYmJfX2Ji
-ZGRkZ2ZnaWVlZGNmZ2tubW1sa2lqampoam9wbmtucnV3fHp4eHN1gISBhYeIh4iM
-jIuMiIeKhYN6b2hlanN3e3+DiYWEgn59fHd2eHpzbmtsbG5ubGRjZWhpY2ZscHR1
-dHNvb2pvenp+fX6HjoyOlJyjp6qsqqSin5yWkoqGhYWAg4KHiIaAfHx/end1dHd5
-ent5eG1lYV5fXVxcXV9fXV5eXl5eXV1fWFxdXVlbWFpaV1laWFpXWltXVlVXV1ha
-WVZYW1tZW1tdXFxcWltXV1daWldcXVxfXVhbW1pcXV5cX15jYmBhX2BgXVtdXlxf
-YlxeXF1lXl9dY2FhYGFgXV1bW1ldXmFjZ2doam1sbXF2dXh4d3l3dXRzcnFycXN1
-eoWHi46PjoyQkZOWmJqbnJ2eoqKjoqKipaSlpaapqKamp6eoqqqvr62sqqqtpKGj
-oJ6Zl5aXlJeQj4+Pi4aAf316dnNwb3Fzdnh9fXx+gIKDfYSHiYWJjpGSmJiZm5qb
-nqGioZ+eoZ6cnZ6ZlpWYmp6gpaSkpqWqr7C1uLq1srOztbS3vb3BwMG+uLm2srOs
-qaWelZGHgXt2bmpgW1xdW1tbX19dWllZVVRUWlNRUlNTU1FVU1FTVVVVVFZWVVZW
-WFNVWlVXV1dYWFhVV1hYV1hVVVhcWFVVVlZWV1lZVVVVVVdXV1RTVFRVVVNTU1NV
-U1JVVldVV1dWU1JQUFJVV1lTVVZUV1ZWVFdZWlhbWVNWVFRYV1hWWFRXXFpYV1lb
-WFFQU1hZV1laWlZUVlZWXFtaVlhaV1lZWFhbXVlYWFlbWlhbXFlXWllaXFpYV1ha
-WFhXV1lcW1lZW1lXW1xcWFVXW1hXWVhbWVpYXl9ZWl1dXVlZV1taWVlZVVNXW1xb
-WVtZVlhXVltbWVlWU1JZXF1cX11eWFtdWlxdXV1fX2JcXV5dZF1eX15fYF9dX2Bh
-X2BgYWRiX2NhX1xfXl9fW15fXl9gXVpeXl5jdK/K09nf4uHn6erq63p9fX98d3p+
-fXx6fH96enp3eXp5eHd8fHp5eH6Dfnx9fn98f4B9enl7gHx5fHp8fXt3d3p5d3h4
-e3t6d3l7eHh4fnx6ent5eXd7fHl6fXx6e3h6fnt6fHp+fn6BfH19hIJ9fnt9gIB+
-fn1+fX1+fXp8fHp6e3t4dnh7enp8ent8eXl4eHl3dHV4dXJxdnRzc3VzcHBucXZ5
-dXV0dnh3eXh4fHp9f3t6fHx9fXx9fn59fHx8fnl8fHt9e3p4eXt5eHd4dnd4dHR5
-eXd5d3x4dnh2dHZ2dHVycXJwb29ubW5tb3Bub3BxbG9ub3Fsbm5tcXBra3J1c25p
-bm9sbnF0dHJwcXF0cnFvcG5ua25rY2ZnZWhiYGFfXV1dXWBgYV5dWl9fX2BjYmNi
-ZmNmZ2lrZmRlZmhmZWhpZmdoZ29ya2loamZmamloaWprY19bVVBMTk5NUExHQ0ZH
-SUtKTE1NT0tOU1JQVFRVV1ZUVFNUVlpbWlpaVlhaVFhXV1dYV1RUUlJVVVdWVVNW
-VFpaWFVVV1pbW1xeW1tfXmBfYF1eYWVkZGRlZWdlZWJiYmhoa2pqa2tsbGdmZ2xu
-cnFtbHJzc3Z1cnR6eHp9gYGDgYSIioyNjouJi4eChIWCeWxlY2ZscHd/f31/gnx5
-eHd3dHJscnBpZGRoaW9sZmBkaW9ycHFxcHFvbnF3e3+AfoSIh4uQlp+lp6ajnpqa
-l5mTj4qKiYiGh4eIjIqGiIWEgYB/e3p5fXdza2FgXl9fXl5gXV5cWlpbWlpcXFtd
-WFhaWlxbWVZWVlZZXlpaWlhYVVZXWVtZW1lZXF1bX11dXFxeW1pXWVlYVVhYWFtd
-XF1gYGJgX2NfYWBhYWBeYV1dW1tcXF1fXlxhYWBeYV9hYF9fXV9hYF1aXV5iZmRk
-aGVoaWpwdHR0dXh5eXVzdHNvcXJ0dnmBg4aKjo6MjJCUl5eanJ6fn56lpKOgoqKj
-oaKjp6iqp6enqaaqqqutq6qoqqijpqGem5mamJeUkIyJiImHf3t5dnR1dHJydHJ2
-eXyAg4KCgoaIiIqJiIqMjZCUmZ6foaGioqOlqaioopygoJ+dmpianqGlqquqqKmr
-rrC0ur+3tre6ubi5wcPBwMC+u7i1tLOwq6Wfl5CGfnp1a2ZgXF1eXVxeYGFcV1RW
-VllYWFVWVFVSU1BRUlNSU1RUU1dWVFVYVFJUWFdVVllYWFpWV1dVVlNTVFNYV1RV
-VFRUVFZWV1RVVVZXVlZUUlRTU1VYU1NVVVVVVVRUU1RSUlNVVFVWVVdTVVRZWVpW
-VlhWV1dVVVRRVFtXVlVYV1VWWVxbV1hWVVVVVlZWVVVbWVRTV1hZWVhYWVxXW19a
-V1VXYFpaWlpcW1hYXFlYWVlaW1pbW1lXV1NUV1lYVlZYWVdZXFlXWFlZWFdZW1lb
-XFdaWFpbXltbWltaWlpYWFlcXlpZWlxaWllYWVtaV1xZWVxeWltbXFtaXlxeXl1d
-W19cX11dXFpcXFxeX2FeXF5gXV1fX2FfYGBiYmBhYWVkYV9bWlxgX15dXl9hXFth
-YWF3ssjS2+Dk5ejp6evrfH19eXl6enp3eHt6eXt+fHl2dnp9fXh4e3p6gIGBfX+A
-fHx8f31+eXl7enh4eHp8eXp9eXt6enp3dnh3d3h7eHZ5e3p6enp4dnp7e3p7fX55
-e3t+fXx6fHx7e39/eXt9fnp9e3x9e3t9f397fHt9fH17e3p+f3t9fX19e3l4d3Z3
-eHh1d3Zyc3R2dnR0c3N2dHN0c3Fxc3V5d3l6d3d3d3R6e319fH58f3t8fH97en18
-eXh5enl7fHl4dnZ2d3h1dHJ0eXp4dXN2d3d5d3Vzd3VzdHZ1cG5ubXFwbm9vbm5v
-b21ubW50cmxsbGtra25wcXNwbm1xcnJxcXNwbW5xc3R0dG5ubW9va29vbmlobGpp
-Z2diYWBdW15hW19iY19dXFteYWFhYmZlZmZlZmdkYmBmY2JnaWdmaGloaGxpaGho
-aGlnaWtpamJjYFhYVFJQTUxNSUhGR0pLSkxMTE1PUFJPUVZSVFNVVldVVVJZWV1Y
-VVhXWFlZVlRUVVdYVFRNUFRWV1hUU1JWWFlbWVhUWVpbXFxaXmBeX2BeXV9jZ2hl
-YmFjZWdpZWNqbmhmZWVmb2toZmlucHFxbW5wdXZzcnJzd4B+fH5+f36AhIqJiImK
-iouKiIuHiIeFgXZsZ2JiZWtucXR4d3Z1enJvamdnaGltbW1qZWBgZmtobW9tbm9u
-cnR0dXV3foB9gYSDiZKWn6WkoKGemJaWk5GPjYqIh4mJiImLiYeJioiHh4WBfnp6
-dXBnYV9iXl5eX2BeXVxaWF5dWVhYWmBcWlxfW1xbXFtZW1dcYF5bXVpYXlxdXF9b
-XFtdXFlZXl1cWVpdWllaWldcXFpaXWFgX11dXVxhYFxcXV5gXl9eX1xcWFlbXV1e
-YF5gXl1iYF9jYF5fX1xfX1teYWBkaWhoaGhnbHBxdHR0dXZzdHF0cnRwbHF4fIOF
-i5COj4+SmpWVmJyfnp2amZ+jp6Kko6Ohpqaop6enqKqsqKiqrauqqqmmo6KhoaCb
-mZmXk4+MiIaCgn9+fXl2dnV1dXJxdXN2eHiBiIaGiY6Ojo2OjIuPkJaYm5+goqGi
-pqqqqKuko6KhoJ6fm5ibnqGprKyrrKqqrrS4vL27vb69u7m7u8DCv768u7azsa+t
-qqiel42GfnRtamNjX1xaWlthYGBeWFZYU1VWU1RSUVNUVFRSVVNTU1VWVVVXV1VW
-WFZVVVlYV1hWVFRVV1ZTV1NUUlFTUlNSUlNSVVRUU1RVVVVTVlhZVFNXWFNVUlRW
-VlRUVVRRUlJRVlJTU1VVVVVXVldWWVdSUlFYUVNUVFNWVlZWWlpXVVVZWVdVWVhZ
-WlhYWllYWFdYV1lWVldWVlhYWFlaVVdXVFRYVlVXW1pWWFpbW1hbXVtZW1tbXmBa
-VlVXVlhVVlhVWlpXWFlWVlZaWFtZVlZZWlxcXFtcWFlbXV5cXFlVVlhdXFhYWVtY
-VlldXVxaWllXWVxeWVhXV1paXFxZWFhYWl1gXV5gYGBiXFtfYF5eXF1eYF5dXl5f
-YmFgYV9fcHFfXl5bXF1fX19hXFpZWVxcZnqyyNPc4OPm5+np7Ot8e3p5enp8eHp4
-eXt8e3t7end2e31+end5e31+e3h/fn99f315e3l7fHt8d3d3eHd5eHp4dHV2dXp3
-d3p3d3d5eHd7fXx5fHh3e319f397fXx9eXh6fYB/gHp8e356e3x6eHx7fHp6fH9/
-fHt8ent/fn97e3p8fXl6eX57e3p5c3Z3d3V1dnV2dnVzc3N1dXN1cnR0c3NzdHl8
-end1dXV4dXZ6fHp6eXp6eXd4enl7fnt5d3h5eXh5eXd5e3t6eXh1dHV2eHZ0dXl1
-dHV1dXZ1c3JycXNzb2tvbWtsb3FwcHBxcnJzbnJub21sbHFwcnJwbmxvbW9xcG5t
-bW9sbmxsb29wc3Bvbm9ybW5tbW1ua2poZ2RgX19hYmNfXF1gYl1cWl5hYmBhY2Nl
-ZmdnZWhnZmRiZmVmZ2psa2pqZWVoZ2dpZ2ZlZmdnaWdhWFRRU1NRU09MTEZGRkxP
-SUtLTVFSUFBSUE9QVFVTVVRUU1hXVVhYVlZZV1VUWFpWWVZWVFBSWV5cVlhVU1NT
-VVZUWVpXWlhZWl5aXV1fX2JiY2FlZWVjZGJmZmJiamtpa2VhX2VrZ2dnZWZqa29s
-aWlsdHVzcnV2eHx/gYGAgIOGh4mJh42Njo2Oi4mEg4qKiYV+dm9paWVoZ2hra3N2
-dXd3c29ubWtkX2BkY2pwbG5ucHN0eHNzcnV5d3J7gICAgYKGkJefo6Oho6GfmpWT
-kY6MioyJiomHhoWGhYmHioiKgn59gX93bmVgYF9hXl9cXF1aW1tZWVdZXFtdW11d
-XFxdXl9ZW1pdXVpYW1tgXl1dXlxdXlpbXVtdXFhdXFtcXV5cWVhcXl5eWltaWFxd
-XmJgXGBfXl5dX19jYV9eXl1cXF1eX19eYF5fX11gX19iXWJgWl1dYF9eYmNmaWpm
-ZGtsanBxcnR3eXl2c29wc3FwdXp9hYqNjpOSlZydnJ2ioZ6fnZybm6CjpqWkp6qo
-pqGgoaSjqaqsqqeqrqqqpqafnp+bmpuYmpaQi4WDgoKDfn5/e3h1c3N2cnBwcnBy
-d3uAhIeKio+QkZGRkJWWmJqcm5yepamrraupqKqppaWhn6GgnZqan6WprrGvrq+x
-srKzub2+vr+/vbu7urvBvry9vbiysK+rq6afk42EenpvamdhXltYWV5hYmJdWFdU
-T1JTVFRUWVpaV1NUUVZTVVVTVlhXVlRVVldUV1taVVZUVlVVVVhXWFVTU1RVU1VV
-VVJRVlRUU1RUVllWVVVTU1JUV1RVU1RTU1BUVFZVV1VZWFJSVFVSU1NWVVJSVVVU
-VFJTVVRXVlZXV1ZXWFdYWFlXWFhVVFVXVldUV1dWVVhXWFlXV1VUWFhbW1lXV1la
-V1taVlRWWFpXVVhZWlxbXFpXV1tbXFlZWVxbWFdXV1pYWFxZWVdWVlZbXVtaWFlb
-XFtcWltcW1tbXV5dWVhdW1pYWlpcXFxfX2FfWlpbXlpZWVxaV1dWWVpYWVpXV1hZ
-XFtZWV1hYWBeX11dXV9fX19hZGFfYGBeYWJhYF1fX1xcXFpaWVtgXVxcXlpbWlxi
-frbJ09vh4+Xo6Onr63p6e3d5e3x9fHt7e398fH99gHx8e3p7enp8e3t6fHx8fHx7
-e3x6eXp7e3p7enh0dXd5eHd6end4d3d3dnZ5e314eHt+gHt4enh5fHx8d3l5fHt6
-eXx6fH17d3p7fHt6eXx6fHt7foCCfnl5e3p7f3p8fn9/fH+AfHl8e3t4eXh6eXh0
-c3R2dHNycXN0c25ucnR0dHBwcHd4d3l4dnV2d3V2eHp7eXp9fHp4ent4eXt4eXt+
-gHx5eXh6fXh5ent8e3Z3eXh0dXV3dHh0dnV0cW9ycHFvcW9vbW5tbG5tbm9ucHRz
-cnBwbm9ycXFwb29tbWtsbG5wb21vb29sbnRyc3FvcHJzd3JsbnFtbW5ubGxoZ2lq
-Z2RjZGFeYmFdXFxgX2FeW15hY2BiZ2VjZWVoZ2ZnaGhlaGtramppamdlY2NhZWVn
-ZmZoZmdnY15bVE5NT05NTk9JR0dISEpMS05PTk9RUU9SVFJSVlJTT1NTVFZYWVhZ
-WVdYWllWVVVRU1dUU1VZW1lZWVdTUlRVVllcW1dXWVdaW2BcW1xdYV9eZGNlZWRm
-ZWJgYmRpa2ppZmVjZWtqamdlZmxsbG1ubW9wdHFxcnJ3fX1+gICAhIGBhIqJjo+P
-kI+JhIWOjIiMjYyIgHx8d3RwbW9wdHl4cnFrZF9fX19jZ2lpcHZ2dHRycnZ3dXFz
-cnZ3d3l7en2BhoiRmZ6ipKWlop6al5SVj4uMmImLi4qIjIaDhouMiYiBfH57eXty
-Z11cWl9hYmFdW1pYW1xbXl9bWlxcWltaWltjXFlaW1tbW1tbXFxeX11cWlpcXlhc
-XFpYW1pZXF5ZW11eXFtcXFpcWFdYXF1eW15aWVpbX15fXl9gYmFdXWFfXV1iYWJh
-XFxdXl9gYF9fXFxdX2BlYGNkZmVnaGdmampubnJycXN3dXNwb29ucHJ2foKCiI2P
-k5eanJ2fn56enp2cn5+eoaOjpaWoqKinpaKkp6ioqKmpqq2trqqpqKSio52dm5iV
-kI6KhoGBgX6AgH58eXVydHR1dnVycXJ1eHyBh4yOjo2Pk5KQkJSTmpygoqKlqKqq
-qKitq6ioo6OkoqOfmZmgpquusLGvrbGysLC0s7q9vsDAv728ury6vb65vLqzsayn
-pZ6ako+EenRva2JhXFlYXFtdXlxXVlZTVFRVVFRWVlRTVVRTVVdWVlJSU1ZXVFZZ
-V1VUWFdZWldTVFVXWFlVWVdUVFRUU1RVWVpWU1ZVVVVWVFNUVVhWVVFSUlRUVVRS
-UFNVU1ZYWVhZWlNSU1RSV1VUUlRYV1NWVlNYWVVVVlVUVlpUUVhYWVlWWlhZWFZW
-VFpaVlRUVVRWV1hZV1pbWlhXWVVWVlhYWVxYVFVVVlpWWFlZWFpXWlpYWlpbXFta
-W1pZXFtaWFlWVVVWV1pXWFlbXVtcW1tZW1lZWFVaXVxZXl5gXVtYVVZYWFlZWltd
-XFxbWFhaXVxYWVhaXVtYWFxaWVhZWFpYYV9fXl5eXl5fYWFgX15gYWNjYVteYGFf
-XVxfXVxeXV9fX1haXl1aXVpZXF1dYWV+uMrU2uDk5ufo6ersfYF9e3x/f4J9e3t8
-ent5fX6Agn97e3l7fn56eXx/d3R4eXyCf318fHx7enp7enh3eHZ3d3Z3e3Z0eHp4
-dHV4enl5enx+fn14eHh5enp6e3d6fnx7en15e3t9enl5ent9enx8fH19fXx5eXx9
-e3x8fXp8fH1/f4B+fHp5enp6eHp2dXd3d3N0dXNxcHBycXBxdHN2c3NwcXV1d3d4
-dnh3c3Z6eXt4fH56eXp6ent9fHp8fXp7enl5en17e3l5d3h7fHx7eHV2dnd2dXl3
-dHFycHBzcnJwb29zcnFzcm9xcXBvbmxtb25ucW5xcHJya2ppaWhub21vbm9vbW5y
-cnFyc3NwcHBxcXJ0bmtvbm1qbGdoaGdjZGRiXmFjY2BZW15hXmBeX2JhX2BiY2Vm
-Y2VoaGZmaGxtbGxrZ2loZmVmZmhnZmZnaGlqa2hfXltVU1RUU1JNREVDRElLS0xO
-T1BSUlFRUVJST1NTUlRXUlBSV1ZZWVlXW1xYV1NUU1VWV1dUVlVUVlZVVFRXVlhY
-W1lXV1lXVldbWVhcWl1fYF9gX2BjZWRjY19hZGVoZ2pnZ2pkY2ZrZmNqbmxqbW1t
-bnBsb3FycnN5eXt/goKGhX6AhoyPjpCKi4iKjIyOjIaKjIyMiYWAfX5/fXd2dXBr
-ZGRiYWBiaWlrbW9xd3d0cnFucHV5enh4dnh8en99fYKJi5KWmJ2ipaKfnZqZk5GO
-iYiJiIiBh4uIhYaGh4qFgHp6fH19eG5iXVtdXl1fYmNeXV5ZWVpdXV1dWlpcXFtc
-XmVeWltbXVtcXVtbW1tdWFpaVFZcWldYWlZWWV1aW1xaW1pcW1tcW1lcW15eXlxa
-WVpcXVxbX15dXV5gY2RfXmJhYV9gYWBeWl5eXF1dXl1eYF5eXmNlYmVnZGhoaG1r
-bnB0dXl2dHJvbm5ucG1vc3x/iIuNjG+NlZubmpqgn52fm5yboKOjoqCmo6SoqKur
-qaqoq6ampaSnp6mqp6eppaGhn5ycmJKOi4aFgYKAgYGBfHp1dXV2dXZ4eHVzdHZ4
-fH2Dh4iOkJGSkJOSk5eYlpygpquqq6uvsa6sq6uqqqmpp6WgnqCjra2usbCwrKyx
-sbW3tbm6vLu/vru7uru8uba6t7WwrKqmoJ6Xk4uDeHJtamJfXFxcXl5ZW1laVVdZ
-WFZZWFRTV1pWVFNUVVRWWVdVVVdYWFhaWFhVVlhaWVhXVVVVV1pbWFdXVlNTUldX
-WFRUVlRUVFNTVVVVV1VXVk9QUlFSVFNUUVFRVFRYV1RVV1dXV1NUUlNUVVZUUFRZ
-WVZVVFVWVldYWFdcW1lXVlZYWFpYV1dVV1dYWVpWV1hZWllZWFlZWFdYWVdYW1tY
-WlpZWFVUV1dYWFhYWlpYXltXWFlYV1hZWl1eXV5dWVlYV1haWlpZXFtbWlhZW11c
-V1hWWVhZV1hZWl1cWFpZWllUVldWV1pXV1hbXF9dXFpXV1laWVhaW1pdW1tcXVpc
-XVpcX15eYGFfXl9fX15gYmJhZV9iYWFeXl5dYV9hYGNgXVtcXlxdXWBbWVdbYXaz
-ytPb4OPm6Onp6ux+end6fHl7f359fX59eXp8gH+Af3t7e3h5fnt7fX54eXp4eXp6
-enp4e3t6eXl4eXh3eXp7eHd3d3d4eXZ0dXh5enl3dnp+fXx9fn57e357fnt6end7
-e3t7fH17eHl6fHt8fn19fn99fXx6fHx9fX59enp7fnt5enp8enx8eHR1dnV2dHV3
-d3dycm9vcXB1d3RwcXV0c3VzdHZ1dHV3d3l2d3l8foCAf317enl3ent+e3x6eXt6
-fHt9fX59d3Z1dnl6e318enl4d3t2dnFvb3N3c3Fxb3JycXFzc3R0cGxscW1tbWlt
-amtwc3Vyc3Fva2xucHByb21ub25vcm9ucHJzdHBvcXJvcnFsamtra2xtbWlnZ2Zm
-YWFlZl9cXVtaXFtfYWFjZGNhYGJiZmRlaWpnZmhnamlobGtsamdlZWVlaGViY2Jo
-aWpraWleWlZSVllQUE9NS0hIRklKS1BVUFBRUVNUUlBQUFBWVVZWWFZZVldcWFpY
-WltZV1RVWFhVUlRXVlZSU1RXWFdVV1lcXFhaWllYVlZZVllWXmVgYGJmZGFkYl9h
-Z2NlYWNpamtpY2VlY2Zoa2tsbGpubG1ubm5sa3Bzd3d5fHyAgIWEgn6AgoqNh4OE
-hYOIjY2Lh4qIjIqIhYN/gH15eHh0b25pZGZoaGVpa2ttcHV5dnh5eHN0eXZ7goB6
-eoF/fH+ChIyPkZKXnqSko5+empWUkIuHiYqJi42JioiDhYiGhoaDh4WBgoN6aWBf
-XlpdXV5fYF1cXV5dWVlbWlhYWVxcXFpcXFxaV1tbXFxcW1xaXFxXWVtYWFlaW1la
-WVdaXVxcXFlhXVleWltcWldbWltcW1xbWl5fXWBfXV5eYF5eYWBeYGBhYGFhXl5g
-XV1fYWFiXlxgYWFkY2VnZmhoZ2ZscnFxcnJ0cnJ0cnBra21tdXR3e4WJj5OTkJaX
-mpqdnZ6hoJ6fnJ2foKGhn6Cjo6Kjoairq6iop6mmp6qtrKmopaWkoJ6blpeUlJGJ
-hYODhIaJiYF/eXd3dXZ3eHNzd3l4d3l9goCGi5CUlJKTlpKWmJydm5ugqKursLCw
-ra2vraurrKqmqKmjnqOiqayur6+sqa+ztLa2trO3t7u9t7i5t7u4t7a2t7Swqqmm
-o5qRjomEfHRyZ2FeW1tbXFxaWlVXVlRSVFVYV1dYV1JUV1RSV1NUU1FTVFRWVllW
-VlhYV1dVWVVWVlpYVFRZVlNTUVRVVlZWV1hXVVVUU1JVVFZWVVRTVFRSVVRVVVNR
-T05RU1RVU1VWWVZXVFBSVVRVVlZXVlVXV1ZVVFZXV1hYV1haV1VZVVpbWVhZVlNX
-VldXV1lXWFhaWllXV1daWFhYWVhYV1dXWlxZV1NVV1hXVldXWFZcXVpZWFhZWFZV
-V1dYWVtZWFpYWFlaXFlXVllaWVtbXFtcWF1dXVxaWVhXWVdbWlhcW1pXV1hYVFVZ
-WF1dWVhbWFZXWllZWVdZWlxdXVtfXFpfW1tcXmBdW2BfYWFfYl9fYGFkYGBeYWJg
-Xl5gYV9eXWBgXF1hYVxdXl1bXVxhebPJ0tvg5OXo6Onq63R4enx/enh7fHx+fnx5
-en18fHx7eXt7fH19e31+fX56enx6enl5eHl6e3l4eXh4dXZ3en1+eXd5fHx8d3V2
-eXh3enp8d3V6e3t7fXp8fX5+fH57e3l8fn19fXt6eXp6enx9gX17e3x/fn57e3l7
-foB/fn5/fXt6eXp9fXp3dHN3fHh5dnZ4eHZ0d3l3dHZ5dXZ0cXN5d3JvcnR5e3d2
-dnl7fYB+f4OAfn18fnt6eXl2dnp9fH15d3h7fXl4c3d5d3d5eHt9eXp5d3d2fHd1
-dHNxcnBwc3V2cnJxcnNybnJvcHFwbGxsa29zdXRwb25tb21rbG9ycXBvcW5tam1w
-cHJycnRzdXJxb25vbm1rbGhoa2xnZ2ZpaWdkYWBhX15gYmJfYWJgY2NfYmJiYmNi
-Z2llZWhnaGdmZWdkZGNoaWhkZGpsZ2hpamhqaF9aV1BMUlFQTUtGSUVGSUhMTk9P
-UlFRUVRTUlBTUFFWV1ZWWVdUV15aWF5cWlVTVVVVVldXVlVUVlZRU1JUVVdYWFlW
-WFpWWltZV1dXVFhcXl5kYWVpZmRhY2ViZ2ViZGlkY2ZiYWBjZmxvb2xqam5qbG5t
-cHhzbXFzdnV1en6FgoKCg4KEhoaIhoKBhIWMi4uJhIWHhX9/g359eHR2eHdxbWxs
-aGlubWtsbXBwdHl4e3p4eHx8eHZ7fXt+fn98e4CEh4uPlpufpKiioKCblZGOjIuO
-j4mLjIeJhISHiYqHhIWJiYeIgXlrZ2FgX1xcXVxbXFxgXltcWl9cWVhaWlxdXVpe
-W1xdXVxYWVpZWlxaXV1aW15bXFtaW1hdXVpZX11eXFtbWlteXVxcYF9cWllcW1xa
-XF9iX19cWlhdW1xcXl9dYWJiYmViXl1eXl5eYV9eXGBiY2RlaGtramxrb21xeXt4
-eHd3dHF6b25raWpxdnmCio+Rl5eTlJWYl5qenZyen6Ojo6SioZ+cnJ+ioZ+lqKil
-paaopqSlqqmpp6akpKGgoZuXkY+Pi4aDgYKEhYmIgX17eXZyc3JzdHR2fH+AfHyB
-goaNjZGJlZiVlZiamp+goqOlpqenrK6vsrOvramrrKeqq6qmo6Skqa+ysK+vr7G3
-tbOztLGztbe5uLe5vru1srO0tLCwqqainJeVkY+GgHl0aGFeW1tbXF5bWVRYWFdX
-V1hXWVdWVVRTVVRWVlVVVlRTVldYWFVYV1dXVVlYWFZXV1hWVFZWVlJTUlRTVFVW
-WFZUUlNUVVJTU1ZWVFNUVVVSUlZXV1VRVlRQU1FTU1RXVlRTVFJSVldVVlxYU1NT
-UldWU1ZWWFpWVFdbWFlXWVpaWVpYV1NVWlxWWFdaWFlaWVdVWFlYVlZZVlVVWVpY
-WlxbWlhXVldYWFpaXFpVWFpZV1hZVldYWFpYWVlZWFhYWlpbWllaWlhaWlxbW1tY
-WlxZW15dWVpYWVxfXFxcV1ZXV1lbWldVV1hdWVpaWltaX11bW1pdW1xdXFpdXl1b
-W19fXltbXVxeX19hYWRlY2NkYGBhYF9fXl9cWl5gYWBfXV1cXF1eYWBdWl56ssnS
-2t/i5efq6urse3p7e3x9e3t7fHt8enx/gnx+e3t5dHh6fH19fXt4eXp7fX15en17
-fXt7e3l3eHl3eXZ3en19eXd3eX1+enh1dnh3eXx6enx6e3x9e3p7fHt8fH58ent8
-fnt7fnp6e3x8e3x8e318e3+AgHx5fXx7enx+fnx9foB/fX1+foB/f3l9fn16eHl1
-dXZ4eXl4c3V0cnZycnV0dnRuc3R2eHdxdn59foB+e3x8fYF+fnp5d3d3en19eHp9
-enp6e3h7fHp7eHl7fH18enZ1dnR5dnd1dXVxcnJzdHNucXJycXJxcnBtbm9vanBw
-bWttcXd0c29wbGttbmtwbm9xcHJwcnBvcnRycW9wdHJzcXFyb3FsamlnZmdkZ2dn
-ZmRhYGBgYWFgY2RiX2JjY2JmYWNnY2JjY2VnZmZnZ2lnZWNkZGdsamRmZWVraGhp
-amZnYlZRTk5MTE9NSEZISEhJS0hMTVFRTlFQU1RWVFdZWFVWVlRYWVpXaGFXW1pY
-VlJXWFZVVVRXVVlVVVhVVlZVWFhZVlRXV1dXV1hVVlZaXV5cYWBgY2RmY2NiYWNl
-Y2VjZWZkZWRpaGhpaWxsa2prbGttb3F2eHd1cXJ1eHd4eoKEhIF+en+DhYN+gYSD
-hIWKh4WEgHt8f3t7e359fHRwcXJxb21ramtvdHNvbnFydXNzeX5+f317eHp5fHyC
-gXx8f4OIjY+UmJukpqOgnpqWlZGVjY6Qj42LiImJiomJjYmDhIiIiImEc2djYWFh
-YV1cWFZaWVxfXV1cW1tbXVlaW1pYWFdXW1xXWlpcWllYXV5gX1tZXF1dW1pZW1xa
-W1hXW1tbXV1YXF1cWFpbX1xbXF1cXl1dXFxZW1pcYFtaW1xiYV5gYGFfYWBlX2Bh
-XmFiYmFhYGNkZ2trZ2lsa21vcXV5eHh4d3VwcnBucWpqcXV4f4aKjZGYlJKVk5eX
-lpmcnJydn6Ohn6Gfnp2goKOfoaGjpKapp6WmpKWlpKWkpaOko6GflZaTjImEgoSC
-g4GBgYCCeXd3dnRycnV4e3p9f4B/gX5/hISIjI2Rk5WZm52coKGgn6CipKWorrO2
-tLOxr66uq6yrsKupqKanrrCysaytr7G2tbexs7Kyt7a0tLW4t7m5tbSwsK6wqKWh
-npiSjouFeXVuaGJhXV1dW1pbVlVVVlZXVlVXU1NVV1dWV1RVVVRYUVBVVVdYV1ha
-V1dXWldXWFZYVlhVU1NSVFRSUVVUVVRTU1RYVVNQUlJSVlRXV1VTVFVVUlpYV1VU
-V1RSU1FTVVRRU1NTU1RWVVJSV1lWVVJUVVhaWVZUV1lZV1VVV1ZVVldXWVZVVFdZ
-V1ZXWVhaWVpXV1ZVV1dVVldYVVdWWFxYV1lZWFlZWWFbXFxYWFVXWlpcXVxZWFla
-V1hXVlZaXVpYWFpZWlhZW1lYWltaW1taXFxbXF1dXFtcXF9aWVpdW1lWVldbVlhb
-WltbWltbXFlcXWBdXFtfW1pbXF1eX1xcW15cW1tcXmBnXF1fYGJiY2FfX15iYWJi
-YWBcW1xeYF1cXF1cXWBdWl5iY3izyNPb4OLl5+nr6up8fX19fH1/fXl7fX17fIB+
-f3p5fn15e35+fnx8e3t5dnl4eXl/fH6AfX17e3t8eHd5eXp7fHh3eXp1d3p6eXd2
-dnV1dHl7fHp9fX59eHh7fnt+f3x6e3t7fHt7fXl8enyAf3t8e3x/f39/f319fH1+
-fX1/fX5+fYB+fHR3enl7g3yCgnx6enh5eHh4dnV1dHN0dHdycHh5bnFzdXd5eHl6
-fXt8fHl6fH16foB9fHt5fHt6enx9fn97eHh6e3h2d3l7enl3d314dXR0dHV2c3Jy
-cXN2c29xb25vbnBxcHFxcXFwbW1ubGxvbW9wdHd3eHNwcHFwdXR0cG9vdHVzdHJw
-cXRyb3Fucm5qbW5xc3BrbGlpamtpamNkYmBfYV9iYGBjY2RkYWFhY2NkZWNnY2Jm
-ZmZkYGJpa2hjY2dpZmhoaWdmZmlmaWZoZ2NfWVFMTE1NTU5OTkpKSktJR01OUlFT
-UVNSU1NUV1dVVVRTV1ZWWlhdW1ZWVVlXVldVVVlZVVRZVlVYWFdWVlhXVFZXVFVU
-V1hUUlJXWVpbW1xdXV9gYmBeXV9hY2JmYmJgYmRnaGlqamxqamtsbWxqbWpwb3F0
-dXh2cXN4eHd5f4OEgoJ8gYODhIKChoWGhYOChoWCfn5/fHp3dXt4cXFwcnBxc3Ju
-aWx1e3ZycnN1c293e319fHp7fXt5en+EgX19hI6Oj5aapqKgpqOdlpSSlI+MjY+S
-jY6HiYqJiYiKiIaFio6MiIF0YV9eX11cWVpYV1ZZWVpcWl1dXVxbWllbWltbWVlb
-W1tXWl1fXFtaXF1bXl9eXFlXVVdYXF1bWVtbW1xeXFtaWVtcWFdaV1xdXlxdXFxe
-XFpcXFteWVlcX19gYV1dXV5iY2NjYmJhYGNkZmhhYmhqbGxqbHBycnV1d3p2dXZz
-cGxsb29ubW1zeH6EiZGSlZiXlpiYmpuYl5ebnZ6dnp2bnJ6enKCfn6CgoKOjpaOj
-paanpqOmpaOlpqalnpual4+JhoOBgX18fHp9fH16d3l3dXR0dXR1e32Bg4SDgn6D
-homPkJGXmp2dn5+lp6Sin6Cho6mrrq+ys7GzsrCutLO1tK+sq6ytr6+vr7Cxs7S3
-t7e5urW1tbO0tbW1tba2tK+wr7KrqKWgmZaSiYJ9dHBrZmNhXlteXlpXVVRWVVZW
-V1VTU1RTVVVWWFhVVFZSUlNWVFdYV1dZVlVXWVlZWFdWVVlWU1RVVFdUU1RVV1hX
-VFRUUldTU1JTVFJXVVJSUlFQUlVZVlRVVVZTUlFQUFBPVFRVU1NWV1ZUV1hXVFNV
-WllWVFRUVldVU1ZTU1JTVVZYWFlXWlZUVVdYV1pbWFVVVVZXVlZVVllaVlRTWVlX
-V1lZWVlZX1tYWFZaW1pZWlpaV1pbWFtbWlpdWltbWVZXWlpZWlpaWVpaWllYWVlb
-WllZW1paWFldXF9dWlxeWVhZWlhZW1ldWVlZWllaW1tcXltZW11bXF1dXVxfXVpb
-XV5bWVxcXWBcXlxcYWJhYF9fX11hYWBfYmBcXlxdXlxdXF1eXl9fXV1gd7DJ09vf
-4+bn6enr63uBgHp+en1/e31/gYKBfn17fX5+f3x6fX2BfH18fXp5d3d2e32Bf359
-e3t6enl6eXZ3d3h2d3Z3e3x5eHh6dnh3eXl8e3x9fHt8fH16e3t7fX9+fHt9fX5+
-fX57fHt8fnp3eHp7e3x+fX1+gH98fHx8f3p5eHl5eXt8e3d5eXx+fX59enh4eXx4
-c3V1dXJzcW9ucXRyc3Jyc3Jxcnh5eoB/e3l+fXl5en5/fnx9fXt8e3l7eXyBf3x7
-e3l5e3l3eXx6enuCfHp5dHd1c3N1dHNubnB0cG1sbm1vbnJwcXN6b25vb292bW5x
-b3FzcnV0dHJxcXF2dnN1bW9xc3Jyc3JycXVxbXNwcHNxcHBucW1qbWtrbmlpY2Vl
-Y2RhYmJkYWBhZGdkZmhkYGNmZ2ZmZmhlZWZkY2ZlY2RmZ2hmaGltcHFsamVnZ2Rm
-YV5UUE5MTlJRU1JQTEpMSEtQUE9TVFRRUlNTU1NVWFVVVFhYV1paVldVVVRWVFZX
-VlZWWVlaVlhdW1pUVVdXWVVUUlRWVFNUVlpZVFhZWF1eX1xcXF1bXF9eXmBiZmNh
-Y2VmZWhtaWdrbGtrcXRxb2ttbXFua3F0dHV4eH13dXh5e3yAgX+BhYWDhYJ/f3+D
-hYSBgoKHg4J/fnt4dnZ8eXl3d3FycnNybW14eXZ0cnV3dXp4enp6en5+f3h8gISB
-foSHio2Rlpyio5+hpKGXlZSQjoqJiYmPjYuJiImLjIuLi4uNj42If29gXFpcXFtc
-WlhYW1pZWl1cWltdXlxcWFlZX1taW1tYWVlXWFtcWltaWFdaXF1dW1tcXlpcW1pb
-XFpaXF1cW1xaXF5bW1dbXVtbWVlbW11eX1pbWl5eW1teYGBgWlteXl9lZWloaGdm
-ZmVjZWVnZ2lqbm5tb3Bzd3t7e3d0cG9raWhobG9xdXZ7gIeOk5aXnJ6cl5ianZqY
-l5ecnZ+em5ubm5+gnp+foZ2foaKioqSjpKSkp6alo6ipp6KempmXkoiGg4OBf3t6
-enx7fXx7eHZ1dXNzdnZ3e4CHhoWGiIWFioqLkJebnp6fnaGmpKGhoKKkqK6trrK1
-tLS2ubm6ubi6ubOurq6ws7Owrq+wsrK1uLu6uLe2t7i3uLW1t7O1sLGvtK+ppaGb
-l5OOhoB8d29sZmdkYWBfXVxZVVVXV1RVVVZWVFdUVFNWVFRUVVdVUlNVVVVVVlZW
-VlRYWVlWV1VWWVdUU1NSU1RUVVVZVlRVVVNTVVRRUVRUUU9TUlNTUFFSVVZVVVRR
-VFRUVFBPUVZVVlZXVFRTUlVXV1RYV1hYVlZWVlRWVVVVVFRUVVNTVlVXW1pWV1ZV
-WlhZVVVUU1RVVFZYWFhXVllXW1hXVVZYV1hZV1hYW1lXVldZV1hZWVtbXVtaWlxe
-XFpXWVpaWllYWVxZWlZWWlhWWFpaWlpYWVhbW1paXFtdW1xaW11ZWVpcWltdWVdb
-WVlXVVxcXVxbXFlWWVpbWltZXVxgXV1eXV1aWl1fX15dW11gYWJiYmFgYF9iYWFf
-YWBeX2JfX19fXFlaW15dXl91rMjS2t/j5efo6OrqgYSBfH1/ent8fH99fX19enl6
-fH59gH15eXd6enh+gH9/fHt5enp6eXp6enp6eHl4dXR2d3h6fHh1eXZ2dn16e3p6
-eXd7fHt6d3d5fX18e3l5gH58fH19fX19enp7ent7enl6fn17fX19fH1/f396fn2A
-gYF/fXx8e3+Ff3t8e3l5eHl6eHd6fHt4d3h2dXZ0cG9ydnZyc3RzdXVyeHh5d3h5
-fX19fnx7fX+Af31+fn17d3h5fHx7e3l3dnx+fX17eXh5eHV2enl4dnN0c3BxcHBu
-cHN0c3R0cXFyb29vc3VwbWhqcHBtcG9ta21ub29tbG1wcnJ0cnFzcnJ0c3JwbW5u
-b3FvbnRyc3Rwb3FxbHBwcW1oamhlY2NkYmFnZmdpZWJiYmFkZ2VjY2JiZGdlZmdo
-aGtnZWNlZWdmZGRiaGlqa25ramNjZmRgWldOTkxOUlJTUEtLSkpKSUpMTlFVVFNS
-VFRUU1RZVVRTWFZZXFpYWVZTVVVZWltbV1ZXVVZVVVpYWldWWV1XVlZTVFVSVlRT
-VVdXW1dYXmNdW1tbXl9hYmBfYWhqZmlqaWZmZ2dqZGZpaGtuc25sbW1tbWtqbXBy
-c3N5fHh2dXl7fYGBfn2Bg4WFgoCAf4CAgoOAhISAfX6Df32BgX56end7eXp3dnV6
-c3R1eXZxcnl6eXt4eX5+fH57eH2ChYGAgoSLiIyNmJyeoKSknZeYmJaQjImKjIqK
-jImHi4+Pjo6Oj42OjIZ6a19fX1xbWllaX1tdXFxZWFhZXFpbWFdbWl9gYV9cWVpa
-WllYWl1cW1hWV1hbWltfW15eXFtaXFpbWVldXFpdXFxaWltdW1xcXFtdXl9fYF5e
-XVpdW15dXFxeYGFiXFxfX2NnaGtmZWdpZmJoZWdsa21ycnJ0dnd6fHt5dnZwbWtp
-aWhqbHJ1d3uDiIyPl5qgn5+ZmpyVmZmYmZiXmZ2fmpuWnJ6cm5yem5qfn6OjoqKl
-paampqWnqammopuWlpWSi4eEhIJ/f3x8e3t+eXx7eXZ3eXd0dnV9e36Ch42RjYqJ
-jI+TmZudnqCioaOmpqanpqatrrCxs7e5t7q2trW4t7e4sbCwrrKztLSyq6qrq7G1
-u7e2t7i2t7e2srOytLKvsaytsaunoJ6ak5KMhn52cm5pY2JiZGFaV1ZYVlZTVVVW
-V1ZUVVVTVFNVVlVZXFdVVldXVlRTVllXV1ZVVFRVWFpaVlVVVVRVWFVVWlZUUlJV
-UlRSUlNSVVRTU1ZYVVRTUlJRUlVUVFJUUlVVVFVXV1VVVFVTVFRWVldYWFpaV1hY
-VVZVVlVWVFZTU1VUVVRTVVVVVVdYWVhYVlZXV1ZWU1JVV1dXWFlYWVtZV1dYW1xa
-V1dZWVtYV1pZW1ZXV1ddXmBcXFxXV1xdV1ZYWlpYWFdWWlpZV1tYWFlcWVlcXFpY
-WVxdXV9bWFhaWlhaXF5dW1tbW1pbXFpaWllcXVxeW1tcWV1XWVxeW1pbXVxeXF1d
-WlxiW1pfX2BgYGFkZGRkY2NiYGBhX19eXVxcX1xeXV5dW1lXV1xdYXmsxtLb3uPl
-5ujp6uuAfYF+gYF9fn59e3t9e3p6e3t8enp6enx7eXl7fHx6fHx/fn95eXp6e3d3
-eHl2eHt7eHd3eHh4dXVzc3V2eHl4d3R1eXh4fXx6d3h8fHp/fn5+fHx8fn58fXp6
-ent8fXx6fX1+fXx9ent8fHt9fX19fHt8f356fX1+fn+Agnt6eXd2eHl3dnV2eHd2
-dXR1dnZ0c3FxdXZ0c3JydHl2eHV4e39+fYGBg4R9fn1+fX16fHt9e316eHl4eXl8
-fXt8fXt7eHV0dHh4eXZzc3BxdHJxcnFycHNwdHN0c3Jxb3Fvb29tcHFvam9yb21t
-bWxwcnBxcm9wcnJycG9vcHd2c3FwcnJxb29vb3NxcnRyb3FxcG5tbGZnamlmZmZr
-bmlqZ2NkZWVlZmVoZWRhX19nY2NlaGZma2hjY2NiZWRlaGhlZmZpZ2dqZGZkZWBe
-WFZNT05TVVRSTEpGSEhJS09PTlBQU1VZVlZVVFdXV1lWVlVXVFZWVFZYX19bV1hW
-VFRWWFlWVlhWVVlWWlhaW1dVVFNVVldUVFdbW1leYV9eW15fX2JgX2JlZmpiY2Jm
-ZWNnaGtoZmRoa2tsbWxsbWxuamhuc3VzdXh6eHV3eXt8fX+CgH59foCCgoWCgIGB
-hYJ/g4GAfn6AhIaFgH93eHp6eHt+fXx0c3d4c3Jxdnl5e3l3en+BgYN/gIWDg4SC
-g4eEiY+UnqOkoZ6Xl5mWlYyJhoqOh4qMhoeJjJCOi4mLjI2JgXJkYWBfXVtcWlxg
-XFlZWVpcXVpdXV1dX1xbWlhdXVxfX15dWldYXFxcW1xcXVxbW15dXFtZXl5dX1xb
-WV1bVVtdXl1cXl9eXF1bWlpbW1tcYl1eX11bW19eYGFeYmJgXFtdYWhoZ2lnZmlr
-aWZlaGttc3ZzdXV5fH5/fHd1c21raWhoZmhuc3h8goSJj4yYnqShnZqXm5mYl5ib
-m5mcm5qbm5eZmZmampydnZ6joqOko6SjpKSopqWopKOgmpmUj42Ni4uGf4KAgHx8
-eXZ5eXp5dnZ1d3VxcXB2foOFh46PkpSUk5aTmJuin6Kjp62rqaqrqqurrq+vs7a3
-vb64trm4tba4tK+trK63trKtq6yorbO0tLe5urq6u7e3tbS1sbKurKutrKako5yW
-k5CJhn90cG1pYl1gYV9YVlpXUlNVVFVVWVpVU1ZXWVVWUldZWllYVVZUVVdXWFlZ
-VlVVVVRXWldXVldWVVhZWVdWVlVVUlJUVlVUUlRSU1VRT1FUVVRTUlJUWFdUU1ZW
-VlRXVlVUUU9QU1NUWVdWVFNWVVRbWFZaVVZWU1RVV1lXVllWU1RUV1ZXV1dWWFZU
-VFRTU1RWVlJVVFVZW1RVU1RZWFlXVltdWVdUVFdZWVlZWFdeXFxZWl1bWVdXVldb
-WlhaWVhYWVdYVlZbWlhZXVtaWFlaXV5aWltbWVpYWVhXWFpdXVxbWltcXltXVllY
-XFtZWlxcWlpbW1xaXFtdWlhXWFtcXFtZW11fXVtgYmRiYF9dXmBgYWBfW11eYF9d
-XV5fXVpYXVtbW1taWFpddq/F0dnf4uTn6Onr6n57e359fH59fH19f35+fYB/fHt8
-e359fXp4e31+en17enx6fX5+e3x8fXl7fHt6fXt9eXt6dnl2dXV2d3h3eHd4fXx6
-fHp6e3x6enl7fX5/f39/f39+enp8fH56fYB9enl+fH6AgHx7fn99fX1+gH15enh6
-e31+enp8fn5+f314eXd2eXp4eXZ0dnZycHFzcnF1c3JzdXR0c3N1d3V3eHp5eXt+
-f4CAf3x+fX5+gICAfnt9fXp4eHp8e3t5fX5+e3h7eHd4eHd4c3N1dHFzdXFwcG9v
-b3BxdXFwcXN0cHV1cW9tbm9xb25vcG9wb3Jzc3J0dHN0cnN0c3FycnZ1c3Z2dHRx
-cnNxc3JycHJtb29ycW5sa2lqanBta2lmY2NkZmZnaGlnYmNmYWFmZGNmY2RmZ2pm
-aWlpY2NlaGJlZGRmaGVmYGJqZmNjYVpWU01NS1FTUU9JR0RGSUtNTU1OUE9PUFNV
-VlVTVFRYVFNTU1FWWVZYWV1cW11WVlhaV1pYVVRVV1hbVVRTVldWV1dTVVhWWFNV
-WFpaWlxcW11eX2BiYV9dYGFeXmBiYmtpZmltbWpmYWZpaGtrbGpqaGtqaGtwcHN1
-dXd1dHd5ent+gYB/gn9/f36AgYF/gIGFhoWDgYSEfX2Ah4F4eXZ1fX15dXd+fHVy
-dXh1cnZ0cnWBfH2CfICDhIiEgICGhoeGiISJkJeam6CfnJqbmZiUjYqGiYiIjI6N
-jYmNi4uJi4qMjYd/dGhkYGFdXFxdW1xbW1pZWl1bWVteXVtbWVlZWFpaW1xdW1xc
-WVhdWl1cXFpcW1tdXV5bXF1aXGFfXlxbWltZWFlbXV9fYF9cXV9dXFtbW1pcXWFg
-YWBgYl1dYWJfYGBgYmBhY2drbGdpbGlrbWxsbXF4d3t8foGFgn56eHZwamdnZGVo
-a3F2eoKGho6Vl5ueo6GenZ+Wmpmbm56cmpiZm5iVlJeXmJqbm6GmqKWnpqWlp6ek
-paamo6GfoZ+dmZSTkI+NiIaGhH9+fX16enl3eHt4eHp5dXBwdHZ4eoGHjJGUmZmW
-l5iUmp2hoqOlqrCvrKqsrayvrK+0tbe9vLu5uLm4tbOxsa6trK2wsLGuramorKyw
-sbW4uby8uLq1tLS2sbCur6yrp6aknJuWkY+Mg3lwbWhjXl9eXmBeXVpVVlZYWFlW
-VFVWVVZWVFZVVVZaV1dXVVVUVVJVV1RUVFVVVVVXVlVVVVhYWVhYV1ZWV1ZUV1VT
-UFVVVFFWVVNSVFVVVlFSUVVUWFVaWldUVlhYUlJPTlBTVVdTU1RUVFRSVFZWWFhX
-V1dUVVdXV1pbWFhUU1VWVVZYWVZXVFJWVVdZWFdaV1ZYV1lbWlhZWllXWFZVYmdc
-WVpXW1lYW1pXW11cXFtcWVpZWVhWV1tbW1laWVdaWFlWWVhZW1lbXWBeWlVZWlta
-WVlbXFlbW1lbXF9aXFtZW1xYWlhWWFVYWVdYWltaWF1aWldWWltdXlxbXFxbXVxc
-WlteXF5hYGBcXFxfX2FfXltcYF9gXV1cXVxaWVlXWFlZWVxbWVp0r8bS297j5Ofo
-6OrqfHt6fXl9fnx7enx7enl5enx8eXt6fHx7enl4eXp6fH5/fX1/fn17e359fHp9
-fXx9fYB9d3x5dnh6eHh4eHl7e3l7fH2Afnx7fHx+fH18fX19fXh5fnx7en18f4aB
-fnt6e3t8f4F/fn18fH6AgoJ+e3p7fX5/f396e3p6eHt+enh6eXl6eHp1dHRwbnN2
-dHJydHR0dHVzcnZ0dXN2eHh5eXt3en17fX1+fnp8fYCAgXx9e3t8fXt6e3p6eHd7
-fnt5eXt9fHt5eXZ0c3JydnN1cnBycXBvcnJvcG9wcG92c3Fsa25vcG5raWxucnRy
-b3Byc3NydHRwcnR2dHFzcnN0dHV3dHR1c3ZzcW9wb29vb29ta2lucGtqaGxsa2hj
-Z2RkZWZlZmdmZGNmZWZkYGJjYmRmYmJjaGlmZ2dnZ2ZoY2RmZGRnY2NmZmVfXVZR
-UE1MTVFQT1FNSktLTEtNTE1RUVJQT1NSUVFTVVZVVlRVV1lXVFlaXVtZW1paWFdW
-VlZXVVZYWFlXVlVXXFZXVVdYVldXWllXWF1bW1teX2BbW1tdXmJhXlxfX2FkZ2lm
-Zmdra2VkZmhsa2lpb2pmZmxvbW5vc3Z2eXl5fH18fHt/f36Ag4N+fn5+fHp7e36F
-goWMiISFgoKFg319f3x5fXx4dXd3eHJ1dnd9d3V6eHl6f3t7gIJ/gH59gomHh4eJ
-io6Pj5ebmp6dmpualZSQjo6MjIqKjY+MiouLioyLh4eGgXpsYV1eX15eYF5dW1pZ
-WVxaWlZVWVtbXVxcX1xYVllbW11eWVpcWl1aXFxdW1tcXFlYVlhaXVteXV9dXF9f
-XF5bW11aW1xdXl5eW1tZV1laW1hcXl9iZWBhZmNjYF9hZGVlZmRmZ2lrbGxtbGtw
-cXJ0eXl7goKGgoGCfXp3c2tnZWRlaGlsb3R6gIWMkpyhnqKjnZmal5WXmZmampiU
-lZiZmJmZlpmemJeZnqCjoqOkpKanpqalpKOhn5+hn5mWlJCUkY2KhoaAgYF+fn2A
-fXx6fH18fHp5d3Z2dHZ1d3+Jio+Qk5SWmJubnp6joaWoq6usrqyqqq6tr7K2srrA
-v7++vLu2tLO1trOvq6uvr6urrKuoqKastLa5wLy7urq6tLGysK+wra+xsKeinZmY
-kYqDf3ZuZmVjXllYW1xdXltYWlRVVVVUVllTVVVYWFZUVVhYW1dTUlJTVFdWVlFR
-U1JUVVRWVlVTVFdYV1dVUlNRVFhVU1NTU1VUVldUUVVVVVZVVFJUVlZVVVJSVFVV
-VVhVUlRXUlJSVFVWVFZWVVRXVVVVWFlZWlhVWFZWVFdWVVVWWFpZVFNTVlZVVlhX
-VVZZXFpbWFdWWFldXVtaVlZWWVhbVlxaWVtZV1lZWVhZW1paWVxcXFtXWFZWVFlX
-WFlYWVpZV1dYWFhaWVlWW1xcW1daW1lXWFpdXFtaWF1bXVpXVlhbW1pZW1lWWFhY
-WFhbXFtaWFtaXF1cW11eXVxbWlpcXlxdXFtcXl9fXmFfYF5hYF5hX19fXV5fX11g
-XltfWltcWVhWVVdbX3Wwx9Ha3+Pl5+np6ut6eXp6fXd5eHd4eXl8eXl5ent3eXx7
-eXd7dXx8fXp6fH58f39/enl7fX1/f318fXx6ent5eHd3eHt7d3t6enx6eHl8f359
-fnx6ent6fHl7e3p8fXx/gH18foGBgX9+fX19f4B/gH19foB9fXx/fnt/f4B9fH9/
-fXyAgH5/e3V6eHd7e3p3d3Z1c29ydHBxdG1vcXNxcHFxdHFxdHZ4d3l6enp5eXp/
-fn9/e3x8fX9+fX17e3t6fnt8e3h4dnh5enp7fn18fHh2c3J2eXZ1c3NwcHBwcHJx
-cm1wcW5sbHFwb25tcXBvcG5tbXBxcXBwcG9tb21vbm9xb3FwcnJ0c3BzdnRydHd3
-dXJybW1wcXBua21sa21ra2psamlpaGlkY2ZlY2FiYmFgYmZmZGVgYmhmZGVjY2Jl
-ZWRlYmNpZGVjZmhmZmdnZmRlY11bWFNRU01MUVNTUk9OSUlISUpNT1RTT1FPUlFS
-UlNTUlNTVVRUWFVWV1hZWVlYWlxYWVhbWFdWVlNSU1NTVlpcXFlWV1hYXFhYVlha
-XFpbXV5bW1pbW11iZGRgYmNjYmZmZGdlY2ZnZ2dobG1namxsa2lsbndxcHJwdXR2
-fX16eXp6eXt7gICCfXt9fX5+gICAfn+DgYaHh4eIh4aBeHl/fHd1enl1cHF4dXJx
-dHR5e4B9e3p7d3x/foCDhYOChoiIhIOHjY6SmJ2ZnaCYl5aXlpKPi4iJi42OjY6N
-iouNjIWCgoN/c2JcWl1dYWBeXV5dW15dXVlYVlhbWVxdWllZWFhbWVlfXFhaXV1b
-WVpbW1pbWFdZW1xcWVlaWlxdXF9fW1xfW1pbXFpaXFxcXF1dWVlbYF9cXV5gXl5e
-Xl9kYWRlY2FjZGZpaW1pZ2dpaWlqbXR9fn+BgYB/gYaFf3pyc3BqamZjYmdqbnF2
-eYCBhouTmp6gnp6cnZubl5yenJiZk5OSkpCUl5eWmpudnJ2dn6Oho6Gio6OkpaSj
-oKCenqCcmJSSjo6Nh4aHh4J/gH9/fn59fnx8f35+fHx8end5d3Z4fYKJjZKTlpSV
-mp6eqKWrqamqq6ypqKarrq2wtLWyuL/Awb68urq2traztbSwrK+tra2rq6usrq21
-t76+vL26trS0tLSzsa6vrqympKKfmpmTjYqCeW5lYWFlYV1dWldbXlxZVlRWVllb
-WVVUVlZVVlVVVFJSVFRSUVJVVlVUVlJWV1RWVFRUVFJTU1RTU1JTVFZUUlJTVVdV
-UVFSVFZWUlNUVFdWVVRUVVNUVFVWUlBSU1FQUVNUUlBSW1VWVVVVVFNSVVlbWFdY
-V1hVVlVYV1ZUVFRWV1hXVFdWVlhXVlZXVlhYWFhXWlhYWFhYW1hYVVdZVmhiY2xb
-WVVVV1dZWFZVV1lYWVtcXVtZV1VXWldZW1paWFhXV1dWVlhVVlhZW1xbWVpbXF1e
-WltaW1pZWFpaW1hZVlZXV1ZXWFhZWFhaW1taW1lYWlpbXF1gXVxfXVtbW19fX15d
-X11eXl5eYmFjZmNhYWFgYF5gYV5hYGBeXlpZWWFcWlteWlpfd7PH0trf4+Xn6ens
-631+fn99e3l8eXl4enp9enp5e3t8enp4eHt5eHh5eXp8e3x4enp5enp7eHl6enl6
-e358enh5eXd5enx7e3t9fXt9eXh6gX17e3t9eXx7e319fn57e3t9gX58foF/fYB/
-gX9+fHx+gX5/gIF/fH19fH17enx8e3t8f4F+e3l1dnyBfHx+enh4eHRzc3FycHFz
-cG9tbmptdHJxcHFzdHR2d3V5d3t8en17e3x8fXx5fH5/fHt5eXh4enx6dnd8fHh9
-enl5eXx4c3Vyc3N0d3FwcXFycGxucG9ucHBwcXVzcGxtbHB0cG1ucXNzdXNwcHBu
-bW1tbm1vbm90cW9wcHNxcXRzc3JydXNycHJwb3F0cXBwcG9xcW5ua2pramlsZ2hl
-Y2JkYmBfYmZkY2RkZGJlaGhmZmhnaWhpZ2RpZWhoZWNmZWRkZWZpa2VjYl1STkpJ
-TVBTUlNRTktJR0lITFFKS1BSTVBPT1VWUFFSVFNUVFVTU1JXV1ZZWFlXVFdWWVZY
-WFZZVlJUVFRYW11cW1tWVldYWFdbW11aXV1fXldYXF1fY2RkY2RkYWFfYWJnZmVi
-ZWZpaGlpbGtnamtsaGpxdXZ3dnZ3dHV4fHt8f3l6dn18gIB8fH9/f39/gIF/gYKC
-g4WKi4eFh4F7fH15c3Fyc25sbHd6dHZ3dnp8e3x+eoB9fYGAfX+ChIOFh4eFhoeF
-i42VmZmcn6GbmJiVkI+QjIqPkY6NkJKRiYmJhoOChH5wYV1ZW1xdYV1cXl5eX1tZ
-XlxfWlhYW11ZWVlYWWBcW1xeWllZWVpcXFpaV1pZV1haXVxaXFtZXF1cXFxZWlpe
-YF1bWVhaWl1cXF1eXWBfXl9fYWFgX15eYmhnZ2RjYmBiZWtra2lra2tsaXF3f4aK
-ioaGiYiMgoJ8eHVuaWVkYWVmZ2xzdXt9goOGjpebnJyenqCgm5iWl5eWko6QlpeU
-kI+RlZaYm5+dnqGhoaOjo6Wko6Olo6OhoKKeoJ2XlpWTj4qKiYaFh4aEgXyBgoB+
-fIB+f356fn17fn98fHx/goaHkJSWl5yen6GipKipqaytraytrq6vsrGysbS5u7q7
-uLu8uLa1uLSwtbOyrq6uq6qsqamtsa2utrq6vLi5tba0tbSysK6xraakpqadlpSW
-kop+c2pmZWdoZF5bWVZaW1xdWVZVVVVYU1RUVFhXVlRWU1dVWlZXV1ZUVVZXWFZX
-V1ZUVFVVUlVZVlJQT1NTV1NRU1RVVVNSTlFQU1ZVU1NRVlVUVVVUVlVUVFNTVFVV
-VFRVVFRUVVNSUlVWVlVTU1NTU1lWVVVTVVVUVllYVllXUlVWWVtWVFVWWFZWV1dX
-V1hXV1dXWlhZWFZbW1xbVlhVVWRgVlJUVVRbW1pYWVpXV1pXVllbWVlaW1pVWVhY
-WVdZXFxbV1pbWFlaWllcW1xbWlxbWVtZV1dZW1pbWlpZWlpaXFlWV1hZWFpbXFxZ
-V1dbW1laXFpaXFteXVxdW1xdXmBhYWBeXl1fYl9fYWNiYmFhX19hXmJhYmBfXV9g
-Y2BeXV5dXlpcXl97tMjR2t7i5efo6evrfH58f319e3t6e3t7e3t7eHl7e3d6eHh9
-eoB7fHh6end5enl5eXl7fHp8e31+ent7e3l8e3t6e3l3eXx+f319fXx6fHl7enl6
-e3l7enx/gH5+fH1+fX58fX9+fn59f3+Af3t8fH5/fH9+fn9+e357fHp7fXx5fIB+
-fH57fH9+f3x7e3l6d3d3dXR0cXNzcXBxcXFybXNzdHNxcnF0dHd4eXp4eXx7e3x6
-e39+e317ent7e358fnx9fX58e3x6eXh9f3t4dnh3dXp2dnRycnBtb3JubW1tbG5w
-cHBxbG1vbm1qb3FxcG9xcm5vcG9tb3FtaWlmbGpqbXJwb3Nwb3Bxc3Nxb29zcG5y
-dXRycnFycHR1dHJub2xtbm1ra21paGplZ2RlY2VpaWhlZ2VmZmZhYWdmZmhqa2pq
-aWVlY2ViYmJiZGhqamdlZWNgXFVOSEVJTFFRUlBMS0xITElNUU5LTlJPT05UVFFP
-UVJWVlRUU1NZWlZXV1dYVlZTVlhXV1VVV1haW1dWV1VaXFdWWFVTVlZYW11dX11d
-W1tbW11fYGRjZWhqZmRjYWFlZ2ljZGJgYWRnZWNra21ucHNrbnJ0c3RzcnJ0dXd0
-d3l5enh1dXd4eX+BgYR/eHl8f4KDf36BhImKh4aFf4B+e3t2bm1ub3Bsc3d3fX10
-dH58c3p5e357gIOAf4WBhIKEh4mLjYuLj5KanJydoJ2dmJOUlJOTjo6OjI+RlJGL
-iYeJhYWAeWthXl5aXFlcXV1eX15cWlpcW1hXV1tbW1tcW1xWWFlaXWBgX1taWlla
-V1lYWltaWVpbW1taXFxbWVtaWVlaXFxfXV1dXFlbW1laW1xeXl9fXl5fX19dYWln
-Y2ZmYmFjYmNnZ2psa2tvdniAfoOHh4qNjYuHhIF9endybGVkZGNjZWdqbW10eXyE
-iY+WmJeYlpycm5malpeVlpSPhoqRkY+NjpCRlpiZmJ6dnKCko6Kio6Wkp6ijoqOk
-oZ+dm5qal5WSjo2Mi4iFhoaEg4WFg4F/fH6AfXt8en2Afn6Bf32BhIeHkJSVl5qa
-mZqfoKKmqautr6+xsrGwr6+ur7O5uLi5vLu8ub66tK+ysbGrrLCuq6qqqaysrLCx
-tLq4t7e4uLW2t7W1rq6vrquqqaKdmpSTjYR7cmppaGhlYVtWVFdYXV5cXFtXU1VW
-VlhXVlZXWFVWVlZYVVVWVFhZVlhaVVJWWldTU1NUU1NSUlVXVFVVU1VXVldVWFRU
-T09TU1JUU1VVVVhXVVRTVFNSU1VUVFJTVVVUV1VVUlRTVldYWVdXVlNVVVZWVVVV
-WFdVV1pYVVRWU1VXWFpZVlhXV1ZXWVhVVFVWW1dYV1ZUV1lYV1VVWVpYVFZXV1ZW
-WVtaXF5ZWlpeXVlWVldYWVhXVFVUVlhZWlxbWVlbXVtcW1paWlpbW1xeW1tcWllb
-XFpYWVxZV1dZXFxbW1lcXlteWltcXF1cWlxeX11bXlxaXFtYW1tdW1xcX2JiY2Fe
-XmBfXl5fYGBgYWNiXl5fXVxeXl1eYF5fXV5aXVtgX2BfYXi0ydLa3+Hl6Ojp6+p4
-e318fHx+f4J+fn5+e3h4eXt8e3h7fX17fXt5eXd6eHd4e3t7e3h4e3t9fn19fn18
-fH18eXp8fXx7eoCBfHl5e3l5eXl6fHp6ent8fHx+e396en5+gIF9fnx7fn98fHp8
-fX59fn6Afn1+fX55fH19foGBe3p+e317e3x6f3x+fn1+fHt5enh1dHR1dHJwcHF0
-dnV7dHN2enZ0c3N0eHd5f35+e3t6ent6enx3fXp7e3t8foB/enp6en+Bf3t6e3x8
-e3p2dXVycHR2c3FwcHBvb3BzcHJzbnFwcnBwb29ubWxub25ucW5tb3BubXBxb2xu
-bW5rbG9rbG91c3FycHFzcnNydHV0dHV2dXFsbXF0cnNxb3BycW9ramptbGtsa2lq
-ZWZlZWVoZmdkZ2dmYmJkY2ZmZWVlZmRjYmJjZWBlZWViY2NlY2RmZF9aV09KSEpO
-UFBQTk9OSkhJTlFUUE9PUlFOTlFRUFJRU1ZXVFFSUlNXV1VXWFlYV1hZWlZYWFZY
-WVxgXltZWVtZVFRVVVVZWlpdXl5aXV9fXlpVXWJkYmRiZWNdX2VoZWZkYmRiYmJj
-ZGRlZ2tqamtwcW9ubm51c3FxcnJ1eHd4enl6d3dyeHd9fYJ/gX55fXl8g4R/gYKE
-g4ODgnx5d3V1dXNycnFtcnFwdnV7fnZ0d3h0dnl6fX2AgX5+hYR+gYaIh4yQkJOR
-mZ2anKCfnZ2alpSUk5GQj4yMj5KRj4mFhYWEhIJ3amBeXVtdW1lZXVxdXlpaW15d
-WVpaWVpYXFpYWlpaXFxcXF5ZWVdWV1ldX11WV1haW1pdXVhZWlxZWVlZXFxaVllb
-WF1cXFxaWlpcXV5dXVpcXV1gYl1eYV9eYWBfYGNlZmlraWtzdHR5g4iOjouKjY6P
-iYeBe3x0cW1oY19cXGJmaWtwdnd6g4aMk5eblZKVmZuZmJSTlJOPj4+JioyLi5CQ
-kpKWlJaboqGjnp2io6GfpKqppKOkoaKem5ybl5WUlZGVlJOPjImJh4WDhIWGhXp9
-f4B/fnx8fXx/gYKDhIKChIOHj5SZl5mbnZ2goqKorq2vsbKytLCwsa+us7a1uLu/
-wb25u7i3trW2sq6rpaSkp6anqqqvrq2xsLS1t7e6ube1srOwq6muqKShoqGempeP
-i4R7c2xpZ2drZltaV1ZgWFlbW1lXVlhXWlhVVlVYW1hUU1daVlVUVFdWVlRUVlRT
-VFdWUlVST09RU1ZWVVNTVVRVUFJUVVVTUVNTUlBST09SUlNUVldWVlJUVVRVVVRU
-VVNQUVZXWFZWVVdZV1ZTVVVXVlZXWFdZWlpZVVZUVFVXVFRZWVdXWVlXVFRXVlVW
-V1lXV1ZYWVdYV1hXVFVXV1paWVlaVldVW1taWlhXWWBbV1ZVVVRYWVpYV1hYXF5b
-WVxZWltaXFxcXFxbWVlaV1lbWFdYWVxbWlhYW1tZWFhdW1xZWFpcXmBfX11cXV5h
-XVxaW1pcW2BgXF1ZW11eWlpcXV1gYF1gX2BgXlxeYV9dZGBdX2FdXl5eXV5fXFxb
-V1VXWV5hXmBieLHK09rf4+Xn6Orr6nt5enuBgn9/fnx6fX9+fnp3eH5+fnx7fXp5
-fHp6eHh4e39+enp4eHd7eX17fn56eHt9enp3d3h6eXp7eXd1dnZ5fHp5ent6dnB7
-e3p9fn1/gHx7foJ/gX+DhXx9fXp6e3x9fXx7fX59f39+e318f35+fXx5eHl7ent8
-fHl7enyBf31+ent3dHFycXJwdHBzdHV2cm9zdHl4dnZ3d3d1dnp6e398eXx9fXx7
-enx9gH99ent8enx7e3t6e39/fX99fHx6eHZ3dXRybnBzcW9wcG9xcnFtbXBsbW1t
-cHFwbW1tbm5uc3FubW1ub21sbnFwdHRxcG9xcXFwbXNzcnFxcXFwdXh0cnBwcnJ0
-cHFwdnNxbG1wdHR0cG5sbWtpaGxsa2traGdmZmdoZ2ZoaGdkY2BiZ2VlYmJiY2dm
-ZWVmZ2djY2FeYmBhYWRhXlxWT0tKS05OU09OTU5OR0dNT1BOT05SUlJNS09SU1JR
-UVJRUFBRUVBUVlhYV1dZWFlbV1dXWVlaXl1eXVhZWVhWU1dYWFZYWl1cXlpYWV5c
-XWBgX15iYWFfXmRlamZiZGRfYWNgYmdpbGlnaWdoZmlubGpub25wbG9ydnl3d3h6
-fX16d3Z2eXp9gYSBgYCCgn2Ag4GBhIOAf396dnV3cnNuamlqb3R8dW55e3p7e3p4
-e3x6gH16eoOGhYB/g4WDh4OJj4+Ul5aXm5qiop+coJ2ZlJKQjZCQjIyNj42OjouN
-ioiFfXJlX1xbX19eWVlcXV1bXllZXF5fXVxaWFpaWVlYWldVXF1bW1taW1laWVtb
-WFlXWllaW1laV1laW1tYWFlZWlleXltZV11cWltZV1tcX19gYmJeXltdYV9bWl9d
-X15hY2lvamlpbHJ5goOFjY+RkJGUkIqFhoN6cG1mY2FfYWBiYmZrbXN5doGHjJOX
-mZ2fmqGcmpeXlJOKj4+LiYiKjIqKjY+Qj5OVmZygoaGioKCenqCgo6GgoJ+fn6Ce
-npuYl5WXmZWWko2NjIuJhoWDhYSFgoSCgYCAenp9d3yChYmHh4yIhoaNkZSYmp2c
-nKKjoairsbCys7KzsrO0sbCytra5vLm7vr3Awb6+vLeysa6noqWio6OpqaiorKyt
-srK4v7y6urm0sKyvr66tqqmnpZ+dm5WRjoJ4cGtqaGprZl1bWFpRU1lZWlhYWFpX
-VldXVlZWVFdWVVlZWFhWVFVUVFNWVlFOUVVXVVZVU1FWV1NUVVZXVVVTUlJSVVZU
-UlNVVVNTUlNUVFRXV1RSVFJTVlZTVllWVFNSVFdVVFNTU1VWVVVVVFRWV1hVV1pZ
-VVVXWFZXVlZYWVlZWVhXVlZXVVNUU1RUVlhXWVlaWlpYVVZYV1dYWltYWFdaXVhX
-VVtcW1paWVdXWFpWWVtZWVtbXV9eXF1cWFlaW1taWl5dX15aW1lbWllYXFxcW1ta
-WlhZWlxbWlxcWllZW1taXV5eXl5cW15eX15bWFtZWllZWltdWlpcWllaW11hYGBf
-X15eXF5gYmNeXV5eYGNfXV1eXlpYW1lcW1tbWllbXF5zssjT2+Dj5efp6evqgoGA
-fHx8gX96enl3e3l5eXl6fYR+enx8e318eHt7e3p6e359f3x7eHh6fXx7e3h4d3l4
-eXp6e3x8e3h2d3t6eXp6fHx7fXt5dnt7fHt8dnx8f316eX59foGBf3x9fX16fH18
-fXp5fH6AgYB/gH59fXx8fnx7e3x7eX1/fHZ5fHx6eXl3dnZ1cXNzcnFycHFycGlu
-cXJ0dHR1dHh5eHh2dXZ3enp7eXt7e3t7fH99fn18e318e3p7eX1+f4B/fXx8e3Z3
-eXp1dnd2c3Nzc3RvbXBycG5qbmxsbG1tb25tbGpqbG5wcHFvbG1ta21xcHBzd3Jw
-cnJzcW91c3Bxc3d2c3B0dnN0d3VzcnN1dXRxcXJxb3Byc3Fxb21sbW9vamtua2lm
-ZWZmZGJgZGVmZmJiYWRlZmVjZGBhaGpqZ2tqaWdmZWRkY2JlYmNfXldRS0pKSU9P
-UlNOTk5NSUpNS01RUFBUUk9LT09PT1FSTU1QVFNUUFFVWFlYWVtaW11bWllaWFdZ
-XFtcWldYVlhVVlhXV1ZaWFlaWVpbYF1gYWJiYl1bYGJgZmdpbWpjZWRkZWRkaW9t
-aWllZGdnbnFxcHFram9na25xcnJ1d3p+fXp7end7fH57gIJ/goSDgIGEg4aCf4N8
-fHl1d3Zzb2xoam1vdX54cneAenx5fICEfoF+enx8f4eKhoaHjYmGg4uPkJOWk5mg
-nZucnZ2cmpOQjpCSkI+PjY6NjImLjY2LjIiAbmZhYF9eWlpfXV5dXFtZWFtYWFlb
-XV5aWVdYWVxdWllXWFtaWFZVWFxaWFZXW1taW1pbWlhYVVpaW1laWllYWlpdXFxb
-WltbW1hZXGBgYV9hYWBhYGFfYmJeX2FhYGVma2xranB2fIWKiYyRkJWUko+NhoR/
-eXJtZ2FdX19gYGRlZ2txcnl7gIaNl5WYnJ6dnJqYlpOVkpGOjI2HiImMjI6PkpSU
-lpmbnJydn6CgnZ+jo6KioqOinqGgoKGem5ycmJaamZiRjouJiYiIg4aIh4iGh4aC
-goKDgH9/eX2ChIWIjZGOjIuNkpeZm5udn6OkpaqvsrKytLa2uLa2trW1t7m5urq6
-urq6wMG8ubaxrqempKGio6WlqKqrrK6vrLG1uLi4t7S0sbOzrq+yq6qkoaGcnJSP
-hn95c2xnZGZmZGVgVldWVFdaXldXWFlZVlRWVVdYV1ZVVlZYWFlUUlFSVVVYV1JT
-V1lXWVhWU1NUV1dVVFVTUlJTVVJPUVBSVFdWUlRVWVRRVlNTUFFSV1dTVFVSU1NU
-VlZSVVVUVVVTV1pVV1RSUVRTVFhUUFRWU1VXWVpVVFVZV1VWVVVZWVdYVVVWVVhZ
-WltYV1pZWFpZV1JUVVVWWFdXWllZW1hbW15eXFhYWVVXVVVVWFhYV1haV1dVWFtZ
-WVpaWVZYWlxdWFlbV1pdXFtZXF5cXVtZWFVXWFlbXlxdYF1YW1tcXF1cXVxfXFtb
-XVxXWl5dXFpcXFtaWFtbXF1eYF9eWV1eYWNiX2FfYGFeYWBgYWFgXF5cXllZXVxd
-XlpaWFlfX3GxyNLb3+Pk5+np6et9fn16fX16fHx7fn16d3h5eXh6fH58fHt+f317
-enl4fHt7fn5+fXp7ent8fH19e3l5enp3eXt7eXt6eHl5e3l6ent6e318e3d8fX1+
-f3t5fnx7f3t7fH5/fn5/f4B9fYB8fnp9f3x6e3p9gIJ/fnt7fHx8fHx7e3x9eHt8
-f3t6eXh1dnh5d3Vza3V0dHFwcG9ubW5xcXJwdHd1d3d2dnZ2d3l7eXd3en9+foB7
-foCAfYB/f3+AfXp6e39/gX99fHt4eHZ3eXZ3d3l1c3Jxb3NwcHBwbnBtbGtra2xq
-bWxsbm9tbm5vcG1rbG1vb29vb3Byc3BwcnNxdnFvcHJzcnRzc3F0dXZ2d3ZzcXV2
-dHN0cnJxbW5wbm9tbmtramttamxpaWppZGVkY2dnZ2dpZmNiZGNjYmBhYWNmZWRk
-ZmVnZWNiZGVkZWJkX1xbUk1KSkhJTExTUlJSTk1KTU9NS0xRUlBST01PT1JSUlJT
-UVFUVFlYVFRVV1hZWFZZW1dZVldVVlZXWVpbXFlXVVNUVllYWl1bWldXWFxeXl5k
-ZmRhYV9iYWVmaWxqZ2FjZmRiYmZnamtoamhoa25ua2tqbG5ubWtoaW1ydXd3enh3
-eHl3eXx6d3d5fH2Agn9/fYGGgXx8fHx6dHN0cmtoZ2pub3R2eXt3eIF8eXd0dX+A
-fnt5enp/g4aEiIqNj4eNjoqOk5KTm6CeoKSemZqUjouLjZKTj42PkI+Ojo+MiYqI
-hoBtYl9gYV9ZWF1fXVtcXV1ZV1paW11cXF1bV1haWVhYWlpZV1dVVlpcWllXWVpa
-VlhZWlpaWlxdWVpaVlpaV1ZZXl5cW1xaXVxaXF5dYGBfW1tfYWFhX2BjYmJkZGFj
-ZWZtcHByeIKJjI6Qk5OUlJOQjoh/fHVvaWRdXF1cXFxeZGlpbXF1e32DjJCTkpWa
-np2fnJiXlpCQjY6NjImJioyPk5eWlJWXmJmam6GhoKGko6Shn5+foqCgnaCin6Ci
-np2cl5qblZKQj4uKiYyHiYqJhYWJi4iFgoSBfn5+d3d+f4GDi46Qj5GSkpaZmpyf
-qKaqqq6ytLK0tLa1tLa3tbS2uLq9vb25t7y+uri2t7Oxq6ikpKOkpaaop6mtsLGw
-sbK2t7S0tbe0tLOysrKwq6ano52Yl5CLg3x1c2lhYGJjZFtWVFZUVltdW1hVVVdS
-VVRWVldWVVhXXFlYV1VWVlVXVVNUWVhYVlpYV1dYVVRUV1NUU1RUVFJRUVJQT1BR
-U1VVVVNRTFFQU1RUVVZUUlJSVVZVVFZUU1JOVFRXVVZWU1JVVlVWW1NTV1dZWFdW
-WFRXWFdUU1VXVFFRVllaV1VWWlxZWlhZWVdXWFhYV1dXUlJTV1dXV1pcW1pcXV1b
-W1lZW1lYV1JWWVhcWFpaWFxbWlpXW1xaWlpZWVdXWltZWllXWFZWWVlcWlpZW1hb
-WlhZWllZW11fYVtbWlhZWVxeWllYWVpYWVxaWVtcWlpYWVxbWlpdXV5eYV5aXGBe
-X2JfYWJhYF9eX2BeX2BfXFpdXltdXV1dW1taXWBieLTJ0trf4+Xo6Orq7H5/f4CA
-fn16fn1+fHl3eXx5enp7fHt8fHl5ent6fXt7gHp7e31+fnt5eXl6e3x9f359enp7
-e3t6end7e3l7fXt8fXt5e31+fX9+gIF9fn18fXt6eXp7fICCfn5+fn18fn9+gH5/
-gXt7g397fX99gH16fXp/fXt6eXh7fX1+eHd4eHl2dXZ0cXJxb3Fzbm1vb3Byb3By
-b3Bwc3t1dXV1eXd4eHd5eHh7fn99gIGAe399f4KBgX58enl6fX5+fHp6eHd8e3p2
-dnV2dnVzdG9xb3Nubm1vbGxra2poa2pqa21wcWxvbGpoa2xpbW1vb3Btb3BwcG5u
-a3FwcW9tb3Bxc3V1dXd2dHd5eXh2dXZ1dHV0cXFwcW5ub2xrbm1ubGxsa2pnaGhp
-aGlqaWZlYmRlZmNkZGNiYmJkZWNiZWVkY2NjYGBhYmFkZWNkX1pbUFBLSEhMUFBS
-VVNQS0tPT01PUVBRUVVWUVBTUlFSVlZWV1FRUFVWWVhUVldZV1VTUlRSVFRVVlhb
-WlxcWlhYV1ZbXldYWlpZXFtbW1xiZGNmY2JlY2NkZWdnZ2dmZWVjYmBjaGllZGVp
-ZmpubWtnaGpsbG1ra2ZoanB0dHN2c3N0dXd7fXh9fH5+fH2Af317fX5+f316eHZ4
-c3dxbGdkZG5xdnB4e3x+g4B+f3p1eIB9fHt4d3h9goeIjI6QjpCSkZKYnJaboKGh
-o6GampSMiIeNkJKRkJCQjo2OjY+MioaEd2hfX2FbWltcXl5fXFxcW11bWFhWWV1b
-WVpWV1dZWFdbWFRWV1daWlpaWVVZXltaWFpZWFhcWllWVVdZV1VZXVtZV1RVWlha
-W1tdX15eXVxYWl1gXl5hYWNiYmNjZGVsa29zen6FipGTkZGUlJiTjomDgHtybGZk
-YmFcWFlaXGBjZW5wdHl+hIqPkIuSlpmdnZmUk5OSkI+NiYuIhomNjo+SlZaZmJeZ
-npydnJybnaChoJ6cnp+en56dnp6doZ+enZ2cmpWVko6PjIuMh4mLjYqIiIeFgYWF
-g35+fXx7e3t6fYKGh5CXl5iUlJeboaKipKisra6wr7G1uLa4s7G0trO3ur6+vru5
-u7q7ubaxrq+uq6SinqCho6enqaqtr7Kxs7SysLK3tbS5tbOyr6utq6ahnpuUkI2J
-gHVvamRfXFxeYV9YV1dTVldaW1hTVFdXWFVWVlVUVldUVFZYVFdaVVJUVlZWWVpa
-VVdUVlJTUlNTUlRTVlRSVVRTVVNRUU9QUVRSVFNeWVJVVVRUVVVSUVNVVFJTU1ZT
-UlFRT1FWV1VVVVNUVVVVWVdYV1paWVhXVFRVVlhTU1JUUlRTVVVXVVRUVlhWVVdb
-WVZWVldYV1hZWldWV1lZWVtcWVlaXFpdWldaWldYWFhaWlhaWlxbWVlbWllaWVpb
-WlhZWFlYWltYWFpZWVhYV1dZWVlcXV9eXFhUV1haW1pZWllcWlZXWltbXFlWV1lc
-XVtaXFteWl1ZW15dXVxdXFxcYWNeXVpbXl1cYF9gYV9cXlxfYWBfXV1eW1lbWl9g
-XV1gX2J6s8jT2uDk5ujp6evrfH5/fHl6fX19fn1/e3l4fHt6ent7fn19eHZ4enl8
-enl4enp5fHt4e3x+ent4eHt5e3x7fXx4ent6enx6eHp8e3p5en17fHp7fn19e3x7
-ent+fYCAfn1+fIl9f4F+e3p+gICAgX18gISCgn9/fX5/gH1/f35/fnt5fX17e3t6
-d3Z2d3Z0c3JxcHN0cm9ucHFxdHJxb3FvcHJyc3d6eXZ3d3h3dnh7fXt+gIB/fXx/
-f4ODgoKAf3t7fH19f35/e3x2eX15eXd0dnh3c3JycG5uc3Fub21vbm1wbGxpbGtu
-bW9xb29vbGpsa2ptbm1tbnBxbXBwb29vbnBwb29wc3FtdHd2d3d5eHh2eHh6dnh1
-c3V3dHJzb2pub3Jxb29tamlpaGlmaGhqamtnZWRmZmNlaGRjYmVlYmRmY2JhYmFg
-YWJhWl1fYmRjYmNjXlVQS0hJSkxMT09UU01OS09RUlFSVFVSUlNRT1FRUVFTVFJT
-UlFUU1VWVlJQU1ZZWFVSU1ZVWFpZXFpZXFtbVVhaWVlaVlZbWlhYW1tcX2FjZWVh
-YmFjZWdkX2JjZWVmZWhgXWBiZGJgZGdpbW5saWhrbm5ubGxra21ucHFwcHV1dHN3
-fXp8fICDgX98fnx+f4F7e3x6eHVvc3VycW9raGdvdXR2cXZ8fH+Fg4SEfHl3d3p3
-end2eX6EhoWJi4yPjZOSlJubl5mgoaChn5yfmIeEiIyNj5KPkJCQkY6KiIaFgnxy
-ZF1bXFleXlxeX15dXFpZXFtcWVlZWlpXV1VVWlpbWVlWVVhbWlhXVFZYV1tZWlhW
-V1lZW1pbXFxaXFtXVVVVVllcV1taXFtdXltdXl9dXV9eX2BfXWFkZWVgXl5laW50
-dn6DjJCSlZaXkpOXlZOPh313c25oYl9gWFVWW1pdYmVpbnV6foWKjo+PkZaXlJid
-mZaVk4+RjouGhomGiIqOj5CSlZeXl5edoZ6bnJ6doJ6goqOioKCfnZ+fnZubnJyc
-mp6clpWUlJSSkJCNjpCPjImIhomDg4WFiYN/gYJ/f318f4SGjJaYm56bmZ6ho5+c
-pauura2xsrK4uLe4tre5trm7u7u8vLe4vLy6tq+sr66qo6Win5+hpampqayvsrSy
-sLKysbO0tba2s7KwraurqKOempSPj46Ee3JtZ2BbWVxeX1pWV1lVVldWWVlUVFRV
-V1ZWVVVUVFZXVllUU1RWUlNXV1ZWWFZYVVVRU1JTUlJQUVJSU1JUVFhSVVVUUU5R
-UVJTVVVSVVNVV1ZWV1hWU1FSVFNTVVdUVFJUUVJUWVVTV1VXV1pXWVhXWFdUVVVV
-WFlWWFhbW1dYVVlVVFZYVlJVVVRWV1daWFdVVVdZV1ldW1hWWVhbW1tbWFdZW1tb
-W1laV1ldWV1dWVhbWlpbWllaWltbWVpZWlpZV1lZW15eXFhXW1lZWlpXWFlaWFpc
-W19ZW1pZV1taXl1cXVtYWl5gW1VaXVxaXFpaYGJcW1xeXl1gX11ZXFtdYF5dXl1c
-Xl1eXF5gX2FfYl1fYGBcXVxaWVdbXGBgXWBhYnmzyNPa3+Pl6Onp6+t9fXt8enp8
-fn58e3l3e3l4enl5en18eHd6e3p6fXp8enl7eXl6eHx9f39+fX98fn57en5+ent8
-fHp6fH17fHt7fH6Af318eHl7fH15fX17e32CfX5/fnp7fn5+fn19f4CBgH6BgH5/
-gYGAgoGAgH9+fHx+f3t8e317fH1/f357e3l4dnRxcnJ0cnN1dHRzc3R0c3Nvcm5w
-c3Bxc3V8enZ0d3h4dn17fn57fn98enyEg318fX+AgH5+fnt8gH59fHp5eHV1dXZ3
-eHV1dXFycW9ua21ta2xtbGxraGxrbG5wb3Ftbm5wbW1wcXBvb29xb29vcXFyc3Zw
-bXFwcHFzc3JvcXV2eHp3dXd3dnh2dXR0dXZycnJvbW1vb21vcG1qbWtta2lpa2lp
-Z2VkZ2loa2traGdjYmFiYl9iYmNlZmZhZmNnY2NmZWVlZWFcVlNMSUpKS05RVFBQ
-Tk5RUlRRVFVTU1RRUVFSVFdZVVJVVFRSWFdXV1pZVVNUV1ZYWVxbV1xYWllZWVla
-WVhaWVpbW1hXXFhYWlhbXF9jYGJhYWJfX2NlZ2JfYWJmZ2ViYmJjY2FhY2FiZ2tn
-aWdoaWhrb21pa2ttbm9zcHBvb3JydHl4dXd5e35/goKDgH+Be3d5e3VxcHJ0cG1q
-Z2FkaHN0cXFrc3p8fn5/gX6Bhnt6eXt9eX2ChYaGhIOGi4uMlJSTmpiUlpyhn56c
-nZ+ZiYKLjYuIiI6UkZCRj4uIhYGBenRmXlxbXF5dW11cXF5aWllXWltbW1taWVlW
-V1hYWVxbV1hYWFhYVldYWFtZUlJUV1pYWVpeXF9dWVdXWFpdW1dXW11cXFxdXVtd
-YF5fXmBgXltbW11dX15iZWVrZGpxc3d+hY2QlZWYmZuYmZeUj4uDe3NsaWVfWlZX
-V1RYXGNnbm5zdX2BhoqPj5KWlpeZm52bm5KOjYuMjIuJiYmIiomKi5OUl5aZnZub
-m5qamZqcnJucn52hnp+ho6KbnZycnJydm5mbmpiXl5eUkY+SkIyMjYuIiISIhIeI
-h4WFhYR/g4F/f4SKkZaYnJ6enZ+joaKkpqmqrbKzsrS2uLi5ubq4urq4ub68vL25
-uLm3sK2wrqioqaWjnp+jqaurrrCzt7aztLGurLGytLW2s7Gwr62pqaObmpaTkYp9
-cmxlYlxYXWFhX1lXVlVUVlldXVdUVFRUVVhYWFNVVVRWVlhWVFRUWFVSU1daVldW
-VVRSVVRRUVFQTlFQUVJUUFBQUVNSU1VSVVZXUlFQU1dVVVlVVFZST1FSU1NTUVRV
-U1JTVlVZVVVWWFdWWFhXVlRVU1VWV1VSVFVWV1hZW1hZVldXVVhVV1ZSVFVYV1dY
-V1hcW1paW1peWVlWVVlXWFpXVldZWFpcV1ZXWV5bWlpYW1lcW1hWWVxaWltcWF5d
-WllZW1xaWl1eW1ZYWVhYWFhYWFVYW1lbYVtZWlhaWltcXFxbW1paWltfXlxcW11Y
-WF1eX19fXl9gXlxfXlxcXlxfYGVkXlxeXmBcXFxZW2BeXl1fX2FgXFdaXlxYXVpc
-Xl5ke7PJ09vg4+bo6err7H19fnt9fnt7fXt4eHd5eXh6eXZ1ent3eHl5eHh6fXl5
-end6end3e3t/fn+Af358fHd5eHd7fnt7eXl3e3qAgH19fn5+fHp6e3d8enl7enx6
-f3x7ent6eHl5foB/foB+gICAf4F+f4CBf4B7fYB7eXx6fHx9enx8end7fH57f3+A
-fnx3dnR0dHR0dHF1d3R0dHRzb3BzdHBvcHNzdXd3dnl6eHp6fX17eXt9enl5en9/
-enh5enx8fH56enp7fXx5fHp2enZ2d3d2dHV2dHJzb25sb21qbG1ubWtqbGtqampr
-a21rbG5tbnBxcXNyc3JwbHFzdXJzdnJycnJwcXNydHBwcnN1dXR2dHR0dXhzdnVy
-cnNxcXFycG9tbnFub3FucHBra2ppa2ZkZWZoaWloZmhnZGJmZGFeX2BgYWNnZmNj
-Z2dkZ2ViZWZkYVtWUUlJSUpMTk9PTk1LTE5NUVFQUlRRTk9SVE5RVVNSV1NTU1NU
-VVRTVlpXVVZZW1pcWlpaW1xZVldaW1xaVlhcXF1cW1taWVtaWFxdYWFeX2NgYWFg
-YGNjYmBeYmBkYl9dX2dqY2BfXl9iaWpub25ubWptcnNtbG5wcG5sbm9yb3BxdHZ4
-e3p6gISDhIF+fX58fXV0dnJxcG9uaWdkYV5pcHRwc3J1d3l5enl7hIaDd3l3goF+
-gIWDg4eFhIeLjo2MkJafmpWXnJ6go6GeoJ+ajYuNioSFipCPkZSRjIiEg4B8c2pf
-XFpcWVlaW1taWllaWVpaWltZWVlZWFlXV1hdW11dXVxcW1pYV1lYW1xaWVVTWVpa
-X11bWVpaW1tZXF5gXVpaXVtZW1tcW15eXVteXl9gZmNhY2FiaGhna2xvcXB4g4eL
-jY+Sl5udnJuYlZCKhHhxaWNgXVdVVlhYWF9eZWpvcnZ8gYaKjpSTlZSUlJOUlpST
-j4yKh4iIioqIh4eIjYqPkZWWmJaVk5eampqcnJudnJ2emZian5+goJ6bnZ2bm5qZ
-lpqcmJmZl5iUk5CNjpCNjI+OjIuKh4mHh4WEhIOAfHp9gYeMkZiZmZ+jpKamo6Wn
-q66trbC0s7a7vLu8vb+7uLm4t7i5ubm2uLm5trGvqqmkoZ+en6Okqa60ubq2ubi3
-t7aysKyzsrS2sq+srK+tqKCcm5mVi4F2cWtoY15cYGFgWVdVUlJVV1teWVJSVlZX
-VVhZVlVVV1ZYWFVUUVVVV1dYV1ZUVVdUVlRUVVlUU1NTVVNUUVFSVVJSUVJTVlRX
-VFNVVFRRVlRWVlVUVVZSUVRSUVZYVVVYVVVXU1NXWFNWVlpWVlRVWFZYWVdWVVRR
-VVtZVlVYWlpaWVhWV1dWVlZVVFVVVVhXWFdWV1hYVVpbW1xZVldcW1dXWFVWV1pa
-WltZWllYV1ZaWlhYV1ZWWFdaWl1aXV5bWldaW1pYWFdaXF5eXVtaWVpYXFpdXVtY
-WFhaWlxcXFxcV1dcWFdbWVtcW1lbW11bWVdcXl5bXFtbW1tdY15cXWJgXl9gX11g
-YmBgXF1dXFxdY15eXFteX19dW1xZXGBgXmF/tcnT3eHk5ujp6errfHl4fH1/fHp5
-eHp6enh4d3V1dHR3eHp3e3l7eX18fX99eHp9eXx7f36Bfn18gH58fnx7eXp5fHh6
-eXl6f3x8e319fH59en18fH19fnl7fIB+f4J9eHh6enl7f4GDgIGAhIKAfn59fX19
-gH1/f4J/fn97eXx9ent7eHt8eXd6fn97e318eXV1dHRxc3NycXJxcXV1c3JzcXBx
-cXZ2d3ZzeXt5eXt8fnt9fXl6eXd8enl5fXx+foB/foF+fHx8fHl4fHl6e3Z1dXR0
-dHNycm5wb21tbnBta21vb29wcG5tbW1ub3FubW9wcXFxcXBvb25sbW1vcHFycXFy
-cnJxdHZycnJyc3RxdHN0c3h1dXR1dHV1c3Z1dHFvbW5zcW9vbmxsa29vbWhoZ2tm
-ZmZnZ2tqaGdnZ2VkYmBeYWJfYWFiZWJlZ2lmY2RhYWJiZlRMSkdJTE1PT05QSk1J
-SkpNUVBQUFJTTlBRT09QT1BSUVFRU1ZXWldSVFZXVlRWWVpYV1pcWldYWlxbV1lZ
-W1xcWVpcWlpXV1lYWllaXl5hYGJnZ2ZhX15gYWNeXmFeXl9mZmVhXmBgYWRkaWlv
-bW1ubnBucG9wbGppaGlqbmxvb3FxdHp6eHd6fn1+gH99e3l3dXNubXByc2tkYmFg
-XWZraGh2eXJydXp8gH96eoF5c3V6f3+Bg359gYOFg4uNjpCRlZmbm5acn5+foJ6g
-nZuYk42Nh4WGi4yQkZKMiIaAgYF4ZmBiXlxbWlZZWVlYV1hbXl1bWVpcWlhbWVlb
-WFxZW1tbWVtZWVlYWVlWWVpZWllXWlxaWlhbXGFcW15fXVxcXFtcXVxZWFtdW19h
-Xl1cXGBjZGRjZGVnam5ucnN5hIaMkpKTlpienZqfnZmRi4V9cmhhX1pXVVJOUFZb
-YmVpb3N0eH2BiY6Qjo+RkZGQkZKRko2JiIiKiIiHhYSFiYuMjY+SlZWUlZSSlZmW
-mZqZmZubnJuYmZybnaGgn6GdnJydmZubmZqempmWlpGQjYmPjpGQkZCQkZCOi4iG
-hoiEg4OGfXx/goqNlpSUnJ+gpamop6qurq+0srK0tre4ubm7vLy4uLizsrG2t7e2
-t7e0srCrpqShoqGjoqSmrK+ytri3urq4urazsLCura6ura6rrq2spaKem5WRjHx0
-bGlnYmNgYmJhW1dVV1dWV1laWVdYVldZW1laV1RTVlZWVVVUVVhWWVpXVldWVlZU
-V1dWWFZVU1RWV1VVVFJSVFRSVFdYVFRTU1VVUlRTUlVUVVJPVVJVVFVXWFdWV1NV
-VFVVVVZVWFVUVlRYVFVZVlVXVVdXWFtYW1lcWlhVVFhXV1ZWWVhUVVZWVlRWVVdX
-WFlbXFdXVldXXVpYVVZWWVpZWFhcWVlaW1pYX1taWVhWU1dWVFZYVltaWllbXFpY
-WFhYW1lYWl5fXlxbXVtWV1pZVlVZXF1bWVhaWllaWlpYWllbXFtcWltaWVlZW1hU
-VFVZW1xbW11cW1pbWVtaW1lgYl5gX15fYVxcW11dXVxfXV5bWVxeXlxZV1laWl1f
-YH23ytTc4OPl6Onp6+p3eHh3enl4enh3e358eXp6enl6d3h5fX1/eXZ6fHp9e3t5
-ent8eXx8fXx8fH2Bf3x7fX98fHt4eXx7fnp3eX13d3t9f317en19fX57foB9f3t+
-f35/f4B/gX6DgX9+gYKCf4CCgX5+fX17foKBfn1+fX1+e3p7eXt9fX19f3x9fHd5
-f3p3d3R0dnZ2c3Fub29yc3JzdHNycnV0dHV0dXp4enh9fn17foCAfH5/fX19fX2A
-fn2Afn57fX9/e3l6e3p7e3l4dnNzc3RycXNzcXBsbW5vb3Bra2xucG9wb21wcHFu
-bm9vb29sbG5ta2tsbnBwb2tsbW1wcnBucXNxc3V2dHR0d3d0c3N0d3l1cnh4e3d1
-dHJxcXJxcXByc3BubmptcXBtbWxraGppZmdnZ2dkZGVmamVjYmJiZWNhZWNjY2Nj
-ZWNiX2BiYFtXUk9KSEdHSk1MT05PTE1PSkxOTk5RUVVVUFFPT1JRUVFSV1RWXFxZ
-VVVTVFlZVlVWWVVWWFlYWFlcXFtaW19cXVdVWFpaWVdZXFlYWFpcWl9jYV5fX19d
-YF5eX1lcW1thYF9kYmFeYGBjZ2hpaWlrbGtraGpoaWxrZ2NjZ2hta25vcHFycXV2
-eXl8fH16e3d6d3Z2dW5wc3l0bGFcXFpaZWppbHR2cm91eXt7enB1en15eHR1eX9/
-fn2AhIWGio+OkJWWm56gn6GinZyZmZyfn5qYkpGOiYeJjpCQjIaEg4SAfXdnXVtd
-WlpcXFhZWV1bW1laWlhbXFpZWFZaW1haWFpaWVdXWVdYWVdZV1laXFtgYFtdXV5Z
-WlpcV1laXFlaXFxeW1hcXV5bWllbX19cXF5fXl9iYmNlZGhpbHB0dnuCho6SkpKT
-mJqYmJ6dm4+FfHRsZV9bWVVTT1NWWFpjbHFwc3d/gYWMjo+Nio2PkpOWjYyJjIiE
-hYeHhIeIh4qOjZCOjpGRkZiSlZiZl5STlZiam5mYnJ2cm52jo6Kkop6dnpudm56c
-m5+dnqGYk5KRlJSTkJKZk5KTk4+Pjo2LhouGg4N/f32Ch4iOlp2YmpqfoqqsrK2t
-sbSxs7S1t7u4t7u8vr26trWytLi5u765trWysK+rpqChoKKioqSprK+1uby8ure4
-t7S2sqypqamrrq2rq6mppJ6bmJaQhnpwampnY15eYmNeWFVTVFdWWFhaWFpcV1dY
-WVZVV1ZUVlhZVlJUVVpXV1tXV1VSVFRYWVdYV1RQUlBUVFVTVVVVU1JSU1NRUFJU
-UlZVVVRUU1FTVFVSVFRTVFZYVlJSVlZVVVVUVVdWVFVWVldXV1hYWFZUVlhWVVRW
-V1hZWVhWVlhXVlZXVlhXV1VSVlZXWFZXXFlYVVdZW1dYWFpZWVxaWFlaWltaWFlY
-WVhXWVhZW1dXVFZYWVlaWlpaWlhYWllbWFdZXFtcXF5bWFdZXVpYWlZZW1peXFta
-VVpaWlpdW1pdWFhaW11cWllXWFlaXFtZWVdcXV1bXVtYV1dXWVxbWlxeYGBfYGBi
-YGNhX2BfW1xbXV9iXVxbWlxdXlxcX15jgbnL1dzg5Obo6err6nl4ent5eXh6eXx6
-e358fHl9fH18enl8fXx6e3h6e3h7eXt8enl4e3h2eHl8fX+BgX1+e3t8fHx6enx6
-fH58fnx7en5+foCBfYB/gX56e4F9fX57en2CgoGBgoKDgoB/fXx/fn1/gX5/gIJ+
-fH5/gYCAhH+AfHp8fHx/fnx9fH18eHd4e3t8enZ0eHVvcnBtb3BtcHFwcnBxcW9x
-c3J0dXV6e3p5en1+fn9/fn17e3t8e3x9e3x7fHp5fX5+fHh4eXx8e3Z4eHNzcXFw
-dHJxcnJvb3BwbW1ubW1tbXBta25vcnFtbm9ub25ucHBwcG5wcHBxc3BucHJucnBz
-cnJxcnJ3d3d1dXRycXJ3dXp6c3V4dnZ0cHB0cHBwb3FzcG5xcm9saWtsa2xra2ho
-aGhoaGRgXmBhZGVhZmRiY2JjZmRgYWNhY2RgYWNfWlZTT0tJS0lLSUxPUVFPTExM
-TE9NS09PUVJRUlNRVVFSU1VTWFlaWFhTVlVUVVdUVlhUU1VaVlRZXFhUWlpZXFxY
-UlNSVFZWVltcXF9dXVxaWlteW1teX2JeXFxXWVpdXF1dXlxcX2BjZWVlY2RmZ2Zp
-Z2hoZWRmZmlraGprbG1vcHBxbnBvc3d0d3h5eHV4enh0cHBxcnR3dXJrX1dbV1hf
-ZmptdHd3cHBzd3p2dXZ4fX90cXNwc3l5e36DhIaJiYqPk5ecoqWgoJ+gmJWZnpyd
-nJeUkYyJi46Oj42KhoGCg4B9bmVfYV9eYFtaW1taXFxcWllYV1hZWFhXV1daV1ZY
-WlhaXVtaWVdWWVlcXltaXVxdWllZXF5dW1pbWVpYV1hYWltbWllZV1haW1pdXV5d
-XmFiY2NhYGZoaW5wcHN5f4WGjpGUlJubmZWLjI2SioBza2hhW1lWUU5PUlddY2hu
-d3V2fH+FiIuPj4+KiYyPkY+PjIyJhYSCgoKChYmJjY6MiY6NiY+SkpWVl5uampea
-mpydmJmdoaCenqCgn6GknpudnJqZm5ubmpmZmZeXlZSRkZSSj5CQkJGSkI+NjoqL
-ioeFhIF/f36CiYyTl5ubm52eoKWrrbGwsK+ysrW4ubi6vry7uLq2u7m0s7S5trOy
-tbavr6yno6Kgn5+go6Strra3u7u6uLm4tbGtraupqamqqKqnp6eloJ6XkZKMhHdr
-ZmZkYV1eX15fWVNTVVZWWl5aV1VVVlZZWFZYVlVVVlNXV1VVVVdXV1dXVVZWVVRU
-U1VUVltWUFBQT1NaU1VVUlNTUVFSUVFSV1ZTVFFTVFRTVVRYVlVTUlJRVlNRUVVU
-VFJTVVRZVlVTV1hWVFRZV1ZWVVVUVVJVV1lUVFVXVldVVVVYV1lWWVhXVldTUlVX
-WFxcWlxiV1pbWFhaWlhXV1lYWFpYWldWWFpeWlpXV1dYV1ZXWVpZWVhYVVlaW1tZ
-WFhaXFtYWFlfWFlbWlhWWFZWVVlZXl1cXFpbXFpaV1hYV1dZWVpZWFpaV1dbXV1e
-WllXW1tbW1tZXFxYWlpfXVpcYV9fYV5gY2JfYGBfXVtbW1JcXFteX11bXV1dW2GH
-uc3T3ODj5ejo6erqf4CBfHx8dnl8en5+gH58e3t6en19e3p4e356eHl7eHZ5fHp5
-enl5d3h3eHt8fH1/gXt8gX6Cfn2Afn1/fnx6fH1+fH5+fn1/fnl7f4F/f36AfX1+
-fn+DfXd+fnyBgX5+fX1/gYCAgoF+fHx8fn19foB/fnx9e3x7enx/f3x9fHp7eHh2
-fHd4dnZzc25ub29sam1ub21wcG5wc3N0cnNzdXd2d3h+fXx6f359fn99e319fHt6
-fHp7gH17e3p9eHl7e3l8end2dHBwcXFxb3BwcWxtbm9wcGpqbWtrb29vb3Jwc3Jv
-cHFwcXBwc3Jub25vdXF0c3JvcHFydHN0cnBxcnl7d3V3eHZ1dHV2d3d0dXR0dHJ0
-dHV2dnF0dXNub3JxbWpramtpampqaGdpZ2hqZ2VgYWVkZGdnZmRjYmNkZGNjZGZi
-ZmZlYmBaU1FQSUVJSk1QT1NRUlFKSktKT0xNUE9PUE9WUVFRUk9PUFVTVVdYV1NS
-U1JWVFNYV1hYVFdWVlpaWV5bWl1bW1dVVFhWV15dWFpdXWBfXVpdXVxaWFtdXVtc
-W1teXl1fZGJfW1xbXV5hZmNiZWhoZmVhYmNiY2RnZ2VpbG1ta29ucXBucHR1dnNz
-cnJyc3N3dnJycXV4enx8cGpfVlRTV19iYmpvcXFubW51e3l4dXZ5enh5dXR2dXiB
-fn6Eho2Nio2RlJ2hpKKmpqSioaCdm5ublpOOiIGCh4uMi4iEhIaGgX14Z2FfXl1d
-VlZVV1lcXVlaWFVbWVlZV1RVV1lZWVdZV1ZYWVhXU1leXlxbV1hcX1xcW1taW1lY
-enh8fXt4eHl7eHZ5e3t8d3p5eHR1dXdydnR0dHF0cnBvbWxubm5wa2xqb2xsa2ps
-cGtnamhlaGhqaWhqampqbGlpaWtqaGptcGtqbGxsbGtsaWlsaWpoamlpamtpaGds
-aWdnZmdnaGhoZmhraGloaWdqaGpqZ2lpZmVoaWhnamllZGNbVU5GPz0/QkA8PEZP
-Xmx4gIODg4WCgH96dW1oXVFJRD0/QEVMSkxRTkpLSkxLTE5OSkRFRkJHSEZDRUZG
-Q0hMSUlKTU5PT01OT0hISk5SUEtMTExNTEtRUU9PU1RUVVVVWlhaWVtZV1dYWVpW
-U1lXVFNXWFVXWGBkYmJna25yc3l7gYZ9enZ0amViZF9bY2piXV9rbmpqbHNzf4iN
-f21weXRyb3F0dXN5bGRlaGhpYGNqbnVzZV5iamhlbnpxY1xdYVVOVFZWXmdqZ1tb
-XFtXXGJcUU1MT1NWVUxTZmVkbXhsXVtiZGJkYGBeZ3F1a2hqa25vdn96fX+FiYuM
-iX50a19bYVxhYV5ZV1haWFpZWltZVlhUVFdWWVZVWFdUU1VWV1dZV1paW1pWWFpZ
-Wl1eXltaWVdYWFpXWVhaXFpaWVhYWVdZXFtaWllaXF9iYF9eYmRmZmdmaG5vcHJ1
-cW1oZmZoaWVnaW1vcnZ6foCBgoKDg4J+gIB9enNxcnJybmtqbm1rbW5ucHBwcXBw
-bXN1d3l7fXp3eHd7ent9fHuAf4CBgoF/g397e36Af4OEhISFiYeIh4mLjIiMjY2M
-jIyMkpKVl5iamJiWlZeZmpycnJeWmJqbmpaVmJycmpyfoqKhp6eqq6qpqKepqqqn
-pqOko6Genp6doKGioJ+goZ2anZ6io6SioaSlpqKioZ+gmJWPkImGhoKFg4WKj5CS
-lZSYmJiXlpONiYaCfHh5enp7fYmQk5yipaanqaenq6+sqKmpqayurKeinZqYj4WC
-enVzd3x6eHRzaWViX1hUVFZWVFNUVFJVWFdWWFlbV1ZYVFVWVVVXV1pZVlVWWVld
-WVhcWldYV1pXWFpVUlNSUlRWVFNVU1dWVVVWVlRUVVZXVFNUUVRST09SVFNXVE9S
-VFJUV1dUU1ZUVVRWVVZWV1hXWVhZXVdXV1pZV1hYVldUV1taWFdWV1daV1taWVpZ
-W1xaWFtbWlpcWFpbW1pbXVpWW1taXF1eX1xfXltfXVtcW1xcW1xcXF5dXVxeXVte
-X11dW1tdX1xdXl1bWltcV1ZZWV1fXV5eXFtaWFtbXF9fXVxcX19fX19gX11fXV1h
-YV1dX15gY2JiYmJfXltcW1xcW1pXWFpaW1paW15hf7XK09vg4uXn6erq63l5d3V1
-d3d5enx6d3p5dnZ5dHJ0eHd4d3Z5d3Z1eHh7enp2enl8e3V4e319e3h3enx5eHp5
-eHx9fX19e3p8f3x5eXh6eHt7e3l4eXl7en1+fXp5eXh8enh4enl7enp7fX55fHt8
-fnd3ent5d3l4eXl3eXp4d3p5enh3eXp6eXd2dXZ3dnR1dHR3cm9tbGhra2xubWtp
-amxta2prbm1wb3FycnJxdXZ2d3l9en5+e3h4dHh2d3V2end4eXh2dXFydHV3dnl1
-c3R3d3Fxcm9ub3Jta2tvbm1samloamlramtmZWVmamxrZ2psamhoa2lmaGttaWho
-aWxsaWlta2dmaGxpZWRkZGZmZWZoaWdhYmdoZmdrbmxoZmdnaGZoZWVnZmdmZmtr
-aWZmZ2dkZWZlY1lRS0dEQD5AP0A9QE1db3qDhISHhYeHh4R/eGhiWk5HRURERkhH
-R0pMSEdKR01MTU1JSElGRkdJRkZHRkFGSUpOSEhLTk1NU1FPTU9KTE9NS0pMTk1R
-T1NRUlBQUVJUVFRVWFdZWVlaWFlXWFZUWlZRVldXVFZZW2FiYGZobG5zeIKDgn53
-dHJsZ2RmZWVrcGdibHBuZm95dHqDhoNuaXN0aml1cm9qdXdnZmpiaGhpaGhscW1p
-Xl5pZWZzcmxkXlhZVlNUVFppbWFXUFZZXl5fXlNOUVBKVVlTUFNgZmRscWpcUmFk
-Yl1WWmBocntwZ2pnZWtzenp6goSIiYiNhXx4a19cXF5fYV1bWlRXWFpaWlpXV1hW
-VlVWW1xXWl5bWVdXWldXVVdYV1lYW1tcW1taXFpbWFpbW1hbW1pXWFpZWllZWllY
-WVpdWVlaWl1cWmJlZWZhXl9la29vcHJxbWpmX2NmZ2Vnam1yeXqAf4GDhIJ9fn19
-f315eXVwb3BwbnBvamhqbG5ub3BzcHFyd3l4eXh5eHh4e3t7fXp7fX5+goGAf359
-fnV6e32Bg4ODgoOHiYeLio2LiYiJioyNj4+RkpSYmpmXlJWTk5OVm5yYl5ebnpqX
-lpqbmZqbnJqfn5+npqeppqmnqKyrp6aopaOioZ6goqGgnqCfoKChnZmdpJ6foaGk
-pqSjp6ShoZ+bmZeRi4mHhYWBf3+ChoqOjpKYmZqZlI+LhoaBfnl2eH2Dh4uRl5uk
-o6ClqKqrsq+tp6ioq62sqqCem5aUjoV+dnl6fn57d3JvZ19cWlhTU1VWVldXV1VY
-WVhVV1pZW1ZXUlNZV1VUVldXVldUV1dYV1dYVlZWVVVUVVZVUlNUVFRUV1ZUU1NW
-WFZXWFZYWlZUVlRWU1VXWFNRUlBNT1JTVFNVWFZWV1ZTVVRUVlVUVlZXVldYVFVZ
-V1dXWFZSVFZXWVhXVldWVldVWVlbWVlZW1xcW1lcWlxbWVtaWllaXF5dXVtaWlpY
-WlpcWV9eW1tbXFxcXF9cW1xbXF9dXV9fXlxcXV5cXF9fXlxbWVpbWFhbWltaXF9e
-XV5cXV1cXV5fX15eYGFjYV9hYF9fX19gX2BhYmFfX2FhX2JeW1xeXltbXFlXWVla
-W1laXGSAtsrT3ODj5ujq6evrc3Z2d3Z3enh5eHZ1c3V0c3N0d3h5eXh2dXVzdHl5
-d3Z5d3V5dnV3dXl4dnd7eXd5e3t7eXp4d3h3eXZ6eXx9fHd3eXd5ent6enp3dnl5
-eHZ2dHZ6dnh6d3d5ent8enl5dnV6eHl3eHp7eXp3dHl5eXt9eHd2d3h3dXR0ent4
-dXV1dHRzc3N1cnFvbnBvcGtoaW1va2hpaGxpaWltb3JzdHBub3R1dHJzc3d3eXd3
-enh5eXl5eHZ0dXZ3eHhzdXN1dHR3d3Jxc3NzcG9vb3BvbnBubW5sbWlramtpZmdp
-ampoYmhpamlrampqaGhpaGVlZmhmZWhlZGhnZWlpaGlnZ2hrZ2RnZmRkZ2RiZGVm
-ZGRmaGlra2hnZGNmZWNjZWJjZWdnamhoZGdnZ2ZnZWVgW1NMR0JBQj5CQTxCTV9v
-fYOFh4mMjIuMiISBenBnXU9IQ0FDREdKSkpKSUpLSk1KSUtJSUhHREZKRUNCR0RF
-R01JSEZHTUtOUlVUTktKTEtJSk5OUlBRU1NST1JNUFJVVFNUV1peW1tbVVNVVlNY
-WVNUVlRVVFdZXF9lZ2lmbXB0eX19e3dyaWdnZGBfYmtzc3N2c3BxcnN2fXp5dWpz
-cm5haG9ua2x1cW50bmVlZmpsb3NmZmBUYWNfaHBoYl5aVVZXVldWYm1qX1dWW1Rf
-ZF9WT0tXVEtTXFlPUFpjZG1vZ2FVXF9gXVhgaGduem5lYmRgZWx0c3J1foeEhoiI
-h4J0Z15bW1tgYFtbWFdXWFlZWFZWVllVVlhZV1hbYFxbWlhUVFhTVVZXV1dYWFtZ
-WVtYV1daW1xeXlhdXFdZWltbWFdaW1daW1tcW1pYW1xdYGFjYmFiYmJqbnBubWts
-amdnYmJkYWNnbnV7fX2AgYGCgoCAf35/fXp4dnBvbGprbm1sbW9vbmxtbXBxcHd0
-dHZ5e3t4eHd4e3t6e3l5ent9fX9+fn17fHt9fH+Cg4GDh4aEh4qKiYeHjYuMj42N
-k5STkpSZl5eXlJWVkpOXmZqXlpiZmpmYmpqanJ6goKCho6CgoaWlqaypq6ypp6im
-paSko5+hn6GempqcoJ6gnp2enJubn6ChpaWmpKOkoZyamJOPiYSHiIqJhYWEhIaK
-h4uNkJKSjo2IhoJ6e3Z5en+Cho+VnZ+eoKKpq6uoq62pqKiuq6ikm52dmZSOg4R6
-e32Af3x6c2xjY15dWVdTVVRQUlRZW1VWVVVUV1dTU1NTVVdWVlVVVlVXVlFUVlRV
-VFRXVVRVVVZXVVNVVVNTVFZVU1NRVVNUVlZYV1VVVlVRUFJVVlNUUlJUUFJUUFRV
-UEpKU1lWVlVWVVVUVlZVVldXV1hZVldYWldYWVZWVVdYWVtXWFZXWVtZWlxaW1lZ
-WV1eWVlaWVlbW1tbXFlbXF5gXVlaWllZWltZWF1cWllZW11fX15ZWltdW1xbWVtb
-Wl1eXWBdXFtdXV1bWmJhXlxbW1tZXF5fX11bXFlbW1pdX2JgX19eYGNhX19gYWBh
-X19fXmBhYGBgX19fXFtdXl1aX19dXVtaW1xeZYm3ydTc4eTl5+np7Ot0dXV1dHR0
-dHZ5e3h1c3R3dXV2d3Z4eHh2dnl4dHd6end3dHV0d3d3dHNydXV2eXh3ent5dXh6
-e3d0dHZ3eXl4d3h3dnp5eHd8eXh5enp2dXZ3enl6eH15enh5eHl5e3l4fHl6dnh6
-enx+fXp6eXp8dHh4dnZ3d3Nydnd8eHZycnl3dXVzb3Bwc25vb25va2pra2xsbGln
-aGtqbGtqbW1wcnFub3N3c3R0c3R2dnV5enx5d3h5fXx6e3d4eHl4dXV1enZxcnN0
-dnJ0c29tbm5ubGtpamxpaGhsa2ZoaWttamZoaGdmZWVmZmdoZGVmaWVnZWZnZWZo
-Y2dnY2JjZmZnZ2lkZGBfY2BfY2NkZmVnZ2VnZGJjY2dkZWJkZWRmZmRkZGZjY2Vk
-ZGdmZWVmZV9ZT0hBPT5BPTw6PEJOWmt6g4KIi42PkYyKiYeBd3JnWU1KRUJER0tO
-TElIR0pKS0hHTEpIRkVCQ0RJQ0dGRERLTUxJRkZJT1NXVlJPS0tPR0lOUU1MTlFU
-U09LTlNTU1dVVFZYWFtcWVpUVFJTU1lbV1RSVVJRU1tcW2FiZWhqbXJ3eH57d3Fr
-ZF9eWFhaYGt2dHJua2xsb3VybmxxcWdeaGlxb2RjZm9sb3NsaGRobW92cmNkU1Rj
-aWhpZVtTVF9gXF1aV1hnbGNjYmBcWmFkYldMSFNXS1BWVlhVV1phbGlmY1pbXmJg
-YmdlaG90aWJfY2FmZ3Fxamp1en2AgoB/h4JxaWZjYmhpYltXVlVXWFZXVlVXU1VZ
-WVhYWFdYWFdZWFVSVFdXV1pbWlhaV1xcWlpaV1pYXVxcWFVYVVZWWFpbWFlXWFpb
-XFtcW1dYW1tZWV1fYmNiZ2hpa29samZkZ2ViXWBfYmdrcnt8gH+Agn+Af3l3e3x5
-dnd1dG9oamxuamlsa2tqaGtvb29vcnN1dXl8fHh4eXp8enh5eXt9fn19f319fnp7
-e3x6e4CChIaFhYSHiYmLiYqJio6Pk5aSkJWWlZeXlZiYl5mZlZWZl5aWl5mgnJmX
-l5qcnqGenp+dn6ChoqWoqqqrrqupq6mgnqOioZ+goJ2al5eZm52bmpqenaCcoKGk
-o5+eoaOmoqCdlpKSjYmLjJCQj4yGhYaDfoOIjI2JhYeKhYF7eXh3eH6Dh5CWn56f
-oKanqqupp6qrrKqnpKCdoJqZlo2DfHp1eXx6fHh0bmllX1xfXVpXVFdSVFJSU1RS
-UlFTWFRTVFdWVldTVVVTVlZWVlNVWFZVVlZWWVVUVVRWWVVXU1FTVVdVU1NSVVVU
-VVVSVlZVVldUUlFSUlNUUlFSVVFWVlNINDlHUlJUVVFUWFdXVVZXVVRUVldZWVda
-WVZVWFlWWFtcWVlaV1dbXmNbWFlWWFdbWlxfXFpYW1xaWVtcW1pcW1tcW1xZXl1a
-WFlaWFlaWltaWl1dXlpZXV5eXFtaWlxcX1pdW1xeXVpfYFpeYWBgXlpaXlxcXlxe
-XV1bXF9bXV1fXl5cXVpdYWNhYF5hYF9fYF9fXV9hYF9gYmFhXV5dXl1eW1lXV1lY
-Wl1kh7nL1Nzh5Obo6enr63d3dHJzdnd0d3h2c3Nzc3h3c3NzdXR5d3Z4eHl1dHd4
-dXR4eXl6dXV3d3d1dHZ8enh1eXp3dnZ4dnd0dHN3eXh3eXl6e3lzeHl5d3d3dnl3
-eHh3dnp3d3Z1dnh5dnl9fHh5enp4dXd4eXh6e3l2dnd1d3N0d3h1dXV5eXZ1dHR0
-dHh0cnBvbm9vbWxrb2lrbW1saWhna2xpaWpmaWlscHBwcXBvcXN1c3Jydnh2d3V1
-dnd6eG9wd3h2eXZ0dHN1dHRzcnFycm9wcXBvbmpqampnaWlqamxraGloaGpramhm
-ZmZjZGRkYmRlZWVlY2NoZ2dmaGlmaGZlZmdmYmJjZWdjY2RiYWNfYV5gYmJlaGVk
-ZGVkaGZkZWZkYWNkZ2dlZWJjY2dkZ2VkY2NiZGViWFdNQkA9PD1CPT09Q09gb3yG
-iYmMjoyMjY+OjIiEeG5mVk1HREdESEdHSkpKRktLS05JSUlHRUJDQ0dHREZCQ0dK
-SktHRk5PUVJRT01JSE5QTkpNTkxOTVFMS09NUVJVV1VVVlpWV1lXVFVTU1dWWlpU
-VFdWUVVUV1ZZXmBnbGprbXN2e4F/dGxjXllUVVddcHhvYl9nZmducG1ycGxlWl9l
-am5kWVljZ2JgZGRlYGhqa3BsZGNaWWVsamNYV1JXYWNnY1tVWmdrYmB2a2BXXGdj
-WU5JUFVOTVNYWVRVWGReZGpjW19YVVxrcGJfaG1mX15jYWZncWthanBzcniFhHl+
-e3FpZmRjZ2VjXFlXV1taWlhXW11cVldYVldbVlNWV1RWWV1UVlhZW1tcW1paXFtc
-XFtdXl5bWFdZVVZXVlZYVllcV1tYWVlYV1hbXVxaXGBiYGBhYmNkZmxubG1oZGFj
-YmBeXF9iaW1zeH1+fn18fHp4eXh5fHp1dHJubGpqa2lqbm1rbGtnanR0cHFzdXd0
-dnp9fX14eHh4eHh4en59fn1+fn58eXt5eXt+gYKCgoKEhoiKiYqMjouLi4+RlJaS
-k5aUl5aVl5SWlpianJmWkZaVlZqZmpaYmZubnpydn5udoaWoqammp6epq6iqp6Oi
-oaGcmZqdoZyZmJiWmJiWmZqZnJ6gn56ZmJ2go6SenZqWjo6OjIyPkpiSjYqIh4SA
-f4GDhIB8fn+FgX99e3d4e4GFiJKcnpyeoqaoqauqqayvr6yoopyjmZSPi4d/c3J1
-eXp6eHVwaWReWlpcWFhXWllVVFJTU1VTU1RWWFhXVVVTU1dWVldXVlRUVFFUU1JS
-VFVXWFRUVVZXV1VTVlNTV1ZWUVJVVFVWVlZWVVZTU1ZVVlZVVFJSU1ZWVFNTVEpC
-TFRWU1VUVFRSU1VYWFRSU1NVVlVXV1VWVlZaWlZWVFZaWVtdWVlaXltbV1dWVltc
-WVlbW1pYWFxaXF1cWltbWFdaWltYVlZZW1pZWVxXWFtaW1hdXlpYWVtbXFxeXl1e
-XFxdXl5fXl1eX19fYGBdXFtcW1paWVxdXltcYVxdWltcXl1eW11fYWFfXVtcXlxe
-Xl9iX19dYmFeXl1dXFtdXVtcXFtaWFlZXWaCucvW3eHk5ujp6uvreXR2dHNzdHVy
-dXVycHF1dXR0cnJxcHNyc3V1eHVzcXV2dXV2eXh5eHd2fHZ2d3V1dHV1dnhzc3N2
-d3d0cnFzdnZ0dnR1dnd5enl4d3h3dnZ4fXp5eHh1dXh2dnd3d3h7enp8enl6e3h5
-eHZ6enl4eHp1cXJzdHZ2d3Z1cXN0dHRxc3Rzb29ubG1samlqampnZWZnZmVnaGhp
-aGlqbW5wb25wcG9wcnV0c3NydHV2dXZ2dndzdXh0cXR2dnRzc3V1cnFvb290cW1t
-b21sbWxta2pqZ2dnZ2ZoaWpoZWhqZmVjY2RkZGVmZGZlZGRlZGdmZWRlZmZmZWNk
-ZWZlZ2hlYGFjZGJjYWJjYF9iZ2VlZ2ZlYmNjZGRjYWVkZGVlZmZnaGdmZWRmYmJk
-Y2FkYF1WTklFQj08PTs9PUBDTl5seH+JiIyOkI6Njo6Mi4d+dm9gU0lIRUVGSUlJ
-S0tPS0lLS01MSEFDQ0FBR0JGSEJDRkZLTEpLTlFPTVFPTEpJSUtISk9MSkpNUk9O
-UlNQUlVVU1RUV1VWVlhUVVZXU1NXWFFSV1RUV1ZWVltaY2ZnZmducnV3e354bmhc
-VFBVXGlybWNfXGBjZ2lucnJqZFpRXGdzbF9VXmZpYWViZmRlaGlnbWddYl5XXmFj
-WllYWGVpY2JcVlRcYmhjXWVaWltjaGZeT0hJUU5FTVZcVk9ZYFddaWBYV1ZdXGJp
-X2Btb2RbWGBiYWVtaGRlam9wdH6ChYB6cWZjYmFcXmNjX1paWFpaWFlaXFtXU1RX
-WFtVV1VYWFZXV1VdWVdWVldYWVhZW11eW1tcXFhXWVpYWVZTVldVVVJXV1pZWFZa
-XVxdW1xdXV5fX19eYGNmamppa2xnY2JjYWBdYWZpbXV2en9/fHp4eXl4enx7eXNw
-bGtoaGpsaGptamprbHBtb3N1dHZ2dXZ2eXt9fHt8eXd3dXh7ent7e3l7fHh1dnl4
-fYGAf4CDg4aIioeJiYuKjIuLj5GOkpeXlJWYlpaYl5eWmZuZlJOVlpqXlJWXl5eZ
-m52cnJqbnJ6io6enpKSmpaaoqqemo6CenZqanJydnJeYmZmYmJaYlpSZl5mYmqOm
-o6SjoqCdmZaLjYuLjIuMjY+MjYmIiYR+fHp8fn51dnl7en5+e3l8gH+KkJaam5yd
-o6akqq2op62sqqmjoqOclpCMf3p3dnN0eHV0dnNpZV9cW1paWlxZW1laV1RRUlRV
-WFpYVFJSVlRVV1VYVVdVVlZWVVRYVFNRVVZVVldVU1RVV1RTVFRSVVpaVFJVV1ZY
-V1hcV1ZTVlZVVFNUWFVYWFhXVVRUV1RVVVdYWFdVVFVUUldXVVNSUlNVU1FUVVRW
-WVlaXVxbWVpbWFlZVllXWFlXW11bV1ZXWVtcWllZV1tbW11aW1tYVVlWVVdZWGhj
-WVpZWFhXWlpaWFteXl5cW11dXl1cYFxcW11eYF5eXl1fXllcXF1dW1xcWlpcWlte
-X1pdXF1bWltdXl5eYGFgYGBeXl5eX2FiYF5gYWNgXl5eXl9cX1paV1dXXltWWFVZ
-YX61yNXc4OTm6Onr7Ox0dHN1cXJwcW9zc3JydXVzbnFycnBwcHBzcnFzcnN0dXR0
-dXZ2dnV3dXN1c3RzeHN3dXNyc3hycHN1dXNzcnBydnZ0c3R4eXl5d3h4eHt3eHZ4
-eHh2dXV2dnV1dHV0cnZ2d3Z7e3p4dXV1dXZ1eXh1dXV0dXR0d3h3dXRwbm9xcnJy
-cXR0cW1ta2psa2ppZ2xjZGZmZmRlaWlrbG9wbWxsbW5sa2xvc3V3eXh1dHNzdHd4
-dHN3d3h0cW9ycXBzcnJxc3Nwbm9xcWxsa2ttbG5ucGxoZ2VnamlnZGVkZ2dpZ2Zl
-ZGRmbGhlZmRkY2NmZGJlZGVlZmVjZGZpZ2dnZGFjZGFkZGNlZGJjYmZkZWVlY2Vl
-ZWJgYmJkY2VlY2RmZmVmZ2ZmZmZjYGNjYV5aVlFIQkJAQUNAQz9AQ0ZOYGl4foWK
-ioqOkJCPj46LiYeBdWhcT0hDQ0dIS09PS0tITEpMS0pHR0VBQj9GRkBBR0JDRkpM
-SEpLTU5PTUtKSUdMTU5KS0tLSUpRTU5PT05QUlNUVVVVVVdYXFZXVlFUUVBSUlNX
-V1VVVFRTVllZXmZoam9wc3d9eHRwaWFbWV1ha2NbX15fZGdjaW1raGBbVVppb29l
-VVZjaWhlZ2NpaGhpanBsZVtcVFFYY1tRV1tlaVxZWlJVV1teZGBgX1tXXWRbYmJV
-TU1PTkZNV1tgVFxdWFhjYlZTVlNUXGJaYm5wY1VWWmJgYGlraGlsc3BxeH+CgXp0
-a2ViXl5dWllYWlpXWFlZWltYV1dUVFRWV1lZVlhZWVlYXFtcWFZVVVlZWFpdX2Bh
-YFxcW1xaWFhWU1RVVFpcWFZYXF9cXV1dXl5bWVteYGBfW1xeYGBhZWVkZWNjYV5e
-Xl9hZ2xxdHh8foGAe3h3e3x6fX56dG9saGlnaGtra2psbmlsb3NycHFzdXR0dXZ6
-enZ4enx8eXl5eHp5ent5eHp6dXd3dnl8fIB/g4OFhYWDhoaJiIqKiYuNjpCOkZOU
-lJaVmZWXl5iZmpSTkpWWlJaXmpeTlpeZmJiYmZqgoqWlpaeopaeoqKaop6Ciop6d
-m5mbmZiZmZSXmJKTlpeVl5mamJmZmp6en56cnZudnJWPjoyLjYyNjoyPj5CNiYJ9
-e3h4enl1cm9vdHd5eHl/gISLkpmcnaGjpKKjp6WrpqimqKiknp6alY6AdnJzdXh5
-eXV0dm9nZWJeXl1aW1pYV1ZVVFVVVFVVV1dTU1JVWFVUU1dYVlRVV1dVVlVWWFVT
-VVRWVVRVVFRSU1NUV1RTVVVTVVJRUVVWVlZXVVVXVVRUVVdYWFRTV1ZUVFRWVlZV
-VlVVVFZVVFNTVFRVVlNZWVdVVVVWWFdaWFldW1tZWVtYWVhZVlhVVFlZXVpXXV1b
-WVdcXFtaWVxbXFlYWVpbW1pZVllaYFtcWVlXWFhaXF1dXF9fYl1eXV9hX19gXl5b
-XV1dXVxdXltbYV1cXVxbXVpcXFxbXFpaXF1cXF1cW1xeXmBeX2JgYF9eYmBgXmBh
-Xl5fYGFkYmBfXVxaV1tZVldfXVxaVVhegLXJ1Nvh5Obn6urr63dzcnNwbnJ1c25u
-cnR0c3Bxb25ydXJwb3JwbnFxc3N0dHJzdXZ2dXNzdXNxcHFxc3RxcXN0c3JzcXN1
-dHFyc3FydHR0cHN1dXV4eHl5dXx6d3Z2eHZ0dXV2cnNzdnR0dHR1dHZ2dnZ1dHF0
-dXV0c3Rzc3R2dXh6dXFydHFxcm5vcHB3dHRycnBua2tqbW1rZmdoZWJnZmVnZ2Zo
-bGpoaGhqbWxrbm5ydHR2dnRxcHJ1dXR0d3h5dXRwbm5vcHF0cXV2dHNzcG9vamtr
-aWhoa2xsbWtsZ2RiYWJhY2RlZGdraW5sZmdlZWloZmVkZmZlYmJiXl5gYWViYmVm
-ZGJiYmRiYmJkYWJiY2BhYmJjZmVkYmFgYWJjYmFfYmJjYmJjYmRjY2NiY2NjZWNe
-XFdOS0lFR0NBPT47QkNCR1FecHyEjIyLio2SkIyTlZKVkoqEeGdZTUZEQ0VISk1M
-SkxLSkpOTEpJR0ZAQEJEREFBRkNDQ0NHRUhJTFBRUExJUkxPTkxJSkpLTE1MTE5P
-UFNUVFNUVVdWVldaXFpaW1ZSUVBRVFZWVVdUVVhZU1ddYmJnb3Bxdntzb2ljYV5f
-YGVnZWJdXWRlY2VpZmFhXVhZZGxrZ2BbX2NlaGJVXmlta2ZkZVteW1xWUVpvWVdc
-W2RnXF5TUlRVWGFbVGBZWFtjXlVWUVBVVlZPTk1TU1RXXFxXUVxoYVpTT1ViYFhe
-Z25nXFpeZ2Zob2ZmY2tzcW51e4CBd3JrZ2hnYl9eWVtbW1dUV1dWV1lWWVtYV1dZ
-VlhYV1dXV1dZW1lZWVhZWVZcW1tZXV5bWFpZWlxZWVdaWFtaVlVYW1tbW15bXmJd
-W11cYGFlZWFfX11dXV1fZGVjYV9fXFxcX15nbnB4eXt+gH55enx6f4GAgXpyc2hn
-ZmdpbG1sbGhsamtvcHJxbnJzcnN1dnd4eXl5eHZ2eXR7d3h3eHd5enh5dHN0dXh6
-foF/goaEg4WIiIuNi4mMjoqKkJGOlJaWlJWZlpWXl5eVlZSTlpWVlpeVko+PlZqX
-l5mZn6Gfn6CkpKqopaSmqainpKGfnJ2cmJaWlJeUkZCPj5OWlJSWlpmWmJuanJye
-npydmpudmJOQko6MjY2MjY+Pj5CLh4J7enV1dXh3b2tscXR6eHd7gYqTlpidoqGk
-n6GipaiopqappaOenJuXkIJ1bXB1dXl4dXF1c21pYl5dW1tcXFdVU1RYVVRSV1lc
-WFZTWFZXVlZWVlZUVFdYV1RTVFVWVVRTVlZYV1ZVVFVUUlVVVVVVVFlVU1RSWVhV
-VFNVUVRWVVVTVVVUUlVVU1dTVVRUU1RWVlZVVlVXV1NSUk9UVlZXWFNVVVVXWllZ
-W1pYWVxaWVpYWVhXWFpeXF9cXFpcXFlYWlpaWVpbW1tZWFdWWFxaWlpcXFtYWVlb
-WlhZXl1fW1xZW1xaXV1aWFpcWl1bXFxdXVpaWVxeWVpcYVtcW1pcXF1bXF1bXl9f
-YFpgXl1eYFxeYV9eYV9fYWJfYWFgXV9gX2BhYGFiXl5cXl1bWlpZVlhcWltdXWGC
-t8rU2+Dk5ujp6evscW9vcHBucHBxcHBydHRycHJxdHN0dXJzbnFzd3BxcHBycHh0
-dHN1dXhzcW5wcnFxcHBxdXd1cXJycnVzcnNzcnBzc3NydHR0cXN0d3R1dXd2dHR0
-dnZ2d3V1c3V2dXh5eHZ3dHNzc3V1cm5xdHJzdnNzc3Rzc3JzdHRzdXdzb3BvcXBy
-dHFwbnBta2tra2loamllZWVnZ2doamxnZWVoaWpucm1sb3F1dHFycnN1dXZxcXJx
-cnRzcG9ydG9xb3RycnNwcXB0cXBsamtpZ2hobGxpa3ZnZGVjYmBjZWNiY2ZoZ2dl
-Y2JgYWVnZGRhXmFjYmFfYmFjY2FjYGBjY2RjZGNhYGFjY2NhXl9gYV9fYGFjYmBh
-YWBhYWFeYWFiY2FiY2JjZmRhaHNhX11ZUU1HQ0NDQEVBQUFARURHUWJxfIGLjIeL
-jY6Qj5GTk5WVkYqCc2VWS0NDSEtJSUpLSktNTExNTEtLSklDP0VDPT5CQT9BREhE
-SEpOTlBQT09NTkpISklISUlKSUpOUU5UVFRRVldWWFVUVFZXVlRTVFVVVlVWWFdV
-VVNVU1RTVVtgY2ducHVzcnBtZ15cXV1hZGRfYWRkYV9namVeYmBZVmBmZ2BfWl9h
-XF9mXFRdZWlkX2lgWGxjWlpVXXBdXFpZZl5YV09SVFJQXFlTYFxUXlpQSlFRTlFX
-WFVLT1dUV15YV1JQWGdvYlRSVmNfWV5kb25iW2BkbHBxYWNkY29xa3B0e3tra3Fv
-bGxlaWRfYFtaWFZUV1dXWVlYVldWVVVXV1lXVlVXWFxaWltdWlVWW1lZWVxbXFxd
-WlpaW11XWllWWFxYWVhZWllZWFlZXF1aWVteX2BfYmFdWl1eX11jYWFgW1tYWFpb
-XWJsb3Z8f358fHV0eHZ4e39/eHNwaGlnaGhqaGlnamxra2xsamxubnBydnh6eXh7
-eXZ3dXV1d3t5eXpzd3h3dnRzcnV2d3l8fICCg4iGiYaHioiIi4yOioeKkJSWlpSR
-k5OSkpSSkZGVk5SUlZmVkZCTk5SWl5qXlpedoJ6gnaCkqKilqaaoqKako5+hn5uZ
-kpKSk5KLi4+RkZKQlJSPkJKVmp+dm52fmpqcnJuZlZGPj5GPkI+RkI6OjYuJhoB4
-cXFzc3NycG9ucXZ4eHyEjZaWlZigo6KgpKanpqapp6Wko6KfnpmRh31zcXRycnNx
-cnN1cWtoYVtcW1hXVVNWVldXV1dWWltWV1VXVlVWV1ZWVVNSV1ZVVFNWV1dXV1ZW
-VFRVUlNWWFdSUlVVV1VUU1FRUFRXUVFQUVBSUFJTU1dVU1FUVVlYU1JTVlJVU1JW
-V1hWU1RYV1NUV1NXV1ZYWllaVFVWV1lZVlNVV1taWllXWFlYW1tbW11eWFlbXF1c
-WlxcXFtbXFpZWVlZWV1bWlpYWFdXWFlaWl1eWlxfXFpbXFpcXlxdW1taWFdcXFpZ
-W11bXFxbWVlaXFxdXFxbW1xbW11gYGBiXlxeXlxdX19eYV9eYWRgYWNlYF9jY2Bd
-XF5fXmBgXVtdXFtcW1tcWVpaW19eYoO2y9Tb3uPm5+np6+ttc3JwcHNvcHFydXR0
-c3Fwc3BxdHRyb25ycXR1c25tbnFxcXFycXN0dHBvb3FzdXFzdXZ2dnRzbnF2dXZ1
-dnR0c3J0c3FzcnF3cnJyd3RycXF1dXFzdnd2d3t9dHR2dnZ1cXR0cnJ3d3h2dXN1
-cXN1dnZ2d3Rxbm10c3R1dXd5dXRzcG9wcW5wcG5ua2hoZmZmZmdpZWRlZmVlZGVk
-aWtrampsbG1ucHFxbnJycXFxc3d0cXFycXBtcXN0cXBxcnJwb25ubm5ubG1ya2hr
-a2pqaGRlZWdlZGRmY2JhZGVjY2dmZ2NjZF1gY2VgYWRlZGJjY2VlZGRiXl5eX15g
-X2ZoZmVkZWRiYmNjYF5fXmFgX2JkY2RhXmBbW1xeX15gYF9iZGFgYl9ugV5bV1RJ
-R0Y9QEBCQD88Pz1BQkVPYHB9hIWIiIyNkI+LkJaYmpaSjYV8cWFTS0ZFRktLTlBL
-TU9NS0tLSkZERkpERkpJQUNIRUFCREpITUpKTE9PTElIR0pIR0pLSUdKTE1OVldS
-U1NTWFpaWVZWVldUWFZVVVZWV1VXWVRSUFJTWVxXWmJgYWpubm5raGdjXVxfXmNi
-ZGFkZ2JgZWpkWVRbWlZZaG1kYGJiYmNjaGdhYWZlZ1xjZGNicGxZWV1qcWVfWVdh
-WVlWTVBSUVdeW1lkYFtaUE1MVVVUU1JRVFRYWlhbYl1XT01UXmpmXFhXYVlVWmJv
-bmNgY2RtcGpaYmNoaWlrcXR6cmtnbGxsbGpqY2BiYF9bV1RWV1hZXVxcWVlWVVlY
-WFlXVlZVVVZaW1tZWFdXV1daXF5dXl1ZWVlVVlhWWFlZWlpcXFdVVlZWV1RWXF1c
-XFxcW1tcXltdXF5gXF5hYFxbV1hZWF5fZGlrdn18enp5eHN0d3h5fHhyb2tsaGtp
-aGZnaW1tZ2lpaGltbHJycnV3eXZ3enp6dnR1c3h5eHl1dnV1d3Z0c3NzeH18fH17
-foGFhImJiouIiIeKjIuHio2QkZOVlZWOjY2Oj5GVlZSVk5CQkpKRj5GQkpSXlZWa
-m5qbn6Cmo6Snqayqqqeop6ympKKdmJiUkI2Qk42LkJOPjpGSkpWVk5KUmJeamqGj
-n5uamJeUj4uOjI6QkY2Oj46MjIqKhHt0bmtxbm90dHBvbnR5fYCMkpaYmJ2fnp+i
-pKiqp6WlpqSkoqCdmZGKhH51cm9scnRzcHNva2ZiYFpcWFdYU1JVVFRUVldWWVZW
-V1NUVlZWVVNUU1BSU1RVVVRWU1NUWVhTVFVXVlRUVVBQUlZWV1RSVFBRU1NTUlNR
-UlJRUFNTWVdTUlRVV1ZUU1RXWVdUVVdYXFNRT1ZZWFVXU1RVWVdVWFNXV1dWVlVX
-V1pYWllbWlxXVFdYWlpaWFtbWlxfXF5bWVxdXVxaW1peWlpYWlxcWllaWltYV1pd
-W1tbXFhaWVtdW1lbXVxdWlpaW1taXl1bW1xbXlldXFpaW11eXl1cW1laW1xeX2Vd
-Xl9fXWBgXV5eX2JiY2FlZWNiYl9fYWJgaG5fXl5dX19bV1lcXl1eWFdYWFthhrfK
-09zg5Obn6Orq629vb3Bwc3FwcHFtc3Fxc3Fxb29wcnFvcHNzcHFxbm9wcHBvb3F1
-dXJ0cnNwdHJ1cnJ0dHNyc3Fyc3RucXJzdXN3dnd1dHFye3Zzc3FydHNydHVydHJ1
-dnZ2dXNyc3d3cHV3cnR1dHR2dHd1c3Bwc3NzdHZ2c3FydnV1dnJydXd2dnRycm9s
-b29wbWtraGpoZ2ZmaGZoaGVkZmlnY2ZqZ2hta2xtc29ubm5wb3B2c3JvcnV1cm1u
-bm9xcXBxcnNzcG9ucG5rbG1sbGxsbWxqaWdmZ2VnZ2JiZWhnZmRmZWViZWJjZGRh
-YmRjYl9gY2NjYmVnY2NgX2JfXmBgYWBhYmVgYWJhYWRkX19gX15fYGBhX19fX19f
-Xl1dXVxcX11jYF9eXV5hXltgWVZWUktCQkE9PUE/PT08QUFAQktdcX6Eho2Njo6O
-kpGQj5aXmJWRjIR6cGFRSEhESE1RUU5QS0xOTkpISU1HSkhHSEdFSUZHQURERkhG
-SElOUFJPS0pHSEhFR0lKSUpJS05QTlFQUlJTV1ZYWFZXWllbWFhXUlFTV1lWUlFT
-VFZVWlpcXmBhZ2xsaWhmY15aXmBdZGNoZ19dXmJnX1pOVl9WVWFnWl5namdmYWRt
-bmVoa2dbXWFiY2dvcl1YY2pkYlxXYGZjW1JQU1NXXVlXWGdsYldNSlFeWVJRUU9R
-VlNaW1peX1VSS05aVlddWlliXlVZZ3FvZV9eY2drZFdWYGlvaGt2eXlyaWZoaGFc
-Y2VgXF5kYV5bWlxbXVtbWVhXVldZWFlZWVdUVVtZVlhcXFxdWldWWVpaXWBiXlxY
-V1RYWFpbVldYWFpbWFpYWVdZWFpaXV1eXlpXVlpZWV5cXV9jX1xcXVpYVlZYW2Fk
-bXJyeX9/eXR5d3l4enh3dHFzbWhmZ2hpZmlqbnBsZWtrbGxwcXNzdXd2dnZ2d3p2
-c3R1d3l3dnRzdXRzdHRycnF2eHl5eXuAgIGAhoWHh4eHiIaJjI6Jjo6QkJCSko+L
-i46Qj46Qk5OUkZGQkJCOkZCRlJOUlZSWm52dpaClpaiqq6yrqqejpaWlpJ6ZlZOR
-jIyQjImLiYqIjI+RkZORj5OVlpaZmJubm5eWl5WTkJCRjYqOjImLjY2KiYeEf315
-cm5qam5zdXBwc3h8goqQk5aXm6CenqGipaWkpaCjpKWhoJ2ak46Jg3l2cm5wcXBz
-c29ta2VgXVxYVlZVVlVVVlVWV1lXVVNTVlNUVFVVV1ZVU1NSUVJTVFlaWFhXV1dW
-VVFSV1RQUlFQVFdRVFVVVlRVWFlXVVNTUVJRUlVXVVZWVldTVVVVVVdYVFZbWFhW
-U1ZYXllZWFpaVlVVV1hYV1daWVdVVlZaW1tXWFZZV1lXWVxcWFhXVlhbW1xdW1tf
-XV1bWlhWWGFbXl1bWltdX1xaWllaW1pZWVtaWVpbXV5cXVxbXFpcXF1aWlpcXVxd
-WllcXV9dXFxbXV1dXFtfX19aXVxdX19eX19eXF1gYmJjYWBiYmJhYmZjYGBhYWJf
-ZmBcXmBcXV1bWV1bWlhYV1pfW2GGucnU3ODk5efp6evrcm9wcHNzcXNycnFwbG1y
-cHBwcG5uc3JxcHJyb25xbm5xb21ubm9wcXJzcXNxcHJ0b3BxcHBydXBzcW5wcHJz
-cnV1dHRycHN1d3Z0cnJ1c3Nyc3R0dnp2c3V4dHJzdXZ0cXV1dHZ3dHJwc3N0c3J0
-c3Ryc3Fvc3JxdHNyc3N0dXRzcnBtaWxtcG1rbWxsZmdmZ2poaWdoaGhoamtpaWpk
-aG1zc3JzcnBvcnBxcnFwb3FzcnNybm5xcnFwc3Rzbm1ubm1tbmxqa2xva2ptb2xo
-Z2ZlaGZlY2RjY2NjZGRiYmRiZ2hkYV9iY2JiYWFhXl5eYGRjX11dX2BgX15fYGBh
-YmFiYF5fYGFhYWBfXl9dXWJhYmJiX11cXV1hXFxfX19dXF9jYWFiY11aVEpIREI+
-PD5BPT5CQz5AQURBSltue3+GiImJjZCMjpCTkpeXmpmVkIh/bl9TSk9PTUlMTEpN
-Tk9MTktMSUlGRkNDQ0RKQUZBQkVDRkRERkxPTktJR0tKTEdGSElJSkZLTUxOUFNR
-U1VVVllWVlZYWllXVVZYUFNXVFNTUVJQT1NXVlZZXWNlbG1rZmJgXFpbXmJhYmJb
-WVhaZGNcVFRVV11bY19TWl9jZF1dZW5qaWtmWVlcXWRmaGdralxeYVhdXWNramFb
-VFVVUllfV1ZUYnFkUktRVVlbVlFOSVBOVFtYXGRjVkxLUFROUWNeXF1bWVpodW1n
-XVtiaWheVlVkb3BlaXJzdXZlXl9fXV5cXmBcWVpbXmBgXFpYWVdWVVhXV1ZVVllZ
-WV1aV1lWVlpaWlhYWlpbXmFfYGNhYV1aYFtZXF1ZVllYWFdbWVZVVVZYV1pbWVxb
-V1hYWFdZWlpbW11dW2BdWVhWV1dXXmZpb3B5fH14d3d5d3Z0fHt1dm5qaWZnaWpq
-Zmdsb21oZ29wbGptcHN0d3t6dXZ3d3d1c3V0dHRxdHNxcnNwcG9wc3V3eHZ4eHt/
-f4CChYOFhoWHiIiLiYyMi46LjZGPiouNio2NkI6RlpORjo2Mj4+Nk5SUkZOVmJec
-oKSgoKSrsKuop6upp6ioqaWinpmXmJSPjIuHhIOIhISLjo6Qj5GPj5OWl5qZmpuc
-mpqXlpaTkY6JioyOi4qLiouHhoOFgHp1dG9sbW9xdHR3fX6CipGTlZaamZ2eo6Sj
-o6CioqOioaKhoJ2XjoN8dnBxcnRxcHJzbW5tZ2FbW1tbWFdYVlRVVVZUVFRTVlRS
-U1FSVFRVVFdXVFVTWFNRVVNVV1ZVVlZVVVJTVlBSUlJTVFZVVVRSUVNYVlZTUVJQ
-T1NTU1BSVVVYVFNTVFVWVlVXVlldXlpWXl5UUVVUV1lZV1ZZV1laWFhXV1ZWWFdZ
-V1ZVV1pZWlhbWFdZWlpdWlpZWl5aWl5cX1xZWllbWllaW11ZW1pbXVtWWFlcXl9d
-XVxaW1tbXFleW1tZXWBdXl9bW1xcW1lZWVlbW15cXFxbW11aXVxdXmNhX11fX19e
-Xl9jYmJgX2BgYV9hYl9lY2NhXl9eX11bXl5bWlxcXFpdW2FbWVtbWlpcZYq6y9Tc
-4OTm6Orp6+pvc3JwcG9xcXJzdXV3c21vcXVwb25tb3Fvb3Bwcm9ubWtsbWxucnJx
-cXJzcHBub3FycHJycXJwcnBydHJwcXFvbnBydXNzcXBzcnRzcXJwcG5vcXZ0dXB3
-dHZvcnR0dHNzcnJyc3BwcnJ0cnR1dHNxcndzc3R1dXN0dHNycnRzdXNxcnNvbW1u
-bm9taWlsbGZkZ2hpZ2RmZWZiZmhqamtsb3FxcnJwbnNxcnJvb3JwcHBzcnNzc3Jx
-bm5ycm9sbm5wbW1qbG1sbW1sbGlnamhmZmlnZ2FiZmZlZGRjZGRjZWFkZmJkY2Bg
-YmJhX2NhYWFfYV9dXltfX2FhXlxcXV9eX2BhYl5cXF1gX19eXmBhYGFgYWNhXl1f
-Xl5eXF9fY2FiYWJmZmFgWFRNRkJDQ0JDQEA+Q0NEQT9BPEBMV2h3f4aEhImMjY6Q
-kpSXlpOOlJWUj4l+bV5TT09OUE5NTkxKSE1LSktJR0dGREVFQ0NCREA+PkNGRURE
-R0xOS0pJSkhIR0VISEdHSEdJS0tKS1BUV1pYV1RWVFRVVVJSVlZTVFhUUlJOTU1P
-U1ZYVVpbXmRobW9uZl5aV1dYX2ReV1RUVVZcXVpTV1paXF5hUk9aYWlnZWdlZWpl
-XlNXWlVXZGVmYmRxZGJbXWRnbmthWFFSWFpbYV1bVVNgdGNRSlZZXF1TTE1PV1pT
-UFdYZGBOSk9QVlNWZl9bV1lWUmNtZF5cX2JnZ15XW2VxdGRrc25ocW9fXGJeXVlg
-XFdXVltcXFtbWltXVVRWVldYVldXWFhaWldXWFZWVldYWFZZWFdhZGBeYGFiXVtb
-WllbWlhaWFlXWFZXWlhZW1laXFpbWlpXVldVVVpaWlpcXF1aWltaWFdVWFhbYmpu
-dXh7e3Nvbm5ucXZ4dHRwaWdpamdmamhnaWppZ2xnam9ubG9zdHV2eXd1d3d1dW9v
-cXBvcnBub3Fyb3Fsbm9zdHJzdXh7e4CCgIKDhYeEiYqIiIaKjIyOj5KMjY6Ni4uP
-kI2QkpGSj5CNjIyOkZKSlJWUkZKUlpaam5ygoqOoqqqqq62srKuoo56cmJiYlI6I
-hoWCf4KCg4aLjYuOjoqNkJaXl5qamZqamZeUlJORjouLj5GQj5CQkYyGhoV/e3Z0
-dnJub29xeXt9gYKJjZCTlpWWm5yeoqGdnqKioqCfoKGhnJeOg3t3b2tsbm5ucHNv
-bWtoYF9fXFpcW1ZWU1NTUlZUUlRUUlVRVFVUVlNUV1dXVVZWW1RTU1RSU1NWVFJT
-VFRUVFRQUVNWWFlWVlNTVVNTUVRYVllXVVNUV1ZWUlFWWVpaVFlXVVVWWF1iYFhU
-XFtVVFVVWFhYVlhYVlhZV1ZZWFhXWFdXV1paWFVUV1ZXWFpZWVpXW1tZXltaWFdc
-XFteXV9eX15eX1xeXVtYV1laXWFnYlpYW1haWVpcXVlZWltdXVtdX11bW1paWVlZ
-WlpaXFxdXFxeX2FdYGBjYmBfYGJfXWFhXV5fXWBgYWFfYGBfYmFhZGZmYV9dYGBe
-XlxbW1xbW1lbc1pdW1pcXV9jiLzK1dvf4+bo6enr7G1vcW9vcG5xcnBzdHZ0cnRz
-cnFwbnF0c3R0cnFyb21wa2ttbGtvcXBvb3Vua21wcXR0cW9xcnBwb3JydHJtb3Ju
-cHFwdXNydnNwdXV2dHJwcG5wcnd2dHJ0dnVzcnJ0cnV1dXNwc3JxcnV2dXVzc3Nz
-cHF0dXZzcnB2dXZ0c3Rxc3ZydHFwbm5ubW5sa2xtbGlnZmlpZmJiZGhmaGZpbGxt
-cHJxcm1ubXBxcHVxcXFxcXN1c3FycG9wcG9xcnNubW1sb21ramttbWxpaWVlZmNj
-ZGVkZWVmZ2djY2RiYWJfZWNiZWFfYmRiZ2FhYmNfX15eX15fYV9fYWFeXmNfXmJi
-YmFjYl5cW11dX2FfYWBfYF5dYF9dYGBhX1xdXlxaXF1eXWBjYV1WTkpBPj5APkBB
-P0I8QD9CQUNCREpXanN/hYqOjYyLi5GSkZSTloeSl5aSj4l8cGJUUU5MS0pKSkxJ
-Sk9NTEdIRkRERUNGQz9CQkFDQ0JDQUNESklISUdFR0ZJSEZLR0ZJS0hKTU9NUFJV
-VVdZWFpZVVVVVlNWU1BUVlJOUlNUUlBPUlVcX15fZm5vbWtiWFJVVVpkZVlTUlFQ
-U1VYW1teX2JiXmJaW2Bna29wa2VkYl1QVmBbW2FnZ2ZgYGtpXmFqbm1oZFtTUVJb
-XFtaWllbVl5zZ1dTXVVbXVFPUVNbU0xSWVZbXE5MUE5UVFJZVllYWllXYWlgXFhb
-Y2ZpZV5gZnFwaW1vaGZsbmVdXl1dYGVmWlVXWFpXVlhYW1paWllcWVpVV1paWVlZ
-XFtcXlxaW1taWVlZVlZZXV5fY19fXVlYV1ZYWlpaWFdYWVdWV11dXFpaWllZWldU
-VVRTVFhYWFhYWFZWV1VWWFpcWVpdZm1wdXh6d3NvbWxvc3NzbnFtaGVnaGZoZmtu
-bGxqZ2psbG1ram9vcHJ3dnd5dHFwcG5xcG9wcG5ub2xrbG9vcG9vcHR2eHl8fnt/
-f3+DhIaHiYiJiIqMjZGQjo6NioyQkpKNi5CRkY6Rko+NjI+PjYyNk5WRk5KWlJSV
-nqKjpaiqqaqur6yrqaihoZ2YmJaUj4yJiYKAgoWFhYaFh4eJi4uNlJmWl52cmJiZ
-mJSWkZKOiI6RkpSOjIyLiIqIhIZ/fXNwb25tcXV6fYKChYeJj5GYlZWVmp6dn6Cf
-oZ+eoJ6dnp6empGFgHpzam1tbG9wc3JraWRcWl5fXlpYVVVTUlJRUlRVVFFVVldb
-WlZXVlRWWVlaWVZXVVZXVVdWVFNVUlRUVVBSUlNTU1RUUlNUV1VWUlJQU1hXVldX
-VVdWVlVQUlVWVlZWVlZVVlZVVlhaWVRWWlpeWFZWVlZXV1dTVVVWW1lXWFlYWFhY
-WVdXVlRUWVpaWFdXWVpaXV9dW1lVWVxcWVxfW1paW1pbWlhZW1hWV15mZ1xbWFlY
-VVhYWVpbWVdZW1tbXFlaW1tcXFtZWVlbXVtaW11dXl9gXmJjaXtiWl1eX15cXmBh
-YF9hX2JhYmRgX2FiYWBjZmFjYlxZWltbW11aWltdW1dWVlpYWFhcXmKFvMvU2+Dj
-5ejp6urra2psbG9vbW1wc3R0dHFyd3R0dHFxcnFycnJwcG9vb2ttbmxtb25vcXFv
-bW9ydHBwc3Nwc3BvbWxvcHN2cnVycXBwcXJ0c3RxbnFwdXR0cXV0dXVxc3Z0cXBy
-dnR2c3Z1dHV1dXR0dHVzdnZ2c3R0dHNyc3R2cnNzdHNycnJycnV2dXV2cHJvbm9p
-bG1ramlnbmtqaGhpZWNmZ2ZjZmlsbW1ubnFsaW1xbm9ycnRyc3JxcXJzcnFycm5p
-am1ycHNwb25vcG9ua25qbGlrcWpmaGVnZGVnaGdmZWVlY2FhYmFjY19fYGJiYmRh
-YF9hYF5fYGBhYF5dX15bWl9iYF1fYF9hYmNgXlxeXFlbX15cYF5dXF5cW11iYl9c
-W1pbW11iX15cXVxcVE5JRkI/PDs8PT09P0FAQEFEQ0NFS1dneoKIkJGOj4+NjI+O
-j4+Ri5GUlpiZk4h+cGBTT0xMTEhIS01MSU5MSEdHRENFRUREQ0FCQ0JDQUNDRENG
-SUpLRUVCRkdERkZHSEhLTEtOVE5MUFNTVFhXV1hVVlVWVVJTUVRXUU9QVVdXUFFY
-XVxbWmVqaWhlYV1ZWFZVVllYUk5NT1JWVllla2hpaWltcG9ub3h5d3JsZ15WVVhe
-ZF9gXFRfbGNfZ21kZGtpYl1bU1FTVF1eXFhcWltbXGxfXWhjV1dVTU1TVlhQUFVb
-WV5bS01QUlNWT1FUWVRVV2JjZ2ReWlheZmxqZGFua2xydW1kYWVra2ZfWlxeY2de
-WlhWWFpVVVZYWVhaWFlaV1dYWVlVVlpcWFZWV1daVlhYV1lYV1dbWl9hYV9eWllY
-VVZXWFpZWVlXW1lXWFdWVVdXV1hZVldSUE5QVFRYWV5bVldYWVVWVVlaWFtlbHB2
-e312cnBrbW9wcW9sbGhlZWJiZGdla21samhoa21pZ2tramtqcXV0dnV1dnNvcG5w
-b25wbGxqa25ra25wbXF2dXR3eXp6e3x7fH9+goWJi4yKiYmMjIuMj4yJioyRkIyL
-jY+Pj4+OjYuLjIuMio6PkpGQlJeWmJubn56ko6OlqKuurq2pqaWko6Cal5STj4qG
-hIKDhISChYaFhomKiYuQk5WWlpeWmJeZmpeUjY6NjpGSlJKRjo6LhoWDgoWAfHZx
-bWtobXN4foKDh46UlZSZlJydoaGfnp+hoJ6eoKOkoZ2Zj4V6dHJua25yc3J1cGpq
-Z2NhYVxWVVdUUlNTVFVSU1JRUFJSVlZYVlRYVFRTWlhWWFVTU1RTUlRWV1JWVFJT
-VFRTU1NUUlJVVldZVE9XV1lXVlVZWldVVlVUVVNTV1lXWFVUVFVZVFFRU1VXV1VX
-VVlSUlNWV1dVVVpYWFhYVlZWV1VWV1paV1ZZW1dXWltZWllZXVxbXFtcW1xZWltZ
-WFpdWlhZWVhaWlhbXFpXWl1aXVhYWFdYWVlYWlpaWltbWlpaXVpYWVleX19eW1xe
-W1tdWlhbXV1cX11wfWZjXV5hYl9fYGJfXFxeX2BkYmZhYmRlY2NiYWFmX1teX11b
-W1xcW1teXFxbV1hXWVpgY4+9y9Xb4OLl5+np7Otub3BvcG5wb3JtbHBwdXNvbnBx
-cXJyb2xtbW5tb21tbWtscndwbWxsc3JtcXJxcXBwb3NyamxxcHFxcHBxdXJxcXFy
-cnBvb3BxcnRycG9zc3J0bnJ1eXh2c3BycXNycnJzdnVzdHh3dXFzc3V3dHN2dHd1
-cnRzdXZ1cm9ycXJyc3RycnNwb3Jwb21xbWxsa2xraWhmZmVlZGVkY2ZqbW1tamps
-bmxramttbnBxcXBycXFvb3FycnBwbm1ra29xcG1sbW9tbW1ta2xvbWxra2loZ2Zn
-Z2hoZmNkZmNiYF9eX2FhZ2ViYmFhY2BhYV9dW11fXl9fX1xfYmNfY2JfXV9dXl9g
-X2FeXl1hX19eXllZXFpbXV1eXmFgYF1bW1xcW15gXl1cWVVOSURETUQ8Oj89Pjw+
-QD09Pj5BQEFJVmR3ho6Pjo6KiY2NjY2MjpOUk5OWmZqVkoh9b2BUUE1MS05NS0xM
-TUtMTUpJQ0BAQEJFQUVBPj09P0NDRERESEtLSURBQ0NGSEZFRkhKSEhPT05SVFVS
-V1hXWFdVVldWVFNTVFdTT1NXV1RPUVhZWlteW2BmZWFdWVhUUVNSVFVPTElPU1ZZ
-XGVsZmtoZmp0dXR7dnFvbGlmYF5bXWFaWFFRVmNmYF1famFlY2BeXFxWU1tbYF9Y
-WVtZVFZZZl1fZmZdVVRQUFFQU1NTWlxcZFtPUFJTUldQT11hWFNYZmRmZF9YV2Fn
-bm1lZmtkYnJ1b2dhYmdkYF1ZWlpdXVxYVldZV1hYVlZWWFRUU1dZW1lbWVZVWVdW
-VVZUVldXWV1eXFhYV1hdW11fX11bW1laVFVVWFtZV1dZWVZWWlVVV1ZXWldZWFBO
-TUtVVlZTVFZXWVhaWFZTVFZWXGJsbnZ6eHZvcHFucXFydW5naGdlY2ZlZ2RmZ2hr
-aGpobGhkZ2prampscXZ1dnV5dnRzbGtrbGptb29tampqb3FwcnF2eXd3enl6fHx6
-fIGBg4WIi42JiIqMjImMiIaGiYyNjY2KjY2Ni4yJjI+NioiNjYyNjpCSk5iZmp2g
-pqKmpaGkqaytq6uqp6WmpqCblZWSjoeGgoSAgIGBg4aGh4qLjY+NlZWWl5mbmpqY
-l5KQkY+Mi42PkY+PjY2JhYOGiISAe3hwa2hnbHR5gISIjZWYmJaYmZWYmZueoaCf
-n6CipaOgnpuWjoV7cXBvb3F0cm9wbmpmYGJkXlVXWFZSU1JUVlFRVFBPTlBVVFZY
-VldcVFhVVldYV1hVUlNTUVRXVFNVVFJUWFpWVFNUVVRTU1ZaWFVTVFVUV1hXVlRV
-V1hVWVlVV1VSVVhYVldYWVRWWVdUVFNVVlVUVVdYV1hUV1paWVlZWFhYWVdYVVdZ
-WVldWllZWlpaW1paWFhcW1xbW1lYWVpaWFtaWVpZV1lZWlpZW1tZV1taWVlZWVxe
-XFtZWlpbWlhaXFtbWlhaWl1dXV5aW1paXFpbW1paXl5aV1RXWF9eXFxdXFtfX19c
-WltdX2FhZ2NdXl9eXV9dY2FhYF5fYWFcW1hbWlxhW1paWFdZWVlhjLzK09vf4+Xo
-6err63B2dHJsb25sbW9vcXFxcG5sb3N1cXBybm1tbm9ub2xtbHFycXBtbWxwcXRy
-c3Fvb29ubW5xcXBvcW9vcHBxcXFxcnNxc3Nxb25tcXNycnNzcnB0b3B3eXp3dW50
-c3Z0dHV3eHVzdHV2eHFzdnZ3dXR1d3h0c3V0c3VydnN0cW9xcXBvbnNxcHFubWxq
-bGdqa2lkZWZnZ2hpZ2htamtqbG1sa25wbW9ubm9vb3Bxb3Bwb3JwcnRxbXFyc3Bv
-dXJubGxqaGhqbWxqaGppamZlaGdoZGNmamllZGRjZ2VnYl9fY19eYWRhYmFhX2Fg
-XV9fYF1gX1xcXlxfYWBfYV9fXlxbXFxeXV5cW1xfX1tcXFtaWltcXFtfXl5hX11a
-XFtbYGBfXllUTklGRUVAPz47PT08Ozo8P0NDRT4+QUVRZHF+iYuKi42OjIyMjY6N
-j5SUkpWamZiWk4p7bltST05OTU1RUk9MSUpJSEhHREREQ0ZFQ0k/PUJCQkZGQkJH
-SkpHRkdFSEZEQkJFTEpKSU5RTVRUV1dVV1lbXVxYWVdUU1VUV1ZRU1hXVlVTW1lY
-WlxfYWViYF1XU1JSVFRQUllWVVJOTFBPVldVWl1fX2lwb2hkZGVcUk5SVFlbWVRP
-VVpWX19dWmBmW1lXVllgYV1ZWVlYWV1WWFhUUVhoZmNcXV5YT0xPUk1RVVdkY1hh
-W1FOU1RQU1ZVZWJZWF5kZF1cV1tbYmlubmRlaFxbbG9rZmVlYmFeWVhWWlxeV1la
-VVZWV1hYV1RVWFdYV1hZV1VWWFhXV1hWVVdWVldaWl1bW1tZWltbW1xdXFxZWlxY
-WFlXWVlYWVdXWFlaW1lWW1taVlhWUVJPTk5RTE5RUFFVVVZXVlZVVVdYY2ptb3N3
-dnJxcG5ubW9wcG1samVlZmZiYWVmampsaXFtZmNpaWxqbHJzc3RycXJ1dHBvbG9r
-aGprbGxsa2pqbHBvcXZ7e3x7enx+f35/gYB+gYGFjIuJhoaIh4WGh4mHiIaLioqM
-i4mIi4qPjomEhouMi46OkZKWk5WZmZugpKimpaeoqqurq6usqaOgnZuWk5ONi4eE
-gICBgYB/gYSGhYSIioyOk5mZm5mZmJaWk5OUj4yNiomKjIuLjIuMiIiEhYSAenRv
-amhudnh7gouNk5GTlJaYlZaYnJyenp+io6WkoZ6enJiTiXpzbm9vcHBvcHFtbGpm
-YWJhXFdYWVdSVFNUVVRUUFBRT1FPVFhWU1RRUlVUVFRVVFhTUlVVVVVUUVNWUVNS
-U1RWU1NVVFFWVFVUU1RWU1NVWFNSUFVUU1leV1VXWFRUU1RXWVlYV1JVV1dWVlpY
-WFVUVFtZXFtWVFhZV1dYWFhWV1ZWV1haWldZWllaW1hZWFdcW1lZXVtZXlxcXltZ
-WVtZVldZXFpZWlpZWFldWltcWVtbX11cW1pXWllbXFxcWlpbW1pbXFtbXl9dXlla
-Wl1dWFhcW1paWVZXWV1bXV5eXl5dX19cXmBgYGRjYF1eXF1dYmJgZ2RkXl1fYF9c
-WVtaWV1dW1xgWlpYYWaIusvU29/i5ufp6evscXFzcG9zb29ucXNwb3BxbW9wcHFw
-bnFxcG1wb2xobGxwbG9ubGxvcnF1dXVwbm9zcnJxcnNzdHNxcXJxc3FvcnNycXBz
-c3JycnV1c3N3dnV0d3RycnR2dXNzcHBxcXJzcnNxcXN1dnd0dXd1dnV1dXR1dXZ1
-dHNycXN1cnNyc3NxcXFxcnJxcW9ubW1qa2tqaWZoaGZpbGpqamtqamtoa2xvbm9y
-c29tbm9ycXJvcHJ1cnJycG9wcW5wdnRwcG9vbGpqa2psbWxpZ2tsamxmZmhmaGtr
-aWpoaWhnZWNiYGVhYWVhYmNeXWFiYGJjYWFhXVthX15eXl9fXF9hXltdXV5dXF9e
-XF5fXlxeWlxcWlhXW1xbXVtaXl5eXV5cXl1dXF1XUVFLSENGSUM9PTw8PD1BP0FB
-Q0NBPjs/Q1BgcH6Fh4yOj46NjImMkJCQlJaTkpeYlZiYkol8bl5TUk9QUE1NTU1M
-S0tKSUhIREVFQ0JHREE8PURDQkJDRkhKS0hHRUJERERCQkdKS0hLTFBTUlRWVlZX
-WlpdXltbV1VXW1pYVlFPU1FRUVRZWVVaW15fXl1bW1dTTk9XVlBOXWJXSkVISEpI
-RklLT1NaX2dgXFlVTktIR0lLT1ZYUk9SVV9gW1taV15aUU9XWllaX1taWltYV15a
-VVhbYXBrZV9cYVpTU1JRVFVVWmNaUmBUTE1NWFlWV1dnZ15hYV9bWVpPUFphaW5t
-X2BiXGBmZ2ZlYmJmZGJbWFdZXGBeWFhZV1dYWlNTVVZWWFlSVVlcV1dWV1dZV1dZ
-VVdYW1lbWltZWVlcWVtYWFxZXFpZV1lXVVVWV1hWV1dWWFhbXV1aWltcWFBQUlNU
-Uk5PUVFRUVBOVFVTVFNUV1tiZ25wdHNwcW9ubmlsbHFyb2ppZmNhYWFkaWdmZGdp
-amxmZ2hnamxsam5wcnNyc3BxbmxsbmlnaGlramlpbnBvbm9ydnd5eHd5foKCgn9/
-gIGCiIGBioaGhIeFgoaGiouIiImJioyPjYuKiImEg4SKiYmKi5CVlpWWmpmbn5+g
-o6Wlqaupq6ysraumo6Ccm5aTkI+LiIKAfn9/gH5+foCEgoWIio2SmJ6bl5iYl5WV
-lI+OjoyLjZGOi4yJiYmGgoCCf39+eXJua29yen6Fio6RkpOVlpWSlZicnJudn56g
-oaKgnZ2bm5eRhHlxbWptcXFvb2tqZ2JhZGBcWVtZVFNVUlFST1JUU1JRU1NRU1NV
-WFdVVVVUVlNRUVVUVFRWVFZTVFRWVVJTVlRUVFVWVlVUV1dVWFdWV1dYWVRSUlJU
-VVdZVlZWV1RWVlZWV1hUV1dZVlZYVlZYWFRXWVpbWVlZVFNWWVhYV1lWVlNSVVdY
-V1dXXFxZWVtcWVpbWllZXF5cXVteWlpbW1dVWFlbWVpWV1dXWlpcWVlXWllcXlta
-XF1bYVxbXFlZWltbWVlZXFxbW11eXFpbWltcW1daWl5fX11cXV9iX1teXVteXl9e
-Xl9gYWBhY19fXV9hZWNgYGJfW11dX2BbWllZWVpYV1haWFtgYoO5ytTa3+Pl5+jq
-6utzcG5zdHFvbXBxb29ucG1xcHJwcm50dXRzcG5ubm5wcHFzb29wcG9wcHBycHBw
-cXJyc3d6eHZzdnNwcXFxcHRzdXRwb3J0dHRyc3N1c3VydXZ1dXd1cXJzcnd6dnVz
-cXBucHF1dXJ0d3p2dHR1dXh0dHV2dnRzdHRyc3R1dHR1dXV1dHNzdXZxb25ubmxs
-a2toaGhpZmZpbm1ramtsbm1sbm1ob3Byc3FwcXFxcnJwb3BucnBxcnNydnZzb3Bx
-cHBubW1ubm1vb2xoaWpsb2lqZ2dpaWhoZ2dmZ2dlZWVjYGFkYmJgY2JiYmRgYWFd
-W1tbXV1cYGFdXVxaW19dWlxbXV9eXl1dXWBgX11bW11eWl1cWlpaWVpcWV5hYV1e
-XllZWFNLREFAQj1AQkFCPz08Pj9AQENDQkJCQ0NFTVxndYCIio6QkI6OjI2QlZCN
-kpSWlJadnZeXkod8bV1VV1NQUk1MTExMTExHSkZISEVEQ0lBQkFAQD09QUVGTE5M
-TElEQ0hHRURCRUpKS0xKSk1RU11eW1laXFpfXFlaWFhbWlZUU09QUVJLUFZWWFha
-WltdXFlbV1JRT1RRTlhXU01LSEZJRkhHR0tNVFRXVlVNTk9PTU5KS0pNUFROUlBU
-W1tdW1FRVlFOUV1eVlhaXFZYW1ZWW1dVXWNkbWJdXmBdW1laV1JVU1NTWVZZZlhS
-TFBdWldVVmJpZmFbXVpWUU1TWV9lampcXVpjZmRlZmFfYF9iXVZWWlZXWFdUVlZU
-U1RWVVdVV1RTUlVUVlhaWFdYV1ZZW1dWWVpXV1lYVllZWVlZWVtaWFlfWltZWFtX
-VFVXV1hYW1pYWlhbX1taW1dWUlJQUFJWUFBNUFFQUlNSUVJQU1VWWFpmb2twdXNs
-bG5sbmxua2lnZmlnYGFjZGZna2hlY2ZmZmloamdoa2xubm1tbnJwbnF1cG1saWlm
-a2xsamprb3RydHN3dXd3e3t5fX5/goKAf36EgoOFhYmIhoaFg4aHh4WEhYmLioiG
-hoWCg4OGh4uJiI+SlJaVk5aZl5ybn6GkqKenqaarq6ysqKelo6OalJOTj4qHf319
-f4B9f35+f4GCg4iLi5SVmJOSlZeYlpaSkZKPkI6PkJGNjIqNiYiCgH5/gIB9e3dx
-dHh6foSNk5KWlpWVk5ORlJebmZqen56enZucnZ2dmJSMf3pxcXFxc3NwamVhYGJh
-XV9bXFpVVldYVVNWU1JTU1hQUlFSVlVTU1RVUlZVVVJUU1JQT1NTVFRST1NRUlBP
-UlNVVFhUVFVVVFdVVVRUV1NUUVRSUlNVVVRWVlZTVFZWVVZXV1lXVlZUWFZSU1VV
-VFdYWFdWVlNSVFRXWFhYV1hWWFVYWVZZV1hZWl1bXF1eWltbWFlZW1tZWVxdXFxb
-WVhZWlpaWVhaWVhaV1hZV1dWV1lbXV1aXlxcXV1ZWl5dXF1dWVZZWltZWVxbWl1c
-XFtZXV9hXWBbXl1bW11fYmBeZWJlYWBeXV9eYWBfYF9hYmRjYmNjYmFeX15eYl9Y
-WVpbYFpcV1dXWl5jgbnL09vf4+Xn6ens63Bwbm5vbXBuc3dycW5wcXNyc3Fwc29w
-cXFucG1sdHN1dXRxcHF1cnJxc3Jtbm9ycXBydHZzcnR0c3JucnJzdnV1c3FydHZ1
-c3R1dHR0dXV0dXh3dnRxc3F1d3V1cnJydXN2c3F1dnV1eHZ1cnN2dnV2dnRzdHV0
-dXRzc3N3eHNzdHZ0dnJzdX1zdW9ubGxsaWhmZmhnaGprbGxsaGtwcW5vb3Nvb3J0
-dHR2dHV0cHBwdHVycXN0cXJydHFwcHBwbm5sb29vcG5sa2hrampra2tqamtpaWpn
-ZGVlZWNjZGRlYV9hYmRhXF1cXl1gX19aW1tbWl1dXGFgXV5dXF9dXFpcXl9hYF5b
-W19cXF5dXFxdW1xaWFtbW1pfYF9dX11bXF5WUEtDQEFAPzo8RT08PTs7PUBEQ0NA
-QUFEREVLVV1wfIaJi42PjouJi4+QkpCQl5aYm52gnpqakol8amJYVFNRUU5PTkxL
-SUtJSEVHRkRCQkJDQEJEQEBEQ0VISkhHRURDREdHSUNESUdNTU5PS01UV1lbWlpb
-XmBeXVtaVldUU1BSUU1SUk5SVlRXVlhYVllZV1dVVVNQVlJOUE5MTUxOSUpOTUxL
-T1JOTVBKSUpJTU5RUFFSU1NOUFVVU1RQVFNPTVFXTUtVWldcW1hfWlZWUVpdWl1j
-ZmZpZmNiYmNeXlpXUk9RU1ZaVV5gU01SU1ldVlZVXWVmYFRVYFZUWVlbX2BmZ1xY
-UV9oaGRgYFxbXV9eWVdYWVZWVlRUVFVVVVlYVlhXV1JUWFpWVVlaWVlXV1ZYWVtZ
-V1pZV1xdXFpYV1dcWFZaWlpcW1hXVlhYV1hcV1laVlZYWltbXVhYVVNTUE1SUE5P
-T01MTU5QVVZTT1FSVVhaXWJnbHBwcWppbG5sa2ZmZGRiYmFiY19iaGRjYmdmaGVm
-ZWRlaWdmbGtrbnBxcnFtbm1ua2dkZmRoaWppaGtrb29ydHJ2dHZ3fX19fX1+f32C
-goF8gIOEhoiIiIeEh4eEhYiMi4iIh4mLiYmHhYeKioeKjJCSkZKTlJSZmp2en6Cm
-p6qurKqpq6qqq6inopyXlZOQi4iCfnt7fX19f4GCh4eGh4eLkJGTjpCRk5GSkZCQ
-i4yMj5CNj5GNjYqIhIJ/fnt7eHR3eXZ4fYGCh4yRlJGTlJWTkZKTmJicnJ6enZqa
-mJyfnpyYlI2Ee3NxbnBzdnFpZGNfXl9bW11cW1ZUVFRWVVRTVlRUUVNTUlJUVVJR
-UFFSUVNTU1NTU1JSUU5SUk9PUE9QTlNTUlVTUlVXVFVTU1ZXVVJSU1ZSVVRXVlVU
-V1dVVVVWVFJUVlVVVVVWVVZaWVpWVlZVVVZXVlhWT1JTVFVWVFZVWFZXV1hbWVpY
-WFdZV1lZW1xcW1xbWVhbWVlZV1lcXFtbXF5cWlpbV1pZWFpaWVhZWVhaW1tbWVxb
-WVxaW1xcXFxaWVpbWlpaWltcW15dW1xfW1xbXGBgXVpaXV9cXF1gYGBgYWNhX11f
-X15eXV1eYGFiXmFlZmNjYWFgYWJgXlpaV1daWltaVlhbXWKEusvU3ODj5ufo6uvr
-dHNxc3RwbnJ1dnV0d3R0dnNvcnFvcnV3b29rbG5yc3J1dXFzcnNxcW9ubXBydXFy
-dXh2dnZzdHNycnRzc3d1dHV0c3N1dXd0c3FwdHl0dnp0dXd3dnR0c3FvcXNzdXZ2
-c3F3dnZ0dnZ3dnd2dHR1dHZ2c3V0dHVzdXZ3dHRzc3V0dnd1dXJxeHZubWtsb21q
-aWpnZ2dra2pqbW1ubnBwcXBwc29zcHN5dnZ2d3JycXJzc3N2dXJxbm9vbnFvb292
-cXBwbW9tbGtramtsbXBtamtwbmppaGVkZGVlZGBgYmFjZGRiX2BiYGFfX1xaX1xb
-XlxeX11cXF1fYGBfXFtcXWFhYF9fXlxaXl1cYFxbXV5hXmBfW1tdXmBdXl5eXV9c
-V1JOSEQ/P0NAQj5BPTw5Ojo9Pj49PD9AQkJDRUhTYW14goqMj4+OjYqJjI2SkpOV
-lZaUm5uen5uYkol/b2JXVlZXT05QTk5OSkdIRUVJRkJEREZCPkJBP0NFQ0JFRkRC
-Qj9CRUlGSEVGRktPT01QT1RaWllcXmFkY1hdXFZbWldUUVFRTVBTWFJQUVNWW1dU
-V1dUVVRVUVFSUE1MS0pLSkhITFBOTEtQT0xNTUlGR0pJSUtQUVBPSkxPUkxNTk5O
-UUtLVFZSV1BMVlpYVltfX1RYX19iZ2VlZ21qaGdpaGdhXVZTUVBaXF1ZWVhWT01L
-UFRRVFRcX11cVVphV1VYV1pdW19jW1dTXWNhXF5iYFhYYF9ZVlNVVVdUVVNVU1JW
-V1dXV1dWWVlYW1xdWlhXWVZTVVdYWVhaWltbW1tbXl5bWFVWU1ZaWVxbWllZWVlb
-WFhaWVhVWVdZWVtbWlZUVFNQTVFRUFBMTExQUFRTVVRYU09SWVtcX2ZtbWxra2do
-Z2lqZ2RgXmJhX2JiYWVoaGloZGNoaGRjZmhmZmlpa21vcXBzcXBva2poZmhjY2Zo
-bGxqaWptdHRwbnN2d3l6e3x+e3x9gX5+fYKCgYOBgYKCgoKBgoKAhIiHhoaJiYeI
-iYWGhoWHiIeHi4uOkJGTl5iYmpmdnqWlqKuxsaysqqqsqqijnZqUko+Lh4SCfHt+
-fnt+f4CCg4KDhoyOj5GUk5SQjY6Sj4uLjZCPj4+PkI6MjIeBfn56d3Z5d29yeXx+
-g4aJi46OkJKRkpCQk5ibmpuamp2bm5mYnJ+fnpuYj4Z8dnRvb29xbWhgYF5cXV1c
-W1pYVVNVVFRWV1hTV1ZUU1JUVFFSVFVTUlFSU1RWU1JQUVJTUE5QUlFQUlNSUVBS
-VFRTT1RVUlJVU1RUUVNUVVZWWFhZV1RTU1JTVVdaV1VXWVpWVVNWVFRWWVdVVldW
-VlhYVlNSVVZSVFVYVlZYVVZaWVZYWFhYWFlbVlhaW15YW1pXWVhaXFlWWFtdXFxf
-Xl1bW1tYVlldW1paWVpbWVhaW11cWlhYWV1aXVxbW1pcXFtfX19bWlxbXV1dX19g
-XFxbXWBiYV1gXl9fXl5gX19fX2BiYV9eXF5eYV9gYmNiY2NlYmNkYF9gYF9dXFla
-WVlZXF1ZW1xcYYS6zNXb4OXm5+nq6+t2dXRydHZzc3NzdnN3dXVxc3FucG9xcXFy
-dHN0cXFxdHNzcnJxcW9zeHRzc3N0d3V3dnp3dnZ0dXd4dnZzdXR2dHN3eHVycXd3
-dHZ4eXh2dnV0dXR2c3N4dnJ1dXN1dnV4e3l3enh3dHZ1enl0dnV2dXZ1c3NydHZ3
-eHp4dnV0dXR0dXd0dHJwcW1ub25ubmxraWtubGloaWhqaW1wb21wcW5wdHFvdXV3
-enZ0c3Fvb3V1dHp2c3Jwbm9xb3Bxc3R1eHNubW1tbW5wbW1tbGtrbm5tamtqaGpo
-aGNkYGFjZ2RhY2NfYWBjXlxeXF1fX2JeW1tdW1teX2BgXl1aW1tcYWBhX19gX1xe
-XFxfXlxbW11cX11dW11gX15eXl9eW1dRTUVEQD9AQj89Pzk/Ozg4PTo+Pj49PD08
-REdJSE9aZ3N8hImOkJCMioqOjpKXl5WWlpWZmpqcnZqTj4l6bmBYUlBOTk9QUVJP
-UExGR0VDQkBGRkRCQkI+P0BCRENDRUJBQ0RERkdJR0VFRk9OT09RVFdYXV9gYmJg
-YF5bW1pWUlZPUlNPUFdTUFJUUk9SVllcW1hVVFNTUVBPS0lISUlFSUpOVE9MTktO
-TElISEhKR0pJSkpLS0dHSktHRUNHSE1OTU9QS09SUVVWUlRTVV1cVVxkY2JkZGRl
-a2pmY2BiYWBcWllYWllZWllaWVJMSUtKTlNVWFpZWFdXXmdiX2BfWllfX11ZVldZ
-WVtdXl5gWlxcYmBXUlNWWFhXVVhVV1dWVlpWV1lXVlRYXFxbWFlYWltaWVtbWF1d
-WVhbWl1dXFpcWlhYVltbWlpaXVtbWVZVV1ZWWVpYXVtWWVlUU09OTlJQUVJRTEpK
-Sk1RUlVUVFdVTk9SVllcYGhra2llZWZnaWllZF9aW1ldX2FgZGdrbWhkY2RnZmdp
-Z2ZoaWlrb3BtbW9ta2xtbGhoamZkZGdnaGltbm9zdnZzcnN3d3V1eHt+fHl7fHp7
-fXx8f4KEgH1/g4J+gIKEhIaIhomLiIqLiIaChISHiIaJi4+TlJWVlpmbnJ2foqSm
-qaitqamrra+ppaSgnJuYkY2JhX57enp8fHx8gYGBgH+GiIqOk5aTkY6MjpCQj5CS
-kY+Pjo+OjYmGg4N9enh3dnV4dnp7e4GKk4+Ni4mKjY6OjIySlZSRk5iYm5udl5mc
-n6Cem5qSiH93cXBwcHBraGBfW1ZYW1xeWlhWVlNVU1VVVFJUVlJUU1VVUl1VVVVX
-VVNSVFVSVlNQUVNTUFBTUVVTWFNQUFJTU1VUU1BVVFZWVVlXVFZTVVdUVFZVU1VX
-VVVXVldYV1VVU1NTVVVVWVdXWFlWVVRTVFVUVVhZWllXVllbV1hWVldaWVlZV1lZ
-WFhXWF1ZWllbW1xeW1pWWFtcXFlaW11cXF1bWVhYWl1aXFxZWVxaV1RXWVxbWl1Z
-WVdaXlxcXF1dX1lcW1xcXl9fXF1eXV1eXVxeXmJlZGBdXV5gXl5eXlxgX2JhX15f
-YGFgYWFhZWVhY2VlZmVjYF5cXl9fXFtbXVxZXF1bWlhff7nM1Nzg5Obo6enr63R0
-dHJzcnBzc3V0dHd1dHBvbm5ubW9vcXBycnRzc3BwbnNwcXJ0dXN0dHZzcnB0dXR2
-e3d2dXh7enx7fHl3d3N1dnVzdXp5eXt7d3l3dnd1d3t4c3Nyd3d3dnp5e3Z1d3d5
-fH14eHh3dHV3en56eHh4dnd3dnV3enh3d3Z3dHR3d3Z1dHFxcHJ0cm9ucGxxb21v
-bGhrbGtqaWlrbmxucG9xcnJwc3J1d3l0c3NzdnVxc3R0c3JxdHV0cm9vb3Jycnh5
-fHZvbnBtb3FxcG5sb21vbm5sam1ra2hoZWRmZGViYWFjY2JhY2JeXl5eXGBeYF9c
-W1pcYF5fXVteYGFkZGJdXl5eYmFmYV9fYGBfXFtaWlpeX1xeXmBeXV5eXllVUk1F
-Qz88QUVDQENAPDg6PTs8PDs9QTxAQkNERkVESVNhb3yDhoqLiYqLi4yQkZGSk5SY
-mpiZl5qdm52akoR0bmJYVE5NTE5OT05NTUlGRkZDRUdCQ0RBQ0FERkRDQkJDQkdJ
-RUNEQkRFRUpJSU9ST1JQVFxfYV5gYV9eXFteWlpUWk9RVE9RUlRVVFRTVFBQV1hW
-V1hXVVRSTk1LSENHSkZHR0lLSklKUE5HRUlJSEpJSUtJSE9LSEtNSkdGSklLUVBM
-UlBPT09SVlJRW1pXW2FgXWBgX2BgYWJpZmVmY2JkXl5eYGFcXVxbWV1WUk9OTUtQ
-VFpcXFtaVlpiamBdXVxYWVxfXVtZVFhaWltaXVtaWFtbWFdTU1VaWFVVVllcX1lW
-WFlZV1VWVlRYW1paWFdXWVlZWFpdWVlYVlhaWllbWlhXVlhbW1lYWFdYW11cWllY
-WFdYWFpaVldSVFNSVFFUU1JRU05KSEpLS09QUlFRUVFRVFJTWVpeYmZkamZdYWFk
-aWpjYl5bWl9iYGJmY2ZqZmRkZ2ppZ2RpY2lmZGhub25ubmtqaWttbGxpamZjZWln
-aWtwcG5wcnV0eXl2eHh4enh2fnt4eHh7eX2AfXx8fX2ChIF+gIODhoiHhIWJiIWG
-hYODhISGg4SIjpCSkpOSlZeXm5ydoKSnrqupqKuvq6mloaGdnJmRjYeCfnp5dXZ3
-eHt9foB/gYGEiY+SlJOUlZeQjZCQlJOOjY+Ni4uKiYeHg4WAeXRvcHV5gIB/gI2S
-ko+LjYqNjY+SlJKUkZKSmJqVmZuWm5yenJ2empWKgXl1cXFwcWhmYmBbWVhZWFpc
-V1dWU1VVUlNVVVFRUVNTVFNSVVJVUFBYV1ZUVFNTVFNRUVJXVFFTU1JUUlRRUlJS
-U1NSUVFXVFVXV1RSVFhUU1NVVFdUVVNSU1ZVU1VWVFZVU1JWVFdXWFhXV1hZVVRV
-U1ZWVlZYWVhXV1ZXU1ZVVlRWV1lXWFhaV1ZYW1lXW1xdW1tbW1pcXFxbW1tcW1ta
-WltZWVxcXVtaWVhZWlhaW1dYWVpZWVpaXFlYWlxdXl9fXVlcXFtbWV5fXlxfXWBg
-YF1eXl9fXF1eYGBdXl5eX15eX15eYF9gYGFjZGRjYmRjY2NlZV5fX19bW1lZWlxc
-WFlbXFhWWV1/u8vV3ODk5efo6uvsdnZ0d3Z2dHNxc3NzcnJwcW9ubWxscnJ0c3Bx
-cXBxc3Vzb3F0dXNxcG9ydHNzc3N0eHV0cnR4dHd2d3p3eHd3dHZ4eXR1d3d3eHh0
-dnd4eHZ1d3l2eHZ3eHh4dnl5c3Z1c3d8e3l6enh4dnh5eXt7eXd4eHd1dnh4d3l4
-eHV2e3x5eHZ1dnV0dnJycnJubG5ycG5ta2xubmxsbmxub3BwcHV2c3J2dnd4dnh4
-eXd3dnZ0dHNzcnF1dnR0dXZ0c3V0dn59eXR0bnFvcWxxb25qbW9tcGpsbG1saGdm
-ZmlqZGVjY2ViZWNhYl5iZ2ZkY2JeYGBdXl1eXl9fXl1fYWNiXmBdYF1fY2BfYV5g
-YV5gXlpZW15dXl9dYV5cXF9ZVVBMRT86OTw+PUBCQD89O0A8Ozo6Oz5AQkE/RkNE
-RUZFTl5udn+IioyOkZGOiYyPkJGQk5aZmZqampycnp6akId8bVxUVVNTUlBQTE1O
-TURHSUREQ0NDRD9DQkFDRkZDRUVISEtFR0NDRUVGRUdKSk5MTFNUWl1dX19hXl5e
-XlpYWVZVU1BVVFNUVVRUUVRWVlhVV1ZWVFRVVVRPTlJLTEtPTkhHSUtKTlJNR0hM
-SUlKSkxLTkxISERJTk1ISEpHTFBRU1RWTk5QVFVWU1RaWlVeZGNiYV9hY2RiZ2xn
-Z2dnZWJhYWFmYl9bV1xcWllYXF1NUFFSVlpcWlhbXWRmX1hXWVdaVlpbWlhYV1pb
-WVpdXFtdWVlaV1JTWVZUV1VUVVdYWVtaWFdXWVhVVllZV1hYWltZWllaXFxaV1hX
-WFxZWFZZWFlcWltZWVhZXFtcX2FeXFlWV1haWFdXWFlZV1NQTUxOVlZSVE5MSEpN
-T09RU1JTUVJXV1VZXF9lYmNqZWNhYmRkZWZiYmRgYF1gZGhoZWRkZmVkZGZjZGZn
-ZGVjZmhqa21sbW1qZ2lpaGhnZmdoZ2psbmtpbGxycnF0dnV3enp4dnl6eXl7d3t7
-ent8eHt+gH+BfX59f4GBgYaEeoSHhISBgoSEgn+ChIWKjo+RkZGVmJqdnqChpqio
-q6mpq6qrqaajoJ2bl5SPiIN/fnh1ent6end5fYOChIeLkpWWl5qWlZeUkpCPkJGP
-jY+Qi4qJhYSBfYKAfHJydHuAgX6Ch4+Sk5WRj4+RkpWWl5aQkZCUmZqZnp6dnKCf
-nJ2alIuAeHRycG5tbGdjYFpZWlpZXVtXVFZXVVRSTk9MS05TUlhTU1NQT09KUFRX
-V1ZSVVVTVVRTWFVUU1NSVVNVVVJOUFJRUVZVU1VVVVRQUVZYU1RUU1NSUlRUVldZ
-WVpcVVVVVVVVV1ZVVFZZWldWVFdWVVJSVVhWWVlaV1RWVlZVV1ZWVVhXV1RXWFhZ
-Xl1cW1paWVdZV1hbXVxbXVpdXVxeXV5ZV1pbXV1dXVpbW1pbWlxbWllZWlxZW1xc
-XFlZXF5cX19eXl1bWVtaW15fYF9fXltdX2FfXV1dX19eX15dYGBgYWFfYF9gXWJh
-YWVkYWJnZmRkYmFiYV5hXV1eXldaW15bWlxeXl5cX3+7zNXb4OTm6Onp6+tycXd2
-dHZ4dnFwcXJxdHNycnNubXFxdHNzenRxcHFyc3JvbW9xdXNycXN1dHV2eXZ1d3V1
-dG9vcXV3e3d2d3V1d3l5end4dnN1d3Z5eXl5fHp1dXl3enh3eHd4eHl6eHd2eHh5
-d3t7enl3eXl4eXt5eXp5eXx6enh4d3l4d3h7eHp4eHd1dXd1cnBxcnJtbm5wb25v
-bW1xcG9vbm5ucW9zcnZ4dnV4dXV4dnd5dnZ5e3Z1dnZzdnV0cXV4dnZ0dXV2dXl2
-dnJ0cG9ubG1va2ttbmttbnBsa2tqam1pbGlpamhnYGFlZF9iY2NjZWNhYV9cXl5f
-Xl9fX15nYmJlZGJiX2BiX2JgYF9eYWRiX19iYl5eXFxgXmBgX2BdWldSTUdEPjs9
-PTs9QUI8PTo6QEE8OTk8PUA9QEJCRkVDQUJJVGFxeoiLi4+Qj4yQjI6QkZGTlpeZ
-l5OYmpmcnJ2elIt7b19XVVZYVVJPUE1MTElJRUJBQUJAQUdDQkhFREJFQkNEQkE/
-Q0ZDRUJDRkhITFJRTVdVWVxeZGFhYF9cWlhXV1hTUlZXU1VZUk9RU1dZW1lVU1FR
-U1JTUFBSTUtLT1JRUUxMTVJUTEhKSktPTU9LTE1OTU1ISUhNTElMS0pOT1BUW1dW
-VlZXW2FfYV9cXF5lZmFlZWVoa2tqcWxoZmpoZ2NjY2RlYFpZWlxfXFlWWVRYV1la
-X11bXFxdY2VeWFRZXFxZVlpaXFxaWVhVVlhaXFtZWFlcWldYVlZZWVVVVVRYV1ZV
-V1hYWFVWVllYV1ZWV1hYWFhbW1daWVpaW1lWWFlXWFlZWldYW11cW15dXF1bW1la
-VVhXWFlbXFdRUVRQU1BOUlJQUFBNS0pOUVBRUlBRUFFUWFZXXF5fY2hoZ2NkY2Rn
-YmJfX2BbXWNmaGlmZGFlaGNnaWVhZWZlZWJjaGdmaGhnZ2ZlaGdlZWdnZWZmZmts
-a29scHV0cXBzdnZ4eXd3d3h6eXp+e355enl6en6AgX98fH6Af4GBhISGioeGgoCE
-hYOCg4KCh4uNj5OSlJeZm5uenqGkpaisqqejpqiopKOioZ6alZORiIJ9fH56e3p6
-e3yAg4WGhouPlpmalpmZlpqVk5GQj5CRkZKPi4WGg4F8e3t9fH18fYKDhYiOj5GR
-lJSRk5GQk5OTkpKTk5eXmpqYnp6blpybmZeXj4J7c3BuamZiYV9dWVdZWVhZWlZR
-UFJTUlJSVVJTT1FSVFRUVFZVVlFTVFVWVFVWUlNVVVFTVlVWVVRVVVdUVFNQUVRV
-VlVUVVVUU1FTUVNVVlVVV1VUV1hbWFZWVlRXWVVWWFhZV1dXVFdZWVdSVFRTVlRY
-VlZXVFZWV1dVVllZWVhUVlhXWFpaWllcWVdXXVtZWlpYWVdYWlxeW1lYWVxaW1dW
-W1tZWllaV1lbW1lbW11ZV1pbXF1dWlpbW1tYWFtbW1teW1hdW1pcW11gYV9dXlxb
-W1tdX2FiX19gX11eX2BfYGJhXl9iYGFiYmNiYmBhY2VkZGFfX2BeXl5gW1pcXlxZ
-W11cXlxeervL1Nzf4+Xn5Orr7HZ6d3h3eHV1cm9vcnJ1end4eHdzdXZxcHB0dnNw
-cHFyc3V1dHN1cG51dXJ0dHZ3dnR1dnZ0dXNydXd1dHV3dnZ2enp5eXh4d3d1dHl6
-fH1+e3p8eHp9fHd4eHh1dnl4e3p3dnp6e3l6eHZ4eHt8eHl6eHh6e3t6e3t9e3h9
-e3l7ent6dnV1dXN1cnNydXZwcHF0cnJyc3Rxb25wbm1tcXV3d3h4eXd0dHZ6eHl7
-e3p7enp5dnd4eXd4dHV3eXp4dHJ0e3Z0c3JvcG9wcnFzcm9vbW5wbmxqbG1sa2pq
-aWpraGZrZWVnZWRlZWNkZWJeXV1eXF5iYWBhaWRlZmFhZ2RlZGVeWWJgXV9eXWFh
-YWJhYF5dXFtgYWNeWlxZVE9JSEFBP0A8ODs9Oz49PD89QTw9Ojs+QD9BQkJBPj5B
-R0lSXmx5gImMjpGTlJKOkpKSkpKSlJaXm5aamp2goZ+clYx+b2NVVVdXVlJRTk9N
-RkdIQ0BDQUNGQkFDRkJEQUFCREVGQkRDQEFFRUVERklJTE9QW1RZW15iX15fXlxc
-WVtbXFZVVlJRVFhWUk5SV1tcXVhWU1lVVlNTVFJOS01MTlFRTUpNUVFNS0pKSkpL
-TExMT1JPUVJPSE1RUE1NUlRWVFZYV1deW1lhZV9eYGNgYWdqbmxnaG1tbmpvb25q
-aGtpYmZnY2VkYV1aXVxZVllUVFVZXGJkXVxaWlxhXllaW1tbWldWWVdcXVlYW1lT
-U1ZWWFlbVlVXVVRTVFdWVlRVVlVTVVZUVFhWVlVVVVVYWFhYW1lYWVpYWFlaW1lY
-VFVYWFtbW1dZWFdaW1dXWFhZWVhcXVxbWFhZVVZXV1hWVFdVVFJQUlBPUE1LS05R
-UlBOT1FPUVNVVVdZXF1gZGdlYmNhYmFgX19cXlteXmFmZWVlZWVlaGlrZGNmZmZk
-ZGJiYmVmaWpnZGVkYmViZGdkY2NlZ2ltb25xcnFzcHB0dHJ1d3Z2dnZ4enh3dnp9
-fXh2d3p+e3h8f4CAf4SGiIeJi4iEgIGAgYKCgYSGiY2Qk5KUlpaXlpiboqGlpaeq
-p6inp6ekpaWjnpiVlJCMh36AeXh4eHt6fX+BgoKFio2MkJSVk5eZmJuWko+RkI6L
-jI+LhoeGhYJ6dHh6enmBh4eNjY+Qj5WVlJOVk5SWlJWVlJWYmZaYmJmXmqCgnJqa
-mJWPiHt2cG1oZGFiYl1cWFhaXVlYVVNQUVVUV1paWlpWUlBRVlRSUlNSUlVVU1JT
-VVZUU1RUU1NUVVJUVVRSUVRVVFJTUlJUUlVSUFJTVFVTVFNWVldWV1ZVVVdXVlNU
-VlZUVVVUVlZWWFhWVFVVVVhaVVdXW1pWVFVUU1dYV1hUVlZWWFtUWFxaWVpbWllY
-WVlZXFtaWmFeW1tfXVxcWldbWlpaWVpXW11bXFtaWFpaXFhVWlpYVldaWlpaWVpc
-XVxcXVtbXVlaWVpfXFtbXF1fYF9eXV5gX1xfYV9eXmFgXV1dXl9fYmFhX15eYF9g
-YmJhY2FfYWJhYF5fXV1eX2BgXFthYFpZWlpaW2B7u8vU2+Dk5ufq6urreHh5enh2
-dHFxcnF1eHZ2dXl5eHRxcHN3cXJ0dnl2c3V3eHh2dXV0cnJ1eHh3eHZ1dnZ2d3h4
-fHl3dnl4d3l6eHl7eXd7eHl4d3t6enl8fHt6e3t6e3t6eXd6enh5fHx9fXp9fH59
-enx9fHx9fHp3d3d5fH19fXx/gX98fX59e3x6eXl7eXd2dnV1c3JydHJycnNzdHR2
-cHBxc3ZzcHF3d3d4e317e3p5d3l5enp5eHp8fXx5eHp5enZ0dnh4dnd3dnh+d3Vy
-dHJydXR0c3JycW9yb21sbGtscG5raGlqa2lqZ2loZ2ZoZWNhYmJjZGBhY2FgXV9f
-Y19jY2FhYGNjYmVmZmVhYWBhYV5gYWBeYWBeYF9cXl1bYFtYVlFNSEVFQT5APj47
-Ozo6Oj1CRT4+Ozw6Ozw+PUBDQUFAQUREQ0xVYm95hYyMjpOSlZSRj4+OkJCTkZWZ
-mpqdnqKkoZ+blol4altWV1tWU1JQTlBRSUpEP0BDRERDP0NBQUNCQEVEQ0NCQkNF
-R0RER0ZIR0ZNU1BPUFdYXF1dXF5fXF1bWVxcV1ZUUlBPUlRWWFBUWVhaWVhaWFpX
-VldUUlRXU0xLS0xPU1JNT0xLTEpKSU5RTEtOUk5UWllTU1RRUlVUVVVXWVdVVF5e
-XF9oYl1iZWZna2ttbXNxc3Zzb3BtcG5rbWptbGdoaGRgX1xeWllYWFtcWlpeYWVd
-WFhcW1tZVFZaXV1aVlhZWVlaWFhZWlZWVVVXWmFeV1ZVVVVTVFJTVlZVVFRVU1VX
-V1haVlhZWlpVVVhZWFlaWFdXWFhWWFRUVFZXWFtcXlpaWVxaYVlaWFhXWltbX2Ba
-WVdXV1dWWVhWVFJQUlFQUVJPUU5LUFFQTk5PTE9UVFVXWllaWlpeYWBhYWJkXmFb
-V1dYW11bXWRmZGRoZ2llZGdpZWRiZWVnaWZkZWVoZWRlYmFhY2dlZGJkZGlpamts
-b21tb3Jwb3R2c3Ryc3N0dXN1cnV1eHt5eXd8e3Z0dXd5fYB+f4OGhoeHhoaGg4OC
-gIGDg4WGio6OjpOTkpGRmJ+fn6GjoaSmpqalpaGjo5+emZSTk5GIfnx5dnd4eXl5
-fn+Eg4KEhoqSk5OWm5ydnJmWkJORj46Lj5GKiYuNiYOAdnR6g4SCi5CQkpWSkpSU
-lZWVlZWUlZaUk5iWlpebmJKcm56inJydk42Gf3d1b2tlYmFiX1taWldYV1ZXVFRT
-VFRWV1hVVlRTVFNUUlFSVVNRU1RVVFRVVFNWV1VUVFRUU1JSUU9SU1hXVVdTVVNT
-VFdXVVZWVVdUU1JTVldZVVRXVlZUVlRVWFdUU1RTU1RTVlZYW1ZVVFRUUVhbVlVZ
-WVdVVlhWWFZTVFRWWVpZWVdYWllaW1pZWVhZWVdZXmFfW19eYl5cW1pbXltbWltc
-XFhYWVxYWVlaWllYW1paWllZWFlbWlxbW1lZWV9dXV1dW15eXlxZWV1fXl9eXl5e
-W1xeXl9dXl1dXVxdXWBhX19fW11gXF9iY2NiY2NiYmBfXF1dXFxdXl1dYV5cWlhb
-XV5dYIC7ytTc4OPl5+np6+t4d3V3dnR4dnRzc3R1dXl2dnd0dnZ4eXd3d3ZzdXV3
-dXZ3dnp6eHZ1eHZ3end0c3R2dHd4enl6e3p0d3h7d3l8e3t7fHl8enp5fX19eX18
-e36BfH19e3x6enp7eHp8enx9f3t9fnx8e3t+f4B+fXp7fX19fH5/f39+fn9+foB/
-fHh7e3t8fXl5eHVydXR0dXRycnN0cnJxcnd2eHZ2c3J1d3p8e317e3h6e3p8e3t7
-fHl8fHp7fHt7fHt5e3t3eXt8enh1dXh7d3R2dHNzc3JycnJwaW5tbWxscHBtbWxw
-cWppamloZ2ZmYmRkY2NjYmJhYWNlX19cXl9iYl9hY2JhYGFkZGNhYWFhYmRjYV9d
-X2BgX19hXFpZU09MR0RBPz8+QUFBQT86Njs6Ojw8QD0+Oz89PT1AP0NAQENFQ0VF
-SVJfb3l/iIqOkZGRjo2Pio2QkpKTk5SWm52bm56jo6GalYl5al1YV1VRUE9QTUtI
-R0VCQT4/PEBEQ0VEPUFEQ0NERkdFRkVCQUdEQ0dHS01OT1JQU1dcWlldYF9dW1db
-XV5bVFRUVFFSUlRPVFJVVVlZWVxdW1paWlhVVFZUUVBOUFNUU1NRUExJT1FTVFJO
-TVNTV1haWltZV1ZXV1xaVVVUU1VaXWBcXWdgYmRlaGppZmltcXV5eHRxbm9ycnRx
-bm5tbGhqampmY2BcXllYWV1bX2JkYl1bWVtdWlhXWVxbW1xaW1pYV1tbV1VXV1RW
-V1hcWltXWFZTVVZVVlJVVVZVVVVVWV5YWVhVVVVYWFdWVlhYWVhZWllZWFhWWVlY
-WFhYWVlZWllXWVxdXFpaWFpcW1xbWlpZWFdYW1paV1VVV1RSUE9RUlVTUlBRUVNR
-TUtLTVBSVFVaWFVWXFxcXWBlZmNiWlhVVltcXGFeYmlpZmZnZmZnaGZkZ2RiYmRk
-YmVpaWhkZWVkYmFgY2RlZmhna25rbGppb21wcG1wc3VxdHdzc3Jwb25xdHV2dXZ1
-eXx8enV0d3l4fXt/gYSEhYSIiYODg4OGgoOEgYaGi5GRkZKTkpWZnZ+cn6Gio6Wn
-pqSlpqakn5yamJaTjouFfn9/end2dHl7fYGGf4KJj5WXmpibnJeUl5qZlpSUk5GT
-lZOWk5GOioh+enyAhZWSk5KTlZiUlJSWlpCVlpOTk5WXmJeYmZyYmJyenJyfoJqS
-jYyCeXFpZWNgXV1dXFhXVlRXW1ZXVlRTUVVUVVdYV1RSUlVUUVNWVVRTVFNUU1RV
-VVRXVFVSU1VVUVNSVFRRU1NRU1haVFRTWFVXV1VVVVVXVFNUVVRTV1dUVFNVVllY
-WFhVVVVVVVNVVVVVV1ZXWFZYWFdYWVxYVVhYV1dXVlRWWFtXWVhZWFdZWlhZW1lZ
-WFdZV1laW15eW1xcW1lfYFxcXV5cXFpXWVJZV1ZZXFlZWFxbWllbWldaWFhZWFlb
-WVpXWVxeXlpbXFtbX15dXl9dX2BgXl1eX11eXltcXF1dX2FeX2BfXGJiXmJjZGBl
-Y2JgX2JgX15hYF9cXl1dXlxeXlpdW1xbXF1hgbbK1Nzg4+bo6Orq6nd4enl4enh6
-eXd0cnV3endzdnZ1dnp5eXh3eXZ3eXV2eHt8ent4eXt8eXh8fHd1dHRzdXh6fH18
-e3t8enl5eX55en2AfX17fXyAf395eH1/fXx6enp8fHx7fH19enp8fX5+fn19fH6A
-gIN+foF8e35+fH59fX19fHp9foB/gH98enp7fXx7fXp4d3Ryc3J1dHRycHFydXR0
-cnJ0dXZ2c3V3e3h3e3x7enp6fXx/fn19fn6AgX59eXt+e3t7fH17eXh2end7enp4
-enZ1dHJ1c3FvcHNtbm5vbXBxbm1wbmpramxsbGhkY2JlZWRlY2NfYGJhX19fXGBh
-YWBhYmRlY2NgY2VgY2JhYWJiY2NhX2BeXmBiYl9cWlJNS0hGQTw/PTg5Oz89PkA+
-Ozw/QT8+Qj9AQEA7PEBAREdEQUREQ0JFTFdoc32FiI2QkJOPjIiOkZKTlJOWl5iZ
-mpqcnaGlpp2Yk4dzZVtXVFJSUlNTTkxISENCRUVFQkE+P0JBQ0FAPUJFSkdFQ0NH
-QUFHRUlKTElKTFBQUlZbXmJlZWZhW1lbWllWV1lbWFZWV1NVVVZSVVVZXFpcXFxd
-W1pbW1hYVFFVWFhZV1ZUUE5QUFJTV1RVU1ZbV1JWV1lWV1pdXVtZV1ZVWVxYX2Vf
-ZGZfYGRpa21wb3JzeXV1eHh3e3d8dHJycHFxb2lqbGllYmFdW1tcXGFkZWFdW1ta
-VlhVVllZWVdYWFlZVlZcXl1aVVdYV1dYVVRWVVlcV1VTVFVUU1RYWVdVVVZVVVhW
-V1pXV1VVWVlWVVhYWVhZWVxZWFhZWlhWVVRXWlpcXVpaWFpZWVlbXVxaWVlZWFxe
-W1tZW1tXWFhUUVJTUlNUVVNTUFJUU1NOTU1NT1JUWVtZVVVXVltdXmFiX1xaWlZW
-VldYYWVkZmpqZmdqaW1qaGRiYGBjY2RoZ2dnZ2RlZGRiYGBgY2dpZ2hoZ2dqa2pr
-bm1vbHByc3NycnR0cm9vcG9wdnR2dXh4eXp6d3Z2enx7goGBg4eIh4aDg4ODgoF+
-f3+Bg4WJjY+QkZKWmZmenZ6ho6SioaKoqaOnqKeknpybm5ONiYWCf315eXh4e35+
-gX+DhYaOj5aamJeXlJeZmpmVkpWWlZWYnp6dko2Lh4J9fIGFi46QlJKUl5SamJiV
-lZKSk5OTlpSVl5ycmZiYlpafpqSgnZmQjIR8c21mZWNiYFlUVFZXWFVXV1ZVU1NU
-UlFUU1JSU1JTVFJUU1FSVFNRUFJUU1NVVFhUVFVSU1ZUUlNUVFNQVFVWVlVUVFVV
-V1ZUVVVWWlpYV1VUV1hVU1NTVVZVV1dXV1dWVlhYVVhYVlRYWFZXVldWVldZVlhW
-VlZVVFVZWFhZWVdWV1hXVldaWlpXVldZW1lZXmBYWl1bV1lZXFxaXFpaWVxcW1lZ
-WVlXV1hXXF1YWltaWlhaW1taWlpZWFZXWVlaWV1dXFpbXFxcX2BeXmBeXlpbWVtg
-YVxeWl5hYWBgYmNhYGFfXWFgYV9kZWRkYl5eXV1eYWFgXl5eXltZWFZUWV1bWVpe
-YGR/t8nU3OHj5ujq6evrfX58eHp6end3e3d1dXd3d3V2eHp5eHh3dnR2dnZ1d3l6
-e317eHl2eXt6ent7e3t4d3l4ent6d3t8fn17en59fXt7en2Afn18fXx+f39+fn1/
-fn17en2AfIF/e3h1eH58gH+Ae3x8fH2DgX9/g4J+fX19fX99f316eXt+fn+Af4B/
-fXx8f317d3d3enV4dnR3c3N0dHJzdnl1cHB0c3R1d3d4e3t5e36Afn98fn1/f4F+
-fn18fnx9goF/enp9fHt6enp7f3x/fHh2dXd3d3d0cnJzc3FvcHJzcnBzcnBtaWps
-bGxoaWlmZGRlZGNjYmBgX2BdX19fX2JhX19jYmJlZmVhX2BhZmRmZWdkYl1fXlxd
-X2JjYF1aUEpIRUNAPzw7QTw/Pz0+QEE/PD1FRD46PD1BQkA9PT9BRURDRERCQERK
-VWJrd4CIjJGPj5CNh4qPkJKPk5SVlJWYmZufoKKioaCWkYN0Z11aV1lZVVFPS0lG
-Q0RGQkZGQUBAQUE+QkJBQEJCQz9BQ0NCQ0JBSEtLR0lMS01SWF5gZGRiZGJeXFxX
-VVdcWlpXWFdWWFpZV1ZWVldYXF1dXGBiX2BeXVpaVlhbWVpZVFRTUVBRU1NYWFNV
-VVpbVVlcWllWW1xdWltbXlxfWllbZGVpamRiZmtvdXR2eHx9e32BgX55dndzc3Nz
-cW5wbm1ta2lmYl9iXl5kYmNnYlxdWlhbWFhaWFhYV1hZWFlZWllYWlpVWVhXVldX
-WFJVVlpZWVdVVlZWVVNWWFhYVVJSUVNVWlhXVVZVV1dUVFhWVlZZWVtXVVhaWFhZ
-V1dZWVpfW1pbWVpbXVxbWFlaWVpdXmBhXF5bXF9bWVZWU1JVVFdRUlFRVlNSUVNM
-TlJQUlRZWVxZWFhbXVtZWlpcW1haU1BSVlphZWRkY2dlZmVnaGZhXmBgY2NjY2Zl
-ZWRlYmBiYmBdXWJiYWNmZmZpaGdnbWxvb25wcG9wcHJzdHR2cnBrb3J0dHR0d3d2
-dnp8fHt9f3qBgoKAhYqHg4GBf39+gYF/f3+EhIaKkJOXl5mbnaCin5ygoqCgo6Wm
-paSlpqOenJualZCMhoF+fXt4enp7f319f4OGhIWFiIyRk5WYkpSYlpSUkpWamJ2j
-oZqYkImCgoSFiImOkJWSkpSVl5eWlZmamZOQlZOUk5aYmJiZm5iUlJqeoqGdmI+I
-gXlybWloYF1cXFNSU1dYV1VTU1FSUVBSUVBSVFBSU1JSVFRTVFJQUFJWVFRSU1VV
-VFVVVFNQUFVVU1JSU1JUV1ZXU1RTUlRTVFRWVFRYV1dUVFRUU1JSVVJTV1dWV1hY
-V1hYV1pZVlZVVFZVWFZUWFdXV1RVW1pUVldXVldZXlpYVlNVVFVYWVhXV1xcWFhZ
-WlxeVllbXFpcW1paWVlZWVlZWVpZWFhbWFhZWFVYWFlaW1lZV1lZW1pbWFhWV1hY
-W1xaW1xcWllaWlxfXV5dXF9gXlpdYl9cXV9cW1xiYF9hX15fYF9hYWBjYmJjZGRi
-YGFiY2BiY15eXF1eXVlYV1hZXFlaW19dYIG6y9Xc4OTm6Oro7Ox7fXx6eXp4eXp7
-fnx5e3x5enl7e3d4dHd6d3l4d3t5fH17e3t4e3t8enh5eXp8e3t7fH58e3h5en1+
-fn17gYSAgYGAfnx6fX6Bgn59f4B/gH59f35+fYCEg4GBgoB+fnx/foB/fn6Bf3+A
-goKBf4KAfn59gIKCgoGEf3x9fX9/fYGDgoR+fHt4d3p5eHZ2dnd4dHR3dXR0d3d1
-dnZ2eHl3d3d7fHt5eHyDgIF9fYGBfn18ent8fX2AfXt+e398e3x7eHp8gHx4d3V5
-eXh1dXJydHJ2dHBvbm9xdHVzcW9saWhpZmRmZmZmY2RjZGBjZWdmZGBhYF9gYmNh
-YmNjZWRmZWVlZ2VjZWRiaGRgYmFgYWJiYWBfXFFKRkRCQD0/PD09QEM/RUJFQkFA
-PTs5Ozw8QUFAPD4+QEJDRUFBQkBCREdMVmNwe4WJioqOkI6NkJCQkZGNkZSWmJqc
-nJyioaCnpZ+akYRzZVpZV1VUUE1MS0lERkRGSE5GQkZBR0NAQEBCQ0ZGRkBBQkNC
-RENFRERGTE1OUFNWXGFlZGZmY2JjX1dWXF5dWV1XU1VaW1taWVhZVlZXWl1hYGFg
-YV5bWVVWWVtdWlZZWVtbWVZWVVhVVFZWWV5cW11aW1paX11bXl1gYFxdXmFmaWpq
-aGJnbXN1dXR3dn+CfH5/e3Z2dHV0dnhzdXV1dnJsa2xmZGVhY2NjZGJjXVlfXlpb
-W1laVldXV1hWVVZXWFpYWFdZWVhaWFhZWVZXWFZXVVJWWFhWV1RVV1dVVVVXVVZZ
-WVtbXFpaWVhXVFJVVVdYVlddWllZWlpdXFtZW1xbWlhXXF5dXFlZXVlZXF5cXWJl
-Y2BbW15aWldXVFZVUU9PUFFSUlBRUUxJTU9RV1dZXFtYVVlbWVdWW1dTVVNaVFFV
-WV9jYmBhX2FiZ2ZhYmZkYGFiY2NiZGpnZGRkYGRgXF1fYWRiYWNmZ2hoaWlrbXBw
-bW9vcHJycHRzdHV0dHJycnFxcXR0dnN0eHp7fX59fXyAg4ODhISBhIN+fH+BgIB7
-fYCEhYeMkpidnJubnp6goZ+hoaOlo6KlpaKjpKGcmpmZlJGPh397eXt8enl5fHyB
-gYCAgoqKi5GUlZmZmpmXlZaYmJmbnp+dmpiOhYGDhIeNjZCRkpKSk5iZmZeYmqCa
-l5WWlpSTk5eZmZqalZeXmqKjnpyUkYqAdnNtZ2ZeWlxZWFdYVVRWV1hUVVNUU1JS
-U1NSU1JUVFVXVVVSUU5QUlRTVVNRUlJRVlNRUlFRUE9QUlNSUlNSV1VWV1VVVlpX
-VFVTUlFVVVRZWFRXWlhVVVJUVFZXVVVUV1hWVlVWWlpYVlZRVVhXV1dYVVdVVFdX
-WFlXV1dZWFdVWFRWV1hZXFdaWVtZWV5bX1lYWFpaW15dYV1aV1pcW1tbV1lbWlZX
-WVlYWVZYWVpZV1ZYWFpaV1lYWVhXWVpdWVhaWlxdXFtbWlxdXF5dXl9eXl1dXVta
-XF1cW19fXl5cYGBeYWJkYF9gYWRmY2ZkZGRkY2JiYmFfXl1eXFpYWVlYWVhWV1xf
-hL3M1Nzg5Obn6unr7H5+fHp4eXt6eHp8f317fX59enh2dXd0dHh7e3p7fHp4enx5
-ent5en19fXp3eHh3en99fHp/fH19e3p9gIGCgn5+goCBfX1+gIGAfX+Agn6BgoKA
-gIOEg4KBg4OChoKAgYGCgH6BhIOBf4GDhIGEiIZ+fn+Dh4SBhYiHg4GAfn5+gYKC
-f4B/fXt4eXh5eHl2dHR3dXVydnRzdHR4eHp4dnt9f39+fHqBgYSDgoSDgX5+fn1+
-f39+fHx8fn99fn5+fn13d3d5eX5+eXh4d3R3dnV0eXZ3dnd1cnJ1dHJxcGpra2lr
-amdpaGFhZmdkZ2NiXV5fYF9lY2BiYmJlZ2VkYmhoaGZmZ2VmZWJkY2RmZWJeX15e
-XVlSTEtHQkA/PkA+QDw+PkBCQTo8Pj48PDw8Oz49PT89Ozs9PkFAPD9BQ0NIS0tT
-X2l0gISJjI2MjouPjpKSkZKSk5WSmJuenKCfoaGko5+VjIN0ZF5ZWFVVUU5OSktJ
-R0dJSkpGRkFCQT9APz5AR0ZFQkBBQ0JDRENERkZJUFBTU1ZaYWFiZGRmZ2ZiX1pb
-XV5ZWlZSU1daW1xaWllZV1laXFxeXF9fXV1dWlpbXWBcXFdaXl9dYmBdWFxYV1Va
-X15eXl9eW19iWltfZGJkZmZkZWRobG1oZGVob3F0cnR6f359enx5eXh3d3d1c3R2
-d3x3cHFtbmlmaGtnZ2ZlY2JgWltcXFpaXFlXWldaWFhXWVlWVFdaWlNXW1dVV1ld
-WVZWV1lZVlRWWVpaWFdXV1dUWFlXV1taW1dYV1dVVVRWVlZVVldXVVhaWVlZWVpZ
-WlxaWVlYWFRaW1pYWVpZXF9jXmFiYWVkX19dW1tcWFhWU05PTU1QVFNWUE9NSElN
-TU9RVVhZWltXU1RWVFJWVFFRV1dUVVRYYWFgX19dW19hYmZlZmloaGRmZGVlaWdk
-X19fYGFjX19gZWVkZGZnaG5tbGxscnBubm5xdHR1eHVwcW5vcHN1c3R2dnZ3dnd2
-eXt7e3yBgYGBgYGEhYeJhIKCg4ODgn99gICGiYyRmJeamJmeoKKioqKlpKakoaGi
-o6Wmop6cnJmXkY2JhXx5eXt6d3h4e36CgYSAgoiOkZOXl5eZmpmYmZycnZ6hoaCe
-lY2Ig4OKjJCQkpSVk5WUl5aWmZ+cmpqZl5WXlZSXnJmZmpaQlJqeop+cnJyQjIN4
-cm9lYWBZW1tZVVRTV1dXV1dXUlRSVFRST1NYV1NTVlpVVVRSUlBRVFVSVFJPUVJT
-UlJUUVFQUVNSUVFRU1NVWFVTUlJSU1NSU1RVV1JSU1BTVlhXV1dUVFNUVVlXVFVW
-WFZWVlNUVFVUVVhVWFddWFdXVVRYWlhWWVdVVldYVlhYW1dXVlZYWVhaW11fWllc
-VldWWFhaX1tcYFxZWFlZWVlZVldWVVRWVFRUV1hXV1ZYV1hYWFlaW11bV1dXWFhc
-W1xdXFxbX19bXltaW15aWl5dXVtbWllbWltbXFxaXV1fX2FfX2JfX2FjZGRjY2Jg
-YGNjYmBiYmJfXlxdXFlbW1lZWVdbWmKHvMvU3OHj5ufo6Ovrf316fHx6eXx/e317
-eHl6fn17eXt6end3dHl7e4B9ent7e396enp6fXt8f39/fnt8fYGAgX59fX5+f4B+
-gIKAhIKCgYCCg4B/gX+AgoKAf4CBgoF8gIKDg3+Bf4KBgYSCgYSAgIOBgX5/goGB
-gYCDhoeHhYWDgIKDg4WGhYKBgIJ4fIKAgoF/fXx4eHd0dnV2d3h5eXp4cnNzdnV2
-eXl7fXx+fX+Hh4KBgIKEgoF/f4J/fXyAf3x+fn6BgoB+fH97eXp6fH18fHx7eHd4
-dXh8eXd2dHd6eXd1dnV1dW9vcG1tamhoZ2ZhY2VkZGJjYF5fX2FhYWJgaGVlZmdm
-ZmZmZ2VlaWZiZGZlZGJkYWBeYGBgXlpYU01HRUE9PTxDPEBBRD4/QT87PD09QEA7
-PD06PDs9PDw8OkE7PT9APj9BQ0NIS05bZ3J/homPkY6Qj4yRlJOTlJWUkY+XmJiZ
-mpyeoaGko52WjYF1Z1xUUk9QUlBPSEZHR0tIRUNGSENBP0FEPz5DRUhGQUE9QURF
-QkRIS0tLS09VVVpfY2RkZWRiY2llWlpbW1taWFhVV1haWlxeYF9eWlxdXF9gYmJk
-YV9jYmFeYWBfWlhdX1ldZGBcW2FeXF5iXV9iYGJjZmFhY2NpaWhtbmxlZ2draGlp
-Z2pobHR0cnN1d3d3d3h9enZ3end3fHp6en16dXFwb2xva21ra2hnY15dWltbWllX
-VVZXWFlaWVhVVVdWWlhZWVZXWFxYV1VSUVNWWFhXVFJVVFdWUlFWV1ZXVldVVVZX
-WVdXVVVVWFhWV1lYVVdbXFlaWltcWFdZXVtWWVhYWFVYWlpbWlteX2NfXl1dXl5h
-YV9dW1xeWldTUk1PUFJWVFRQUEtKSktNUFBRVVVYWFdaVFNUVFNRT01PUFBSVVdc
-Xl5fXFpXWF5gZWRjZmpoZWNkZWZmaGZiYl1fXFpeX2FmaGZlZWVjZWhlZ2xqa29u
-cXJwcXN0cHFzd3Btb3JxcnN0dnR4dXd5enp9fnt8fX+EhIeEg4aKiYeDhISFg4CB
-gYKFiouPkpeYl5mdoaOko6SlpaSioKCioJ+fn5+cmZiUjoqFg315eHh6enx5fICC
-g4WFhomMkJSXm5ucnJyblpqin56cnJqXjoeGhYiPjpCSkJGUlZWTlpeZmpeYm5uY
-lZKRlpqZmZ6alpGRlZ2fnp+bmpGIgXlxbGRfW11bV1dVVVRWW1lYVFZUVFNTUlRS
-UFNVVlJRU1VUVFNTUlFSVFNTUlFSU1FQUFNSU1NSVFNSUlFUVVZST05TVlRTUlVR
-U1VUUVRVVlNTVlZYWFZWVFVWWFdXVlRTVVhXVVRWWFhWVVlWV1dWV1VXVFZaW1hW
-V1ZYVlZVVldZWFRWVVlaW1xZWltWWVxdWllYV1hYWFpdXFlZWltdW1xZVFZWUlRW
-VFZVV1ZWVlpdWlpYV1RXW1tZWFhZWV1dWVtZXFtdXV1dW1tbW1pfXVxaWFpfXlld
-X19aXV5cXVxcXmFgYWFgYmNiYWJiYWNiYWFgZGBhYGBeXF5eX11cWFpaW1tbYoK4
-ytTb4OPl5+jp6up+enp8fH58f398eXl4eHZ4eX6BgX99fnx6d3t+f358fX18eXt4
-eXx8f4CCf36BgYCAgYB+gIF/gYGAgXl6e31+gIB/fn+CgoGChYSDhoSDgn+CgX9/
-gYOBgX99foKFgoCCg4KChIB/f4F+goSEg4SDhIqJhoWHhIGBgoGChYKEgoGAgYSB
-g4F+fHp2eXl7endzc3R1c3J1dHR1dXZ7fn+AfHx9fYKDgn5/gH+DhIB/gIKEf319
-gH59goCCgX6Afnx8fn17fH16eHp8eHp7fHl5dnh6d3V1eXl1cnF1dHNwamxsbmxq
-ZmhnZmVjYV5eYWFeYWNjYFxgZWRlZWZkZmhmY2NlY2JiZGRkYmFbX11bYFxaVlBL
-SEI8Oz9APzw8PEBDRkVEPkI+PEE/PD9APj09PkA6PDs7PkBAPT0+Q0NDRUhLTFNg
-bXeDhoaMkpKPj4+SlpWYlpWUlpSRlJaXmZ2goKOnpqWZkIJwYFlUVFVVUE1KRkhH
-SEhEQDxER0ZCQEJEQkNERUVDQUBBQ0NDQ0ZKS1BUTlBUWl5iYmBlZWVjZWJcWF9b
-XFtWV1VXWVlbW1xiYWJhXGBgXmBkY2NkY2NjZGNkZGBiX11dXV9lYmNiYWBjYmNe
-ZWNiZ2hpYmFjZ2dmZGZrcGtmZWdmbWtsbXFzc3N0eXp8fXt7fHx7en14eHp9f399
-fX58eHFtcnNzcXFta2tpXVlaXF5cW1RYWlZWV1hbWlZWV1lZWFlZWFhaWVdWVVVW
-VVVXVFdYWlZVVFVTVFNVVldXV1VWVVRVVldYV1pcW1xZWVlWV1hcW1pYWVpaV1ld
-XFpYWl9ZVlVaXFtZWlxfYWRgXl1hYmJfYWFeW1pZVlRQUFJTVFZUU1JOSU1NTE1N
-TVBTV1ZSVFZWVlBTVFVTT0tOT1BRVVxfXFtbWVhVV1xjZGNkYmdpZWVoZmZoZWJf
-XlteXV9jZGVjZGVnZ2dlZmRmZmhpbG9vcHFwb3Fxb3Bwb3FvcW9xdXZzc3N2dXd4
-eHl8e3x7foKFhYOAgoiIg4OGg4WEgH5/goGHiY+SmJiXmZucm5ygnKGkoqKhoZ+e
-nJ6fo56amZSUj4uHg4B6e3p9e3p+g3+BhoaChIiLkJCWmZyhoZ6enaGkop6ak5CM
-jImMlJOTkpCSlZaVlpaWl5iWk5SUlZiXlJSXlpqam5ualJWXnqOknpyXkoV9d3Bp
-Z2JcWVZXVFVYWFhXV1hWVlhTVFNSUFBRU1NTUFBTU1NRUFBRU1JTU1RVVFNQTk9Q
-U1RVVlZXVVNUUlJUVVRWVFFSVVVVUVFSVFNTU1lVVlVTUlVVV1VUVlVUV1hXVVVU
-VVdZV1VTU1RZW1ZTVlRVU1ZZWlhaV1RYWVNSVltYWVlZWFZZWFhbWFhWVFpbXF9a
-W1pcW1xZW1pXWFtbW1pZWVtcW1lYVllYWFVXVVZWVllZWVdXV1laWFtZWFpbWlla
-Xl1aWltgXV1bXFxbX1tcXFxdXF9eW1tdXl5eYV5eX2BeW2BgY19gYmFhX2BfYmFi
-YWFhX19eXF5dXV1dX1xcWllbXVxif7XJ09vg4+bn5+jq6X98eXh6fX9+eXqAe3t6
-fX18fn9/e3t9f319f356fX19e3p4eXZ6enp7fYKAgIF9gIKDgX6AgYSCfYB+f359
-foB/fX1/gYKDhIODhoSBgoSAgIGBgoKBg4SDg4GCg4OAhIaDhoSEgoODhIWEiIWE
-hYWHiYODgoSEhYOEhYODhYWFhISAf35+foB+enp6eXp9eHV1c3R1d3d0dXR3eXp9
-fXx6fH9+gIGDhYSCf4OCgoF/gYaCf4B+f35+fX6Af35+f317e35/fHt6eXx+e3p6
-ent8enl4eHl5dnJycnN0cnBycW1tbGlqamdlZmNfYWRiY2VlY2NiYmdiY2RkZGRm
-aWdnZmRmZGNiX19fXV5cXFtVVFJQTEZFQUBCPUFARUBAQUFFRUJCQEA/PDw8PD08
-O0dJPT48PD4+PEBBPT8/RUZFSUhLU1lmcH2EhIWPkZKPjpKSlpWUlJKTk5KRlpiY
-l5yeoaWnqJ6VjoJyYVlXVVJPSEhIS0dCRkRBPT5HSkM9QUNBQUFGSUZFR0RDREZG
-RURLS1FOUlJTWl1gX2BgYmRjXVtaWlhaW1tTWFlZWF5gYF9hYWVkYWFhZmplYmRl
-Y2ZiZGRkY2llYGFdXl9gYmNpZmJhZ2NkZWRpa2xrZGZpamhiZGpsamdianF1dm9v
-cXRzdHp6eoGCgIB/fHt6fICAgYSGhX5+enp7fHt5d3dzc3BvbmdeW11cW11XV1dX
-WFhXWF5bW1paV1hYWlhWWFZXWFVYWFdYVFNTUFFVWFhXVFRTU1ZWVFVZV1hWVlZV
-WVhcWlhZWVlVWFdYWlpZWVVXVVdWV1haW1lXVVRWVlhZW15eXF1eYWNlY2JlZmdh
-X15aWlhTUFFNTlJUVVZYVFVRUE9NSUtJS05PU1JRUVNTTk9TVVRRTkxNUllaWldW
-XF9ZXFhXWl1fXV5dYmRlZ2ZlZGJeXl1aXl1hYmFhYWRmZ2hnaGhoamRlaWhnampo
-a25tbWxucHBvcGxtb29wc3NzdXt5d3l4dHp8fH2CgICBgYKCh4aFhYiFhISBf36A
-g4aLjZGTlJOZmZeZmp+dnqKko6Gfn6CdoqWioaGfmZKOiYiAfn57fXp/iH98fICC
-hIeHh4iNjo+Wm6Gip6SioqKgmpiRjImJjZGVmJWSkZSWlZeal5aWl5WTlpWXmJeX
-lpWXmZiZnJeTk5SanJ6fmpSMiIB2cGpjXltZXllYVlhYV1RUVlZSVVZWWFdWVFdT
-VFBRUVBTVFJWUFBPTVFVV1hYVVZUU1JSV1NTU1ZVVFRRU1NTUlRUU1RXV1VTVVRV
-VFVTU1VTUVNXVlhWV1ZUVldVVVVVVFZUVFVVVFNUU1ZWVlNRT1FUVldXWFdXWlVV
-VldYV1lWV1haWldWVldWW1xaWVxaWl5YWFxdWltbWVpcWVpYV1hZWltYV1hbVFZa
-W1pXWVpYVldaXFhbWFlcV1dXW1pZV1pbXFtbWlxaW1hbXF1eXVxbWltfXl9eXF5c
-XV5dXV1fYGNhXV9hYGJiX15gYGFiYGFjYWNeXVxbWlxdXV9bWVpbWlxdW2aEtsrU
-2t/i5Obn5+jpf3t6e399f3p9fX+Afn1/fYKAfHl9fYCCgH5/fYF/fX58fHp7fX57
-fH99gYKCfn6BgoCAgn+Agn5/gH5/fX1/fn19foGAhIWDhIKFg4KAhIeDg4F+g4KA
-g4SDg4OCgIKBgISEgoCDg4OEg4WDhISEgYSCg4aCg4OFgYSHiIWHhoaFhYaDgHx7
-fX5/fHp6eXp4cXR1dnV3d3h6d3V8ent6e31+gIGBgYODgH6AgIGFhIJ/gIGCgH+D
-hIJ+f4KDgYKAgoOGgH18fn58eXl5fHp5enl5dnd3eXl1dHFxcXNzc3RydXFwbGlo
-amllZWViYmNjZGJjYWNnY2VkZGBhZWNkZGZmYmRmYV5dXlxYWltWUU5NTEpJSEVD
-Qj9CQUJDQ0NAQUJAQkFDQkI7Oz9AOTk9VFE7Oz4+PD9CP0RCPj4/RENDR0lOVmJr
-dYGFiIuNkI+PkJKUlJKTk5GSlJOUlpaVmpqfoqajnJqZjoNxZFtVU09PSktISEdE
-RkZEQUZMRD1AQkRCQUJGRkZFQkNGSEtIRkhITEtKUVNSWV9iYF1fYWBeWVxeWFpY
-VlVZW1xYW2JiX2BiY2BfZGNnaWdkZGZlY2JjZWhta2xqZWJfZmRiZGZhZWlrYV5k
-aW1ubWtnbW5va2hlamttbmxzdXF0eHVxd3p6f4B+hoOBgoCAgX99gYKFh4aEgX15
-eX1+gH1+eXV0c3BuZGFdXVtYWFhYWVZUV1laWltZX1lZWFpcWVlXWVZXV1dYWFlU
-UFFTVFNWWVZVVlRVWVZVU1NWVlhWWVtaWFdUV1haWl1bWVlbWl1cW1dWV1lZV1la
-V1ZWU1hZXFpeXFxbXlxfY2FiYF9kYWFfYGVYWFhST1FOVVdTVFRWU1JNTUtKSklK
-S1JSVFFQU1BQTE1RUk9NS05SVVVZWlhYWFtaWVZZXV5eXlxaYGRlZWdlX2BdXl5f
-Yl5fXl9eYGRjZmlmZmdnZ2dlYmRlaGpubm9taWxucG9ub25sb3FwcXJycnR3e3x4
-d3p6fX1+foKBhIiIiYyHhIKDgoB/gYSBg4iMjY6QkpaWlpebnZ6hn6Cfn5yfoaWi
-pKejoJ2XlZGNi4mDfXl7fIGBfXx+f4KGiYqLioyQk5ean6GlpKWnn52Yko6KiYiM
-kZaZmZeVlJmYlpWUl5mTkZWZmJaTk5OXlpudn52ZlJOSlJmcnqCalZGKhHxzbmhh
-XF5fW1dYVVVVVFNWVlJSU1ZVVFZVU1JSVFROUVNUUlJRUVBTU1RYVVJTVFVSUk9S
-UFNTVFJRUVRTVVVXU1VUU1JSVVdWVlRUWFRTU1JTVVZWVVZWV1dXVVRVWFZTU1NU
-VlVTVldXV1VRUlNSVVVXWVhYWVpYVVdaV1ZXWFpYWFlXWFhcWlpaWlpWWFpZXV1V
-V1hYW1xbXlxbWFlYWllaWVhYWFdWVFZYWVpaV1hYWFhZWV5ZVVlZV1paXFtZWFxe
-W1pZVlhZXFtbXFtcW1taW1xbX2BdXV9eXVxfXmBeX2BiY2VfXFtcYGFiYGFiYV9g
-X11eXFxbWVpbYllaV1laWF1gZn64ytTc3+Lk5ufo6Ol+e3x6fH+AgH+CgoCAfH1/
-gYB+gnx9fHx/gH2Bfn5/gIB+f3x+fn5+fn+Ag359foGDhYGCg4SAgH+BgYCBfoGA
-gYGDgIJ+goWEhIKBgYGEgoB+dm1/goSAg4KAgYKBgX+BhIGBhoSEiYh/f4SGhYaF
-hYaGiYKDgIKDgYKEhYSFhYWEg39+f359gH58fHp6d3N4eHd2d3d2eHZ4eHh4en2A
-fX2AgoCBg4eCgoCDg4aBgYCAgYOBhICAfn+AgoWCg4B/goKBgYR/gYB/fH57enp7
-fXp4enp3d3dycXNzdHV0cW5wcm5saWlqaGlmZGJkYmVoaGdlZGRiaGpjZGJjY2Vj
-Y2BlYmBfXFlYUk9QUlFOSEdGRUVFQ0RBRUNDPEFBQ0I9QD86PkA6PT4+PDo4Ojo5
-Ozo/Pzw7PT08QkA/QEFDQ0VFR0xQWmZve4SGiouPjoyPkJGTk5aYlZKSkZSWlZWY
-oaSko6OhoZ+XjX5vYVhTVFFNTkhJQ0NEREdEQkJEPz1CQ0FERUVHR0RDQ0JCRUVJ
-SE9LTkpOT1FVXl5eX15dX19fW1xZU1FVWFZbXF1eX15hYWJjYmNoaGZoZ2VlaGVl
-YWFnbG9xcXBsZ2VlZGhnaGZocGpnZ250bmloaWpsdXNxb2xtbG1ydHdybXh6fHV1
-en19f4KDhYWCgYeFh4aIiIeIhoWBgX98fnx/fX18eXV0b2xpYV1ZWFdZXl1bWlZZ
-WlpXW1laXFpXV1hXWVpZWVlaWFdVWVdWVFNVVFdYWFVTVFNUV1hXV1hbW1lWWFtb
-WFlbVlRYWlpZWVpbWlpZV1VYW1taWlZVWVlWV1hZV1xeX15bW11hYWJfY19iYF1h
-c1taVVFPT09QV1VZWFJSUk5OS0hKTElNTVJSUFJST0xMTE9QS0dLS1BPUlZaWFdU
-WFhXVllcYV1bXl5bX2FkYWJhYGBgYmViYGBeYWZnaGdoZ2hnaGhnZ2ZnamVqbWxw
-cWxtbG5vb21tb29vcXBvcXNzdnZ0d3l5enl4en19foGFh4WIhISFgoOCgICAgoGB
-gomMj5CSlZmZm52eoJ+enp6foaChnZ2ampuZmJqWlI6Hg399eXl7fXx4fX6BgYWF
-hImKjJGWmZybnKKkp6SemJSPjIyMjI+UmJiamZeZmJiYmJiOlZSVlJeTkpSTlpmc
-nJuanJuVk5mZmZ6fnpmSj4uFem5sZV5cW1hYVFNRU1ZXVlZUVVRTU1NUVlZXU1RT
-VFJPUFNTUlFTUE9PU1RXV1hWU1JSU1FRUVNSV1VSVlNRUlRUUlNTU1FWWFhVUlJT
-VVZUUVFSVFZYWFdbVlVWVFNWVVZSUlVVVFVUUVJWVFZUVFhUU1VVV1hWVllZWlha
-WFlcWlxYWFpXV1hZWlxaWlhWVlxcXFhXW1paXV5hXl1aXFpXVlpcWllaWlxbWFpZ
-WVtaVVdXWVlbWlhaW15bXFxYWVpcW1lfXVlaWllbXFtaXFxcXVpZXFxdW15dXVxf
-W1xgYV5eYGJfX2BeXF1gYWBfZGJiY2FfXF9fXVxbW1taWlhbWFdYVllghLvL1Nvf
-4+Xm6Ojq6YB/fX18fn+EhYODgYKCf35+fn6Af3t8gIGCgIB+gIB+e35+gHx/f39/
-gH1+fH9+gYCAgICDgoB+gH2Bg4OAgoOCgICDf4CDg4KDgoKBf4KBgX9+fH+AgoGB
-hIGDg4KChYWEgH+CgH6Ef36Bg4aDg4eFhYaFiouEhIOCgYSFhYSDhYaDgX5/f4F9
-f318eXt4d3V0d3Z3d3R4eXp8eXh5fYCBgYB+gIGGhYWGhIWDgYR/g4GDhIKBgH9/
-hoODgIB+foCCgoCEg357f357fH9/fHx7ent4dnp6d3h2c3J0dnZ2cm5raGhqaWpo
-Z2VlZ2NjZGVlYmRoZ2hqZWRkaGNiZWJdW1pYVVZWUE1LR0hIQkRDQ0NBREZERENE
-RURGRUVCQ0A+PUA/Pz5BPT9APDw9QUE6Pz8/PEM+PT8/QEI9PUBCR0lITVJZYm56
-homKjo+Oj5KRk5OSkpOWkpCRkpSanpqbnqCjoqKfn56YjnxvXVZRT1RPTUdIRUVG
-RUZAQ0ZEQkBDQkJCR0dGRkFAQkBCREZHRkVISEtNUVFSVVtfX1tcX19YVlhYVlVV
-WFdVVVdaXlxcXmBgYmZmZ2lkZmttbGZmaWlxcXNwbm1sZmdqbG5qamltb25ucnVy
-cXJycW5uc3N0dnh0c3h9e3dwe3t5fX17e3p5e3+EiIiKioiJjY6Pj4yIiIKCg4OA
-fn+CgH1/fHZybWVeW1pXWVdYV1hYWFtdXFpWWFhZWFlZWFtbWFhXWVtXVFdXVVhW
-WlhZWldWUlNUVlVVV15bV1dXV1dXWF5cWFhXW1hbWVdYWVdWVVRWVlhZWldZWFlb
-WlpYV1haWFpaV1leYGFgYWBgYlxiXlxbXVpZVE9OU1dUWllXVVFNTExIRkdGS0xL
-TkxOS0xMTkhKTEtISUpJS01SVFRUVFhYVFJWVltdWltdWllYWlhZW1pdYVxfX19i
-YWJkYmJhY2RlZGRnZWVnaWhoZ2ttcHFubm1udHFva2prbWxtcXJub3ByeXZ2dXh5
-dXd8fH1/goOEg4SEgoKDhIODg4aBgoKCg4aMk4+QmJqdn6SdnJ2fnZudn5uampqc
-mJaXlZeWkY6IhIJ7enp6eXh7gH5+goKBho2Qk5WZm52cmZ6cm5eTjIqKio6PlJeX
-mpyalpmZlpSSk5SVlZWUk5GPkJWYnZycnJubmJSVl5qanZ6dmZSPjIqAc2tkXl1a
-VVJUVFhbXFhWVVZTUlNUUVJTUlJSU1RVVlVSUlJUUlBSUVRRVFNXVFdUVVdVVFFR
-UVVTVlNXU1BPVFRQU1JWV1RXVFRVVFNSUlNSVFNSUlRUV1dWVFVWUFJVV1dTVVVU
-VVNUU1RWVlZWVVVWU1NUVFVWWVZYW1xZVlhYVlhYWFhZWVlXV1dYWlhZWlxcXVpY
-WVpcXVxbWV5dXFpZV1dZWVxeXVtXWFtaWVlVV1RVWFdUVlVZW1xdX1taWlpXV1he
-XVpcXFpaXFpaWFxfYVtdXV9eX2FfXV9bXV5cXF1fX2BjYmBdX2JjY2JhYWBhYl5f
-YF9cXVxZW11cWllZW1lXWWKJuszV29/j5efp6Onqf4F+fXx/fn+BgX5/f4GCg358
-fX18en6CgoKBf4CBgH1+fHt7foCBgoJ/fn+BgIF/fn+CgoODgoF/gIKDgYKChIGB
-gYOEgoOCg4OChIh/f4F/gIGCgYKAf4KEgoGBgYB+gYOEhYCBhIWBgoOGg4aFg4SD
-goOJgYCEhYOEhYaFgoWFhISBfoGBgoB/fnx8d3l5eHhydHh3e3Z5e3l7e3t6gICA
-f4CCgoKAg4iBg4N/g4OBgoGAe3yBf4CCg4KCgoCCg4WAgXx5fH2BgX9+fX19fH17
-en55dnZ2d3d1cXJ0c3FwbW1rbGtpa2hmZmdlZGVkY2NiY2RmZ2VkZGZjX2BeWVhW
-UVFQTEpKRUdGQj49QERCQ0ZERUFFRUZFSUlJRUFAQUFCQz5AQUFBQD0/QEBAQEA/
-PUA/PEBBQEJBQUBBQEJDRkdITFFdZm57hIuRj5OUkpOUlJWSlpeVkZGUmJeYl5eY
-nJ2eoaSopKGYjIFvX1NSUFNQTUVDSElFQ0dBRUNGQUJFQ0RERUJAQUNEREZGRkVE
-Q0ZKS0tNT1BUWFteWl5fW1dVUlZWVFVVUlRVVFVWW1xeYmFiY2NoaWVjaGxqaGpo
-bW5wb21ramtramdrbGpqa21tZWVsbXJ5dnNycG9zdnp/fn1xc3l5dnV7e3t/goJ/
-gHx9gIeIi4uLi4uMjouLjIyKhoiKhX+AgoB+g358enZsYFtbWlpbWVpXWFdXWlhY
-WFdVVllXVlZUVFZYWVhYWllZWlpWVlpaWVdXV1lZWFVYVVVVWFhZWFpXWFdXV1pY
-V1paWFlZW1paWVhXV1tfW1pXV1hYWVlXV1ZYVlZbWlZZXFteX15eW15fXFxaW1pW
-VFNRUk5SVFZcWlhWUVBLSUVDREVHSUpKTExISEtMRkdMTEtLTk1KSk1RT1FUVVRT
-U1RWW1tZWFlXWFdcXVtWVVleX15eX1xgYGVlX2BiYWJkZGJiYmdoaWdoaWtubW5q
-am1vb29wb3Bsa2psb3FzcnVzc3V3end4e3p/f4KEg4OBhIOCg4KHhYWGhIKCgIOF
-iIuQkJKanqCho5ydn56eoqGfmZydoJyamp2cmpaVlI+Jh4F/enl8fX18fYGCg4KE
-i46OkpOWmpuYlpmUjouIiIiKkJWWmp2dm5ubnZmZmJWTkpSVk5GSkZOWl5qam52d
-n5yXlZKVnJ+gnZuZk46JhX1uaWVjXVlXWFtbWVxbXFpXVVRTVldTUFFRUFFRVFNS
-VFBRU1RTUE9RV1dTT1FTUFRWU1dXVFJQU1NTVFJUUlBWVVhWU1BRUVRTU1ZWU1JT
-UFVWUFJRVFZWVlVWV1ZZVVlXV1hVVVRXVFdUUlVVW1hZWVpWU1VVVlZUVVdXWFpW
-WFlWVllUVVpcWVlZWFdcYVtZWV1cW1pZWFpaWFtjZF5eXVtYV1ZbXVpZU1RXVlZW
-VldWUlZVVlpZW1laXFpdW1xdWVpbW1xeYGBcWl1eXFpbXF9eYF1cX1xeXF9eXF5e
-X15eXVxdYGBdXl5gYWBfYF5fYWFgYV5bXFxfXltZWFpXWVlaXVpcYoq9zNXd4OPm
-5+np6+x/gIGBf4B/hIaBfn5/fn9+f4CAf317f318gH6AgIKAfnp/gICBg4KDhYKD
-goaCf4B+gIGCgYOCgoJ/goKAhIKDhISEgoSDgoKBgIF+f4F/fn5/gIKBg4GDg4OE
-g4OBgYOAgICBg4WFg4aEhISEh4OChYWDgH+Fh4WHh4mGhoeIh4SDhIF/f4OAgoB/
-gH14dXl7fHd3d3d2eXh4eHp9fH58fH2BgoCCgoaDgoKDgoB/goJ/gH+Ag4GDgYCC
-hYKDhIaCgoF9gYKEgYB/gHx7ent8fX18e3l7d3N1c3V2cnFwcW9sbG9tbWpqaGdq
-ZWJlZGRlZWRmZ2ZmZ2RiYWBeWltXUk9PT0xJRUVFQEJAQ0JDQ0NDSEdHRUNHRENE
-R0ZGREJAP0A6PDxBQT5AQEE+Pj49PkA+PTs9Pz9APD9CP0FCREZHSFRMVFRgb3R+
-h4iPkI6PlZWUlZaUlpSTk5KSlpWWlZabnZ+kpqikopyXjX5xYFFNUFBRTkhGTEhF
-RUZHR0JBQkVHRkVDQEJCRD9JQUdHRUdFQ0VKTU1OUFJZXF1cXV9cWV1bWFlbWltZ
-WVlYVFtfXl1bXGBeYWNmZmhoa2xpaGVnaW5ubWtqbGpwbmdkZGhqcGxkZWptcHN1
-c3N0dXZ1dnx/fHN4eHd2eHp5fYKAgIOFhoeKiomJjY2MjY2PkJOPj4uJh4mHg4OE
-g4iGgIB9dWtfWVxbXFxdXFZWWFhZV1dWVlRUVldZV1haWVpZWFZVU1RUWVpYV1da
-V1NXVFVUV1hXVldXWVpcWVZWWFdZWFhXV1hVVVlVVVdYV1ZXWVlaWVpXV1ZWWFpZ
-VVlaWVpYXFlYYGFfX2JfXVpXVFZYV1VPUlRST1FVV1xeWldUU09IQ0NBRktJSkxN
-Tk5HSEhERUhJSklOTU1OTVNUUFBTUVBPUFBYXFlUVVVVVVdYVVVVWFdZWlpcXV5h
-YGBjYWRiY2RkX19hZWNiZmlqbGtqbnBub29wbm1wbmpubnFxc3d3d3Vxc3Z6fHl5
-fH+Afn9/g4aGhYaFhoaJh4GBgX9+f4KIkJCUlpaZnJ+bm5qenaCen5yanZ2cm5ye
-nZycmpaUkI2Hg4F+e3p8fHt/gYGBhIWIiY+Qj5KUmp2alpGLi4mEh4yTlpaXnJ2e
-npubnJeYlpKQkZSVkpSVlpiZm5uYnJuYl5OQkZWdoZ6fnpqUj4uHeW5mYmFfW1lW
-WFhbW1pVV1dUVVRRUVJTT1BPUFBSUlJRUlNSUlRVVFRSUlRST0tNUFJTU1JQVVZV
-UlBUUlNWU09RU1JQT1BRT1NVVVVUVVZXVlRVVFRUVVVUU1VUUVJSVFVUVFNXWVZX
-VlFSVVpYWlpXVltZVlVTU1RTVFRWV1pYWltaWFlZWFpZWFlaW1pZWFhYWVxdWFhX
-V1hZXWJgXVtaV1ZXVVhaWVdaVlZYVldWWFZVVlhbXVxdXlteW1xbW1lcXF9dWFpe
-XlxbWllaW1xdW19cWFpbWlxgYF5eXlpaXFxeX1tdXl5eYF5fX1xeYV9gX19dYV9c
-X15fWlpYWFlYWlxaWl5hibvM1Nzg4+bo6enr6oKEiIeEgYKAgYKCgIJ/fX19f4GB
-f317fHx7fn58fn1+gH9+fX98foCBgH99eYJ7fYGBf4KCgYSCgISDgYGDhIaFgoGE
-g4GCgoGBfoCBgYGBgoOChYiCgIGAgoKAg39/goOCgYKDhoeGhoaHgIGEgomGhoSD
-goaIh4iKh4WGg4OGhYSBhoSBgYF+fHt6dHt4eXd5d3d3d3l4eXp6eXp6fn19fX6C
-hYGBgoWFh4eEh4aDf4CDhYWFhISBgIKDg4KCgYCAfoCAg4KAgIF/fHl8enl7enp6
-eHh0c3R0dXZyc3FtcW5ra21sbGxqaGZmamllZGVmZ2VlaGVkZGBfXFZUUE9JSkpK
-RkNCREVGPz4+Pz9BQkVJSUhKRkZGRENDREZGRUM9PkI/PT0/Pz4+Pjw8PT1COjw8
-Ojw8Pjw+PT4+QkNCQ0ZHSUpKVFplcXqBiI+Qk5WVlJWUl5eVlJOUkpOSk5OUlZqb
-m56kpaempaWbkIJvW1NLSUxMS09PSUNERkhIQkFBSEZBPkBEQ0RAPEE/Q0RGRkJE
-RkdKTEtNUFZaW15jX19gXV5cWF1dWVtYW1hVWF5gXVleXF9eY2RkaGZpa29saGpt
-a2toa2xpbHFyaWVjamxwcmxrbm9vb3N2dXR3eHd1fX18enl4enl2eXt7fH2EjIuK
-io+WkY+SkZCQjJKVj5CNjY6NiYSHhoWGiYmEg392bmFcXlpcXFtdWlhWVVVYVlRU
-VlhZWFdbWVpXWltaV1VXWFdXVltYV1RVV1JQUFVUV1lXWllYV1dWVFZbWFhaW1ZU
-V1ZXWFdXWVhZWVlaVlRVV1VTVlpbV1lZV1ldW11cWlhcXV9dXV1cWVlWVlhVUlFP
-T1NPUlpdX19dWVVPTEpHQ0VIR0pHSUpLT0tJR0RFSUlGTEpLSU1QVFNPT1FUVVFQ
-UVVZV1RTU1FSUlZWU1ZZWFlbXVtaW2BfYGJfX2FhYGJiX19hY2JobGxraWxqb21s
-a2tsbG5ra2luc3Jzc3d2c3R0dXh7fHx+gXx/f4GEhIeGhoWFgoWFgoSEhISBgYWM
-j5aWlpaZmZmcm5qbnJ2dnp6bm5ubmpuampqal5OQjouGhH17enh4eXt9fYKHiYqK
-jYuMkJOQlpaTjYiKi4uMkZWYm5yZnJ6bmJeem5aYl5GSlJWZmJqbm5ydnJaUmJiU
-kZGSlZygo5+empOPiIN9cGhkZmFfWldXWFhWV1ZYV1hVU1BRUFFRUE5OT1BOUVNS
-UlNSVFRSU1JTUVFNUFFQU1NRT1NRUlFTUFNWVlhWVVNVVFdVU1RTUlRUVlZWVFdW
-UVRUVVVTUVJPUVNVVVhWV1hUVVRXV1VRU1NVVFZWWFhYVldWVVRWVVhZWFlYWVhX
-W1hYVlVWVVhcW1pbWVhaW1lYWFtcWVlYVllbWllbWltYVVRXVlxZWFxbW1hUVVVY
-WVlaWFldXltcWlZWWFlbWl1bXlxdXFtcXF1cXV1aWlpZWFhZWVtZWVtfYF1aW1tb
-XF9gXV5eX2BfXl9hYGBgYF9eYGBhY15aXVtaW1hZXFpYV1lbW2OMssvV3OHk5ejo
-6evrgYGBhISAfn6Ag4OFg39/fHx+fn+CfHx8e3h6fn58gIB7fHt+f357fX9+fHV3
-fHt9fn1/gYGAgYKBgoOEh4WDhoSCfoCBg4aChIWCgH1+f4WGhIKDhYWAgYCBgoKB
-hoSDhISEhYKDhYeJhoKFiIaJiISDhomGhoeHhYWChYeHhoaGhYaEhoSBgH9+enx6
-dXZ3eXl3dnZ5d3d2eHd5eHl5fX18fICEh4eIiYiGg4WDhIOAg4WGg4KDhISEgIKD
-hIGFg4KBg4SCg4WCgX99fX98e3p5fHt6eXl6dHJ3dXdxb29xb25tb2tqampsaWhm
-Z2ZkZmRlZWZkYV9bV1RRT0pISUZGR0hEPkNBQT49Oz9AQkFBRUVER0ZERD9CRUhG
-RkZFRUFBPT4+QUBCPUI+QEA8QUI+PDs6OzxAPj09PURDQz89REdGREZMU2Bsd4KM
-jY6QkpOTlJiZmJSTlJSSkpGPlZSSkpaXm5+gpamlpaScj4JvXE9DR0ZISEtKSEdG
-RUJAP0E/QkNDQkJFQz9CQkRDRklKTEdGRUhOTExOUVhbXFxdXVpaWFVTVVhXWFdP
-VFJTWltaW1xgXF5gYWNiZWlscW1naWttbGptaWpub3J0c3Vubm92dnN0cnRtcnR0
-dHqCgoOAeHV4eXt9eHh3enl4fISGiIuMj5KSkpOVlJSYnJaTkI+Rj4+JhYKGhYWI
-iYR+fnttYl1cXlxbW1pcWFZXU1RYWFdcW1pZWVpWV1pYVVpWWldYVlRXVlZUU1RT
-UlVRVFdVVFNYWFZVWFhZVlhaWFhaWlxbWFZVVllYWFpZWVdYV1dSVlhaW1xaW1lY
-WFdZVlhcW1taXF1dYVxcWFpUU1NPT01NTk5VWVpdXFpXU09NTEpIRUhGRUZLSktK
-R0dCR0lHRklISktLTE5RU1BPTk5UUk9SUlRUVlZSUFRVU1ZbWFtcWVlYV1ldX19g
-X1tZXl9dXmBiYmBiZGhoamdoa2tpamhpamtqaGlpbW5wcXFxc3h5eXZ2dnl6fH9+
-foKCgYOIh4WEhYeJhoWDhISCgYCCiYmJjpOXm5mbm5qZmJqbmJaZmJuXmpeWlJWX
-mJiXlJGNi4mFgX55eHd7fHl7f4ODhYaJjZCNkZWRk5OIhoiJi4uRl5qeo6Cem5ua
-mpucm5uam5qYl5+fnJuampqYlpiXmJaYlJiYnJ2fnp6dmJGIf3tzamlnY2BlY11V
-VFVYVVRXUFNSUVFOUFNST09OUk5QVFNTUFJSUlRXVFNWUlJWVVRSUFJVVFJQT09S
-U1VXVFVVU1VWVFpYVlRXVVVXWFlUUlRUV1hWVlNQU1FOU1RTV1hXVFFRVFRVVVNS
-U1NSVVhZWVhWVldZWFZWVllZWFlXV1hZV1dUWVdWWFhZWlpaWVhbWlpYWFtbWlpX
-VlpZWFtZWllXV1tZWFdYW1xaWFdVWFVWWFdYWltZVVVYWVdXWlhbXFtbXVpXXFxd
-XltaV1pZW1paXV5fXVpaW1tfXlpfYGJfYGNgXGJhZGJgX2JfX2FiYmVgYGJgXF1c
-XltcWltZVlZXWFpcYYm1ytHb4OTm6Onp6uqEgoGBgX6Bf39+f4B/fX6CgoV/gIB/
-fnt8fHx8enx+foJ/f39+fH9/f31+fXt6f3x9fn6Afn5/gIB/goaKg4GDhYKCgYCC
-gIODg4WChIOEgoWFgoCDgoOBf32BhoWGiImJioeIhYSChISHhoeHhIWFgoKEhIaH
-hIWFhoSEhIWEh4eGhoaFhYKCgYB9fH95c3Z3dHZ1dXZ2eHd0dnp5eXV4fIF9f4CF
-h4iEhISIhIOEgoGBgYKDg4GDgoGDgoWDgX57f3+DhIeDfX1/fnt8fH18e3x+fHp6
-e3p4c3V2dnJxcG5wcGxrcnFqa2ppZmZmZWZmZ2ZlYl9bVlFNSUhFRUNCRkVIRkJE
-Q0VBQ0JCQ0E/PTtCRkhJSEZCREBBQkVFRkBAQ0BCQkRFQENCQkE/Q0JAPzs7PD89
-QUA/QUBDQ0NBQUVER0ZGRUhPW2RyfoWIkZKUl5WUl5WUkpKRk5GSko+Qj42Oj5Wa
-nKCgpKSkpJ+akIBqWlBJRUNKRkVEQz9BRERERENEQkJERkRDQ0I/QEBFREVGSUVE
-RUlRTk1NU11cXVlZWVlXU05QVFdYWFJTV1JUV1pbXFtdYGBdYGJlZmlra21vb21t
-bGxvb3BzcnJ3gXlwbnFycnJ0eHJ1eXRwdHyChIJ5eHl+gHx/gn19eXx/g4SEhIeL
-j4+RlJWVlpiYk5KQi4mJjoyKi4iGgoeGhIN/eW1hXltdXVtZV1laW1pWV1teXF1c
-WFdWVldVWFhbV1hVVlZVU1ZXWVVVVlRRU1RWVFRTVlZaWVlYWVdWWFZWVlZUVlhc
-WlpaWFlZWFlZV1lbV1hZW1xcXWBcWFdXWFdXWVlaWVtbXF9gW1xZU1FOS0tKTElI
-T1NXXGFgWldTT01JSkhHRkVESEdKSkhFR0ZHRUZJSEZJSExNTU1NTk5MSUtNUFFS
-VVZUUlFXVFJTVllZWltaXFtbX15fYF9dXF5fXltcX2FkX2RkZGljZmZoaGppamVm
-ZGhpY2dsa21tb3N0dXd4c3F2eHd6fX19goeKg4OGhoWGhoODgn+CgIOAgIOHiImI
-kJSYnJeZnJmZm5yXlZiXl5WXmZqWmJuamJWUk5OQkI2Hgnx5dXR3eX99fH5/g4WG
-ioqKiouKiYOBhIiJjpWXnZ+gmJiam5aWl5eZl5qdnp+enaGdm5qZlpeVl5iXmJaY
-mZqfoKChn5uWkoqAc25nY2BhY2JiY1tVVFVWVlFRUlJSU1JSVFJRUVFPUVFSUVNU
-UVNTUlJSUlNQTlJTVFBQU1VTVFRTUlVUVFVVUlZSVFNUUlRUU1ZVVFRTVFNRU1VV
-VEpOUFFWVVRSVVNUVFNTV1NTVVVSVFVSU1RWVFdWV1ZVWFdaWldYWFVVV1dXVlhY
-WFpXV1dWV1hZW1tcW1haWFdYWVhaWVlaWFdaWlhUVVhaV1dZW1xYV1ZUVlhZWFlW
-VlVVU1dYWFxbVlZWV1pZV1pdXWJbW1lZXFlYWVpbXFxbXF5eXVtbW1tdX11dXGJf
-YGNiYGFgYWFgXGBgX11cYmJhYV9dWVpaWVtbW1xaWFlaXFtgj7fJ09vf4uXo6Onr
-6oKEgoCAg4GEf3t6fH5/foB/gH9+fn5+fX58fX16e35+gYB7eHp9goKAfH6AgX9+
-fnx+gIB/fn6CgoKDgoSGhYGBhoWDgIGChIOBh4eFhYSBg4WEgIB/gX5/gH9/goKG
-hYOHhIOEhYWEhYeEhoWGhYaFhoSEhoWBg4WIh4aFg4KBhoWEhYWDgoCAe3t+fX97
-eHNzc3NzdXd2dnV1eHZ2d3h4e35/gYKGh4J+g4GBgYSEgYCEgoOEgoOAgYKDhIGA
-gH6ChIGCgIF/f359gH95enx7e3p7fnx6eXl3cnBucXJvbWtub2xucW9saWdlZWNl
-Z2NkYl5aVFFOSklGQ0REREVEQ0FDQ0Q/PT5OS0VCQ0VHSkJGSElGRkhFRkRBREFF
-RkNBQ0RCQUA/QUNCQkA8Ozw+PTxHQjw9Pz9BPUFBQD8+QENDRUtGSE1UXmt2foaN
-j5OWlZSUkZGTlZWQkJKRkY+Pj5GRlZuampugpKSjoJ+ckH5tW05MSEZJSEJBQUFG
-RUVCQ0A/Pz9CQkFCRkNBQkNDR0VBREdLS01PT05YWlxfXltXVVZWVlZZWFlWV1hW
-VFdYWlldW1xeXmRmYl5gZmhrbW5ubXFzcnFycnFwcnd9d3NtbnBycnN2d3l7d3Bv
-cnl7gHh0dXyCgYOHhYJ/gouGhoeIiI2Rk5WTk5KUl5iWlJGPjY6PjYuKioiHioiD
-gnx0Z2JdXFtcXVtcWVleWFlXWVdTVFpXVFdYWVlYWlhWV1hWVlhXWFtaV1VVVVdW
-VFZWV1pVUlNWWVZUVlNTVVlXWFlWVVZYV1dYWFZXWFdaW1dZWVpaWFlYWFpaWldW
-VlZUWFdXWVxcW1pYVlZWVFFNTEpJSktQU1lcYWJeV1dVTkpIR0dGRUNGSkdLSEVE
-RUhHRkdIRkZIS09PT1JSTktMUVBLTFBVVlFVUVBQTk5UWFpWV1laXFxdXl9eXlxd
-XV9eW1tbX1xeYWNlZ2lnZ2dnZ2VmZmZnZWdqaWxsbnBxcnF1d3Z0dnd3eHh6fH6B
-hIOHh4SIh4SBgoSEgX2AgoCBgYGDhYaJjpOXl5mbmpiZmpqWl5eWlZeXlpmampeT
-k5SWkpSSkIyHgnx3cHN3d3d4d3l6fH6BhIaGhYSAfoGEhoeNlJuYmZ6ZlpaVlZWX
-mZaXnJ6fnJ6bnJ+Yk5eUlJSUmJeXlpWZnqGhoqKalJOPjYNza2ReXlxZWV9kYlhW
-VlVSUVBRUVJTUFFRVVZUU1NVU1FSVVJTVFRSVFFQU1RTUlBTUlNSUFNWU1ZVVVRU
-VVJTU1BQU1JSVFRTVVRSUVVTVVRWVFVUU1NUVlRSUlBTUFRTVlZVVVRTU1pXVVJS
-UlVVVVdYWFhWVlZXYFtWWFhVVlRVU1hYV1lXV1VXV1hWWlpZWFdYV1lZV1hZWVlZ
-VlRWV1dWV1VTU1VZWFhYVlVYVVdZWVVYV1RXWFpaWFtbWldeXVxaV1pbW1pdW1tb
-WlpbXFxiYF9cX19dXmFcXmFiXl1bXV9fYmJjYV1cXVxgX2BfXl9hZWJfYF5bWlhX
-WFlaXVpXV1pjY2KJucjS2t/i5Obp6OnrgIWEhIGEhISAfn98f39/fX6CgH+BfYWE
-hYSAfn17e3t8f39/e4CBf4CCf4GBgXx+fn+Af35/f39+f4GAgICBfoGDh4WDhoeG
-hoWGiYiEhYGBgoGCg4CAgoKCgoODgYSDiYaHg4KDgoGEhoeCgoaGh4aFhISDgoWH
-hYWHh4aHh4OEg4GChIODfXp9fH9+fH13dnZ3dnZ0dXp0dHR1d3Z7e3t8gIGDhYaG
-hIOAgYKDg4CBf4GChoWEg4OCg4OAfn+AfX+FhYJ/goF9gYCBfn97d3h4ent8fHt3
-dnd3dnNxb25vcHFsbHRwamZnZWdkY2JiYl5aVFFOTUtKRURERkZGR0dDRUNAQD08
-PkJCQkNFRUVGSEVIR0tFRUdHRkRCQkdFRENGSEdFRkFBRUVEQT0+QD9BQTs7QD5B
-QT9DQUBCQ0RCQ0RFR0hGR05ZZ3F7hYuSkZaTlJGSlpaXlJKWk5GSkZORjpCTlZmZ
-mqCko6KlpKGYj4BsW09JR0ZGQ0RFQERGRkNBQD5DQ0FCQUNER0lFQ0ZERURAQEhK
-SkxRTFBWXFhaWlpZU1hZWVZYWFZWWVdTWVdXWF1cXFxdY2RhYWNmaWtubGtvcG1w
-cnJ1cWxwdXV2dXNwbWxzdXl8e3l3eHh3d3x6d3Z6foSBgIKBgIOGiouJiomKioyQ
-k5WTlZOSkZSWlZKSj46JioeJiIOJjIeAeXBkX11YWFdbXFtaW1xaXFVWVlZWVVVX
-W1lZV1lcWVpWV1hVU1NXWVtXV1dXVldbXVhZWFZUVVVZV1hWV1NVV1dYVlpVVlVX
-VVVWV1lZWVdZWltcXVdYV1dWUldWWVdYV1hVV1hXV1lZW1lXV1VSTkxKRkdGSk5T
-WVlaXFtZVE9MRkdKSElGREJCQUVJSEZERkhGRUdEREdLSkxPT1NYUEtNUFBOUVNW
-VlJOS05QT1BRUk1UVFpeXFlZWFlaW1taWFZaXlpcWl1fYWVkY2hrZWhoZ2RjZmVi
-ZmppbG5ub25vcHJ0dXZ3eXh5en2AgIKHh4iGhoWDf4CCg4OBgICCf4GEg4OFhIeL
-kpOWlpeYmZiXmJqamZmWmJiYl5mbmJWWk5SVk5STkYqFf3t3c3FxdnV1dX2Bfn1+
-goWGhoOAgIGBhY6Ump2cnJmWk5OUjZeWlZiam56em5yam5mal5iWmZiVkZSTlJaa
-mpqdmpeUj4uJg3pybGJaV1hZWmBlZlpYVFVUVlRYVlZWU1ZUVVZSUFFPUVNTVFVV
-VFJSVFJSV1pWUlJVVFBRUFNTVVNPUFBTUlFUVVRTUlFSU1RUU1FVVlJUUlVUVlRU
-V1ZUVFRVVlVRT1RYWVhYV1hZW1hWVVVTVFdWV1laXVxZWlhbXFZXVllXWFZUWVdW
-VVVWWFhbWVhYVldYV1ZXW1lbWltbW1lbW1dYXVlXVldVVFVZVlVWV1hWUlVZWFlc
-VVhaXVpYWFpaWVxbXFlZV1dZWVlaWVlYWVpcX11dXF1eX15fXF5fYGFiXFtfXV9e
-Yl5dXWBhX2BgYGJgYF5gYF1bXV1YV1pZWVpbWl1cW1thZIa5yNHZ3uHk5ujp6ut+
-gYSCgISFf35+gYGAfX1+goKBgHx9fn6BhIKDgH58fn9/foSCgICChIGEgH+BhYKD
-gIF+f399fX5/gH9/f4CCgoSHiIWChYWEhIGBg4ODhICCgoWFgoSEgYKDhIaFhYaG
-goWHhYSDhYOEhYWIhYaHio2HhoWCg4aIhoWEhYmIhYOEhIOBgYJ/fYB7e3t8ent8
-enl0dXV1d3h4d3h4eHp9fn+AfoKBgYGAgICAg4KCgoWCgoOGhIODgoGBf3+AfXt+
-goKAgIKCf39+fH6AgXx7d3Z6enh4eHRyc3VzcnBubXBubGxycGllZWdlYmNlY2Bd
-WFNQTEhFREZEQ0VHQ0NFQ0JBP0JCRENAP0JBRUZKR0lJR0hFREpKRkVHRUdHRUJC
-RUpHSEVGRERAQD4+QUE/QUBHQ0A8PEFGRj89Pj5AREVDQ0FCQUNCSFNfbHiAhouP
-k5GUk5OUlpeYlJOTk5GRkZSSj4+Ul5udnp6ipKOjo52ZkYBvWktFRkdDQUREQERE
-RT9CRURCQD0/P0NGRkVFSUxKS0lJSUlKTVJQT1RWWV5ZV1pXWl1aVVRZV1NYV1RQ
-U1RVWFtaX2FgXWBkZWZtcnBwc3Nwa2trcHZyb29zdHJzd3h1c291e318dHp/fH18
-eXp8eHl9gX+BhIGAiIyLiouKjIqJjJGTlJOSkY+QkY+Qj46Pj4qHhoaEhYaJiIF7
-dGVeXVxbWllaXV5ZWlZYWFVXWVdYVlhUWVlXVVVWVllYVVRUV1dVV1lXV1lbWVhU
-VVVWV1VUVVZYVVhXW1hXV1ZXVlpeWlhZWFhaWVtcWVdaWV1dXllXV1dbWFhXWFZX
-W1ZXVVdWWltXV1lYWFRPTUpHRkZJTldXWlpeW1pYUkxIR0dISkhHRkNCQURGRUdG
-RkdGR0NERkZGSEpPU1RRUk9OTk9OUlFRTkpKSkxHSUxOT1dUVltdW1dYWFdXWFhX
-WFRYXVxaXV9hYmFiZGlraGlpY2JiYGNnaWtvcHJ0c3JycXN1dXd4eHx+f4OCgIKF
-hICBhYOAgIGDg4N/f4CBgICBgoGDh4yOkJSVkpKVlJaWmpybmZaZmZmYmJeYlJKU
-mJeWlZWQjYqEgYF9dnJxdHh9fX18f4CBhISDgH+Df4OIkpeXnpmamZiVl5aNmJiW
-l5ibnp+fnZqanJyXmJmWmZSUlpSTlZucnZmYlZCOjY6If3h0aWFaWVlZWmJoZFhU
-U1dVWFlWVFFQUFVUUlJPT1NSUVJTU1RVUlJUVFFTVVRUU1VSVFJRUVFSUFBRUlFS
-UVFSUlNUUk9RUlNWVFNUVVRUVFVVVVZUVVRXWVdVVFJTU1RTV1laV1pYWVlaVlZU
-VlRWVlZXWFhVVldXV1ZUV1lbXVlWV1RVV1RWWVhYWFhYWVhVVVdZV1lYWlpZWVhZ
-WFlaWltZVVRVVldYVlpZWVxXVltbXV9bWVlZWVpWWFlaWVxZXV1cV1haXlxZWlld
-YF1cXF5dXFxcXV5hYF9fXV9hX19cXl1fXmJkY2JkX15fYmFgYGFdXF5dW1tZWl1b
-XFtdXVtcWFpjhLnI0dne4ePm6Ojr63+FhIKBgH5+gH+BgYB/f4KBgYB/foJ+fX1+
-fn6BgoCCf4GDgYCAgYGCgICCg4CEg4KChIaCgH57foKAgX1/gYODhIOEg4GDg4KB
-hIGFhYOHgoGBgIGBgYOChYSHhIKFgoGDhIWHh4mGhYSDh4iLhoaIiImJiIWChIiF
-hYOFiYqIhoeIiIR+fX5+gH58eHh8eXt+eHZ5eXh2d3l6e3l5enp9gYGBg4GAgoGB
-g4OBhYOCh4OBgIODhYSFgoB/gYF+f4CAfn+Ag4OFgX18fn98fnx5eXZ6eHt8d3Nx
-c3BxcG5vcHBta2tqaWtrZmRlZWNhXVhSTklGSEVFRkdDREREQURCRENHREFDQ0BB
-P0ZDSUZIRkhKS0xKR0pHSEZHRkZEREdFRUZFREVBPj5AQUFAQT06Oj9BQj9BPUJC
-Qj5BQUBAQD5DREJJRUdJUFxnc36FjZCQkJOUlpOYl5iWkZGPjY+QkJCRkZCUl5qh
-oaKmpKalp6Gaj4NtW01GSEhCQUVAQUJBQkVFQEFDQ0JDRkZFRUVCREdMSUxKSktM
-TEtQVFFUWVpZV1tbWVZVUVNSVFdSU1RRUE9SVVhYX2BfXl9iZG10b21vb3Ftb21w
-c3R2dXV0dHl8e398eXp+fXh5fYF+g4F5en57fn+Ag4eHfoOIjYuHioqNiYuNj5KP
-kZGSko+Mjo6Sjo2LiYmIioaEhomIhIF1ZFxYWlpbWllZW1lcX1pXWFhXVVdYWFVV
-VlhWVVRWWFhVVlZXVFRZVlJYV1dUVVJUVlVTUVlXWVhXVVVWWFdZWFdZV1dYWVpa
-WFhaW1taWVhaWFVWVVZWWFlZV1RWVlZUVFVYWFZXXFpYV1hXUE1KRUNGSUtNUlhb
-WFpbWVlWUFBKSUlFRkVCR0hEQ0VAQkNDQ0NBRERERERHTUxOSk1MTlFQUlRMTU5M
-S0lERktLTE1PT1FSVFhbWlhXV1hYWFdXVVJWWl5bXmJlZWZlZGhoZmRkZ2NhY2Nm
-bGpobXFzc3Z0cm9xdHd3eX2AgYKDhYSEhoKBgoB/gYGDhoWCfYJ+fn+DgoKDhouO
-jY+RjpKTlZWYl5iXlpWVlpqZl5WTlZiXlpeWkpCQkYyIhoJ+enh1d3h5foOFhoWE
-g4F+f4CDg4uUmpmcn5qalZGUk5KVlJSXmZicnJuWmJmYmJeUk5aWlJaWlJWVlpib
-m5makpGTjoV/eXVuamJbWltZV11hW1dWVVVVVlNRTk5NTlBRT09QUlBPUlFSU1FS
-VVNUVFZUUVBRU1VSUFBPT01RVVBSU09PUE9SUlJVVlVTUlFUVE9RU1JUVlRTVFZV
-WFNRU1RUVVVVVFZWVVZWV1dXVVJQU1RUVlZZWFdZWFhZV1dXV1VUVlZUWFlWVldX
-WVlXWFlZW1pYVlZWWVlbWVtYWlxcWlhXWlhYV1xaWFZXWFdWVlRXV1ZYV1dWVlVV
-VVZZWFtXWltXWV1fXV1bWFxcW11fXF1eW1teXV5dXFtdX2BhXl5fYGBiYF5fYF5f
-YGRiYGFjYmFjY2JgZWNdXl5bWVlZW1tha15bW1lbXF+CucrS2t7i5efn6errgYSC
-goOCg4J/goKCgIOCgoKBfoB+fHyBf3x/g4KCgIKCgIOBgYCBf4J+gIKBgH+Bg4GE
-hoR+fYCBfoGChISDgYWDg4OFhIKDgoSDgoWHhoSEgICBgYOCgoKDhIWEhYaCfYKD
-hYaIiYWDgoKBhYWDhoWIjYiHh4eEhYqJiIaEhoaDg4aFhoCAfH1+f3x5eXh6fHh3
-dXZ5eXx/e3x6eXt7e3+BgX+Af32AhIOAgoWDg4OCg4ODg4OEhYWEgoKDgIR/f39/
-fYB/f4ODgYB/fX18fHp5d3h5eHh4d3N2cXJwbW5sa21tbGhnaGVmZmhlYl1ZUUlG
-RkhGSEhOSENDRUJGQkBCQT9AQT1BQkFAQURDRkZGSEtIS0xISUlIR0hIRUVBQkRG
-SEZEQURAPj5CQ0JBQEBBQT0/PUBBRUFAQUA+QD08Q0FBREVFRkVNV2Rud36Fi46V
-lJKTlJSQk5OWk5WVkpSQjpCPkpKRmZ2hoaClqKWnpp6bkXxlU0lDQ0RDQkE+QTxB
-REJAP0JBQj9DREZERUZERUpKRkpKSklHSExRT1FVWltYWVtWVFRSU1VPUlJKS09N
-T09TUlVUV1xcXl9iZ2hmaG1qbHFzb3NxcXN3cnR2dXV6fHh6d3p+fnx9gYCAgH1+
-eHx/gYGBh4WGgoWNjISDhYiHioyMjI6QkJmaj4qLj42PkI+LioqMiYWEiI+Lg3Jh
-XFtZWllaWVpYXFlaW1tbW1tbWVhUU1JVVFVTVlVSV1lXWFhWV1ZUVlhXVFZVV1RV
-WFdZVlZXWVdVVFRUVVZXWltaV1hcXFlaW1tbWVdYVldWVVZWVlZZXldWWVtZWFxY
-V1dXV1ZZWltZVVNQS0hGRUZJTE9RWVhbWVtWWVZVUktJR0hGRkZFR0ZERkI+QEFB
-Qj9BREVGR0ZMS05LSEZKTVBWUU9NTE1KR0dISUpLS0tMTVJRUlNSVlJWWFhUU1VW
-WVlXXF9eZGRjZWRkYmVkYmFiYmJjZ2lnbG1ubXJ3eHZzcXZ1dXd5fn9/gYJ/hIWI
-iIeFh4WCgICDgYB/fXx9f4OEhYWFhYiLjY2Ok5SUlZaXmJWTkpSVlZeWk5WXmJaW
-lpaWlJSUko2MiYeEgX17fH2ChoaFg4KBgHt6eoCFjJWbnJmanJeUkZGRj46Uk5WZ
-lZqbmZWXmJaXlpiZlZOUmZ2YmJqcnZ6cmpuVk5OQioV/c29uZWFfXlpcXVxdWVdW
-VlVUUU9RTk1OUVJUU1FOT1BSTlBSUlJRVFNUUU9QUVBPTlFRUlRSUFJTUU5NUFJU
-VFVUU1NUVVVXVVNTVVJQUlRWVVRVVVRUUlNSU1RUUVZVVlZVV1dUVFJTVVVVVlVV
-VVNYV1dYWFhXV1VVVFVTUlVUVllZWFlbVVdWWFpYWVlYW1xdWVpbV1dXWltbWVlY
-WVpbW1teXFhVVlhWV1dYWllYV1hVWVlVV1pZXFlYWVxZW1xbW1xaXFxaXl9fXFte
-Xl5aWVtfX15eX11eXl5hYGJgXV9hYF9gYWBjYWJhZmlpbW9rYVxeXVtZW1pXWmZm
-WlpWV1pbYYW6ytPa3+Lm5ujq6+qBf4ODhoSDg36Bg4GBf3+Af318foJ+f4B+fXyA
-hYGAgH6Eg4KChYKAgoeGg4ODg4F+f4B/goOCgoOEgoKEhIOEg4WFhoaGhIGChYKD
-g4SDhIaFgYKDhYKAgYGCg4aGhISDgoKEg4GAgoaEhoeIh4OFhIWIh4mMiYWJi4yJ
-h4OFiIaFhYWEgoaAfXx9fHt5eHt5eXZ5enl1d3p6eXp5dnh4e358fX+AgICAgoGC
-g4ODhYWGhoWFgoSBgYeGgoCEg4GAgn9+f4CEgYCBgoF9fnx9fHp2d3ZzcXV2cnJz
-cXJubG5rbmtraGdmYWBiY2BbV1BOSUlHRUdGQ0ZERURDQkFDQUA/Pjw/RURDRURH
-REZHRkVIS0lISklJSUxKSEZEQj9CQkNGR0ZDQ0FAQkFDQ0FAQEFBQD1CQ0A/PkBC
-Pz4/QkNCQ0VDRUZGUExVX2p0e4KKjY+RkZKTkpKVl5eZlZGSkpCOjouNkJKRlpmb
-n6OppqSlpKOZjIFsVUdBRkZAQkVBPz5AOz1APT0+P0BBRUZERkNCQURGREVISUlG
-SUxQT1BTV1dZWVhYVlpZVFRUU01LTVBQT1NYWFhVVFdaXmBjYmNlaWZnbW9oaW9z
-d3NydnR1eX59e3l1eoCFgXt6goOAf4F8eHqDgoOCgYSBgYSGg4qOjIaIiYqNkI+P
-kY+Mh4mQkJCRjYqNjo6IhYSGjZCDcmVcXFtcVlteWVlaW1ZaWVhZWllYWFpWVVZW
-V1hVVlhZWFpYV1hWVVJUVVVVVFZUUVJVV1hYW1RVWFhXWFpaWFZaWlhYV1tfXFpb
-WFdXWFpXWllXV1dZWVhYWFlZWVpeWVhYUlJWWFlcW1ZUUlBPTUhHQ0VKTlZZXF5a
-WV5cWVNQSkhJTExISEVIRkdFRENBPT1EQkBAQUBFSk5NS0pMR0dITUtLTEtLT0pH
-RkRJSklJTExKT09PUVJRUVNVUVJRUVVaWFlaX2BgX15hYmRiX2RlY2VlZGJkZGdq
-bGtubnB1dnFxcnFvcnh5foKBgYKGhIKEhoaFgn5/gICBgoCBf35/foSFg4WGiImK
-iYyOj5GSlpiWlpeYlpaUl5eXlJaXmJeWlZaXlpSRkI6Qjo2MhoSEg4OHg4SEf318
-fXp9foSJkpWWlJWWlJOSkI2Ojo6Sk5aXlZWSkpORkpWVlpeZmJeZmJeZm56eoJ6a
-mZiWkYuCfntxbmRiYV5dW1paW1xaV1JRUVNTT1RUUlFPUlVUW1FOT09PUFJQTkxP
-T05PUFNRUlFRVFVUU1dVVFNTU09PUFJQT1JVU1VVWVhTUlFTVFZWV1RUV1ZWVVRS
-U1NTVFVZU1ZWVVZUVlhVV1ZUUlRVVFNUVFVTVFJTWFdWWFRUVVVXVVZWVVhYWFpX
-WFhXWVlaWFlaW1hYWVVXWVlYWVlYV1haWlpZV1hWWFhWVVlbWFpYV1pYWVhYWVpY
-WVhbWFlYWVdZW1xdX15aWVtcW1xeW1tbX11aX15aXGBfXV1gXlxdYWBeX15eXmBf
-XmFhYWBeW1tcXFtZXVxbXV5bWFdXWFlbWVZWWFtjirvL1dvf4+Xo6ejq632AgoKC
-goB/gICAgoSEgYB+fX1/f4KDgYKCf3x+gIB+gHx+f4CBgICAgICAgoCCgICCf3+A
-goOFg4OGgYCBg4aHg4OBhYOBgIGDhoeIiImJh4eFgoKEgoCFhYaDhoSEhYKFhYSB
-gIKDhYSFiIaIiIOChIOFiYqJioqHiImHhoeFh4eEhIOEhYaAfX16ent6eXd4dXd2
-dnR1dHV0dHh8fH19e35+f4B/f4GAgYGAg4SEhISFhoSFg4KCg4WBgoGBgX9/f359
-gIWAf4KBf3t8fnp5eHh2dHNzdnNycHFycG1tbWxraGppZWBkZGBaV1FOSkdKSUhI
-SUdFRkZFRURFQkFBQz8/QENBQ0dIR0ZFQUJFSUdLSUlGSElITEtIR0JBQENEQkdH
-RUdEQ0JDQUNDREE/QEBAQENCQD4/PT09QkA/QUA/QURHSUdJSVBXZG54g4iKi4qO
-jo6RkZSUkI+PkpSRkY2Ki4mLkpSXmJiaoKSlpaSlpaKakn9pVkdERERDP0BBPUdC
-PTw9P0VAPD5BQkZFRERDQkNFP0BFSkpKSEtNTlRTVFZbWlpaWVlYVFNSUE5LUVBN
-UFRUUllZWFhZXWBeYmVlY2VvbmxobXp8eHd5enh8gIWDgHt1e3x+fn6EfXuAhn98
-f318f4N/fX+BgH6Cio2NjoyLi42MjIyNh4KGiIyNjZCRjImOjouIh4iKiIBuYV9g
-XltXV1hYVldXWFZWV1VYVlZYW1xbXFhYV1dWVlVYV1dZW1xZV1ZUVFZXXVFRU1RX
-WlpWVlZWVVlYWldXWVhYWFpbXVxbW1pVVlZZWllbWV5eWldaWlZYV1dWV1hVV1tW
-VlpZXFxdW1ZVUE1NS0hGRkhQU1ddX1xeXVlUVFRQT05OTUtJR0RGREJEQ0RFQTw9
-Q0JCSEdKTUxKT1BKRkdGQUZKSkZJSUdIRkhJSkhKTU1OUVNSTlRRUFBSVVJUWFNY
-XVtbX15eYWBhZWJeX2FlZmZiZWZoaWZucm5tcHFycnNxbnJ1e35/gIGBg4WGhIOH
-hYWBfn58fXuAgYKEf4B/fYB/goODhoWKi4qLkJSXmJWXmJiWlZaVlpaVlJeYmZeX
-lpaUlZWSkI6Pj4yJh4WGiIeEhoF/fXx6dXh+f4uPkJOSlJiTkI+Mi4qOjI2RkZCN
-j5COj4uQkZGVlpianJeWlJianZ+ioZyYl5SOiX97d3FpZGRhX15bWFlZWFZVVVJS
-UFRQUFJSUFNPUlFRUE9QT1BNTE9SUlBPUFFRUlNRVFNTU1RTVVRUUVFUVFBSUVBQ
-U1NVU1RWWVdVVlZaWVhXVFJSVFNQUVJTVFRYV1VTU1JSUlRUVFVVV1dYVFVWVFZV
-VVVTVVVWV1RVV1ZYWFhXVlhWVVhZVlZXW1paWFpeXFlYV1ZYWVpaWllYWVhZW1lY
-VllbV1haV1lYWFdWVlZYVlhYV1VXWVtdXFxcW1lZW1xaW1taWlpYV1taW1taW1hc
-XlxbWVxdXVxeX11eXl9eXl5eXGBgYmJgXmBfXl9eXFxeXV9cXl1eXVtcW1pbXVta
-V1VXXGKLvcrT2t/i5ubo6erpfX5/goKAgoSDf39+gIGAfn1/gIF/fn+Cf4B/gYGC
-gYCCgX19g36AgIB+foGChIaBgH18gH6Afn6AgYF/f4KDgYOEgYGBg4SFhIODgYKG
-g4aEhYWCgn+Bf4CChoaEg4iGhYKDhIWBf4WDhYaHiouIh4OEh4SDh4iGhoeFg4aH
-iImHiYWGhYaCgX58e3p8enx8fHh3d3l2eHd3dXV4eXx8fHx+foCAgoGCg4N/foGB
-gYWFhoSHhYKDhoiDgH+Bf4KDgn1+fX2AgIGGg35+fHx6enp1c3N0d3d0cnJwb29s
-b2poaWVoZWVlZmNiWllRTUhGR0ZGR0hHSElHRkhGRERCRENCP0FAQENHRkpIRkRD
-RkVIS0ZFRkVISUhIR0dEREJGQ0VHR0ZGQkBBQERDP0NAQkI/QD5APj1BP0A9PkA+
-REFDRURCQkNGSEhJSVNgand+gIaJjI2Pj5KUkpGUkpKVlZSSjYuKi5GQkZWVmJqd
-oaSnp6SnqKKakoRvWkpAQUNAQ0JCRUM+PDw7PTxBQT89QEZHRUNEQkNCQkFGSExP
-S0xOTk9RVFhbWFhYWVVOS01RUVBPUlFQUlVVVlhcWltbXF5lZ2JlampsbG1vdndz
-dHl5d3yAgYGBe3Z4eHt/gYGAfn+Cf4CBfnqChn+AgIOCg4OJjYuJi4yQjo6MiYqF
-gIOEiIiHhoqNjo2NioeEhomFe2lfXV5cXVxcWlhXWFVUVllXV1lZVFVVVVdbV1ha
-WFlYWFZTVFZYWVdXV1JSVFlZVlVVUlNVVFZYV1RXV1dWWFZXWVhXV1hYWFtXWlxd
-WFpYVVdYWFlWV1dZWFdVVVVaWllYV1hYWllbXV5cVlRTUVFMS0pJSk5SWF1eXFxc
-XFlXVFNQUE9NS0pGSEZBQ0RGRUhCPT0+QEBCRkpLTEtKRkpKR0RERUpKR0ZJTUpJ
-R0lKSExMTU5NT09NTVBTUVJTVlZWWVVYXF5eXWFfYWFhYF5gYmNjY2VjZmhoa2tv
-cnNwbm5ucG5ucHV4fH9/goaHiIeFhISCgoKDgXx+gICAgH59f4WAg4KDg4aFhIaK
-iIuMjZGWmJaWl5eTkpOWl5iZnJqbl5eWlpaSkpGTkpGPjo+OiomIiYWDf3x6eHZ5
-d3h8iJCTlZOUko+OjIuKiIqLjZKOjIyJjIuNkZGQkJKWlZeXl5aTl5ucm5yfnZiS
-kJKLgXp1c21lY2JfXVpaV1hWVlVTUFBQUlFSVFJQUFNTUFRVUlJPTE5RU1JTUFBS
-VVJVU1NTVFRSVFRTU1NTU1JSVFNSUE9QUlVVUlJTVVZUVFVVVFRVVVZWVFNVVVZU
-VlVUUk5SVlVWU1RWV1ZZWVdWVldWVVZXWldVVVhUWFhaWFdYWFZXV1dVWFdWV1VX
-WVtZWVlYWVpZWVhbWVlYWFtcWllWVlRWWFhWWVdXV1dYVldWV1RWVVdXVlVVVldZ
-W1laWVhZW1taWFhYVlxeXFpaXFxfXl1hXlxbWltbW11eXFxbX2FgX19eYWBgYWZj
-YGBhX19eaGljYmJfXFtcXV1dYVtaW2BaWlpdZZHAzNTb3+Pk5+np6uqBfX19foCC
-g4OCgYOBgYR/fn5/fn99fn9/fn+ChYSChYODgIF+foB9gYCAh4WEgoCBfX5/gH+A
-f4B/f4KCgYSCgYKFgYF8fHx9gYKDgoKEiIeIhoeHhoSCgoiFhoSFg4ODgIGBhYOH
-iYWEiIiKi4qLh4SEhISFhoaHjoeEg4SFhISGhIWFgH18enp6e3t9fHt4end3d3Zz
-dnZ1d3l9f3t7fH58foCAgYB/gYODgYOEhIOBhIWFhoWEhYODgoV+f4GBfn19fn+A
-gICBfYF/enZ0c3Z2cnV1c3R1cnBubm1saGhpamdoZ2dnYV1ZVU9KR0RFR0hISEdI
-R0dHQkFDREZDQUJCQENCQ0RDSElIR0dGS0xNSEZEQkVHSUlKR0FAQkVHR0pHRURD
-Q0dGUkZDREA/Pz9AQ0A9PD09Pz89QT9CQUA/Q0VGRkhHSEhIT1lodH2Dg4mNkZOV
-lZaWlJKSlJOSkpCNiYuKio2QkpWYmJ2kpaeqpqejoJ+bloZ1Xk1BQ0RERUA8QERC
-PT07PEBCP0M8PT9DQ0VCRENFQ0ZIRUlLS05NS05UVlpbWVpaWlVNTU5TUk1OUFJS
-UlJRVFhXVVlYX15iY2drampxcW5tcXNwcXJ5en9/e4B4dXRxeHyCgn59gISEhoSC
-gHyCiIiLiIeNi4mLjYmFh4yMi4iHiImEhIWGhISGhoiHiYqLiIeGh4p8aV5dXFxb
-X1pXWFhbWlpZWFVXVFZXVlZWVlhYV1lTU1ZXVFdXV1dbWVZaVlZVV1hXW1dVVFJT
-VVVXWllVV1ZWVVhZWlpYV1dYWVdYWlxZW1laWVZYV1paWlpcVFFUV1tZWldcW1pa
-W1pcW1pXU1FRTk1NTE1MTVRXWF1dWVtcYFxWWVZSUUxNSkhFQkE+QT4+Pj9BQDw8
-QUNER0lOTUlJSUxGRUZFSEtHR0hKSUhISEtNS05MS0tPTU9NUE9OT09TUFVXWFda
-XF1iX2BdX2BfYmBiYGJiZmJkaGtubm1tbmtrZ2lrbXBwb3F1fYGChYmHhYSEhIeF
-hISIhYWBfn+Bfn59f4CAg4SEhISGho2Ljo2PkZCSl5SVl5mXlpaWmJucm5mZl5aY
-lZSUlJKPj46Mj5GOjoyKiISBf3d0dHN0d4CJkI6QkpOUjIyKiomKi4uLioaEi42K
-hYaJkZCVl5WXl5iblpaWlpueoJ2ampSQjoqHgHt4cm9qZmBcWlhWVFNVVlRSUlNR
-VFRQUlFSUVJUVFVVUVFRUVFSUlRUVFRUVFVWVFRVVFRTUlNSU1ZWWFdVVVNSUVFV
-VVRWVlJRU1ZSV1NRT1FQVVNTVFNVVlRUVFFRUVRWV1NUUlZZU1JQUlZXV1dVWllX
-WFRWVFVYWFhXVlVYWFdXV1lWV1dXVVVWWFdXWFtaWlZWWFlZWVpbWFdaXFhZV1hW
-V1hZWFhXVVVXV1hVV1RWU1ZXWFlZWldYWVdbWlhYWVpXWltcXV1cXV5dXFtfXFpb
-W1tcXl5cWl1cXV9eYWFhX15cXF1gY2ZiYmFiX1tqZmBgXlxcXV1bW1paVlhZXWRe
-WFpjjLzN1Nvf5OTn6Onq635+f36AgYGEg4SGgYGBfn5/gH9+f358fX2Afn+BgoKC
-gYJ/goGAf3qCgH5/gX+AgISCgX98fXx9fYGAgoWHhn+Gh4ODg4aGg3uAgoaFhIWE
-h4SDhISFh4yHh4WDgIODhIeEfn2AhIWIi4qJi4uKiIqMiIaIhYmIiYaGh4qIg4OG
-gICCgX5/gX97eXZ4eXt7eXl5end3dXJydHR1dnh5ent5fH9+f4KCgoB/goWFgoWF
-goKEhYSDgICCgIGBgoKAgICAf35+gICBf4KCf3x8end1d3ZxdHJyc3RxbWtqamlp
-amtrZGlpaWReXFROSkhIR0hHSEpIRkhHSEpFRUNDQkBEQUFEQ0VFRUhLSUlHREhH
-SUtHR0RGRUdJSUhERUNFR0ZHRkdDQURGR0hFREM/P0JBQD08QD5AQkJCQD4/OztB
-Q0VCRUVHRUdKS0pMVV5re4GGhY+Xl5iYmZWVk5GQkpGSkZKOi4iFhYqSkZCYnaKi
-pKSkpaGeoaKdlot5ZFFERUFBOz06PDw+PUBAP0FBQT9APEBCREZFQ0RHSEpJRkZK
-TE1NUFZaVFRXWVlXVExMT1FWTkpLTlFQUU9NUVRVWFhaXF1iZGdjaGxvcGpucG1x
-bnV3ent9fHt4dHR1gYKDfoJ9gX6Eg4B/f3uDjI6MjZGRjIqNjIqIhYWGgoKDiYiG
-homKh4mHh4SGiomHhoSDhntmXVxbWllbWlpXXFpZWFdaVldYWVlYVlpXVlZWU1NT
-UlVWV1lYVldXWlpYXldRVldXWVpWWFlXWFhlW1lXVlhXVlZYWFpaWFVUV1dWWFhY
-XVtXWlpYWFZXWVdUUVhZV1VXWFdbXVpcW1tXVFRTUFNSUE9KSUxRUlddX19bW1td
-Y2VdWldRTkpHRUU/PTs8PTo7P0BHQUBARERHSkhHSUpFRkVERkdHSU5NT0xJR0hI
-SU5NS05MT09RT05LS0tNUVJUVlZWWFtcXF1dWlxbXl1cX2BeXl9hYWNmaGdrbWln
-Z2hoZWdpamtvc3V2dX1/hIaIiIWEgYGBgYCCgIOAgH1/fHp7fn+CgYGChIaKiYmF
-iYmOk5GRlJSUmJyXlZaYm5ubmZWWlZaVlJSUkpKUkJKPkpCOjIiGgYJ+c3RycnJ3
-gIWIjo+OkI2KiouGiYqHiImNjIuJh4SHjZCSmZmbmp6dmJaSlZaYmZ+gn52dmpOK
-g4SEfHZ2cm1lYVpZV1dUUlNSU1NTU1JUVFJUVFNRU1JUU1JUUlRSUE9RUVFRUFVW
-UFBRUFFTU1JSUFJSUFFUU1ZWVVJRUVFTU1RUVFZVV1ZUVFRUUlFTVVZXVlVUVFVT
-U1NSUlVVVVNSUlRVUlJVV1hXV1ZUVlVUWFdVWFdYWFZXWFlWVVdXVFRVV1lbV1ZX
-VlVXW1xbWVVZWllaW11bV1hcWlxZWVlZXFZVWFtbV1ZWVldYVVVVVVhXWlpcWlpc
-WldZWllXWVpaW11fYVxbXl1dX1xbXF1bWVxkalxdXV9iXl1dXmNjYWJiXl1fYGFh
-YWBjYFhVWlxdYFtdXl1dWllZWVZZWltgXWCHusrU29/j5Ofo6erqgYGChYKBg4KA
-gX9+fn+Cf3yAgYJ/fHt8f4SDgIB+foSFgHx+f4B/gH6Bf319g4GEgYGCf39+e31+
-goKChYWEg4KEhYKBf4CAgXuAhIWDhYSEhISDg4SEhYiFgYKFgoOEhYN/f4CBh4SI
-iIuIhIKIh4iIiIiHhomIiIWFhYWIh4WChoGEgYSDgn59fH57eXd5eXl3eHZ3dXN1
-d3t3dnh6en58foCBgYOBgoGBgYCBgIKDgoKEgYOEhYB/gYODgYF+foGBgYGBfn1+
-foCAf316enl2cnFwc3Jwbm5vbWtqa29sZ2RmZmNeXFZSUEtKR0JER0hHRkhKSktJ
-SklIR0JFREFBQ0JDRUdKS0xLSkdNSkZKSkhFR0lIR0dHSEdIQkVEREVFQEZDQkJH
-RkdFQkBBQD4/Pz47Pzw9QT9BRT9AQUNEQ0RER0NCRUhLS01RWmp3gIaIjJCUl5CW
-lpaVmZmVkZCSkY6PjIeGh4qLj5OZnqCgoJmYpKOkpqScl4x5Z1NHRD8/Pjw4OTs9
-PT8/Oz1AREBBP0JFRkNBQ0ZIREVEQUlLSUhKTlNUUFFSVFJQTkxLT1BPTUtMTktN
-TVBMTlJXWVdYWmRqZGFma25tbGpvb29ydnd3dnh7enl3ent9fnt6fHp4fXp5enl+
-goSFhoqQkpGNiYyNjY2KhoiIiIeEgoWEiIaAgISJh4WGiYmKh4eIeGRbWFlZWldb
-XFlYV1dWVVVYV1paWVdWU1NWV1ZWVlpXVlZWU1VcWltbWFdTVlVTWFpXV1ZWV1RV
-VlRVWFdWV1VSUlpYWFpaWlhWWVlVV1lZWVNYWllaWFZTWFlaV1lbXFhaWVhWWVlY
-WFRVUFVUTlBQTklJSU9VWFpfXV9eYmVpZ2JdWFVPS0hDS0A/Qjw9Oj1AQ0xAPkFD
-RkRFR0dHSUhHREZITEpGS0lJSUhLSUxMS0tLS0lNTU5MUVJNSkxQUlNYVVdWWFtY
-WFhXWVlZWVtaXF5dX2FkZWZkaGdmZWhpZmdmZ2lqbWtxcXJwdXp9foSEhIF/goOD
-gn9+gIGBgn16eXh5eX59fX6AgIOHiIaHi5ORjo6SkpOUlpmampeXl5iZm5mYl5WU
-lZKVmJmVlJORkY2Li4WAf3l4dHZ0cXV+hYyOjZCRjIiHhISIiIqMi4uPi4iJjo+T
-kpSVl5ufmpqYk5aYmJebmJugpJ6bkIaEhIB/e3h1cmljW1pVUlVWVVNTVFRSU1NS
-VVdWVVRUVFRTVFNTVFBQUlJUU1NRVVNRUFBOUVJQT1FTU1NRVFNSUlFRVVdTT1NT
-U1RTUlVWVlJSVFNUT1FQU1NVVVVTU1FYV1dTUlVST1JVVldXWldUWFVVVVRTVVNR
-U1RUVFVVVlhVU1JVV1hWVldWWlxYVVVWWVlZV1ZbWFlWWFhYW1lZWFhZXF1YWFdX
-VlRUVVpaVVdXWFhXV1hWVldUVFZXV1lZVFdbWVpcWllYWlpXWlteX15cWltdX15d
-XWtpWlpaXF5dXFtcXl9dXGBjYmBeX2FhYmNhX2FeYGBdXVldYF1cXVpbWllYWFtc
-X4u3xtPa4OLk5+nq6+uAhYSFg4F/gH5/fYB/f4B/fn2BhH9+fX9+fH5+gYGBf4OC
-g4N+gH9/gH6Bg4GJhoGBgYGBhH+Ag4OCgYKCgIKFhIOFg4ODgoOFeHp+gIOEg4SH
-iYaGhYGFg4GCgIKCgYWHiISEhIaFgYKGhoeGh4eIiImLhoaIhYiGhIaIhoiIiISE
-hIOChIKCf316e3x6enZ3dnZzc3Z4d3V3enh4eXZ4fH6AgH6Ag4OBg4OAgoCBf4CC
-goSDg4SFiISCf4OFg4F/f4KDg4F+e3t9fn9+gH15eHdzcnFwbWxra2tsbWttamlo
-ZGReW1hTTk9ISktHR0hJRkdFSUdHRUpGRURGSEdEQkJCREVEQ0ZNTEhGRk1JSkdK
-SktFR0lNR0ZGR0dFRkRDQURDQURDQ0NCQUREQ0A+QEA9PDw9PUBDQERFRkJCQkJB
-QkVGRERISEdKTE5WYW96goWJjZCSkZSTkpSWlJOTk5KPjYyLiIiIi4uQlZaYnJ2d
-np2npaSlop6cl41+aldMRUJBPDpAOztAQD49PDw/QEA+Pj49QEBDSElFRENFRElL
-TkxKTE5NUFBOUlFQT01LSk1MSk1PUU5MTUtLUFhVV1lZYWJiYWFjZWtoa3Jybm9w
-c3R1eXl8fn1+goSBfXl7e3d2dHJ3enh8goF+g4qPkY+Pi4uLiYeIh4uJioF6fH2E
-goGGiIaJhoiJjIuHh4R5Z1xaWVhXWlxdWFdYWFVXVlZVVlVVVlVVVVRUVVZWWFhU
-UlNWU1pZWVdWVVVZWFVWVldWV1VRUVNXV1dWVVZVVFVYU1RVU1ZUWVlWWVhXW1pY
-WVdWVVhWWllVWVdXWVtbXl1ZWltZWVZUU09QUU9QTk5LSk9QUFVXXmJjZGJmaWZm
-Zl1ZU09HREM+OT08PDw7O0FBPT5CQEFGREVKRkZHRklKRkdKSEpJSUlMSkZFRUdH
-Tk1JTFBPTk1MTE5NTkxNUFFQVFVWVVVVWFZWVlZYW1daW11dXWFjZWZnZmhkZGJj
-Y2JgYWRoam9ycnV2d3p7fYB+f4ODg4SAfn+AhIKBf3+Ae3l3d3Z4eHh7f4WGg4WI
-iY2MjZGSlJOVm5iYmZiXl5eWlZWXlpiVlZWVlZWUlJCOj4uHg4R8dHNwbW92en6E
-jo6NjY6OiIWCg4WFiYiGiImGh4eJjZKSlZaXlpucnJyZnZaWlpianJyfm5eQioV/
-foB+enZ1b2lhXlpXVlVTUlVTUVNTU1RWV1VXVlFTU1JSU1RVV1JTUVNUVlNSUFNS
-UlJRT05QUlVWVVNQUVJSU09TVVlWU1NUV1NVU1JTVVVVVFNVU1JQUFJTUVRXV1RW
-WFRUU1NWUlRUVldZWVZaVldXVVVRUlNUVFZWWFhXWVZVU1VYVVZVVlRXWVhVVFRZ
-WVlYWFdXWVxcWlVXWVlYWFdXWlhWV1dXVVhUU1lgWFhXV1hZWllXVFdVV1hWWVpZ
-WlhWWlxbWFpaXFxaWlxeXV1ZW1taXFtcW1laX11eWVlZXFxdXlpdYWFhYmJjYWNq
-Y19fZGRjYF1cWltbW15eXVpcXF1dW15ljbzI1Nrf4+Xn6Onq64OEg4CBgX5+fn18
-f4CAgH9+eXd9goF8foCAfoB/gX57gIKFgYCBg35/gYB/gX1+fH+Bg4SBgoKCgYKD
-hISBgYOFhYSFhIKDgYGEhoSDgoGHg4SFh4WIiYOGg4SDhIOBgoWIjYqIiYOIg4OE
-hYaIioiGhIWHh4WDh4aHiIaHhYOAgIGChYOCg4GBf4B/eXl7enZ4dXV1dnV2eHh6
-eXdzdnh7fX9+fXx/gIGAgYOGg4GAgYKEhoaHiIaGhIGCg4KAgICBgIJ/f399fXt8
-enl4eXl3d3V0cG1rbWxsamdoaGdnZWViXlxYUkxJSkhGSUhIRkdHRUlJSUxJRklG
-SUZERENBQUVHS0dFSklKSUhISEZISUlGPjxFR0pKSEhHR0VDREZEQEJBQkVEQEFD
-Rj8+QT9AREFAQDs+PkJDQkJHRENAQ0RBRUZGSEhHSkpLTU9aand/hYmLkZKVlZSU
-lpOTkZORkI+PjYuLh4iOjZKPkZKYmZ2fnqGgoqKloZyblo+Ccl9PRkVBQEI8OTdB
-P0FCOjw9PkFBP0NGRURDRkhGRUdISUpNUExNTlJLTU5PT01NS01JSUtPUVBPTkpQ
-UU9OU1ZZXlleX19kYF1jZmZqcnVxb3B3dHN2dHh5fn+AgoN9dXqBgXp4c3N7fXyC
-fXV5f4WNjImKjYyHgYSChYWLin6Afn+Ign+DiImHi4qKh4OBhHplXVpaW1lbWlhX
-VlVXVVVZVlZRVlVVVFVWWVdXWlpYV1VRU1ZVWFhXVldWVFVXVFRUVlZWVlZWVFJV
-WFdWWVhXVlhWVFZaWVtWWFhYXFdZVlhZVlZYV1peV1hXWlpaWllaW1paWlhVVlRS
-UU1OTktLTkxLS09PUVlgZGhna2xsaWhlXFZRSUZGREE8OTs6Pz8+PEE/Pz9BQkRE
-RUVFREdHSUpIR0hKS0xMUU5MS0xKS05ISk5LS05OS0pOS0lJTVFPUFFTUlRUVlRS
-VVRTVVlYWFhXWltfXV9lZmRjZmRjYF9fX19eYGdsbnFyc3N2eHl8ent8foCBgoKC
-gYKDgYKDhIF/gX55dXZ6eXp5fH2Bg4WHi4qNjpSVlpWXlpSWl5eXl5eWlZWUlJaV
-lJKTkY+Rj4yMjY2IhYF3bmpobHJ4foWJj4uNjYqJhYKEhIOFhImJh4WHhIiMkZeT
-kp2gnpyXmpmWlZeXmpycmpqbl4+Lh4OAfXp6e3l0bGViYFtUVVJSU1RTUlNUVVFP
-UVBQT09RU1JTU1RSUlRSVFVTU1NTU1JQU1BOUU9PT1JTU1NRU1FSVFBSUlRVU1NR
-VVZVUVNTU1NQU1dUUlJTU1FUU1VVU1JUVFBPTlJYV1RSWFdUU1VVVVRUVVRTUlRU
-VVlWVFRUV1VWU1RWU1RUV1ZWVldWVVVYVllYVlhYWltYWVpYVVZZWltZWFlYWFhW
-VlZTU1hcWFpbWVdXWFlbW1tcWVlYW1taV1hZWlpZWltbW1taXVtaXF9dWVtdXl1c
-XV1eXV1cXltYWFpfYWFjYF9hZGRkYWNhZGBeYGBeXl5bWVhbXVxaW1xbW1ldXmKT
-vsvU3N7j5ejp6urrgYKAg4KCfXx6gH5+foGAgIGBfn5/gYKBf4OBgoSCfn99f4CA
-g4GCgIGEhIJ+f39+f36BhIaIhYKDgX6BgYCCgYKDf4CBgoCChIeGhYaDhoWGg4GD
-hYaGhoaIh4eEhIaGh4iIhoeIiISDgYSGiIiGhYSGhoiHh4iGhoOGiYeDhYeFhoeE
-goODgYB/f3t4enl6enh1c3FzdnZ1eXZ0dnN2eHl6gX1+gH5+fn+AgICBgICCg4SG
-g4mLh4N/gIKAg4SCgYKCgoCAf397eXl7fHl6f3p1dnZyb21sa2tqaGdmZmVhXVtZ
-VlROTEhFSExISEZGRkdKR0hHSEhKSEhIRENBRURFR0lJSkpISUlFR01IRUlGSkBE
-RUdHSEdJRU9HR0VGSERDQ0hDQ0JCPkJAQT8/PT4+PT1APz0/QUNCREJEQUFAQURE
-RUZHRUZGSEZJT1ZhbnqEg4eMlJiXlJSSk5WTko6QkI6NjYuOkZGRjo2OkZWZnJue
-oJ+foKWkpaOinJOHeGdVRUNCPj08Nzg5PDw/Pzo+P0JBQkVERUVHSkxGSElGSUlN
-TUtISkpMTExPUU9MTEpJTE1RTkxKSkxQUFFPUFhaXFhZX2BjYmRlZW5wcHJ0cnJ0
-cnFydHZ4fn+Cgn52eoOFgYF+gYOAfoGAfn5/f4eJgoWJjouIhICBfoeEgoWHh4Z/
-fX+ChYSEhYWMioWBe2RcW1hZXFpZV1dXVldYVlhZVlVVVFdWVVZYWFlcWFVXWVta
-WVZXV1ZXVlhbV1dZWFVTV1dWVFNWVFNVVVVSV1lXWFpYVVdYV1ZWVFZWV1VSUlhZ
-WVhXV1lXV1paWVhXWVpdXFtbWlhWVVZTUU5NUFFLSktPUE9SWVpgZWRlaWttamJb
-UEpGRD49P0A+PT49PT8/Pz08P0BBQUNEREVCRUZHRkdFRUlLSkhOT1RSTUxJS0xN
-TE9PTlBTUE5PT0xNT1BOUFRUUlFTVlFUVlNUVldXWFdaXmBgYGVjaGhmZGRkYmRl
-YWFlZmlsbXBwc3Z5d3p2eXx8f3p+gYGAfYOBgYCAf358e3p6e3p7f3x8e39/gYaH
-i46PkJealpeVlpSYmpeWlJSSk5ORk5SSlJKMjY6UkpGQj4+HgHpuamlrbXV7gYuJ
-iIyMh4SBgoOBgYaGhYeIiYWHiI+RlZeHk5ubnZybmZaUl5qZmp6enZmWkYmGhYJ6
-dHBxdnhvZ2NjYmBWVFNVV1VSU1RUUVBOT1FSUFFRUVJUUVJSVFJRU1NVT1BRTlFT
-UlJPT05PUU9SUlNQUVRTU1BQUVBUVFVTVFNSU1JSVVJTU1ZXVlNTUlJSUVJQUVNT
-VVJVVFJXWFhVVFRWVFFVV1tXVVFSVFNWV1ZVU1VXWlVTVFRSUlJUVlRUVlVYWVZV
-VFVYW1hYWVZXVVhWV1hWV1hXWVZYW1pZWVdXV1laWVtbWFpZV1dYWFlaXFtZV1Za
-XVxaWFhbXF1aWlhYWllZWVhaW11fYWBgX19dXF1eYGFfXlxfX2JhX2BhYGFiX19g
-X2BkYl5eXl1bYFlaWFlbW1xcXFldZI+/ytTc4OPl5+np6+uBgYaDgH5+gIF/goF/
-g4KBgYSBf399f36Ag4GBg36Afn19f4KCg4KCgYF/gIJ+f4CBgYaGg4OEhISAg4F/
-gYOBgIKEgoOEg4SGhoaGhoWFg4SFg4CHhYKGhIaJh4SEg4aDhIeFiYeGhISGg4SE
-g4CAhYaJioiLioaDhISBg4SFh4SFhIWEgIB/gH97f3x8eHl5eHV2d3VxbnF3d3V2
-dXd6enx7e36BgH+AfYCDgIJ9foKBhIuJg42Ph4N+fX1/fX+AgIKAgHx8enl4d3t4
-e3l4cnF0dG9vbmtqZ2RkY2FfXVhYVVFOS0hKS0tJSEZHSUhHREZMSEhJSEtJSEZJ
-SUdFRUdHRkdJRkVHSkpJR0dLS0pGRkJFSUdFR0lJTElHSUZERUZDRUJDQUJGQj9B
-Pz8+QEJGQz4/PkJCRUJBQ0E9PT5DREZGQ0E9REdGRElIT1ZldHyGh4uPkZOTkZKR
-k5KSkJCQjo2OkZKTkJCQj46Qj5OXmJ2hoqCkpaeqqqWhnpqLfGpXS0RBSz87Ozk5
-Nzs7QD0+PjpAQEFFRENIS0ZGRUpISkxNT01LTE9PTlFPTk5LSExSS0lLSEtMTU9P
-TlJLU1lXWVpbWV1kZ2lqaWxwcnN1bXJ1c3R2d3l4foOBf3x6foF6gYGAhIB/fX1+
-gYWGhYN9gIaHiIqIg4B/h4F8fn2DgXt+foGChYWAfoGAhIB3Y1lWWFlbWFZXV1da
-V1pZXVlVV1lZWFpUVVZXWVlaWVZVWFlYV1dXWFdWV1dYVldXVVRYV1dXU1BTVFRY
-VVNSV1ZYWFhcWFtaWFZVVFVZVlZYU1hZV1ZUVllYU1NUW1hXXFtdW1hbXFlXVlRR
-T01MU09LTk1RVFRaW15jZmRob2xnXlZNR0NCQT4+Q0ZEREJBPEFAPz48P0JCQkVH
-REVIRUdFQkVITE9STk5OS05OTk5OUFBRVVJQU1FQU09SUVBPUFFNT1FSUlVWVk9R
-VFVXWVtZWFpcXl5fZWdmamxpZ2doZGFkZWJkZ2hoaWtxd3t7dXN0eXd3dnh5e315
-e4SGhIF/fXp6eX17enx8enx9fX1/gIWIjo+PkZOVlpaXlZSYmJeTkpOUlZOSkI2O
-jpGPjpiXk5CPjIqEgHdua2tucnd/hYaGiYiIhYKBfX6BgoGBf4aJjImJjo+TmJmc
-nJybnZuYlpOTmJuZmpqYlpGPjIaIgnhzbGhtcnZ0Z11hYV5bVVVUUVJRUlNSTlFQ
-U1RSUk9OT1NTU1JQUlRSU1JSUlJSVFJQVFdQTk1OUVRSU1NSU1VSUU9RUlJTUlFP
-UVJPUlJTU09NVVZUVlZUUlFUVFBRUVJUUlJSUlJVVVRTVVVXVVVWVVdXV1dUVVZU
-VVZWVVZXV1RWV1VVVlJRUlJWWVhVVlRXV1pYWVpaV1ZYW1lWVVlXWFhWVVZYXFtd
-XFpaWVpcW1laWVhaWFRWV1lZWlpbW1lbWldZV1ZWWllaWlhaW1dZWV1cW11cXl5c
-XF9eX2FhY2ZjX2FgXmNiYWJhYWFgYV9fXV5dXV5dXFpeWFhXWFpYWllZWl1gi77G
-1Nvg4+bm6Onq636CgYGCg4aGg4KBgIOAhIWDg4CBf31/gICAgIGAgIKAgX5+goSD
-goB/f36AgIF9f4GChIWChoSCg4GBgHyChYOFhoeJiIaBgoWEhoeEgoSFg4WDg4SF
-hYiGgoKFh4SDg4SGh4iIiYiFh4WDgYOBgoODhIaDhYWGh4WHhISEhYaHh4SDg4OC
-fnt9e3l5eXd3c3N1dXd0c3FxdHV2dnR3ent6d3p9gH99fn+AgYSFfn5+fn+EiIx2
-h4uBf4B/fnx+fnx9fXx8fHt5eX14dXZ0cXFwb25tbWppZmRkZmFcWFpYVlJNS0tJ
-SElMTEtLRkdFSEpJRURHSUtMTExLSElJR0VDQkRDSEhGS0tKSkhMTEhHS0tKSEhG
-R0lKS09MR0hFREVDQ0JCQ0E8Oz1BQEFCQEFDQ0JAPkFEQ0ZEQ0E/QUNAPD1DRUVG
-RERCQ0RGSEpKUFtrdIKEiYyMj5SVlJGRkpSUkY6RkZOSk5GUkI+PkY6OjpOXmZ2f
-pKWnqaekpKOenpmSg3JeTUVJPTs3ODY2ODc6Pj07Pjs8QEJEQkZERERFR0dHSEhM
-UE1LTU1MTUpNT0tHSUpLSEtPTU5PT01OUVBNT1NXVllaWl5jaGxrZ2lqa25uc3Z0
-c3N2fXl7gX+CgHx5e3Z4e3h2d3p8eXuDhoqJgnyAh4aCg4WCgICGgnh7fIOGiYaD
-hIOChoKBgoOBfW5eWVpbWlpaWFVXWFtZWVpVVVdZV1ZVVFZRUlNQVldVWFpaV1VV
-VlZWVlhWVlRXV1lYVldWWVpbWFRYV1daVldYVVhXV1VVVFVUVVlaWVhYWlZWV1Za
-WVhYWVxbVlhWW1xdW1pYWV5eXlpZVFFPTk5OS0tOT1BPU1ZZXWRnaW1ua2VeUk1I
-RUJARENDQkE/P0RAPj88PD07PkJBQ0RHREVFRkREQ0JISUhNU1BRT09OUVNVUlRS
-VVhUVFJRUlVUUlRTUlJUU1RUT1JUUlJPUVRWWVpbXl9gX19jaGxrbmtoZ2RkY2Ji
-YGJiZ2tubm9zdXNzcnNvcHRyc3J1eXh5foCBf4GAgYF7ent6e3l7fX6Af4GBhYiL
-i4+Qk5aWlJOWmZmWlJSVlJSUkZOTj5GSkpWUkpORkZKPi4qAfHJtbGpyd3yBhoiJ
-iIWBgIB+fXx9fX5+goOEi46Oj5WanZ2dm5yZmpqbmpyYmJeVlpORkI6NjIaGgHh1
-bmtucndwZVxcX15XUlFUVVNPT1JWVFJTUlRUVFRUVFRUU1NRUFJSUVRWU1NTUlFQ
-UU9OUU9PTlBSUlFSUlNUUVRQU1JSUE5PUlNRVFFWVFJOUVRXV1RVVFNTUlBRVFlX
-UFJTVFVVWlpXVVRWXFhVU1dYV1ZXVlRZWVlYV1pZWVZZW1ZZWFZUVVRUVVVWWVdW
-WFhYWFtaWFVVU1RYVlNWVlZXWFtaWVpZWFpYV1paWlpaWVpaWllaWFtaWFldWFhZ
-W11eWVZaYGFjXVtbXV5fX2JfXVtcXV5dYl9cW11iZGRgYVtfYWJgYmRiYV5gX11b
-XVthYF1ZVlpZVlhZWVdYWFxcXmKLu8vV3OHi5efo6evrgX+Dg4WFgIGDgoGEhICB
-g4SAgIF/gYF/fn+AgYOAfn5/f35+gIF+gIF9fX5+gIGBgoWEgoWCgYB+gH+AgIKF
-hYSCg4WGg4B/hISFh4iJhYWCgoGEg4KEhIaEhYOEhIKDg4WJioaFh4aIh4eIhoSD
-goWEhIaEhYOBhYeGg4KEhoWHg4KEf399fXx7e3d3dnd0dHR1c3J0dnRycHR1eXh5
-enl6end6fX1+fn19f4GBfoJ+fYGBfWp+gnh+f4B/gn5+fXx8eHl6enp1dnhzc29r
-amxtamdmZF9eXVxcXFtWVFBPTEhESEpISUlNUU9MS0lMSklMR0hIS0xMS0xMSkpJ
-RERIRUhGRkhHTExNSEdIS0lJR0ZJSEVGR0hHR0dGRkNJQkZDRUNAQEJCQEBBQElF
-QD4/QD9APERBREJEQkNBQ0RDQkFDQUFCREVERklISEdJWGZ0f4WJjZCPkpSUlpSS
-lJWSkJKUlZSSkpCQko+RlI6OkJWYmpyhn6KioaOhpKOioJmWi3lmUkU+Ojo4NzY3
-Pz4+PEA+PDo8PEJAREVISkRGSEdIRkpLTUtKSE1OTk5NTElIRURHSElKTk9MT09S
-UlFQUlJVWVtZW2BiZGVgZWhsbnZyc3Zyb3F4eHh+foF+dnV5d3Z3dXV5f318fIOI
-i4qGhYWEgHuBgoKDgIWJhYSBhYeMh4SEgoSDgX1/gn12bGBbWFlaV1pZWFlZWFdW
-WVdaXFlXVVNWVlFSU1RWV1ZWWFhXU1VXVlZTVVhUVFNUVllWVVlWWFlXV1lXUFRa
-VlNYWFhVV1dVV1RXVlVWVFdXV1dWV1hZWFlaWFhaV1dYWVxaWldWV1pbV1RRUE5M
-SUZHSU5OTVRUWFlcY2hrbWtpZltQSkZDP0FBQD4/RENAQkNBPz07QD5DRUJDRENF
-RUVBREZESUNGSExPT09QT0tPU1RSUlVXWltZV1hYWFdZXFtXWFZXU1ZWVldVVFRV
-V1dYW15eYF5gX2JnaG1paWdlamZiX2FlZ2lqa29wbm1xcnN0cnJsbW1rbW5xcnN1
-eXt8e3+Af317eXp9fnx9fn+Dg4WJi4mNjpGTkpKRkJGVlpeWlpOUkpaVkZOSk5SU
-lJKRkpKUkpCJhYR9c2tpbHB0eoGFgoOEhIF7eXx7eXp7fX+ChoeIjo2OlZ2dnpiY
-lpibmpuZmJiVl5OTkpGMioqKg4OBe3dzb2tpcHdzamJeYWFbU1BSUlJTUlJQT1BQ
-UldTUlFSU1NUUk9PTk5PUU9PUFFTUE9RUFFTUlNPTk9UVlFTVlRPTVNTUVBQTk5O
-UlVVVVVVU1JTUlRUVFVVVVVVV1RWVVRWVlZTWFhWV1ZYVlVVVVNXVldWVVdSV1la
-WVdYV1ZWUlNUVldVV1ZXWlRUVVVUV1xYWF1YWlpYWllXWFhUWFZWWVtcWlZXWFhY
-WFRTVFZXWVtYWlxaWFlaXV1ZWVtaWFpdXF1bW1pZXmJfXVlZW1pdX15eYV9fXmFf
-Y2BdXWBiY2BdX2ZiYWJiY2FkYmBeX15bXV1fYF9bWVpZWltcV1dTV1tbY4y6ydTb
-4OPl5+no6+mGg4CAgYCBf4KFhYOFgYB/gYB/fn6Bf3t7fn1+fH6Cfn9/fXx9gH6B
-foN/foB/gX+Cg4GCf35+fH6BhoaCgoKDgn+FhYWFhIGEhISFhoaGhYaHh4eIhYOD
-h4eBgIOFhIODhIeFiIaFhoiIh4iJiIeHhYaJiIaGhoSIhoaFhoWHhYWGhIeFgX99
-fXl6eXh4d3Z1c3R1dXV1dnBvc3Jxc3Z2c3d7e3p8fYB7eXl3fYJ/f4B+fn9+bXp/
-fHp7fn5+fXt8e3l2dHR2dXJvcG9saGZlamdjX15ZWFZVU1RUUk9MTE1KRUhISUpN
-TU5NTE1MTUpJTExLSkpNTk1NSklNSEZDRUdIS0VFSEpLS0tJS0pJSkZGRkZJRUZH
-RUdKSUZHSElHREREQ0FBQ0I+Pj5BQUFARUVBQEJBQz4/PUFEREVFRUVDQkNBPD5D
-RkZFSEZDQ0dOXGx6hYqMjpGQj5CSk5eTk5SNjJOUkpORk5STkZOUlZWRkpSVmJyd
-n5+eoaSjo6OjpJ+Zjn9sVklBOjg1Njk7PDo+P0NFRD48PT9AQ0ZCQkVGQkZKS0tM
-TEtISExMTExJSUhESklKTU1MTkxNTExTVVNYVFNZW1laX2BgYmFlZWh0eXBxc3Nw
-a3V3dHd6fHt2bnB7fHl0dnh3dnZ5en+GiYqFg4V/foOHjYSDhYeMjoqGhYyGg4SJ
-gX99fn6AfXprXllcXFVWWVpYVlZWVlhWVFZaWlVXWVZVW1VVUlVYWlZWV1dUVlha
-WVlWUlNUVVdXVlRVWFhaWVhXVlpYWVpZWVVWWVdYW1pXVlZVWFdWVlhaV1ZWVVZW
-WlhYXFdXV1RZW1paV1RWV1RPS0hISEZHR0xLUVBWWFlZW1tjaW1raGdjX1JJQ0JD
-QkE/QkNBPT0/P0E/QEBCP0ZDRkVHRUdFREZFRUlFSUhLSktMT1FQUVBRUlVVWFZZ
-W1taXF1cXFpcXVxcW1tcVllXV1laXlxcXV9eX2FiZGRkZGZoa25pZ2ZkYGFjZmZn
-a2xtbXJwb3BwdHd0cHBub21tbW1pb3J2eHx/f4B/fn58fH6AgoCCgoOGh4qOjYyR
-kZKTkJKTkpOQkJSVk46RlJiVlpiXlZaRkpGQlJaTj46MiHx0amttb3V7gYOCgIGA
-f3x6eXh4eXp7foGChYmLjY2UmZmcn5aWmZiZmZSSlJaUkpKPjY2LhoSEgYB+eHJu
-aWRncHNybGZiX11aUVFTVFVVU1JQU1RRUVNPT09TUlFNTE5PTU5PUUpNUVBVVlRT
-U1FTVE9QUlRUV1JTVVRPUFJQUFNSUVBSUlJSU1FTVFJTVlRWU1ZZVlNXVFRTVFdW
-VlVXVVNSU1RXVFJSVVdZWVZXVldWVlZXV1pZVlRWVFZUVFZaWVpZV1dWVVZYWVlX
-X1pWWFdXWVhYW1hWWFVYV1dXWVdbWFZUVFJQVFNVV1laWVpZVlhXWFpeW11eWlxc
-WFlbXVlaW1tdXVxZXF5cW1pcX19dXV1hZF1bXmBhZmJfYF9gYl9eX2BiYGNgX19g
-Xl9jX1tbWlpaW1lbXF5XWFpji7nJ09nf4uXn6Onq6oSFg4SBgYGBgYKEgYWEf3+A
-f3+AgoF+fX98fICAgoF/f4F+fn+Af4CAf4GDgYODgIGAgH9+gX98f4GEgYKBgYGC
-goGGh4iIhYODgoWFhYSFh4iHhISFhoWFh4aGhYWAg4WJh4aHhYSEhISFhoaIjIqJ
-ioiHioWFhIaGh4aKiIeCgISDh4SCf3p8fXt7eHp6d3Fzc3Vzd3Z1dXRubG5ydHJx
-cXV1dXh5eXd4eXh2eH1/fHp9en53d3l4d3Z3eXN2dnFxcXBubm1tbG5pZ2ViXVtd
-XFlXVlNRUlFQT05PTklHR0VITEtKS01NT05NTU5NTEtLSUlKSklKSktKSktKRkVD
-SEhJSEhNTUtISkpKS0tKTEpJSUpNSEZJSUpIREdGREVGSUdISkZFQDs7QUNFQT5C
-R0JBP0E9PT1DQEZJRERDQkVHREVFRUVFRUVERkVHSk1ZZnaBh4uOkY+SkZKVlJaV
-j5GQkpSVkZCTk5KXkpOWlJWTkpOXm5qcn6CgoaCjo6Kgn5uXkIRyX05EPzo8Pjo4
-Ozs7QDw9PEA6PD0/QUNERkZFR0lLTEtLSEZGSEZIS0lLRkpISEpISkhLTU5PTlJR
-U1RVUVJZXVtdZGVhY2xjZ3Jzbm5vcHNtbW1vcHF2dnV1dnh6e3h7f3p8fXp3eH6C
-g4aFfoGCipCLhYiIi4yQi4WFhoeHh4iCgIF7fXl7d2pcWFhVVVZYWVhXVFdWWVxa
-WVZVVlZaWFZWVlRTVFRUWFdXVFlVVlZVU1NaVFNVWFhXV1VWVFVTVFlXV1dUVVdY
-V1lZWVhYWFdXVlhPVlVVVldVWVdXVVVWWFlbWVlXXFlaWFlXWFhVVFRTT0pER0dI
-SUxOUFNWVVlbX2NmbWtpZmNeVU5GRUREQ0NCQj89PT89QEFCQUFCREVDREdKRURK
-SkhIQ0lISUlJSE1NUFBSUlFSV1hYWFZYWVlaXmBcXVxdWl1dXFtaXF5dYmBhYGJh
-YmRiZWNkZ2ZkaGtpampjX19cXWNjZWhqamtwcHFzcnF0dXZzdHFzc3Bxb2ltc3V7
-fYB8fHx+fXx7f4GBgIOEhYqNjI6SjpGTk5SXlpmTkpWTkZGTkZKSl5qXlJeXko6S
-kpKTlZOQjoyCe3Nuamxzdnp+gYOGgoF+f3t2cnV4dnh8foOGiImOkpadmJaXmJqd
-mpqYlJORkZaXmJKSjouKhYF+e3t4c2tlXl9nb2tpZWJkYV1UUFJTU1VUVldSUVNR
-UFNQUlFSU1FRVFJLTU9SVVJTVVJSVFdVVldWVVRWUlVUUlNbVlRRTVJWVVJQUFJW
-VFRWVVRUUlRVU1NTVFZVVVVVVFZXWVpWU1RWVFJVUlFUUlJUWFdWV1pYWFZTV1VW
-VVdVVFRUVldaWllaWVlZWFZWV1daWFlbW1paV1hXWlpaV1ZVVVVXWFhWVlZUUlNU
-VFdVVFdXWFpYWllWVl5ZXVlbW1lWV1pZWVhYWVtdXmBiYF5cXVxeX2JfXl9aXl5i
-YV9eX2BhY2FhYmFfXWFgX11eXF9cXl9eYF5eXVpYXFxcWVpcWFlXW2GMvMrT3N/j
-5ejp6erqfoKFhYCBgH+BgYGBgYWFgYB/f4B/f4ODfn2Af36Bg4SDf32BfX1/gYWA
-f39/gYB9fH5/f4CBgICAf3+ChoeEgXx/hIWHhYeFhYKGg4SFgoOFgoSCg4aFgYWH
-iIeFhIKAgIOIhIOGh4aGhoaIh4eIiImFh4iHh4iEhIaIiISEhYaFgYKBf3p7c3t4
-eHd2dnV0cXR0cnFvbXBvbGxubGxtbm1tbWxtcm9scHRycHFxcnV2c3J0dXV1cXN0
-cnNxb25sa2pnZ2hlY15gYF9hXVlZVVRSVFVSTk5LTktIS0tMTExLS01OUE1OTU9Q
-UFBPT0xKSUtMSUtLSUlPUU9NTkpIRkNDQkNHT09MSklQSkxISUpLSEtMSUdGR0ZH
-SUhFRkRHREhISEVGSENBPUNHR0RCP0BCQ0RDPz1BQUNDR0ZGREFBQEJGR0dIR0dF
-REdGSUtMTVNebnyFi42PkJSSk5KVlZmVj42NkJaVk5KRk5KRkJCQkZGRkpWamp2f
-oqShpaOkoaSloqCgmop4ZE5CPTo6PTw8Pjw6Ojo8PD87PDk7PkRHRkxIRkhISUlH
-R0ZHR0VHSkhJSEVGR05QSklLTU5RSk1PVFRTUVhZWlpcXl5iamJsamtramZoaG5s
-am1wcG9wc3Juc3Z3c3Z8fYF+fHZ2f4KAgYSDhoaCh4SFh4mMkI6IhYGDhImMg4CA
-gYF+e3x0YltZWVdWVVdYWVVVVldVV1hYWlVWVVVYV1dZV1VXVFJVUVJXV1VVVlVX
-VFJUVlZWWFZWVFNTUU9TVVlXVVNTVFRUVlhXW1hXWFZTVFhWVlZUVVVXVVZYWVZW
-WFdYWFpcWlZYVVdcWllXVFVOSEZIS0hKS05QUVZVVFlhY2ttbGNeWlhXUktGRUpJ
-RUBAQENEQ0E/QUZGSEdJREVDREZHRkVGQ0BGR0lHSElKS0xNUVBSUlNVV1pYWFha
-WlpaXl5gXl9gX2JhYmFeYGBfYGJlZGVkZGdpaWtta2poaWhuamlkZWNgYmRmaWlr
-bW5ycXV1cnFxdHJzdHNxc3NycnN1c3p8gH5+fn2Bf4KAhYuHg4WMjY+PjZGQkZCR
-kpWXlpWUk5iYlpOUlJOUl5iWlJKQkZCQk5mWk5GOjIV8dm9raW10e31+fYCCfnt6
-eXZzc3d5d32BfoSHiY6Sk5WXmJebn6Cgm5iVkpWcm5qalpKQiIOAf316enl2cWpl
-X15la2VjY2FgXltSUE9RU1RUVVFQUlNRUlBQU1VUVFJRUU1MUU9RU1VVVVVUVVRT
-UlFQVFJSUFdTVVRSUlNRT09SUlRQUVRXWVJUVFNTUlZVVlRTU1dWVVZUVVhWVVRT
-UVVUU1RVVFVWVVRbUVNXVVZWVFRVVVVVVFNVUlFSUlVXVlZWVlhYWlVVVlhYWVpX
-WFpbWllYWFdbVVRZVldVVVdVV1lcV1VYWVhaWllYVlhYWVhaXFpZW1tYW1ZVVlhb
-XFdYWVteYWJfX11cX15dXl5dXVxfXl1cXF5bW19hYWFfXVxdX11eX15fXl5eXV1d
-XWBeW1laWVpbWlpZWVtjYIe8zNTc4OPm6Onp6+qBgYCEhYSDgoSHgoKCg4WFgoGB
-gIGBfn5+fYB+gH9/gH9+fX18gHx/g4WDgoB/fn1+fn5/fX6Bf4OAgoSDg4KEhYaG
-hYSGhIWEhIOChIKAg4OCg36AhIaDg4GCgIGAgoGDg4KEhIiFhYSCgYOEh4WFhYaH
-hIWIiIaCgoWFg4OHhoaHgoCEg318f3x6d3d0c3Nybm9qaWhqaGlpaGZnZWVnaWVn
-aWdlZmNna3Bua2ZoZWdra2hqbG1ua2xramdoZWFhYl9eW1paWVZTVlRSU01OT0tL
-SUpMTkxNTE5MTk5QTk1OT05PU1BPUE1RT01OTE5NTktNTk1NS05RUk9NSklEQT5C
-REVHTE9OTUtISktMTE1MTkxJSklGSEhJSUdFSEdJSEpKRkZCQkZGRkdGQ0FBPUBD
-RURCQD1AQ0dGRkNAQENDRUhJSEVHR0ZHR0lNTExNU1hnd4GKi46QjpORjpCRkJGN
-j4yQkJWXk5aUlJKQkYyMi42Qk5SWl5mcnZ6hpaalpaalqKSel45+aFdHQT09Pjw7
-PTw+Ozk4Ozo8PD09PkFEQUNFSkhISElHR0pLS0lOTFBKSktJSkhHTE1JTE1SUFBS
-UE9RU1ZYVlFZXF9lYWJnY2ZjYmRka3FqaWptaGtucXVzdnFtcHeAf3p5eHh7eHl+
-fYGJgX5/fYKFh4qQjpCMgn+EhoqDhIR7eH6CgHFlYFpXW1ZUUlNTU1NXV1RXWFZU
-WFlWVFRXV1ZXV1RXVVRVWVVUU1RVUVFSU1RWVFVTUlRUV1dVVFZVUVBXU1NUVlRW
-WVRXWVRUVVRYWF1ZVldVVFJZWFVYWFdWV1ZWU1VYV1dWWFpYWFpVT01MRkVERUhL
-TUxRVFZYW2JkaWttZ1tTVFZWUExHREE/QkFCREJAPkFCREVHSEVFRERGRkRERkNB
-RURCRkhJTE1KS0pOTk5TVVNTVFJVV1dbXFpcXVxhYmNkZGFmY2BeX2ZjYWFgZGRl
-ZmdvbW5saWZjZWlnZWVoZGNhZGZnaGdqam1xdHJwb3BzcHJxcnV0cHN1dnZzeIJ/
-gICAgH+EhIKEiIeGh4mKjo6MjI2OkI+RkpSVlZeYmJWWlZaUk5WUlpKTkZGQj5CT
-lJGRkY+KhX12bmxtcHV6fXx9gIF+end4eHV5eX5/gIKAgYSHiI2Qk5aam5yfoKGa
-l5aWnJianJeWkY6KiYeBe3h5d3Vva2dmXV5jYl9gYF5eYV5XU1JOUFRWVlRTUlFS
-UVFQU1NWVFVTV1FPTlJTUFFUVFFSV1VVVVJSVFNRUFJPUlFRUlFNT1RUUVVSUFRX
-VVVUVFNTUlNVV1ZUVFdYVVhVVFFQU1NST1RWVFRUU1NUVlZXWFZVVFZWV1lXVVlY
-WldXV1hXVlZWVVZWVVhWWVtZWFdYVlRVVFVXWVZWWVdZVVZWVVZVWFhVVVdbY1tX
-VldYWVhZWFZXWltZWVZaWVdXV1ZYWVhZVllZV1tdXFtaXFtaW11cXF5cXF1fXlte
-Xl5fXmBiY2JgX11eXmBgYF9eYF9mXV5eXltbWVpXV1hbWlpdWl1ehb7L1Nzg4ubn
-6enr64aEf4CCgoKAgYJ/goOCgoODgn2CgoB7foGBgYF/gIGAgH59fX5+fn1/goOC
-gYSBfn9+f39+g4KCf4GEhISEhIGBh4aEg4KCg4aEhISDg4WDgoGAgIOCg4KCgoKC
-g4SIioyCg4WGh4WHhYOEgoSDhoeBg4eJhIaJiYaEhIWFhIWFgYB9fHx+fHl6fHhz
-b29ucGxua2hnaGVkZGNgXmJfXmBgX15dXVtbYV9iYWFiY2FeYWFiYGBgZGRiYmJf
-WllYV1hWV1RTUlNTUU9PUE1LTk9PT0tLSk9NTk9NTVBNT09MTU5RT05NUlFNT01O
-SktKTE5PT01OTk9NTVBRUU5LTUVCQ0NFRkhKTUxKS01KS0pMT01YTExLSkhISUlK
-SEhKSkhHSUlHS0lERUpIS0lBQUBAQURDREJAQEFEQ0NCQUA+QEdIR0RBQ0VDRUhI
-SUhISUpLUF1xf4OLjpCSkpGSkZGSk5iXlJOUmJmXlZKQkI2PjZKQj4yRk5WWlpme
-n5+go6alpqqoqaWgmY+Dcl1NQ0FAPTw5Ozs8PDs7PTo8PUE9Pj1BQENEREJDRUlI
-S01IS0xNTE5QS0tISUpLTkxLSUtOUlFTUlBNU1dZWFleY2FfX19iZmVkZ2xva2xp
-Z2dmam5zeXZzbnB1dXZ7enZ1dnp4dXh/goKAhIGBgYiKi4yKjIiGgIKFhoaBe3t5
-fYCGdGJZWFdWWVNRU1RTVFNXVFZXVllWVlVWVFNWUldXV1ZWVllWVVRUVlRWVVVS
-V1dUVFdYVVRUV1hbVVVUWFdWU1NYVVRVU1FUVFZWVlhbW1pXVldVU1RWVVVXVlVV
-VVZaV1hXV1dYW1pZWVhUTkxGRkhJSUpNTk9SVllhZmhrbGtoXlNRVVZQTUZHREVE
-RkdGREFFQ0JBRUVERURJR0ZCQkRGQUdGQ0NFREhKS01LSkxOT1BSVVJUU1ZYW1lc
-XV5dYGBlZGRjYGBhYWJiZWdjZWRmaGdmZmVnZWlraWpoaWhoZ2hnaGpraWhmbG5t
-cXBzcnJ0c3J0dHJ1dXJycnN2d3d5fX99fYCBhIaHhYSHiIqMjI6PjY2OjIuMjY+R
-kJGTlJSSlZaUlJKTkZKUlZWVlJCTk5OVko+RkIqBe3NubG9wdXh6f4CCgIB7dXZ3
-dXZ1e3x/gYaHiYeIj5CUlpmbnJ2fnpyYlZSVm5eVlZCQj5CKiIF9eXV1dHBpYmJf
-X2BiXVxdY2JhX1hWVVNUVVZWUVNVU1JST09PUVJUU1NTVFJUU1RVU1BWVFNTVVNU
-UlNSUlBNT1BQUU5NUFBRUVRVVFRUUlRVVlRYWlVSVFRSVVZWVlhVVlJTVFFPUFNV
-VFZTVFVVVVRVVlZYWFdXVlRWWFhVVVdYWl5YV1hXVlVXVVRUU1RYWFZZWVVUWFdV
-VFZZWFVYWFddWllYV1ZXVlZXV1dZV1dZWVpbWlhXU1NUWlZWWFpaWVhZVVZYWl5e
-XFpZWVtdX1xdWVtbW11cXFxcXF1eXl9eW1xfX19hYl1iX2JiYmVoY2NgX2FbW1tc
-Wl1fXFVXVlleXVtaW12Evs3V2+Dj5ufp6errgoGBfn6EhYSBhIaCf4CDgoOChYGA
-gH17foGCgoOAgoB+gIF/fn58foGBgYCEgoKAgYKBg4WCgoKDgoF/gYGAgYOFg4SG
-gIGCgoWCgYKCgoSDgISFhIGEg4aFhoSGg4GFhYaHh4aBg4WHhYSBf4SIio2EhYSE
-goSEg4KBgoOCgoB+fnt7eHp4dXNycW9tbWprb2hhYF5iXFtbWlpbWFlbWllXV1hX
-WFdXXFxdWVpdXlpaXVhXWFlbXFxcWlxdVVdVU1JQTk9QUFFPUVBOTktMS0xPTk1P
-TkxOT1BTU1NRTUxLTE1RUU5RUE5QTk5NTE1PTk1OTEtMTk9MSkpOTU5MSERER0dE
-REdKS0xLTE1PUFFPT09NS01LS0dESkxKSkhHS0VHS0lMSEhEQ0VIR0ZCQD5BQERB
-QUNEQkNDRkQ/QEZHQkFEQUNEQkNFSkpJRkZHRkdNWWx4hIyRkJGUlZaRkpaUmJeW
-k5GUk5WTkZCPkZCRkpGNj4+SlJWYlJqam5yfpaGoqaioqKSgmZSIeGROQUFAOz07
-Ojo6OTk6Oz0+QUE6OD0+PUBCRENFR0ZJSktNTk1MTkxPTEhKTk1MTUhJTUtOT1BQ
-UlBSUlRZW15eXVtgXlxdYmZnamtnaGlsamloamlucHFtb3JzeX56eHV0d3d1doSB
-fX6Eg4KCio2MiIWFgoODfn5+fX19fHl7hIZxXVdXVFRXVVJSU1VWU1RUVFVZVllV
-WFVSU1JUU1VWWVRWVlRVVVZUVFVWWVlYV1haWFdVVFRXVlZYWlhZWVpYVVdWVFZV
-VVZVVlZWVlhYWFdaVVZUVVVXV1lZWVdWWVpZV1dXVVVXWVlXVFFNSkdFR0lLTU9N
-TVBVWltjaWtubWpmXlhUVlVQS0lHRkdKSUdGSUdFQkE/Pj9BREhCQURDRUdHRERG
-SEhISktKSkdKSU5MT1FVU1NUWFdVVVpdXFtgZmRgZWVnY2lkZWRlZmZkZGRjZWNj
-YWJkZ2xsb2tqa2pqamxsa21sbG9scHNzdXd4dHN1c3N3eXd3eHh6eHh3dnV3eHt/
-goOHh4eJh4eKi4qLiYuNjoyNjY2NjY6NjY6QkpSYlJWSlJCQkJOVmJaVlZOQkJKT
-k5KMjIV7dm5rbGxvdXx/fX5+fHt3dXV2dXJ1eXx/gYaEhomMjpOTlpmbmp6blJmV
-lpeYl5eVkYyOkJCJgX99dnV0cGtkYWFdWl9eW11jZF5cWllVU1FRVlVYVlNRUFJS
-UlJUUlJQUFJTUFFRU1NVVFNTU1JRU1JRUlNSUk5QT1BQUlRSUFJUVFVQU1RRUVJU
-UlRUVFNVVVdWWFlVUlNUVFNVUlFUV1RTU1JRU1JTVlVUV1VUWFhXVlVSUlRTVFdY
-WFRVVlVUVVdWVVNWV1RUVVZVVlRSVFVXV1dWV1dYXFxaWVdWVVRVUlVXU1NVVVdW
-VVZYW1hXW1hZXVlZWllYWVteWlpdX19bWlpZWllbXFtdXFtdX19cW15eXV9iYF9e
-X11cXV9fYGRlY2JjYGRlYmJoa19cXl1ZWFhZV1dZW1xbW11bXoW+zNTb4ePm6Onr
-6+qCgn59fYKEhYB+hYSBgH+Af3+CgYB/gH57fX6BgH6BgIKBhICBgIB7goCAgIaF
-g4GCg4SBgYOEhYSEgoeFgIOGhYSDgYOBgoKCgYKBhIKFhISChIGBgoWIiYuFhIaH
-h4WDhYeFh4WHh4eIhYOGhIOEioiIiIWGgoODhIOAgIJ8fH18d3Z1dHRxcW5saGdl
-Z2djXl9fXlpZWVlYVlhXV1lbWVhXWVpXV1paWFhZWFVWVllWWVVWVldVV1dWVFRW
-UE5TVlVST1JSUVBSVFJVWFRSU1NSUFJVUlJUVVZTVFFSUVJTUlFQT1FRUVJTUFFS
-UlNSVFVXVFZSUVJTVVNQUVFTUFNSUlRVVlVSUFJTUlNVVVVZVlRUV1RVVFJTU1JS
-VFNSU1VUVVZWVldXWlpZWFhWVFddWVVXVVRWV1dWVlhWVlZVVlVVVVJWV1dWWFdX
-V1VXVlZXWVtaV1ZVVlVUVVdaW1pbWllbWlpaXF5dWlpYWl5aWlhcXl1ZXFxdXlxc
-XGFiY2BmYmBgYF9hYmdgYmRgYGBgYGFgYF5eW11eXl1cXFtcWl1bWFZaXm6qsr3M
-1dzg5Ofp6evr7H5+fn9/fn9+foJ+f4KDgYODhYCAgoOCfoGCf3x/gn59fX5/f4CB
-hYOCgYGBgIGBgICAgYGAgYGBgoOBgYKGg4WFgH13cGdbVFVTU1VTUVBQUldVWVtb
-V1VYVlhbXVtbWlhYXF9fXl1bW1pdXl1dXVpZWmBdW15bWl5eXlxdWl1eXVpYW1tX
-WVlaX2FiZl9gYGBkZWJjXlxdXl1dX2BhX1lYWVpdXF1dWl1bVlphYl5eXl9jZWNf
-W1dVWF9ZXF1dWVteXVpXWltbWVteX11XVFVTVFtcV1NUVVRVVVNTU1dZV1VYWFVW
-WVhaW1ZVWV5eW1RYX2JlZ2NcV1hXV1VZWldSU1VSTk1JQzw7Ojs4Ozs3ODg5Oj5F
-R0tOUVRRU1haXFZST1FTVFBOT1BQS05QUVFRUk9QVVRWVlVRT0hHSlBPUk9MTlFM
-TUpOWlVPRUVGSEhLS0tNS09RUk1LTUxISE5NUE5MUFNVVVhWWF1pfpKcoKOioqio
-qqenqailo6ShpKSin6OlpaWkpqSlpKOko6Kko6aqq6ytrq6ys7C2t7i5tbS0rK2x
-tLO1sK2vrq2sqqurqKijoaSnqKqoqKWlpKSloZyaj35lTkI8ODo6OT07QEtjd4eQ
-naapq66xs7KztLS2t7a4t7e0srCojF5NSUhKTktISEhJSkxMTFBPU1ZdX1pVUlZV
-VFNVVVdSUVBQT01NUlRXVVRUU1JUTk5MS0tNTEtMTUtNUE1MT0lGSUxIRklIRkhI
-SUlJSElJSEdKSUdJSEZHREFAP0I+QkJCRUNDPjxAQTs+QEE/PD5CQEJEQ0RDQj48
-PEFBQT08PUNKSkpNTE5PUlRRUk9NUU1ISElHQ0RGRkpPUlNYV1ZWV1tcXGBlZl1T
-TkxGRkdEQkVIQTs6Nzg5OTs8P0VFQ0dHR01JSktMT1NVVVVXXF1dXmJpaWprb29y
-cXV4dXV0c3N1dnR2dXV0dHFtbGxpampnZWhpaGpqaGdmZ2ViZ2pubnFydHl7gISH
-iY6RlZacnqCfnZ6enJybmpqcnJyempubmpeWlZGPjpCMi4qLi4uJiYaFhomLi4yN
-j5CQj5GVl5abnZ2dnJ2doaGhoaGjoaGhn5yen5ybmp2emZaYlpSRjH90al9XVlNW
-WF1hY2NkZmtydXR2dnN1dnh8fICDhIWIioyNkY2KjIyOjIyIiIaIi42OjI6PjouF
-fnp0b2xpZF9bXmNgXFtdXl5eWllXVE9PUlRZV1NVVFNTUVJUU1JVVFNTUVJOUFJU
-VVRZW1ZUU1VUUVNSUFFTUFNUU1NVU1BRVFFSVFVVU1RTUlRVVFVYVldUU1JPUFBR
-UlFRUlRPUlJVVVRTVFVUVFRTVVFTU1RTU1RTU1NWVldXVlVZWVZYWVdWVlZWV1RV
-WVdWVlVVWlpXVllXVlZUWFdWVVRYWFVYWVpWV1hWVVZYWVdVVFlXWFlaWllWV1ha
-XF5dXltbW1paWVtbXFtcXVpfXltbXVpbW19iYmNgX2FhYV5eX11eY2FgZGVlY2Nj
-Xl1dXV5gXlxcW1peW11cXVxfb622vMvU2+Dj5+jp6urof36Bf4CBfn5/fH59fHt8
-gYOCf4J/f4J/f398eoCAh4aCfn6AgIOCg4CCgoGBgIKBhIOCg4CAg4GAgoKBhIKC
-goOBeHBmW1VST1NTUlRUTlBWWV5bW1laVlVXWlxcXFpWWFlYW2FcWFpdWllZXFtc
-V1pcWVtcW1pbWlxaWVhYWl5dXF1dW1dUV11fYGBiYV5aXWNnZV5eYGBhYl9dXl5a
-WFdWV1laXFxZVlhYWV5eWlZWXF5bW1tXUlZaZFpdX19dWFlZWFpaWFhaYWFhW1dT
-V1RaW1hVVVVaU05RVVNRVFpcYGBbVltZWVpVUlJVXF5ZVFlgZWNlY1pbWVlYVlhZ
-VFNSU1FTVFJHQzw5ODw9ODY1Mzc9O0BFR0xPVFpSUldZVE5SUVFRTk1QUE9UVlJT
-UFBRTEtRVFRWVFJTS0pLTlFPT09OUE9KSUxPU05GQUZLS01JSUhNUVNQS0ZERklM
-TE1OTUxPVFZXVVNUV11thJihoqSmpqiqq62qqqqnqKimpaGfoKKjoaSlpaOjoaOh
-pKSlpKarq7Cura2wr7W2t7i4tLi2sra3ura2s7Gwrq+uq6amp6SjpaanqqqopaOk
-pKWlp6OelYFnUEE8ODw+Oz5IXXeHkZ2mqaqqrrGztbq3tre5ubm5ubm0s6qMYk1J
-TExNT05KR0tNTE1PUVNXV1tiWVNSVVZWWFZTUE1OUVFPTFBPVFRTVVNUUVBRUFNN
-TE1NTUtNTU5MT05NTktMR0hISUdIRURKSkhFQ0JDRkdFSEZGR0hGQkFEQ0BAP0FA
-PkBAQUBBQD8/QTw7Pz5AQ0BBPkA8Ojw8PD9BQEJBQkdKSk1MT1BNTk5OT05ISUlI
-SEZFRUNJS05VVlhcW1pdX19gYWBeX1xUTUxKQEJCQUM/Ozg+PDk9O0BDRUNARUxL
-UUlKTEtNTVJXWFhfYWBgYGZqcnFwcnFzdnV5enh4dXJ0dnV1eHV1dW9ua2xub25q
-a25ua2xsamloamhubGpucXNyd31/hIePkZKSmJyenqGiop+fnJmXmpuem5mWmJua
-nZuYlZKQko+Qjo6NjIuIioiIiYyPkJKUmJeUlJaXmZuenZ6fnpydnqOio6Sjn6Kf
-nZ6ampmdnpudmZSUko6EfnRnX1hWVlheYGFhYWNna3B1dHV3dHd4en2BgoiHh4iI
-i4+Pjo6RlZGLhYaIh4eMj5ORjpCOh4KAgHx1bmJbV1hbXV9cWVZWVVRXVlRSVVFQ
-UlZYVFFSUVJRVFFTUVNRUlJSUFBNTlJTVlBVVFJSU1NRT1FSUVBSU1JUVFVWVFNU
-UlJSU1BOUVJTU1FRVVlVVFBQUlJSU1JRT1JRUlNTU1NTVVZfVVJUUldUVVRRU1NV
-VlVTUlNXVlRVVVZWVlVXWFdXVlhZVVZXW1dUVVZUVVVVWFxXWFpZWFdZVlVUV1lZ
-WFhUV1dYWVhWWFxYV1dYVldYVlVWWFhZWlZZWlpZWlxaV1ZaXV5dXFxeXVtdXVxf
-X2BgYV9eYGBhYWNgYWRiY2RkY2FeXl5cXVleXl1bWlhZWFhZWFpbWlpidaW9y9Tc
-4ufo6evp6OGAf4B+f4KCgn98gH99fIKAgIB/gIB9fn1/gH59gX6CgoKEgIF/gIKA
-g4SIhYF/f317gn+AgoB/gYGAgYB9foCCfnpuZFlTVFFUU1NPTk5RVlhZXFdZWF1b
-XVxcWlZVVlZUVVdaW1pXWldeWFldX11eXl1bXFtZWFhaWFdcWVpcXFtbWllaWldY
-YVxdXWBiXmFfYWRkX2NmYmFjYmNgXV1YWFZSV11gYWFcWVZXVltcVltcWFlfXFZU
-WmBfXlteZGJbWV1dXVxVVVxgYGJcV1haW1laVlVWUFBOT1JSUVBWV1pcW1hXWFha
-WldSVFddXVtZVlxfYmNkXFlZWVdaW1ZVVFVVWFdRUlBMSEJBPDo4ODY4OT08PT0/
-Rk1MUE9MUFVTU1JPUFNUUk5RUVVWVVZQVFFNT09PUlNUVVJRUFFPT05NTk9QS0dI
-S0xNSUlIS0xKSkRERk1RUU9NSkdER1BSUE1NUFJSUlJTUVJWU155lZ2gpKamp6ar
-q6msrKutqaSlpaOjoKGgoaWjoqGjpKKjpqalpqalqK2rrq+xsKyvr7azsrW7urSz
-tLKura+wsa+vrKyrqKikqamtrKejpqSjqaqqqaWfloVqTkI8PTw8QlZxh5ahpqmq
-sbCwt7a4ubm6ubi5uLi4uby4qY5kTk9OTlBQTUVITEtNT01RV1laXFxcU1JUWVpW
-VVRUVE1NT09PUVJSUlBRUE5OUE9PTk1PUFBPUEpJSk1LS0pNTEtIRklIRkdERkZL
-S0lHR0RFRkZGQ0FDQkBERkQ7OT5CPkA+Pz9AQkJCQkI7PkE+QEJAPj1CPj4/Pj5D
-Ozs9PT9BQ0VGRUZISEpKS0lLTkhISUpKR0ZHSEtOT1BXVlZZX2FiZGViYGBgX1hU
-TUdEREM+Pj8+Ozc3NTg8P0FDREJBRERISElLS01SUlVWWl5fX2BmZ2pvcHBwcnJ1
-dXl8eXh6dXZ4eHp9eXJzc25udHJzdXJwcXJva2psaWdnZmtpaHBxdHR3foKHjJCU
-lJWZm52fn6CfnZ2cm5mbmJmYnZuamJubnJ2Zl5iVkZGRj4+PjI+OjYyPjZGSlJKU
-lJOTlZmdnpqem5ydnJ+foaSgoqCeoaKgm5mbnJ6dnZuXlZWPiYV9c2ddWlhaXWJl
-ZWNjZWpsbnF0dXZ3e3t9gIGFg4SIjIuOjo6NkZCQioiHhISIi42SkpKPjYiAe316
-dnRvY1dTVFteXV5bVlRaWlhXU1BSU1JWU1NVUk5OUE9PU1FPT1BOUVJVU1JTUVBT
-VFFYVFJUVVRQTlBPUFFSU1JUUlVUVFRRV1BTVVNQUVFSU1VTVFRWU1NUVFNVUVJS
-U1NQU1RUUlNUU1ZbUVJVVFRWV1VVUlFVVlNTU1ZWVlZVVVRVVlhZWVZVVlRWU1RU
-WFhWU1JUV1VXV1VYV1dYWVhYVlhYV1hYV1hZWVpZWFlaXVpZVVZbWVhVV1hWV1ZW
-WV1YWltbXFxdXGFgXF1dXl9jXltdXV9fX2BdYF9mZGNgYWJjYWFgYmRkY2BcWl5k
-X2FcXl9cWlhXWVlYWFpcW11kk7/M1dzh5+rt7evr64WAf39/f4B/fHp/fn9+fX19
-fn1+fH19f31/gIOAfnx8gYeFg4J+f4CAgIOEg4CFg4B/foCEg4KDgn9/goOBf357
-c2hbUlNSUVBRUlFQT1JUVFdXWVlcXWBjX1xYVVRTVVhYW1tcWlhXV1tZWFpdW1xc
-XF1dXVpZWltZWlxdXFlWVFJXWFxbWlhZW1RYXV1dX2NmZGBhZWBhYWBiZF5bX11b
-WlRUWl5eXFhXW1lXW1lZXl1ZWFthX11dX1xeYmBgXl1dX2BeWVRXXl5gXllYXF9f
-XFlaVVNVUlJSUldUV11bW1tWVFRYVldUU1RUW19gXltYXFxdX15dXFxcV1hZVVVb
-WlFTUU9UVVNORkE9OTY2NDM2NzY6ODo8QkZISkdPUFBWUU1TUlZWVFJVWVVRUVRQ
-TE9TTU5PUVRVUlVPTVFTT1BNTk5NSE1OTVFMSk5LS0pGSERFTU5NT09KSElKTVJR
-TExRT0xKUE5RVFlcXnCJmaKoqKalp6epqKqqqamop6SipKOioKGhoaKgoqSkoqSh
-o6OjoKGiqqupqq2sqquzsra1t7e1tLa1s7Szs7OxrrGura+sqaisrq6uqqyrpqWo
-q66urKiil4RpUkU/P0NOaYKRoamsqKqxtrq4urq5uby9uri6ure6ubanimVPTE1N
-TkpGR0VJUVFPTlJUV1tdX1tVUlNUV1pZV1lYVVFPT1JTVVJQUVBST01NTlBQUVFN
-TUtJSUhKTk1LTE5MSklIRUVER0dFRklHSUpLR0ZERENBQ0JDRkNDQkNDPz89Pj89
-Pz5AQUNCR0A9QEBCQT0/Q0FAPjs7Ozs9PT4+PkJCRENFRURDR0tLS0xJSUhLSkhH
-R0ZISFBSUlVbXl5jYmVmZmZiYVxbVlRQSUdEQj88Ojo5PDk6ODg3NztBQ0ZDRklK
-S01NT1BRVFdaXmJgX2FkZ2lrbm5vcXJzdnt8e3t3dHN1d3h3cnBydXRzdnFydHZ0
-cnFxcm1samdlZWhrbWxxdXp+g4eOjZSfnZygoaCjoZ+dm5qZm5iamZiamJudnZ+g
-nJybmJaXk46NkI+QkpKRkpGQkZeWk5OUlJSVmpybnZ2doJ+fo6Wiop2foKCgo6Gf
-oJ6cnJ6Zl5aVj4yKgXtyZV1YWl5kY2VkY2Vna3Fxcnd5d3h4e3t+gX+ChoaHio2T
-jYyOjo6KioeFhomNlZORjouHg4B+fHlxcnBpXllbWlxZV1laX1tcVldST1JRVFVS
-UFBRTk9RUFJVVVRTVFJTVFNSVFNWVFZUUlRXVFRSUlFSUFNTUlRUU1NSUVJQUVFR
-T1JTUVFUVFRSVVRSVFRUVFNWVFJSUFJTUFBSUVJRUlFVVFNUU1NUUlRSWFZSVFVU
-VFlYWFZUV1dWVVZUU1FTV1ZXVVRWUlZXV1hWUlFTVFdVWFVUVFZWVFZaWlZUU1da
-VldXVVhYV1paWVtaV1hYVlZWWVlWV1paXFxbXV1cXFtbWlxaWV5fYmNgX15dYF9f
-Xl9iX2BjZWJgZGJiYmRiYmFhYV9bXmFcXF5fYWBcWlhYV1VYWVpdXmKXwc3V3eHk
-5+nr6uvsgYB/gYKBgn9/f399fn+Cf4B/e3p7eX5/gIJ9foB+fH1/goOCgYJ+fn1+
-fn+CgoGFg39+gYGCg4N8fX+Eg359enRpXVdRTU9NT1FQUlJSVlZUVVZZV1pcW11a
-W1ZUU1VYWVxaWlZUVlZYWVxfWlxaWlxbXV9eW1tcXFtcWl5dWVdWWFdaWFZZVVdZ
-W1xgXWJjYWVjY2FeXGFgXl5cXV5cXV9ZVVRWV1hVVFhZWFpdXVtaW1tZYGZhWlte
-X15gXmBfXWFiXl1aWV5fXl5eWlhdX11cXF9dV1RYVFRUV1paW1laXl1ZWFNUVFNR
-VVRaW1hcW1ZWVFRbXFxfXFtaWlRWW1pXU1JRUFBWW1dTT0dBOTg1Njk5ODg4OTc7
-QT1BQkVMUVFOVFZWVFRUU1RSUFJRTU5NT1BOUE9MTkxOTU1OUVFPT1BSUUtKSUtQ
-VlNPTlFOR0VCRktMSkdKSUdISk1QUU9TVFFMTEtOT1RdXV9eZXeMmaSmpqSnqquq
-qq6rp6mrpaWmpqWkp6CgoqKnp6KioaOioKKhoZ2kqq2qqqywrbC0tre3tK2xsLOw
-srWzsLGwsbCxsa+xsbGwsLCvrq2rqqutr7CtraeglYdwWEdBSV95kpymqq2wsra5
-vLu9vL67vMC/t7u7vMC7s6aNZVBOTlJMSUZFSkpLUVBTVlZZWl9gW1VQT1FUVldY
-WFhYUlJTV1ZYVlVWVFRQT1FQUVFPTkxQTkhJR0dISEtIREdHSUdERUdEREVGRUZI
-SEdFRURCRUJBPz9BQkNFQT5AQD49QD9AQD1BQkA+QT48PDw9QEM+Ozo+Pjw9PT09
-P0BBPkBAQkE9QEJFSk1OTktKTEhGREdIRkhNTk1TVFleYWNkZ2ZoaWZjX1tZUlFQ
-S0ZEQj48Ojo7PTw5ODk7Oj5DRkZJSkpKS01OT09TWFhYXGJhaGVnaW1rb29wc3J0
-dXh5eXh4dnVydHR0c3d6eHVzdnZzdXRycW9ubWpnZWNkY2ZpcHJ3e3yChYmNlJ6g
-oKKhoKKjn6CenJ+en52cnJqYl5qZm6Ccm5qYmZiWlZSTlJGQkJGVlJGSl5aUlJOW
-lpmZmp2foaKgn5+goZ+gn5+hoaKkoKGhnZucmZWUlpONjIaEfHVpY15gYWZnZmhm
-Z2lvdHN0c3h5e35+fH58foCBgoWHiouLjY+Sj4uKhoOChouQkI6JhoJ9f317enFu
-bnBoYFxdWVZUVFleX1lUUVRRUVRUVFFPT09TU1NQT1JTUlVSUlNUU1NUVldXVlZT
-VFVXU1NTVFJPT1JTUFFTUlBRU1ZTUlJUUVNVYFdUVlVSVFlRUU9SU1RSU1NQT1NS
-UlNSUldVVFNUUVNVVFhUU1JRVFZTVVZUV1ZXVVNWVlVXWVlWWFZUVllcWFdXVVhY
-WVdVVlZXV1hYU1RVVVhaWltXVFJRVVhZVVRXVVdZWVpbW1xcWlhaW1hZW1dWWVxd
-XF1cXFldXlxaW11gYF9dXV9gYF1hYGFiYF5dXWBiYmFdYGJiY2RjY2NlYmNfXF1c
-XV5eXVxdW1lZV1ZcWllcZprCzdTb3uHk5efo6ep+fXx9f4GAgoSCgYB8fX99fn5/
-fXx8fIGBfX9/foCAfn+Af4CBgX1/gX9+f3+DgIGAf4KAgIB/fnx8gIB/hXxzal5W
-UVBPS01MTVBOT1NVVVRVVlVXWVdZWFlXW1VTWFlbW1hXVlZXV1lZW1xbWlhWWVtf
-X15cWFlaWVZZXFpYV1dVV1lbXFtZWFpgXl5iYmNgX2VfWFdcYF5hX1dYWVxdW1la
-U1JYVVJVXGJWU1haWV5eW2FfX11bXl1cXV1dYWNgZGJgXVxcW1tbXllVV1xhX15d
-XllTVFhUVFdZWFhXWFhdYltZVVFQTVRYXVNSWVxVV1lUWVtfYV5gW1hWVVhXWFJR
-UU5QVVdWWFdXTkdCOjY3ODs5ODo2Nzk3Ozw7P0hOUE5SV1ZTVVVaVlVVUUxMTU1O
-UFFRUU5MTExMTlNSTU9QTVBPS0xOUVJVUlBOTElGSkdGSElIR0hJS0pKTExQVVNW
-UUxLTE5QVVpeXlpebH2RoKOlpqWlqa2srK2rqamrqqmnp6akpaajo6OfnpyeoKCg
-oqCdn6aoqamppqmqrrS0tbSytK+wtba2uLaysrWys7GurbK0r7Gys7O0s6+vr6+u
-srCuq6mjnY50WU9ecYSRnKWosri2uLm7vsDAvr/Avr+7vbzCwLu6r45jUE5NUFBN
-TEhISEtLTlFWV1ZVXGJfVlRTUlRYV1RVWVhaVVNRU1RZVlRTVVRWVVRSUk5LTU5P
-TEtKSkVISklJRkZGSEVDRURERkNFR0ZISEhGSUNBPz87PkNERUVEQ0JAPD1BP0Q/
-QEJBQkI/QkU/QT0/OztAOz1AQzw9PT09Oj8/Pjs/QEI/QEVIS0xNT05HRUVIR0ZH
-RkdLTFFUWF1jZWhpaWpua2dlYlxZUU5OSUdDQj09Ozo7Ojo7OT1DQUNGSElGSUhI
-SU1PUVNUV1ZYX2JhY2Vqa29ycXFxcXN4dnd3eXd2eHVyc3Z5eXl3eHx2dnh3d3Nx
-bmxqZ2ZnZmJlZWxvcnZ3fYKFipCWnqKho6GkpqWjn52cnJ6enJqbmJmZmZqanJue
-npucn5qZl5iYlJKSkpOTkpSWm5eYmpeZm5ucnaCfn56enaCen6CioZ6hop6gn56b
-m5eVkpKSkI2KhHx5cWloY2JmZWVoZ2dna21wdHV5dnl4enl7f399fYKDhYeJjI+R
-jIyQjIiFhISIjZKQi4SAfXd7e3h5cGpsZ2djYV9ZV1VWWV1bWlNRUVBRU1NXUlFT
-VFNUUlFPUFRTUlBTTk5RVVZUVVJSU1JRVFNVVlNTVFFQUlBTUlJTUVNTUVFSUlRU
-U1VeU1JUV1RTWVJRUlJSVFRSUVVTU1VTUVZVVFVVUlRZWFdWU1ZWVVFUVFRSU1VW
-V1JSVFVWXlVZWVhWVlZUVldYWVtcV1dYWllXVlVXWFdWVllbXVpVVlNUV1RUV1ZX
-VlhZWFhZXlpaWFpfW1pZVlhZWFlXWV1bWVtZX1xcXFxcXV5eXFlaXF5hX19eX15f
-XV1dXV9hYWFgX19gYWNjYmRnZGFhY2BfXVtcX1pcWlpYVlpbWlhilsDL1Nnd3+Hk
-5ebn6X5/fnx/gYCDhIGBgYOAgn5/gICBfn5/f399f319f3+Bgn+Bf35/fX+Bf3yA
-g4GBgYN/f4CBgIF/foB+gISDd2xhVlJQUVFPTUxPUlJUVFJTVFNRU1dYXFdXWVdX
-VVZWVlZUVVdZWFVXV1ZYXltcXl1YW19fXlleW1laVVdaWllXVFhbWVtdXFxeXWJe
-XF9jYFtdY19bW1tcXVlYW1hXXV1cW1dUV1tYVlhZXFlZVlxfXl5dXltfX1pXWltb
-Xl5fYF9hYWJgXFxdXV5kWlVYWlxcX1taV1BVV1ZXWVpWV1lcWllZW1lXVlRVWlZR
-T1JYWVlXUlZcXV9lXlpbWFNVWllUU1ZTVFNYVVhWVlZTUEpBPD84NjM0Njg3Nzs4
-Njo7Q0tNTU9QU1NRVVVWW1xRUVNQUFJWUVBOTFBRUFVSUlBLUE1MTk5NSktOTE1O
-TktLRUZIS0RFRkVJTUlISUdJSlFUW1pVVFBOTFJVWFdZW15ldIibo6WqqKqpqa2t
-ra6qq6urq6qqpqWjo6OgnpqbnZ+fn56goqKlpaKhqKupqaisr7OwsrS4sLC2trm2
-tLWztrGvtLSytLi5tLO0tLSysrGxsrOztLCwr6ypnotxbHh/f32Ik6CpsLW4vLm5
-vL29vr68vsDAv7y9vLmulGpSTU5NTk9LSUpJSkxMTktNU1NaXV9VTVFUUlJTVFRW
-V1lYVFJRVFNRT1JSVVVYVVVSU1FSUUxNTEpKSkpKSElISUZHRkhESkZHR0ZFR0dJ
-SEhHRURCQUdCQUJCQUNCPzw6Ozs7PkBBPz9CQUBDQUE+QTw8PT4+PDw6PT0+Pjo4
-Oz49Ozw+Pj5ARkhFS01MS0lIRUhISEZHSE1QUVNVYmZqbGxta2trbmxnYVlVT01I
-RkZDPz89PDs5ODY5Oj5EREdFSEpGSEpJTFJTU1RXW11eYGNjZWttb25vcHFydXh4
-eXR2dnh9eHZ3eXp1d3p4eHx7enh4d3Jua2hnZmhmZWlvbm5vdXt/hIiLkpmboqmk
-oqSlpaamoaGdoaCcmpeXm5yZm5uam56hnJuYmJmXmZmZlZSVk5ORkZCYmZudnJuX
-mZqanJ2cnaGenp6dn6CcmpqenpmcnKCbj5STkZKQjoqJhHxzaWVkY2ZkZGRna25s
-bnF2eHl3enl7fXx6fX5/goSEh4iMjI2MjY2LhYaGiIuOjYqBfX1+en2AfXx1a2lm
-Y2FdX15YWFpZWFpYWFRRUVJWVVJSUlFPVFVUVVNTUU9WUVBUUVJPU1RRU1JQUFFR
-UlhZUVBTU1BQVVFTVFVSUlNTVFJSUVFSVFJUUVFRUVFSVVRSVFNVU1JRUVJTUVNR
-UVZXVlhVUVJRUVFTUlNVVFVXWFVWVVJUVVdTVFVVVllVWFZWWFhYVlZXV1lYWFlZ
-WFhWWFdWV1ZXWVlZWFhUVFNUWldSVlhYWFVVV1hZWVlbW19fXVlYWVpdW1tZXFtZ
-WFpdYV9fXFxfX15cW1pdYGBgX19gYmFhX2BfYGBiYWBeYWFhZGFiYWNgY2VkX1ta
-WlteXlteXlxZWVhbWWKVv8vT2d7g4uPm5ufofn5+f4CAgoCBgYSEg39/f359fn5/
-fn6AgYB8fH+DgX+Af3+CgX1/fn1/gYKCg4WEgYKBgYGAfX+BgH+AhHhuYFVST05V
-VFJRUFBRVFRTUk9RVFdXV1lVV1VYW1laV1ZVVllYWFdWVVdXVllcXV1ZXmJkXl9c
-W1xfXVtbXFhYWFlWWFteYGFiXmJjXFtgY2BYV1xcW1xaWl1aV1xcWF1cYmFYVV1e
-XFZZWFxfXFhZWFtcX2BfW19iXFpZWVlaXWBgX1tcXFpcXlpZWFhZWVpeYl9gXV1Y
-U1pZVFRcX1taWlxYWltZWFhXV1peW1ZTV1leWldTVFVYXl9ZWFpcV1RbWlRWVFBR
-VFZRTlJXWFdXU05CPz8/OTU2NTg5NjY2Ojg8RUZHS1JUUVRXU1dcV1RWU1BRUlNR
-T0xQUFRZVlBPTU5OTU9NUFBOTk1JSUtKRUdJRUhJSUdETExLSERHSEZMU1ZZV1NQ
-TU5TU1FTVlxcXGRugpOdpampqainqq2vsLGtra2qqKuopZ+io5+fn56doqSipaSf
-oaSin6CoqKqqq6+zs7K0tbWxs7O3t7Oztba3sbO0sri5urmysbO0sbW2t7a6uba0
-s7CxsbKtnYyIhnlmYWBzgpWksLjBwL+9vr29vbzAwL29vLu5saiUbFJLT09OTkxO
-TkxKTExPT05OUllcW1hTUFBSUlJSVVVVVVJUV1JSUU9NTE1PT1FUVFZUUlFNUlBO
-T05MSkhJS0xJSEpFSUZHSUpFRUVFRklJR0ZFQ0JIRUI/P0NBQUE7Ojs9Pzs8PkBA
-QEFEPz8+PDw+Qj09QDw7Ozo4PTw9OTk6Ojg3OTo8QUFEQ0dFSElHRUdIR0hHSklE
-R01PUlheY2pucW9sbGxtbmxlXVRQSkpGQ0BAPT0+Ojk4NjY3PT9FRERISEhJR0hI
-TFFYV1dZW11eXmNkZmpnbm5vcnVzdXZ0dHl5enh0dHd5dnV1dHd4e3l7e3d0dHJu
-aWhmY2JjZWlqbXJ5fISFiZCSl5ufpaWmpaSmp6inpKSfoJ6cl5WbmpmZnZubnJ6f
-np6cmpiYmZqamZiVlJKTlJmYmJibm5iYm5ycm5ubnZ2bmJqdn56XnZ2cmpmZmZqZ
-mZaTk42OiYV/eG5nZGZkZGdnZGpscG1tc3d6enp5fH99gH57eX+ChYiIiYmKjY2Q
-jIuHhoaGio6MiIODgXt+fX9+fnZtamZjXltaXV1bWVZUTlBPU1VVVVRRT1BRVFJU
-VFJUU1FVVVJTVFJSVFVUUVJQVFVRUVFRVFdYUk9SVVFUVFVSUVNSUFBSWFVUVFNS
-U1BRUU9SVlFTU1VVVlJTUlFTVFJSVFNWVVRTU1JSVFVVVFZUVVZVVFRVVldUVFVX
-V1ZXVVpcV1ZXWFlXVFVWVVVYWFdXV1lZVVZWVlZXVVZXWlhXWVpYVFhWV1lVWVda
-WVdXWlhXWltcWltYWVtYW1xZWltaXVxcYF9fXVlfXFxeYF5dW1tbXF9dX15fYGFg
-YGFeXmBgYWFjY2JiYWJkZGNjZGRiXVxdXVlaXFtYW1lZXFpaYZDAzNPZ3uLk5Obn
-6Ol9foGCfoB/fYCBgoGAfXp8fXp9fn9/gYB9f3x9fn5/gn9+gIB/gH1+f4GDg4KC
-goF+f36Af39+fYCAgYF9cmVbUE5OUVFSVVBQUFFTU1VSUVBQU1ZWWFVVWFlXVlVY
-V1NSVlhaVlVWWVhaXFtdXlhaXWNcW1teXmBfXVtXVVhaWVhaXmFfX19gZF1bXl9f
-XVhVV1lbWlhaWFVWWl1fXV1fXlZZYFtYUVNWWl1bWlhXXFtaW1hZYWVfWFVZWl5h
-Y2FjWV1ZXVtcXllbWFldY2ZgXVdYXVVYWV1XVVlaXllWW1tYVVVWVlhZVlhZVVdb
-Wl5aWVhUVldcXF9fX11YWVxYVFJTU1BRVVJSUlVZV1NTUU5LRkI+ODc3ODk4OTk7
-ODk7Pz1FUVRTV1VZWVRSVVNSVFRSUlNQUE9TVFNTUktHSkxMT1BQTU9RTktKSkpH
-Q0hJS0hJRUlNS0ZISkxKR09VVldUUlVRUVNSUlRVWFhhZmt5kKGpqqiqqqurrKus
-rqyuq6qpqKinpaWkoaGhpKOjpKKgoaCfoZ+fnqampaissLS1s7K2uLWzsrS3ube3
-ura4trKytLe4tLO1tLWzsri5vru2ubW2tbOysrKnnJaMdFRISE1cco6jsbu/vb6+
-vr7Avr/AwsG8vbyyqJJrT01OUFBNT1RUUVBPT09XVFFRVFteXVJPVFRaVlRUVVJR
-UlNUWFNUUlBPTUtNTlJSUFBPTk1OUFBOSUtLS0hJSENERkVFR0hIR0NCQ0NEREZH
-SEZFQ0BBP0BDQ0ZERERDQ0BBQUA/PkNCQkFBQT48PTo7Ozo8Ozo+PT47Ozg4OTo7
-Ojc5Oz1APjxARUhKSEdHSElJS0tJS1FNTU9SVVleaWxwcW9wcGxuamVhWk5LSUZE
-QT9AQDw6PDw4ODo5QD5ERkRESUxJSkpMT1VXWFpcXVxYWmFgY2lqbXFwcHFzcnN2
-d3h7eXl5enl5enZ6e3t7fnl6eHp1cm9saGdkXl9gZGtvcXV8goiOkpSXmqCho6io
-qaikpqWlpaGdmpydnZiYkpqZmZqYm52hn5qcm5qXmpqZmJmal5eXlJiWlZibmJmb
-nJmbnp6anZuamaCho6Cam5ydmJmam5iWlZiXkY+Jg3xyamVjZGJjZmdpaG9ucG9y
-dHR3eHx7f39+fn1+f4KEhomJhoiNkJGMiYiHh4mKjo6Mi4mCgYB/f314cmpoaGFb
-W1xYWV1ZVlNTUFBOU1RVU1NUUFBRUlFSUlFRVFNUVVNSUVFRUVFSUU9SVVVTUFNU
-T1dZV1FSUlFTUFNOUFFQUFBTVFVVVlJRUlJSVlRUVFRWVFJUU1RTU1NSUFNOUlJR
-V1NTUlZTU1daWVdXVVRTVlRXVVhWVVdUV1NUVFZWVVZYV1hXVlZWWFZWVFVWVVZU
-VVZVWFVWVVVYVlZWWFhYVFdXWVhZWVhVV1dWV1daW1paWVlZVllcXF9fWlhaX1pd
-XVxcXlxeWltcXF5dWlxdXVxdYGFgYGBcXl9gX15hY2RjZmViZGRhYmNkYV9eYF9d
-WltYVlhaWVldXFpfjr/O1dvf4+Xn5+nq6X5+g4J/f4CDgX58e3t9fH19fH1+foB/
-fn+Cf4KBgX5+fn15e32Afn9+fn9+gYKCgIGBf3+CgX9+foKBenFlW1FOTUtJSk1Q
-UVBTUlBTUlJRUVJSVVVWVVVVVVhVVFVWVlRXW1pXVldYWlldXVtZWFlbW1paWV5g
-Xl1bWFNUVlRXWVtgYFxeX2VmX19dYFxXV1paWl5bW1pUVVdYXl1WWV1cXmRfYlxS
-UVNXXl5aWltfXF1bV1peY1xVUlddXF9eY2RcW1tbWFhYVFhXWV9eYF5bWVtbWllX
-WlZZXV1cW1haVFFTV1xgXldUWFhYW11bXlhcWVZWWWBbYWNgW1NVVVZZU1BPUlhV
-U1NXWFVPUllZUk5JQ0A+OTQ3OTc4OTo5ODc8PUJGSU9UV1lXV1RTU1JWWFpdWVRR
-T05PUVFPT1FNTlBTV1JRTlBRTElGRUZIRUhHRkVISkxMTUtKSk1SUldYVE9NTlBP
-U1NTUVFVXF9iYGqDmaetq6mnqKqoqKiqrKuqrKmqqqeko6OfoKGjoZ+goKCgnZ6h
-paOjpKSkpaqtrq6tsbO1tLOvsra4uLe2s7S0srW6uLa0sbSztLi5t7m6u7m4ubm3
-uLi2s6mhmoxwUUI/Q0dSZ4aisbjAv766wb67wMHDxL68t7aqkWpQTVBRUVBRUlFP
-UVJSVldUU1RaW1xaVVhYWFpXUlFTUlBOUFVWUVNTUU9MTUxNTVFQTk5LTE5NSUhM
-S0dGSE1KSEZBQklHSEhGQD9CQ0VJSkpHRkVCQUNCQ0REQkFEQT88PDs9QDw9Pz0/
-QUFBP0E/Ozo5PDw7QD9BPjs9Ojs7PDo4Ozo3Oj08OTxCRERDRkVGSElMTU5MTVRP
-UFNZWV9lZ25ydXR2cWpoY2BcUk9MSUZBQTw8Ojs/Ojg3PkA/QkJERUVISUpNTEpL
-UFRVV1paXl9cX2VnaGtpcnNxb25wdXV0dnl5e318enZ4eX19fX18eXl4dnN0cGtm
-Yl9eX2Biam1yd36EiYyPlZmanaKlqampqKSmpKGhnpucnZubl5eRk5SWmJmbnKCf
-n5ybmZ2cm5qamZiVlpWVl5mWlpuam5mampycnZ6enJyZnKCgm5qZnJual5WamZaT
-kpCNiIeCenBqZ2VkZGNmZ2ptbnFwcHFycnZ9eHh6fX5+f3x9gYWEh4iHi4+Rk42I
-iY2Li4yLjoqKioiDg4aCgH5zamhmY1tWWVVZXFpWVVRQUE5OVlVUUlJRU1JSUFJS
-UFJSUlBRUVJTVFZTVFNTWVVRUlJSUlNRU1RYVE9SUlVWU1FRU1FTUVNSUlFRUFNW
-UlNYV1JVVlRRT1JTUlFTUVNST09OUFBUVVVTU1NTU1VVV1VTVFRUVFRTVldVVVdW
-VVZVWVpaVldXV1dZVVZWVVZWVVRRWFNUVFVXV1hWVVdVVVdYVlRUVlpZWldWWFhX
-V1hYVFRUWlhYV1pbW11cWltaXFtcW1tdXF1dWVtbWVpcW1xbXl9fX15cXmBfXl1b
-XWBgXmBgX2BjY2VlZWNiY19gYGBiX19dXF5bWFlaV1dZXmSRwM3W3N/j5efp6err
-goJ/gIGBgH98fHx+fHx9fnx+f3+Bf318f35/gYKDf359fIB+fn1/fnt/f359f4GD
-hYR/f4CFgoOCgX11alpTUExJSUtQTkxOT05PUVBQTk9NUVNUU1FTVVhYU1hUVlZV
-VFdaVlVUVFZaWVpbWFlbWVZVV1paW15cW1lXVE9XVldYW11eXVteY15hYV5jWVhc
-XF1fXVlbVlRZVVhbWFZaXFtkY2NjX1dUU1VXWF1cWVpZWllZYGRgWVJbXF1XWFxe
-WlhcWlhZWVpXVldYXF1eXV5dXVtbWVNUUlxeW1daWVpYUVFUXGRiVlRXVVdYW1la
-WFlYWlZXWlpeYFtUUFNTV1dVU1FTWlZUUlVaWVJVWFlVUFBISEU9Ozk1ODk4Nzg2
-Njg4PD1BSVBVV1hcWlROUVJYWVZZVVFPTE1PUlFMS01QTVJTUlNTVFJQTEhDREhK
-REdHSUlISElOT0tNTlBQUUxMTExOUk9PUVJUUFRZWl5jZ3aTo6qpqa2npqipqqmr
-q66yr6qpqKakoqGgn6ChoaOjn5+gnZyipaClpqWoq6qqraytra2wsbCytLS2s7K0
-s7K1tbe3tbi1tbKytru5urm8vb6+vLm8ubavp5uXi25RREBFQkRPZYObr77Cv8LD
-wL7BwMLAvL67u7CVa1FPT1BSUVFQT01QVFJUVFVUVltXVVZaV1ZaXFhWV1ZTUE9R
-VFRTUlBRUU5OT1JSUlJSUFBSTEdJS0tLSkpKSUlJRkhFR0ZFRUlFRERFSUhIR0lI
-RUZJRkNEQ0RGRENFREJAOzw8Ojw9QD08Pj49QD8/QTo5Ojs9QDw9QkE+PDU3OUA9
-Ozo4PDg6PT0+QkJERENHS09OTUxLTFFUVVteXmFnaXFycXFwbGtkXlpaUVFMR0NE
-QDs8Ozo5Oj47QD5AR0dKR0lLT1JTTVBOUlVXWFpaWmFkZWlnaGlqcXVxbXFzd3h2
-dXZ4eXp7eHl9fn99fn9+fXp2dXJybGViXV1dYGNpbnF1fIOFipCUl5eaoaOko6am
-op+eoaCgn5ycmZWVmpWUlpeXm5ubnZ+gm5ubm5uZnZyYl5aVlZeWmJiam5qWl5qc
-nJmcn52gnp+jpKCcmpybnJqXlpmYmJeRkI2JhXx2b2tmZWRkZWdnaG1xb3Fwb3Fz
-dHl8gH5+fX9/gX9+gIaGh4qLj5CPkJCKiY+Sko6MioiKiYuLiYV/em5rZ2dlXVpW
-VFdYVldXV1RRUVRUVFRUVFJTU09NUFFRVVRUVVFNUFVWU1ZSUlZWU1JTUlFSUFNN
-SlBWT1FRUVJUV1JSUlFSUlBSUE5RU1RUU09TUVFUUVJTUlNWU1FUUVNVVVJPUFNS
-U1NTUVJVVFNXVlNPUlRXVFVTU1JVVlNUU1ZWVldWVFRVU1RTWVdVVlRVVlhUVlhU
-VlVXV1ZWVlhaWFZYVVdXWVhWWVhZVVdYVVpYVFVVWVpYWVhXWFZUV1lbXF5eXVxc
-WltdW11bW1laWFpbXFxdXF5eXF9gXl1fYF5gYGFjYWFkZWFkZGNiYmNjX19bWVtb
-W1xbWVhaXFxdYpDBzdXc4OTn5+nq6uuAfnx+fn59foB/fX59e3p+foB/f3x8gH59
-fX58fYGAfHp8f31/f4CDgH17fX2Ag4ODgYF+foCDhIN9dm5fV09LSEhKTU5NS0xM
-R0xQUVFPTlNTUlJRU1NTVFpZU1VQUU9SV1pYVVVXVFJUU1NWWFlaVllaWl1eX1pa
-V1ldXVhYWVdaX2BfXWJiXmViXl1YWVxZYWFcXVxaXVxZWV5aWVpaYmFmZF5bW1VR
-TlJVXVxaWFZWVldgYFtaVlhbW1JUXVtaXV5aUFZcW1tVVFhZWVpeW1lcW1tUVVBR
-WFtWVFpeX1pPVFxhYlxbV1pWU2FYV1haWFhXV1hZWVtcVVBSUVdaXVtbV1ZXWFlR
-U1dYVllcWVRUU09PS0VBPTk3NjY3Ojg7Nzc3NzxBR01VXVtWU1JSVVpYV1dUUU9O
-T09RUlNPSk1KS01PT05OT1BPTUlJSkhHSklNTkxKR0pNTE5STktLTE9OTFFMT1FT
-WFlUVVlaXmBhboidpaaqq62qqqanpquqrrGwraurqaelo6SioZ+dn6WppKKdkpue
-pKWmo6eqq62rqqmurayssbGztbWyr7Oxt7e2t7m3urm0s7a3uLy9vb6+wcC9ubq4
-tambkY+EaE5GQ0RCQkNMYoeis7u7vsHBvrzAwsG+v765r5ptU05PUFNRUVBQUE9P
-TlJWU1ZVWlZVVlZXWVxbWVVWVFFQUFJTVlJPUE5NUFNSU1RRUVVRUU9PT0xNTUtL
-SEdJSUtNS0hHRURFREVGRUZHRkZGR0ZHRklMSEZEQT9DQ0JCQT8/PD5CQT0/Oz08
-Pzo5Pj09Ozw8Pj87P0I+PTw8PD09PUE+OTs7PTo4OTw9PkBDQ0ZNTE9RVFFSUFNZ
-WFxeY2NqcXZybm9rZWJeXFZVUUxIRj87Ozo8PTw8OTo9QEFERUpKSUdMTk1RTk1Q
-UFNVWl5aY2ViZ2ZmZ2VtcnBwcnJ0dnh2d3h4ent7eX57fX19gH59eXd0cG5ral9a
-XVxfZGptcXV6g4qNk5SWnJ2eoqWlp6inpKejoaCemJeWlJWTl5qXl5iYmp6enqKd
-mpaXmpqXmJaVm5mYmJiZlZmamZiZnJmcnJ+enZ+foKKioJ2ampqbnZmVlZaUlJOU
-k46He3JtaGRkY2RnaGtsbW5sbW5vcXNzeXx/gH18f39+fn5/hYaIio+TlJKSkY6O
-kpSTkoyOjIqMkZCKh4F6cGZoaGljXVdUUlJVVVRVVlRQVlNUVFRRUVNSVFFSVVZV
-VVRUVlFRUlFSUlJTUk9RUlBRUlFSUlNQTFJVV1VRU1RVU1NUVFNWVFFTUlNTU1RS
-UE9QU1FUUk9OUFRSU1JTVFNRUlBPUVFUVVNPU1NSUVJVU1FRVFdVUVJTVVRVWFRY
-V1dYWVVSUlFZV1lWV1ZWVVVWV1hWWFdWV1VVWFhWVltYWFdWVVZXXFhXVldYV1ZY
-WVhWVFVZWltdYl5aW2FbWFlbW1tbWFpaXmFeXltZWVpdXFpZWltdYF9eXF1fYWFg
-YGBhYWVlZWVkYWFgYWJiZmFdX11bWVlfW1laWVpbXFpfjsHN1dvg4+fn6Orr63t6
-eHl9fn17foB+fX18e3x9fX19e3p6fXx8fHx8fn58fH2AgYGBgICBf4KBfn2AhIKC
-gYR/gYOFf3pvYVZPTElLS0pLTEpKR0tNTk1OUU9QUFVUU1FTUlRWV1hXWFVSVFZY
-V1dVV1NXV1ZTUVJVVVVXV1tgYF9cV1daWlpaVFhXWl1dX11cXFlfYl5cWVdcW1pi
-YV5hXWFjWl5fYl5eXWFjYmdfWlpaVlRPUFhYWVlYV1ZZWFpcV1haWl1ZU05cWlte
-WFpbX2BfXFRUU1VYXFtaW11YVldRWVhYV1VaYGJkW1RVWVtcXFhYWVpSU1VZXV9b
-VlhTU1dZWFpYVVRXWVhZXV1ZWFpeVVJOT1FWVltaVFZWVVNOSUZEQDk1ODg4Nzk0
-NDc3ODtBRU9UV1RVVVdZVlNSUk1OSklOUVBTUE1KTFJRTktPUk9NTk9JRUdFR0hJ
-R0pOTE5ERUtWT05JSEhOUU9MSUxOUFVXVlNTV1hYWVxpeYqXpqqvrqurq6qpqa2v
-raytq6uqqKelo6Oin56foaWlop+enZ6ipaSmqaipqqunp6qvsbC0tbK1uba1tbG0
-tba4ur23trq6ub27vLy/vLy4u7q8vLexm397gHZeTElIR0lIQkNJYYWgsb29vb+9
-vsTBv729u7uwmnBST1JTVFJRUUxKS0pMUE5TWVpaVFlWVlhcV1pYUlRRWFZUVlNS
-WFRXUlFRUFFUU1JVV1ZUUU5NTEhMS0pJSkpFSUhKTElJRUJEQkVDRkJBR0lFQkZJ
-REZEQ0I/QkJBPT0/Pj07PTw7OztBQzw/Qj1BOz0+PDo7Ojo7Ojs+Qjw4ODo4ODg9
-Ozk7Ozw9Pz1AQT5BREhNUVVUVFFSV1pYWmFjZ2hsb3h0bGpmZWNbVlNRUEtFQUBB
-Pzw7ODg5Oj9BRERFREZHSkpPUk9PTlFSU1dYVV9jZWZlZ2Rma2pxc3BwcXN2d3d3
-eHh7enx9fHt6foGBfXh3e3hzcHBmYl1ZWF1iZWlydnh9h42SmJyfn5+jqqWpqKep
-o56em56clZaUlZWamJeYmJeXmp2bnJ6flpSXmJeXl5eanJmXmJubnJybmpqbmpyb
-mZqdnZ6enJudmp2fnZqZmJmWlZaWlpWRi4J8d21raGVmZmhsbGxramlsbG1vdHV1
-d3p9eXp7fICChIKDhIiNj5OWlZWTk5KSlJKSjY6QjpOVlY+IhHptZGZpaGNaVFJP
-UFNXU1RXWFRTUVZWVFFSUVBPT0xTUlJQUlRUVFBPT05SUlFPUVFPUFFQTlJRUlRQ
-UlNWVlJUUlBQUFFUU1JTUFJVVlVRUFNRTlBQUlNTUVFRUlVUUlFQUlNQUE5SU1JS
-UVNPT1NTUVRWVlRUVFZWU1RWV1hXWlNVU1RVVlRWVVdXWVhUVVVXVVZXWldWWFdY
-V1hYV1hVV1dWVVVYWFNVVFhWVVdYWVhYWFZZV15dXl9dW1lbXFtbWl9cWVpYWlpc
-XFtbW1paXV1cV1leXF9eXV1dXWFgXl9hYGFiX2JkZWJjYWBiYGFiY2JfXWFeXF9c
-XVpaXV5eXV+KvszV2+Dj5efp6evqfn59fXp9fHt7e3x6fn18enx7e3t5d3p9fIB7
-fH58e3x+f36ChIKAf3x8g4KBgX+BgICBgoOAgH98c2RaUE5JS0ZGS0tMTktMTU5P
-U1BPUlRUVVZWU1NTWFZVU1RXV1ZWVldWVFZYWlZXWFZTUVRSVVZZW1pdXVtYV1hX
-WlVUVlVTV1dbWVlZV2BcXl1WWF1XW11cYF9bYV1fYmFhXFtiYmBkY1tdW1dZV1JT
-V1tZWFhZWVxZXl9aX11dXVpVVllYW11WWV1dW1paVVVVUVRXXlxZWVhYVFJVXF5Z
-WFtgYFxWV1dWWl1bWVtZVFFVV11iX1pYU01OWFpXWl1aWVdZWlpcW1hYWllWUVBS
-Tk9WW1hZVVRXUUxNT01KRDs6NjU2PzY0NzY2Nzg+REpLU1VWVlVWWVhTUE1RTVNX
-VVBPTk9MUFJNTEtMTFBOTUhHS0lKSklHSUlGRENGS01LSUdETE1QUE9MT0tPUVFR
-TlRTV1dXXF1pepKfp6utr66trKmrrayqqaqjpaunpKKkpaWkop+eoaKlp6Kipaik
-pquqqaeoraipq7G0tri2tba2trq7ubq4uLm4trm4ury9vby/vsK/vr+9vr+6uK6M
-aWNiXFRMSEVER0ZFQ0ZPZIiqur++wcHGxMXBvbu5ubGbclVRUk9RUk9PTUxJSkpL
-TVJTVFlVVVZUUVNUVlZUVFZWVlpWVVNUU1NVUlJSUVJRVFNWUlFQUU1LR0VHSU1L
-SkxLS0xJSEhJRUBDRUJCRUZCRUVGRUVGRUNCQkA+Pz9AOzo7PT4+Pj4/PDxAQD4/
-QEI9QDw8PTw6Nzg4Oj86Ojk3NjY4PTg6PDw9PkBAQD4+QERHR0lPU1dYWFhZV2Fi
-Y2ZmaW1vb25uaGZkYl5ZVVRPTElEPzs7Ojg5Njc3OT1BQUdGRkhHSUhRUFBPUlZa
-WF1cX2JhY2VlZmdqam1ubG1tcHJ0dnl3fHx6enl5e4B/gIB8e3t6eHVyaWNfWlhZ
-XF5nbXB1e4KGjpGRmZ2fop+joqWkoKOjp52amJaYlpmRk5SUkJKYmZmYlZabmJua
-kpiam5mZlZiWmZiXmpygnpubnJ6dnZiVmpybmJqbmpqam56cmpmZmZaXmZeVko6G
-fXZzbGpqaGhnaWtsbWdlamppa25vd3N2d3l4eHt8foGDhoWHi46SlJaXl5eUk5OU
-kpCVlZaVmJiXk4mAenJsbGtqZVxVVVNTT1JTVFZbW1NQVFNUVFZVVFVRVVRRUVFR
-T1BTT05PUkxMTlFRT1JUUVFUVVFTU1BQUVJWVVFPUVFRT09PUVJXVVNVUlJTUU9Q
-UE9PT1BQUE9QVFRTVFFRUlNUTU5QUlNSUlJQUFBSVFhUVFFRVFNTVFNUU1ZWWFtX
-VFRYV1hWV1dXV1hUVVZZVlZYWVhXVVhZWFdWVlhXV1lYWFdVWldXVVZWVlhXWllX
-W1pZWlxfXFlcW11cXV9bWVlVVFhYWlxbXF1bW11cXVpcWl5hX19dW1peYGBhYmBi
-YWBgY2BfYWFiYGBfYGFmY2FfYV9hXF5cXFtYW11eYo6+y9Xc4OPm5+np6+p/gH57
-fX9/fXx5e3l6e3x8fHt6en2BfHt7fnx9eXp7e31+fn5+gHx/gX+CgH59fn9+gH99
-fH58enZsXlNOSE5KT01ISktOTEtMUE9QUVBSUVJSUlRSUlBVWFRVVFhUVVdTUlJS
-VFVWXFtXV1RWVFBRWFdbXVxcWlhYWFdXV1VVVldWVFVVWFVZXFpgWlZaWVtbWFZe
-V1xdWl5hW2BcYGhjYWFZWlhYWl9cWFZaWFZZWV1cWFleXVthX1xcXFpUVVRYXlhZ
-WV1eVlVSV1ZQVFljYVhWWl5aW1ldW1tcXmFbV1lXV1lcXl1cWVdYV1ZWWl9jXlhP
-U1VeXVheX19eXmFgYFtbWlpcXVpVT05QVVpcWVdYWFRRT1JVVVBHQz46NjY7NTU0
-OTg0Nzg7QUNJTk9TVFhaXFdRTlBXV1VWUE9ST09LS09LSElOTVBLR0lLSUdJSkpJ
-SEVEREVGSExKSUdLS0xLTlBLS1BTT01NVFRVVlhYWF1viJupqqyrqq2qr6urqqqs
-rK6urKuopKaipaSno5+gnqOlpKilpKeqpaWoqKerraqsr7Sytbi2s7K0tri3ubu7
-uru5urq7vr2+wMLAw8LDwr6+u7m3pXtaU05QTktGR0hGRUZBREdOZoepurnCxsnH
-xsO/xL+9s552U09SUlFTUU5OT0tKTE1QUE5SVFVYV1ZZWFRUU1FVVldbWllZVlVT
-VFVSUVFRUFBSUVNSUE5MS0dFREhJSk1RTVFOTE1LRkNGSEhHRERFQ0dERUZEQkRE
-Q0BBPT4+QD46PDk9PDxBQUA/PDw9PDs6OTo6Ozs9Ojo4Ojw7PDk9Pj47Ojo4PDo7
-PDw+PD48Oz5AQEJERktQV1taWldbXmJkampoaWxub3BuZGRjYl1XTk1LSEVAPTo6
-PDw6PDk6PUBCREZCRUlHSUlPTE1OVVNWVl9gXVtcX2ZmY2hubmxtbnFycXR0dXd7
-enl7enyEg4GAfn99fnt3dm9nY1tYU1pcYGdvc3d9goaMkpSXmp6ioKGjqauipqej
-np6dmpqWlJWTlJSTkZSZmZSVk5icmJycmZiYmZaYmJKVlpeZnJybnqGfnJucm5eZ
-oJ6cm5ybmZeYmJuampiYmZeWlpOPiYN+c2xqaWhkZWZnaWxqamlqamlpa29wcnRz
-d3t7eXt8gIKEh4qNkJSWlpaYlpaXlpSVk5mcnJ2dlpSMhn56dW1wcXBpYFhTVFJO
-TlZVVFZdW1NQUlRWVVVUUlRVUldWUE5PUVFPTlBRUVJST1FTU1NTU1FSUFFTU1NQ
-TVNVUFBSU1JTVlRTUlRSU1VVUVFVU1FOUFBTVVJRU1FRUlFTVFJRUVNSUlBQTlBT
-VlNUUlVTVFVXVFNXV1VVVVFUVFhWV1pWVlZXVVRUVlhcWFdVWFdXVlZWUlVWVVpV
-U1NWWFdbWFlYVlZXVVZSUlVUVVZdWlhZW1xYWFpYXFpcWltZWlhaWlteW1tZWVlb
-WlteXl1dX15fXWBeXWFhY19fX19hYF5dX19iZGJgYWNjYGFeYV9gXVxeXmBjYGBc
-XV5aVlxiib3M1dvg4+bo6enq64OAfnp8e3t+e3p5ent6fH18fXx+fHl3e3t7ent7
-fX1/fX59fX2Df3+Dgn9+f35+gH9+fXt9fn15bGFaU0pKSEtJSU9OT0lHR0xPUVNT
-VlNUU1ZUUlVVU1BPUlNTVV1UVFVVU1dWVVZUVlRUVVlWV1ZXWVpYWFZbWFdZWFhY
-W1hVV1JTUVZWVl9cWV1cXV9ZXVlTV1VVWFZWWlhUXFxkZWFjYFxeWVhhY19gW1dX
-VVdZWVRXWFlcXGFcXF5bXlRUYGBfWFdaWVxXWVZZWlhZW2RgWVlcW1xeXFtcWlxa
-WltaXl5ZWFlgYV5eXV1fWVlYXGNgWlhWVltaV1ZZXmJgYF1dW1lbW15gX1hUUFla
-XVhVU1NTVE5RUlRZVlFJQj04NjQ0MzM3NDc3ODg5P0dGS1VVXFxfXVFSVllYWVZP
-TVRXUE5PT0pITlBOTVFPTU1LS0dHSEpHQ0NHRUhISUlJTU1KSkpMTkdLT05QUFRR
-VFNWWVdWW2V9mKOoqaysq6yur62pq6mqq66uq6irqqWnqKmmpqaipKGZnJ6hpKSk
-oqOkpaioq6yxs7WzsbKzsbS4t7m6vLq7u7u3trm8vb/CxMTDvr/AvL24uK+ValZT
-UUpLSEZISUpJRkZGRkdRaY2mtr/EwcTFw77BwsK8pHdVTlBSUlJUU1FQTU9QUVFN
-TlVdXF9fXl1XUVBRTk5TWVpbWVVVVFRWVlNNTU1MUE9QUlJRTU1PTU1MTUlJS09S
-UVFOSklFRERGR0lLR0dGRkVDRUVGRERCPz9EPj88Ojk5Ojk5PTw5Pj89Pjo3Ojo9
-PDs5Ozs4OTk7OTg3Oj0+P0E8Ojs9PDs9PUE7Ozs9QEJCREJFS1BUWVxcW1lbX2Vp
-bXNwam1sbG5saGBdW1VSTUpKSEQ8Ojo7PD48PDs9P0BFREdLSElISU1OS09TUlJW
-V1xdWVlcYWVobWhsaGxub3JzdXJzdnp5eXx8fHx7gIGBgIKBfXp2c2tiWVVTVVth
-aW53fX+EhIiPlJicnqGeo6enrqimpaOkpaKem5qWlJSTk5KTlJmYmJSUl5manJqY
-mZmYlZmZl5aXl5iXmpyen6GgnaCdm52enJ6dnJiYmJ6ZnJmampmZlZWVlY6JgXpz
-bGVjZmlmZmVnaGloaWdlYWZqam1vcnV3eHp8foF+hIeKjI+Tl5aVk5aXlpmXl5ea
-naKgn5iTjYeFhH53cXFzdGxgWVRTUFBSVVhTUldeWFBRUVFSUVRUU1JRVFVWUlBS
-UlJRUlFSUFBQT1JUVFJQT1BRU1VWU1BOU1RTUE9RU1NSUldTU1FVU1RXV1RVU1FR
-UVBRUlFRUlFRT1BTVFRTUVVVVFJVU1FYXldRUlRXWFZWVFZUVVZTUlNUU1ZVVFVR
-VVRWVlVWVVVZV1lYWFlYWVhdWFhXVlVWVVZVV1tXV1haV1dWVVNVVltXVllZWVta
-W1tXWFtZWFtbWFZYWVtZWltbV1dYWlpbWV5bWlxdXFtbX15dXWFgYF9dXl9eXF1f
-YGJjZGBfYWJjZmNiY2BhYV5cWllYXFtcW1pZXGKJvczU2+Dk5ejp6ezrfH58fX1+
-fHt6fn9/e3t8e317enp6eHx7fXt9fH1/f4GCgoGCgYF/gYSCgoCAgH15fH58f39+
-fHRjVk9KRkVJSklKUE5KSElHSU1NTU5TUlJXVlRVVFRSUFBQUE5VU1FUVFFSUlRR
-VFdWVFVXV1dUVlpaVFZWV1pZV1dXVFdZVFFRUVZZW1daX1tbW1ldXFVXVVNXVFZX
-VVVVWl1YWGFdX2FcXl1cYGFeY2NeWllVVldWVVdXVl1cXmBaW1lZWlheX1paWl1Y
-X1xeW1pdXl9dXWFaXGFhYWBgXllZVldZV15jX1xdXWNdW15fW1lYWFZdYF9fXFRT
-V1dUVlhaXmBhXFdXVldaWV5hWFNXXFpaV1JSTlJTUU5RVldXV1dNRUE7OTo2ODU2
-Njc3Nzs7PkFGTVZdVlhYVFRWV1ZTVVVVWllTUFNPTEtOS0tPVFJQT1ZSS0VGR0dB
-R0REREZIS1JQTUlJTk9PUEtMVE9YVlRUVldaWVxaZHKInKWsrq6ur7Csra6urayt
-rquqqaqqqKiqqainqauioKSip6Cgo6Gip6Wop6mtq66zs7OxsbG3uLq8ubm7vLu6
-t7q5vb+/vsDAwMO/vb++v7m5pXhdVVFLSUlISEdGRkdHRUNCRkdTbpOtusHEwMLA
-v8C/ubSkelJOVFVVVVROUE5QUlFPTU5SXWFjYmBdXVVQUFBOTlBTV1paWVhXV1hZ
-VFFSTlJRUVFQUFJRUE5NSUlKS0xMTkxMT0xOS0hGQ0VGRkdFRkZFQ0FBQ0ZGREFA
-Pz8/PT88Pz09PTo7PT4+Ozg8PDk9PDg8ODs7Pjs6Ojo6Oz07QD08OTk9PEFBQjs8
-PDpAQUZCRERCRUlOUVNZXVtbWVlaYGdscHJub25tb2lnZl9XWFVVTElIREI7Nzg3
-OT07Pz08QURISkhIR0lHS0tMUlFTVVlXVVhdXV9fX2ZoZmlrbnFucXR1cnN0dnZ7
-fHl8gYKEgoCChIF/fXVxbGVcVlZWWF9obnV9fn+DiI+TlpmcoKOkpqalp6ino6Kj
-oJuZmZaSkY6MjpCTkpWVlpWXl5iXmZianJiVl5mYl5iWlpmbm5mZnJ+dnZ6inpia
-m5mXm5ydnZucmZaYlpSTlpWSjomCeXBpYmNjZmZmZmVlZmVkZmhpZ2trbXBxc3d5
-en2CgoSHi4uOkJSYlJWVl5iXl5iZm52eoKGdmZCOiYSEfnZ1dHdza19XUlBQUlJR
-UlJRVFxfV1NUUFFTUVJVUlNRUVNSUlNRUU9QUlFQUE9PU1NRU1RNT1JTVFZTVVJQ
-VVdXVFNSUU9OU1RSVFJQUlVUVFZXVFZUUlBPUFJUUFRRUFJTVFNRUlRUVVFTUVFS
-TlJSVVRVVFdUVVVVVVVSVFZUU1NXVlVWUlNWVlRUVllZWFlZV1RVVVdbW1hWVlVT
-VFNWWVdWVFZXVVRYWlpXWFVWVFhYWldYWlpZWlhYV1ZWV1dXV1lbWlxcW1hcW1lZ
-WlxfW1pdWlpdXVxfX19eXlpcXV5hYmFgYGBhYGJgYmVlYmFhYmFgYFxYWFtZV1lY
-WVtbZou9zNTd4OPm5+nq6+p9fICBfH17eX1+f318e31+fXp7fHl5ent5e3x8f36A
-goGAgYKBfoCBgYR9fX5+fn1+f4GDgX13bWFWTUdIRUhHTElJS0xNSkpKS05OT0xP
-UFJRUlBMTU5PUVBNUlJSVFRTU1RUVFNTVFZZVFRXVldWV1dUVFFZWFZXVFdWV1pY
-VlVaV1paVlhXWFpWV1xWUFBRVlhXW1NTVVZbW1RbXV5gW1tjXF1gWl1paGFeW15b
-W1lXVFhXWlhhZVtXW2JcWl9dXl9gX1lcXFxfXWBgXV5dYV5fZWVhZWNcV1dWWVpZ
-W1xfXlhcXFdWW1pZW1hVU1VbW11dUU1QUU5TV1pbXV9YUlVXWFhbX15aV1xcWllU
-UFJQTlNRUlNYWFdZVVJKRUM/Ojg2NTc2Njk5OTk6PEJLVVRTV1VVVlZXWVhZVFhZ
-VFJPVFFLR0xNTFJSVFZaWVJPVE5JREVHRkNDRUZLTUtLS05QUUxLS05NU1dTUVNZ
-X1xaW2Zma3yToKasra2trKytrK+xrq2rrKioqKifpKakpqmooqOgoKSjpKGgo6Sj
-qKupq6+wra2urK2xtLa8ube2uLe5ure4trm9wcHBwcTBwcDEwcLCwLGTaFlVTUdE
-REZFQ0VFRUhDQUE/QUJUfKOyusDEwLjAwMDBvKN7VUxQUlNPTExQUlNXUUtKS1Fa
-YWFgX19fV1VVVlVRUVFUVFVWWFhZWlhWVVFPUFJUUFBNT1NRT05NSUlISUpLSktI
-TE5OT0ZCQ0NHRUNFRENCQj9CRUZBQEI8PTo7PTs8PD4+Oj07OTo7Ozk8Ozk5Ojg6
-Ozo+OzpAOjs6Ozo8OTw6PT48OTw+PD09P0BBPkBBQ0VGRkxPUVZaW1pcW1hYXmZt
-c3Fyb2tqaWdiW1dWVFJOSkZGQj87Njc6Ozo3OTg6PT9ERURHSkhKTlBPT1FSV15d
-WFthYWFkZGNnZ2hrbm90cHJydHV5d3Z3eX+FhISEgIKCgX18dm9oY11bU1tcXmZs
-cXp7gISHjJKUlpyeoKCipaSmp6ako6Kgm5qalYyMi4iLjY6Rk5iZmZWSk5aXmpiW
-l5mal5aXmpmYnpqXmJqam56bmZiam5uampmZmZqbm5qXl5eXlpiTkZKOiX94a2Bf
-Y2NlZmZlZGRjYF9jZGZpamxucW5vcnV5foOEhIiLjI6TlJeUlJWXmJeZmZidnZ2f
-npmWl5WOh4KAd3R3eHZqW1VSU1JST1NTVVNRWVxdU1BPUVJSU1NUVVRTU1BPTU9R
-Uk5QUU5QUE1QUFJTUlFSUlNTU1ZXVlVTU1NTU1RSUU5RUlNSUVJUUVNTVFJTVFRV
-U1ZTUFFPUVRVUFFRUVJRUExSUlNRT1FRUVNUUlNUVlZXVFZWVlRVU1dWU1RYWlhV
-VllYWVpWVVhcWVZYVlZVV1ZVVlZXV1JWV1ZXW1pWVlVUVFZdWVdWVFlYWlhXV1hZ
-W1hbWVlYWVlZVldaXVpYWFpgXFpbWVhaW1pXWFteW1tcXV5cX19iY19cX2JjY2Bh
-YWNiY2JkYl9fYV9gYGFfX19fW11dWFpaV1pjhrvM1dvg4+bo6enq64B9fn17enp7
-e3p6fnt6fXx5eHh5en1+fXx9enx8fn1+fX5/gX9/fX19gH1/f4GAgH9/foKAe3Nn
-WFBLTUxJQ0dJSktMUExJSk5MT1BOSkxQUVNRTEtNTVJUU1RQUVRTUlFPU1ZUUlNT
-U1RTV1hXVldWVFVSU1dXWFZUVFheWFVRUVNXXFhWWlRWWlVYVlBRU1ZaWFtdVlVX
-WF1YVl9fYmBYXF9cYl9eaG1lZGNgXF5fXVtZV1RVW2FiVFdcXFRXYGJhYV1aV11h
-X19cYWFgXldgX2JlY2JeZl1dXVpbXV1dX2FdV1dYVllaXWBcWFNTVFpbWlhQTVFW
-VlhZV11dWU9MUFlbVVddX1pXW1hWV1JSVFNQVFJRU1VUVVVST0lIRkNDQjg4ODU2
-NjY3Ojw6QUNJTk5RU1RVU1RTVlVSVFRUVExMS0hMU1VSU1NVXFtWUlJRTktHR0lH
-QEFERUZISEhMU1JRT0xNUVNSUFFUV1xcWVhhX1pdcoufqK2xsK6trK2srayxrKyt
-qainp6Wlp6alpaGgo6CfoKOloqGipKKjp6mtsLCurrCysbO0ube2tba3uLe2uLe3
-u8HDwsbIyMbEwL69wMS8qXteVFBKR0VIRkRDQUNFRUVCPTtAQkVchae6wcDCu73A
-wsG8qX9VTE5LS05OTlFWVFVQT01JS1VfXF5eXV5aWVZYWFdSU1VVU1RXV1dVUU9O
-Tk1PUFNUUlJVVVFQTk1ISUdIRkdKS0pMTktIRERCQUFEREJAQ0NAQ0FEQUFDQDs8
-Oz08Ojo7Ojs+Ojo6PDo6Pj44ODk5Ojs/Pzw8Ojs6ODs+PDo4Oz46Ozg/PT0+Pz1A
-QUA+PT9CQkVKTE9XV1hgYV9gW1ZYW2NrcHJ1cGxmYFxdXVpUUU9LSEhEQT4+Nzg1
-Nzk5NzxAQkBFS0lKTEtRTkxNSU5SVVZZXl5gZGRkYGJlZ2lrb29wcXF1d3h2eXyB
-foCAgYKCgoCAfXh1bmZfWVZXW15gZmt0fYODiYmJj5WYmZygoqalo6KlpqajoJ6b
-mpaPioeJh4mOj5KRkpSVlZOTk5WXlpWVmZmYnaGgnp+fmpmZl5mXmJeYl5aanJuX
-l5WamJiZl5yYlpeVkpOPjYuFem1fWVpeXF5iZGJeYmJmZGRpbWttbnBxcXN1eXd8
-f4KGiIiQk5SXlpWVlpmYlpiZmpucnJ6emJaWlJGIgn12d3d5dGdcVVBPT05OTlBT
-U1VXVlpXUVRRVldSUlFRUVJRU1BNTU9NTk1OUFNTUlFOUFNTVlZUU1NUVFVXVFRT
-VFNTUlNTUVFST1JQUVJVUlFSUVBRUFJTU1FRT09TVVdUU1FVUFBRUU1OUk9PUFFQ
-UVNVVFdVU1VWU1dYWVZlV1ZUVVdWVVlWVVdVVFRYWFdaWVlbWFVYV1VUWFZXVlJU
-V1lYWFdXWFlVVVhZVVZVVlVVV1lXV1lZW1xaWFpaWlpXW11bWldaXmNgXFxdWlta
-WFlcWl1eW1xgYF5fXmBgXl9fXl5fYmRkZGNkZWJfYl1dX1xdXVxaW1pbXF1eXFtd
-Ym+KuszV3eDj5ufo6errgH5+f39+ent6eXp5e3t9fXd3d3l8fX1+e3p8eHl7fHx6
-eXx8f4B+fX9+fn18fX5+f35/fnp0b2JUTE5MTElLTEpLS0xNSklKTk9OTk5MSk5T
-U1RQT1BQU1FQVFVTUlFTUVVTUk9RUFFUVFVWV1ZXVFhUVFVXWVdVU1FRWFhZV1JQ
-U1VYV1dYU1lZW1pVUVFPWFtaWVBPUlZhX1hbYV9jXFleWlxkXmJrZF9lX19ZWVtg
-Y1lYUFhaX2BcW1hVXFtkZ2NiW1pbYl1aYFteYF9eXGJkYWJiZWFkYGZjYWBdYF5e
-X1xWVVVZWVZaYV9XWVhYWFhZWFJSU1VWWVtcWldTTk9TVlhUVFtcW1lXV1tXUFJS
-VFdTVFZTUFVaVlFQUE1LRkZAPDo8OjYzNDk1Njg7OzxESExQUVFTT1FTUFFTT1JS
-TUtGTFFYV1ZTVFdcV1VXVVNST0pHSUlHRUdGR0ZIR0hNTE9TT1BYW1VUVFNVW1dW
-WFtcV1hlfpajra6wsK2qqa2wsqytra6tsK6sq6ypqqemp6GgnKCgo6KjoqOioqWl
-qqqsra6tsLKxsrW6u7q3uLm1trW4uLq9wsXExcLDw8TCxMPGwLCRaVhRTU5OS0hG
-RkdIRUNEQkFCPz89QUxpkam4vcPBwcPAvrypgldJSUpMT1BUU1ZXUk5OTEpJUltY
-WVxbXl5dWlVXVlNSVVNTU1VVWFdOUE9MTVFUU1dYWlVSUU9NTExJSEhIRkdLTUtN
-SkdIREFBQURCRURGRUA/QEE/P0A/QT8/QT49ODk3Nzo5PTw6ODo7Ozs6PDo9Ozs7
-PDk8PTo3Oj07PTs7Ozo6OD09Qj1APT8+QEFBPkBBRUdKT1VXWVtgZWNgWVdYWF1o
-cW9sZ2VgXFxdWlNQTk5MSElDQD88NjU2NTQ4OD5BRERISkhMTU5PTUlPUlZVVlhc
-XFdbXmBhYWNlZmtub25yeXhzdXZ6fH6AgIF+fn+AgX9/eHVtZWBcV1laXWFpbXR4
-foaLjouOk5SXnZueo6OnpaOjo6Kdm52enJmRiouIh4qOjo6PkJCRlZaVlZOWl5Sa
-mpeZnJydn5uampiYmJiampuVlZWcnJ+dmpeVl5mbmJWVkpaTlJSNjYJ3Z1ZOUldZ
-W11cW1hWV1tcX2VqaW5vcHF0dHd6eXl7g4aLjo6Ul5qXl5iVlZibmJqZmpmdnJyZ
-mJaSj4yHf3p6enlzZlpXUk9MTlJPU1VQUVBSU1hRTk9TVlpVVFJRUVFTUVNST05S
-UFBPUVFRTk9RU1JSUVBOUVRUVFVUVFZXU1BQUlRST1FUU1VRUFJRUVFQUFJSUlRU
-UE9QUVFSU1JRUVJQUVFPUE5OTVFRUVNUU1VWVVdXVFRTV1VXVFxWV1lXU1VXWFdW
-VVZVVVlYWFpXWFdUVlhYVldYWllbWlZVVVdXVldXVlVXVFhYWFZZWVhXV1hcWlhY
-WlhXWVlYWFpYW1xcXV1bXVxdXFxbXFteW1tbWlxdX2FfXmFhYF5iYmJgX2FjY2Vn
-Z2NkY2JkZV9dYF9cXFtbXl1aWl5bXFtbX4q5y9Xb4OTn6Ojp6+t8fH99fX99e318
-fXl8fHx9e3yAfXx8e3t9enp+fnt6enp5d3p7fH99foCEgn18f35/gH9/enNoW1JK
-S0xOTExMTE1OTU1ISEdJSkxQT01PTVBTUlJUVFFUVFVWU1JTUlFMTExRTlBSVVZW
-VFZUVVZZV1VYV1lbWFdTUVFVVFZXVlRTVlhUXlxdXlpYVFFTVFJWWVhWTVFSV1xd
-WV1ZXF9bX15WXmBcZGZfXl1cY11bXmBgXFtVU1dcZ11ZWlZhX2NlZGRbXVdcWldd
-Wl5fW15dZWRfYWBlY2BkZ2VfXV5dXV5cXl5XVlhYW11jXFdYXVpYWFpYV1hVVVhd
-YGJdWVJOUE5UVlRXW11ZWVZWWVtaV1ZVUlNUWVZQVFRRVlRQUFBNSkVDQUE4NDc3
-ODY5OTc3PDxGTk9RTVBNUlZLTlRSTktNSUhNTlFWVFVZV1ZWVVdXWVVNS0dIR0dI
-RElOSkVCR0RHTVFQUFZaVU5PVFpfW1VUVlJWWmZ4j6Kpq62urq2rr6+wsbCwrqyr
-ra2tsK6vqaikpqWjoaSioaGhpKKfoKaorLCusLCxsrGztrS0tbO0t7e3t7W5vr6/
-xcbHxsXEwsPCw8C3oHheWVVOUVFMSk1KSEdDQUNBPkBBPDxDR1V3nK26wMK/wcK9
-uqWGWkpIS1NRUU5QVFFOSUtMT09TVlRZW1pcW1pbZV5TUlVVT09TVVZXVlJQUU5N
-UFJVV1dXVlRQTkxNTElHSEdISEtNS0hHR0VFQ0VDQ0RHSklFREJBQEI+Pz9AQEBA
-Pj87Ojs4Ozo6Ojs+Ojg5Ojs7Ojw8PT8+Ozg7Ozw9Ojo8Pz1APT9CRD5CPzxAPT9C
-P0A/QkNDRUlOVFtaWlxeYWZlX1laW1xlbG1rZ2NhXVlZW1VOS0hERDw6ODk6Pzo6
-ODo8PkBERkZLSElJTE1LTkxLUVhXWVlbX11bYWBhYmNna2xscXJzeHV5eXt9gH9/
-gH9+gIB/e3l0cWtkYlxYWltdY2pvcXd9f4OIjIuOkpWXm5ydnp+fnp+hn5+cnpuZ
-l5CNiYiGhYiMioqOj4+QlpSUlJWWlpiWl5mZnJucmZmYmpqZmZuenpmTk5mcm5ma
-lpeXlpiWlJOQkZOSkpGIgXNjU0lJTE9TU1FQUU5OUllhZWpubW9wdXZ4fXt1dnl/
-hImLjpOUlpeXmZiWlpeXnp2ZmZudnJ+clZGOiISCgXh5eHdlXFpYWVVPUFRSUlJQ
-UlJVV1dSUk1VVVdWVVNRT09RVFNTUFJQUk9PT09RTk5MUFBOVFFRU1RVVFJRU1JR
-U1JSUlJPUVRTVFBRUVFRUVNTUlFRT09RUk9QUlFOTlFPUVBQUFJRU1JSU1FSWFZV
-V1VQUVVVU1RTUlZXVlNTVFJVV1VVWVZWV1lZWVZWVVVXWlZUVldXWVdYWFZWVlRV
-WFZXWVhWVlhYV1hYVlZXWFdYV1hUWV1bWltaWVpbWVdYWllcXl1dXVxcXV9cXF1g
-YFtaXFxdXV9fXWJiXmFiYmRjYmJgYWJjYmFlZGlnZWNfXlpaX15aWV1cW1xkX1th
-j73M1dzg5OXo6Onq63x+e3t4enh4eHt+f318e3t7e3t+e3x5eXt7fX5+fXp3fnt5
-eHl8fHx7fH5+fn98fX18f316b19SSEhFRUpKSkhGS05PTUxMSUhITFBMS05LTVJP
-UlNWU1JUVVRVU1NTU1VNTlBQUVBSUFFWV1RUVVhYU1JTVVdYVlVWVFRVVVZVVlVZ
-V1pgWlxZWVtXVldSU1dbXVZQUlRZXFldXVhdWVpgWltfWl5kX1xiXV5fW11cWV9b
-V1RXWFpjYFpeXlxYYGFdX1tbWVRYV1dVWl1cWllgZmNjZWNkYF9jY15gW15mXl1n
-ZV1dYmFjZ2BcWVhcWlhWW15cWFJVWVpfZmJbV09QUVJRU1ZbW1dXWldYWl1YVVdS
-VFddV1VVWlpYU1FVU09MSEZCQTw2ODk1MzY4ODg4Oz9GTFBOTVBUT0hMUExKSk1N
-TU5LT1RRVFVQU1ZZXVtaUlJNSUhGRUhGRUpKR0hGRUtPTUtOUFFTTFJVVlhYU1JR
-UVRYXmh+l6atra2vr7OwrK6urrKura2srq+usrCtp6WnpaOlpaWlpqGhoaOmqK2s
-rKuqra6vr7GysrW2tLW5u7m4u728vb/CxMTBvsXJxMTDvreQZ1pXWFNPTUtLRkdD
-QkJDQ0NBPz88PT9GTF6Dpbe/v8DBw724q4pbRUdPUlZTUVNST0tLT1BSUVFUVVlZ
-XU5YXGBhWFNQUE9PTk5VVFZUVlZVVVFQUFRVVldUUVFRTU5OTU1ISUdJSEhISEZH
-QkhJR0VER0dGRUJCQT8+Pj8+QD9AQkRDQj45NjY5ODg6Ojk6Ozc4OTk4Nzg7Ojo6
-OzU9Qj06Ozo7Ozw/P0FAQD09PD5AOTk+PT09P0JNSU9PU1NWVVhdZGVmXllXV1pl
-bWtmY15bVldWV1JOSUJAPDs7Ojg5ODo7Nj09QURHSklJSEpLTEpJTlRSU1hZWFxf
-YFtfZWllZWVnbG5wb3B2eHl4fX+AgYKDg4KBgHt4d3Nya2VdV1dZWlthZGtwdXd8
-foOFh4mPk5aVmJudm56goZ6goZ+empaTko2GhYSAhYuLhoqQkY6QlZOQkJSWlZSW
-l5iZmZmXmZiYnJyYmJudmpaXm52al5eXlZeWlpWYmJSQkJGNjod8cVxMSkdFSklL
-S0xLSElQWV9paWpxcm9ydHZ3eHh4d3yAiIyRkJGTlpaSlJWVmZudoZucnaCgoJqW
-koqLi4aBd3Z0cGZgWllWVFJWVlRWU1BNU1hVVldST1BVWFRZVVFQT1JTUlBPUFJP
-Tk1OTlBOT09SVVJQUU9SUVRSUlJSUlBVUlBTUlVVVVdUUlBPTlNVWFJUT1FRT09Q
-UE1NTk5QUVBQUVBPUFJUUVFQUVRSVVNUUlRVVVZXWVVXV1pXVlVXWFhSU1ZXVlVX
-VlVXV1ZTVVVUV1VYWVhZWlhWV1hYVVdYWFhYWlhZV1pXV1pWUFRVVlZVWVlYXV1b
-W1pYXl1WV1paXF5cWVlaWllbW1paW1lcXlpaXFtdXFphYWFgYWFiZWNiY2BjZWJj
-YmJoeXNlZV5bYmBcXVlcXFxbW19dXWSRvczU29/j5efo6erqe3x7fHx5d3l6enp9
-fH18e3x7e3x7e3x8enl7e3p8f31/en16d3h6fX98fHt9fX9+eXh3enJmWVBOR0ZH
-SEpNSUtKTUxMTk1KSExMS0xNTVJMTU9MU1FRU1FTVFJTVFRTUExPVFRPUFFRVFhZ
-WFZVWVlVUlRYVVdVV1JRVFRZVVRSV1hXW15bXVxbWlhbWVNWWFxcU1VXVVhZVltZ
-W1xTWWBbXFxXX2NcX15cYVdTW15YXVhWV1ZfXWFdXGBXU1VbXFpbWllZV1xXV1ZU
-XV1dXl9nZWRiYmNeYWBfYlxZXGFdXmRmZGRhY2FiXFhXW1taWFhcX2FaWVVWVVlh
-ZF9cV1VST1NVV1tYWF9gXVpaXVpXV1ZWV1ZUVVhdXFpXVFJUVlJRT0lGQTw5ODg5
-ODk4Ozc2OD1ERUVIUVVPTkxNTEtPUk5NTlJTUU9SUldXW1hYX11XUlFOTUhFQ0NH
-R0pJSERDRkdITUxPTk1OUVNUVlNQVlJPVVpfYnSOoqupq6+ysLKvrbKwsK+ur7Cw
-q6uvsq6opqmpp6ampaeop6OhpaipqaypqKmsq6uvs7KztbS0uLa4uri7u73BxsTE
-xcLAwcLDxcW9pHhcVlJVT05MS01KSEZFQUNBQUFCPz09QEBFSmSMr77BwsXCwbqu
-i11FREtRUFdaU1JTTk1NUFVQTlNRWVhZTVheXFlXWFZRTkpLSlBWUE9TVVhWVFRW
-WVZVVVJRT1BRUU5LTUtISUhHRURFREZIRURDREVJRUVDQkFBQUBAQD4+QUJBQEJB
-PT49OTg2Ozw6OTY6OTU4Nzk5Ozw4Ozg8ODo6OjY3PDo6Oz89Oj07Ojw7Oz49P0A7
-PD1CRFZPSlJTVVhSVVpeY2RmYFxaXFxgZGRhXltXVFVTU1JORkVDPDxANDQ5Ojo9
-PT1DRUVHRUdHSk1KR0hPUE5RV1taWFxcXV1hYmhnY2dlaGtwc3FxdXt7fn+Af4B/
-gX9/fnp4dGxrY1tXV1laXF9iZ2txc3Z6gYKDhoyPkpWWlZiWl52cnZ6foZ2amJeR
-jomGh4ODhYiLjY+QkpOUkpSVkpSWk5SWl5aXl5mam5mcmpqampuYmZiZmZmXlpaW
-mJeYl5WVlJKSkZONhnxuWklHRENGRkdHS0pISE1XYGltbm5wc3ZycnZ2eHl8fICF
-jI+Ok5aTlJWVlJeXmJyio6Ogn6GdmZaUjoqLiIN9d3NpYWBeW1RVVFNUVldSUFJR
-VVZRUlRUUVNWWldPUFFPVVNQUFFPTU9PUFJTUlJUVlFTU1FRUU9RU1RRU1NSUVFP
-UlRWUlBQU1dVUk9RUFRSUFNTUlBQUlFQTlJQUk9QT1JSUU5PU09PUVFSVFFPT1RU
-UFNSUlNYWFRUVllZVVRWWVtYVlhYU1ZXVVZVVFNUVVdXV1ZWWFZWV1dWVlVXVlVW
-VFNTVVZWVFJVVlZVVVZXWFlaW1xeYV5bW1lbXFtZXFpcXFxaWltaWVhaWFlbW1pe
-W1tfXl9hXl1fXl5dX2JkZWdlZmNkZGNiYmFna2NiYFtgYFxgXV1cWFtYVl1lapS+
-y9Xc3+Lm5+jq6up8e398enl6e3t6enp6ent6fX99fXx7fH19enl5enl4eXt7enl2
-e319gH58e319fnh8fXl2bmBUTkhESURGSU5PTkxISUxKSklIRkZISUxMTU5QUE9P
-UE9NUE9TVVNSU1FPT1BQT1BOUFFUV1ZWVFRWVVdUVFpYWFdVVFNTVFdWW1hYWlhd
-WVhaWVpZW15XVVpXXFlWWFRUWlVVXFVfXVVcWVxfWFpnZWNiWmJkXFtcXFlgWV5Z
-WV5cW1tbXFNRUFhaVVhcV1xaYVpZWVZeX2FkXmVhXVpcW1teXmJfVFZcXVxhZGZi
-YWBdXF5eWVlcWldcXF5dW1dWVlNUWWNgXF9fWVFQUlNaW1pja2VcXVtbWVZaWFNP
-VFlZWlxZWFRXXGBdWlpUTkVAOzk2NTc2Nzk5Nzs6OD08Q0hSVFJOTE5MS1JSVVVT
-VU9KSk1OVFhcWVdaWFhTUFNQRUI/Q0hNSEdKSUdISEtNT1BMTk9QVFRRUFBUU1JV
-WFxeZH+ao6mtr6+vrK+wrbCwsa+xs6+qq7KzsrCrp6anqaenp6qppKOlpqenqKur
-rKmrra6vs7WztrO0s7W5vMPHxsTGxMXGwcLDwb6+vbCMY1hXUlFOSkxMSklHR0hG
-RkREQ0I+Ozw8O0BEU3igusG/wMK/uq+SZEdDSFBSVFdQUVJPSUpLTk5NUlJaWVpY
-U1hZVVRXV1NOUE1OUVFNTE9RVVVUV1dYV1ZUUVBOTE9PTU1NTUxKSUdERERGRUlJ
-R0dGRERFSUdGQUFBQkNDQkJBQT9AQkFBQEJBQD88OzU5Nzg5ODo5OTk5ODs7PTk8
-Ozo7PT05PDw7PDo5Oz07Ozw5OzxAQkA+P0BER0lLTVNYVldWVlxiY2VkYmRjYl5f
-Y2NfXFdUU1JUUU1ORz8/PkA5NDQ5ODk4OztAQkRGRUhNSktMUk9SUlVZWltWWVpc
-X15iY2JlZ2dpaWpvc3Vzd3p6f318fH19f359fHlya2hjXFhVV1lbXmJlZ2xucnh7
-fYCChYWIjZCSkpSUl5mam5ianZ+emZeOiYeIiYeGiY6MiouQkZCSjo+QkpOTlZSY
-mJmfmJibmZqXmJaWmpyYl5WYmZmXl5WSlpeXl5WVlJaVkoyCd2ZPQkJCREVHSUtM
-S0lJTlZga3BvbW5tcXV2d3t8enh7f4OIiIqNjo+RlJOUlJSXnKCgoJ6goJ2dmpSN
-ioeFgXp5eW5jXl5ZVFJSVFdYVVNSTVBTU1NUVlJQUlRXVVJRT01RUlJQUlBRTU9S
-T1BRVFNST05OUVNUUlNTU1NSVlVTUlJRUlJUUE5MUFNSU1VUT1FPUVFQUlJPT1NP
-UVBSUU5LS1FTVFFRT1FTU1ZRT1BTUlJTUlNTUlNVWVpbWlZTUVRSVlhXU1NWVVNU
-UlVUVFZXVltbVlZVVlRXWVlVVVhXVVRVVVVWVlVVWFVXWFtWVFdWWFpcW1pcW1hY
-WllaV1hZW11dWVpaWVtdXFxcW1tZWVxgXVpcXmBeXV9iY2FhYGBhZWVmZ2NhYWFe
-X11cXV9gXV1dXmBiXFxfXltbW2Jomr7L1Nvf4uXn6erq63p8fHt8fX58fHt4ent5
-eHt4fXt4ent6eXl6enh4eXl5e3l7gICAgX5/f397fnt8fHx+fHVoXFNNTExMTEdE
-RkdIR0lGSUlKSUlGSUpHSkhMTlFSUVNQT1BPUFVXVE9NUVFRUVBQT01QUlJPUFBN
-TVNUVVFTVFdbV1ZXV1daWVdaWVpZWFtYWFRWVlVYXFlbW1VZVVVbV1ZXVVtYWGFZ
-V1lWXF1UXmdfZWVaY19cX19mXV1fXltXWldWW1lbVlBOVVdVXV5YXFxeWFlaWGJh
-YmZcXllZYGBiXV5cXlxUVFtdXGNkaGNiYVxdW1pbX11bWVhbXFxcWVlTUldeYFlU
-V1lXUk9SXGBbXWdqX1xZVFhXVVZYVlBTWlpaXlxdXFpaXF5cWVVQSkdBPTg6OTc4
-ODg1Njg7OjxETFNUUVBNS01RUlRQT1NSUE9NSk9QVlpZVlZaWVVPTkhFQkJHSk9K
-R0lMSU1NTFFXVE9ST01QTUlMUVRSVVhVWFdecYmdrKyxsrKxrq6tr6+vrbCur6yr
-rqqutK+pqaupqaqrqqusrKulo6Ooqqqrra6tr7GzuLe1srW3tLe+wcvKxcPGysXE
-wcPDwb25nHRfWlNNT0xLSkdHREdDSEhCRkVFRkI9PDs9P0FGXYqtucDBv72+s5Vq
-SUBCSk9OTE1LT0xJR0dISkxPU1ZWV1lXWlZVVVRWVFNOTU1RT1FPTU9TU1ZYWVpb
-WFZWU05PTUxQTFBSTkxMSUZGRERLTUlKRUFBREdGREZEQ0NAQUFAQUA9P0FCQEI9
-Pz1CPjs4OTk3Njo4ODY3ODU7PDs7OTk2ODs7Ojo6Ojw8Ozo7PUA/Pjs8Pz9AQUE9
-PUFESEpPVFVYVlZVWl9namlnZWRhX1xdYGBiW1dRUk5QS0hEQEI9PkA5NjY3NTk5
-OkBCQUdJSEpKS1BPT01RUVRYWVlbXF1bXmJkZGdnZmZqbG1xcXFxd3l8e3p8fX99
-e3t8d3FtZl5XVVZWVldaXmBiZmZrcHN3gIF/gYSEiIyMjpCQlJiWlZmcm5iXlpGI
-h4mJhIKIiYWEh4qNjY6Jio+NkJaOk5eXmJmampyampiamJaYmZmWl5eZlpaUipWX
-lJWYlpiWlZKPjIF3YkxFQ0FBQENFSEZLTVBSV2JucHF0b29xdHR3enqAfn+DhIWH
-iYqNjY6RlJCQk5OZm6OioJ+fn5+bl5WOjIWBeXV1cGlhXVtYU1JSU1VTUU9VUVJV
-V1dYVVNQUlRUU1NOUFBSUlFRUVRTUFFSUFJSUU9QUVJRUFJRUVJSUlNSU1ZXVFRS
-U1NTWFJRUFJVVFNUU1FPT1FPUVFQUlRQUFBPUVFPUlNRT1JOT1NTT09QUFNTUVNT
-VlVWWVhYV1dWVlRUU1JTV1NQVFRUVlZVVVRWYVhZX1hVVVNVVVNVVllXVlhWWFVV
-XFZTV1hYWFdXWVtXWFhYVlxaWllZWVpZWlxfXlxYWVxZVllcXF1eXltaXVtbXV9h
-YFhbXGJiXV5gYmFkZGJkZGVlYWFhYGBjYV5eXGNjX19dXV1dXWBcX15hbGaYv8vU
-2+Di5efo6erreHl6fXt7e3x+enh8fHp6fHp8d3h3d3h3enh4eHh7fXl4eXx/f3t7
-e3p5f314e3x7gH95cWVYUEtLS0hJSEVESklERUVGSElKR0dIR0pMSUtMTU5OUE1L
-S01PT1RSUE5PUVBOUE1OTVJUU1JPT1BRUFFQUFNXV1ZVVVNVWVdZWVtbXllaW1ZV
-VlVVVFVcWFtUV1pXXFtcX1hTW1ZZXFNXWFRcWllmX11jWlthXV9fY2ZbX2FdWFFW
-WVhWVl9aVFZSWFxfW1VdVVdUVlhVX2JhZV1fXVVeXWJgY1paWVVYVFtdXV9hY2Ja
-WFpYV1pdWlteWVlZW1xeWVFUVlpbXVlVU1FTUFFfaGBhZ2BWV1xZWVlYWFZQUVRY
-WFthX1xUU1laWFxaV1NUU0xIQTw6Ojk3MzM1NjY3O0NGSEtOU05OUVZRU1BSVlJQ
-TE1KTFFXXFtXVlRWW1ROSE1MTE1NSUdRT05NSkpKTFFTUlRSUk9NT1FRUVJSU1NR
-VFpfdZKkrq6ur7Ozr7CysLCvr7Kzr66trK2wsa2qq62vqqyrq66up6iko6erqqut
-sbGxtLW0tLS1tre2u7u9wL/FxsTExMbGxMjDuqyEZWRVUlFOSktHREVFRkZEREVF
-Q0JCPz06Ozs+P0NMcJqyvsLBv8C1nHFLQkRFSkxIS0hKR0lMTUxITlFUWFRYWFZX
-VFNWWFZWU01MTk1RUVBQUFFTUlJTV1daWFNQT0xOTU9RT09LSUZGQ0hPSUlJSEdD
-QUBDSEREQkVAQEBAQ0FAREE/REE9PDs+Pzo+OTU0NTI4Ojg2Ojc7PTk8OTk9Ojw7
-ODk7PDk3Nzo6PDw8PDs+QDw7Pj1CR0NCQ0JDR0lPUlVVVFVaXmRnamRkYmFeXFpZ
-WVxbVVNOSktJR0ZFQUA7OTg4NTY6Njk8QURFSEVHR0lMTE5MTFFVUlNXWFxbXWFj
-YGJlYmFkZmhpbnBycnJzdnd5fH1+enp9enl2cGlkXFdTVFZUVFRWV1xhZWZpbXJ1
-en+ChIKChYiLiYuPk5KWlpiZmpqVjYaBhIN/gIKHiIaHiImLjY2PkpSVkZWTlZiX
-m5ybm5yZlpiWmJucmZmXl5eTlJKSmJiYlpmZl5STkIyHf2xYSUZBRkRERUhJS0xR
-VVRYZ3FxdnV1c3N3d3l7eoCEhYSIiIiNkJCOio+OkJCRlpucnZ+dnaGhnpqXj4uJ
-h4N8enRsaWNfXl9ZWFdXVVRUVlRRU1RWV1RWVFJSU1RSVFNOT09SUVBQT09QS01Q
-UVVRU1FRUFJTUlBUU1FRUFBRU1RVU1VTT1RTUVJQVFVUU1FQUFBRU1BSUU9QUlRU
-UU9OUVRTUVJTVVVRTlBRUFJRT01TVVJRU1VTVFRXV1ZUVVVUVlNVV1lVVldYWFdX
-WFhZVllWWFZUV1VSVVRTVFdXVltdWlliWVhYWFZXWFZaXVtYWlxdXl1aWFlaWFpY
-WlpZWlpaWllaV1pcXV5aWlteXVxcWl5dWlldXmNlYFxdXWFiYmNjYmJiY2NhYWBh
-YWFiYGNhX19eXVtbW1tdXFxfYZC+zNbc4eLm5+np6+p6eXp8fHp6fX94d3x8fH5+
-fXl1dXZ3eH19d3Z3ent7fn59e3x+fHt7en99gH56foB8enFlWE9OTElLRkVHRUZF
-REVEREZFSEpLTklLSUhMTktOTkxRTk1MTVFSU1FQU1FQUlFSUUxNUVBPUE9PUFRR
-VVVVWFNVV1VUUlJRWFlWW1xcVlhXUVRYXl1XV1hZX2NgXlleW1xeWFRXVF1ZUVZU
-VFhVXl9YXF5YX1xcYWFtZVZaXllWUVdYYFxWXVtaV1ZfYWBaVVtXV1ZTVVhgYmBk
-W19eWGBcYF5cYVlbVlFRVlxeYGJgX1dUV1ZaXl9fYF1YXFtdYGVaUlJVWVhcW1VT
-WVNVWWJlX2VlXFRWXFxdW1VWUk1LUFNUV1haV1RXVlZWW1lZW1hXVUxFQDs/OTY3
-MzQ0Mzo9PEJDR0tRUVNUV1ZTU1ZYVFNOTk5QU1VXVVNTUlRXVU9MSEZKSkdITVFM
-TkpMS0tLTVFaWFdUTUxUUE9TU1JXXVdaV1toiJ+qq6qpsLK1s7Kvr7KwsK6vsK2w
-sq6wq6qqrLGrp6irrqqqqKWlpqutr7CytbSxsbW4t7e2s7a9vb3AvsHEwb/AxMbF
-xL+wj2hbVlZUUVBNS0dISURDQkNDRENDQEE+OTY5OT5FQ0dUgKS0u768urKdc09E
-QkdKTUtMS0pHSEpISElRVVFSUVdfWVlUU1ZYVFdST01MU1NTU1VQTU9PUFBUUlNU
-UlFRUVBPTE5QS0dHRUVGTFdMSEpJSkZBQUFEREdHREVBQD1AQ0RBPz4+Oz08PT08
-Ozo6Ojc0NDY/Oz47PDo8PUA5Nzg4PDw8QD46OTg4Nzg5Ozg4Pz49PTs9PT1DQz8/
-QERFSkpQVVlbW1pcXmVra2RgXVpYV1ZVVVVUVE5LSEdFRUM8Nzo7OTo3NzczNTxC
-RUVIRkVISkpPU09LTVNSUlZYWllbX2BgYGJiYmNkZ2ttbm90dXd7enp7e3p7e3h4
-dnNvaWJbVU9OUVZWVFZYXV9hYmRrbW9yd31/gYGCg4WDhIqLjZGTlZiYlpSNhH59
-f4CChIWHioaJioqLkJGUl5WVlpeWlpiZlJSXmpyal5eYmZqWlpaVk5OUlJKXlZeV
-lpeVlZKPjYR5alZHREJAQ0NDRkZJTVBSVFpia29xcnR2dXNydHp+foKCg4aHi4uN
-j42Li5SRkpGTlZeanJubnJyYlI+LjImDgX97dW5qaGdiYl5dXFxYVFVUVFJQUFFV
-VFZVU1FRUlJUVFJOT1JTUk9PUVBPT0xNUFBOUFJQT1BPT1JTUVJSUlJVV1VVUVFV
-VVVUVFRTVVNSUlBQU1JUU1RRUlJQUlJRUVNTUlJQTlNST05ST09RVFNUVFVTUFJQ
-UFBWV1dUVlNVVlZXVVVVVFNXVldXWFhZWlVVVldWVVVTVldVUlNTVlRYWVpaXVlY
-VlhYWlhYV1pcW1laWVxeWVdZXV9aXFhaW1pdWFpbXFtbW11ZWlleW15bWlxbW15c
-W1laXmJkYV1fY2JjYl9gYV5gYmVhYWRkYmVjZGJhX11aWlpWWllZXF9kjb/N1dvf
-4+Xo6Ons63p7e3t6fHl6e3h5ent7fH18fH56eHV2eXh5enl4eXh8fHl4eHp8e3t8
-ent9fnx+fXp0a19XUEtMR0dKSEREQkNFRkZLRkNGSExISEhIS0tPU1NRS0lMTU1L
-SUxOT1BTUE9QT1BPT1FTU1dVUVJVWFhYXllYWFdZVlRSVVdbV1pbXFhVWlhWVVZi
-XFVbV1hcXGFbV1tgY2RaXFhXWVVSVFRYWVVcX1pkW1dcVlpcY2xgVVZfWFVQVVNW
-XVlZWFhaVV9fW1hTWlhWVFBWW2BiYWNZX19YXl5jYFxfV1tVTlRTWl1gX15ZU1NS
-V1xdYF5dWltcXV5jYFdQUFdXU1ZXVlZaWVRSWFhdYV5YV11dXF5YVlVYVVJSVVZT
-UVZUV1pbWVVWWVxdW1lVUkZFREBAPjo3ODc3OTlAQkdITlBPU1ZVT09OU1VYVlpY
-UVJRUVNWV1BPUVFRUk5ISEpMTUtNSUJCQ0hISk1OUlRcVUpMTlNVU1NTUlRXWFta
-W2J7mKeuq6qrr7S1tLCysrOxsbGztbaxr7GxsbCxra6trq2rqKypqqunqK2usbW2
-s7W3tbOztrm3wMC9u73AwcLBwb/DxMS/t592W1RTVFJTUU5GREhISUlGQ0VDQT5C
-QEA/PTc4OT0+QUhkjKe2uru8sKF4UUFAQ0tNUE5OSkhHQ0JHSVJST05OVVpWWFZW
-V1ZVVlZSUVNRVVRWU1JOTE1PT01QT1JRVlhWU1BRVFVOTkxMSkxJSktMSEZGRkNI
-QkZGSEhISENERUJBQUFAPz0/QD0+Ozw9O0A7PDo4OTs8Ozg3Nzs8OTg3PDk4NjY5
-Ozs7PDg4ODg3Nzc7QDw7Pj1APz5AQz5AQUVGTFBXWldbXF1eYWZpZWFgW1hWUlFR
-UlJRTUpHRkNCPT07Ozs8ODg0Njo6OkJCQkZFSUlHSElKTUxLS05TVFZZWFlcX19g
-Y2dkY2NnamxvcXNzd3h5fH1+fHx6e3t4c29nX1hSUlNTU1RWVFdYWlpbX2BhZmtv
-cnV7f4CCgYKChYaIiI2Pk5KRjouHfnt/f3+BgYaGhISIi4yPkY+PkpWWmpqUlJaW
-mJeZm5mYmpmXmpmVmJ2blZOVlZWUlZeXmJeYlZGLgnRjT0hERERCQ0NGSExLTVJb
-YmdramtydXN3eXZ1eoB/f4KEhYSKiYmLj46PkY6Qj4+SkJGUnZyZmZaSjoyKiYF9
-gHt5d25qZV1aW1haWVlUVFRSUVJRT1FRVFNSVFJQT1JTUU9QUVFPUE9PTk9PT1FO
-VFVSUU9ST1JXVFNRUU9SU1JSU1VRVVJSU1BQUFFSVVNSUlFQUFBSUlJSUlJRT09T
-V1RRUFJOT1BUVVRPUVFTU1ZWUlBRUFFTU1JXVVVVVFdWVlZWW1hWVFVXWFdXU1VU
-VVZXV1lZVVNXV1dVVFZVVVZWV1dZX1lcVlhYV1ZTWVpZWVlYWllaVlxdXVhVWFpd
-XF5dW11cXltYW11dXVxbW19hYV5cW19cXFtdX2FiYmJiYV5jYWFkZWJkZWVkY2Vj
-Y2RjX11fXl9dW1taWllcX2mTwM3V3ODj5Ojp6erreXt8fn18fHx7fHp7ent6eHp+
-fXp5eHh2eXx8enl3d3h5enN0dXZ4d3p8fHt8fH18dW9nWlFOSEhHS0pJRkdHREZI
-TUpHRkhKSUtHRUhMUk9LTExOSElHRklLSk9QTU5PU1JPT0xQU1JWVFBPUlNXVVJU
-WlxaWVdVUlVWVllXWVhaV1pdWVdWWmJeXFtUWlpcX1pZWWBmY1tdWVdVUFdWVlxb
-XGJcXmFXXVxcXFhjZFlXWF5aU1FTU1pjV1NUWFpbXVtTVVNbWFNVU1dYXGVnZmBd
-XlRYVVteW11YVlpYWVhfX1paYVlTVFdYXF9gYFxbXl1cXGRhV1dUVFNRU1ZZWVpe
-V1FVWltbVlhYW2BeXVtcWVlWV1ZZWFJVVlVYYWVbWllYW1xZVlNRTkxPR0ZDRDs3
-MzY4PEZIT1JPT1BRVVRST09YUFFZX1lZV1ZOTlNUTktPUVJQTUpKSU1MSkpGQ0VI
-Sk1NVVBNUlpNT09QUlZVUVBNUFFUV1pfYW2JnKutrrGysa+xr6+wsbKys7K0sK+x
-sbS1rrCwrrC1sq6rq6mppqOlqa61tbSxt7e2tre1u8HBvsC9vbzBwsPHxsDCwLmm
-g2JUUFFQTk5NSEdJSEZESUlDQUM8Oj5BPDo8Ojg6OjxDQlB2m7G6vLu0pH9SREBB
-SktMS05JSUZHTkxNVVRRUVJTWFRXVllaVlZTVVdYWFZTVVZWU1FQUlJRUVJSU1ZZ
-WFdUVFZYWFNMSktLSklJSElMS0lFQ0RERkVDRURISENCPz5DQD9AQkBBQT5CQDo7
-Ozo1Nzg4NTc2NjM2Nzo7OTk7OTY5Ojs6Ojg6Ozs5Nzc3QT49Ozs7PEA+QkJBPj49
-QkRHS1BXXF5gYWRjZmppY2BcWlpYU05MTU9KSERAQD0+Pz04NTg6OTw4ODxAPj5B
-QUNFSkhIR0pKS0xMTlFQVVhXWFhXW1xeZGVjZmpubG5vcXR0c3l4enx9e3p5eHJv
-bWVdVVNUVFRSVFRRVlhdWlpeX2FjaWtwcHJ1ent6eXd5fH6Ah4qMj5CNiIWBfn97
-fHyAgX+DhoWIjIyKjIuNkpSZmpuYmJqXlpeYmJudmpqWl5eWmJaTlJaYlpaYmJaU
-lJWUkY2Cc15ORkNDRENERkVJR0pKT1hjbHFycG90enp8eXx9foKChYGEhYSFiIiM
-j4+OkpKSkpGNjZGTlpeVlJOOi4mIhHx4dndzb2lhW1lXVlZYWFdWU1RSVFFOT1NT
-U1BSVVFOTlNTVlNQUFRQS01QT05PU1JRU09NTk5TVVNSUVFRUlJQT1JTUlRVVVVU
-VlJRUlFUU1JRVlJRUFBRVFZSUE5PUVFSUU9PUlZVVFRbXFZWVVhRUlBTVFBQUE9Q
-UFFQUVVWVVpXV1dZW1NXVFVXWVdWWFZWWFVXV1dSVFZYWFhYWFdVU1NVVFNYV1VX
-WlxcWFlYWlpaWlpbWVhdW1pZWVdYWFlbXVpaWVlYWFlbXFxaWltdXmBiYmFgXl1d
-YF1fYmFhYWBgYWBhY2RjYWFkYmNiYWJjYGBfW1pdW1xdXl5aWFhdaJDAzNXc4OPl
-5+np7Ot6eHt9e3qBgn96e3l5fHt6enl6e3t4d3h9fXt5eXh2dnh6dHR1dnR2eXd4
-eXp6eHZzbGVVTEtPTEhJTUpKSUZHRElJTEtMS0tMTEpLSkxMTEtLS0tJSEtNTU1P
-TE9QUFFRVE9MTk9OT1NSVFBTVlZYVlNXW1dXVlROUFJUVFdXV1lXXF5aWVZZW1Ze
-W1leXF9bWl1eYWdeXF9WVFRTWlZVWV1iZ1xgW1hfW1xeWmFdWVdWXFRTU1VZW2BY
-VFZTWVpaWlRVVVtYUFRUWlhXXGZoZWBeVVZXWVlYXVdVYGFdWVxeVlZdXFlaWmNe
-XF9jXl1fWldUXmJdW1lYU1JUVlRUVVhQT1NfWltgWVdWVVlcXVpbWltYWFpYVFRS
-VFhdXVdcXV5cW1dUVE9NWkxIR0dEPz05NjZCR0lSU1NPUFBUVVlaVlZQU1dWWVxZ
-UEtLUlBRUFJUUVJNSUpNUk9LSUlHRktKT1RRSUhTVUhQTktSU1JSUVBVUVhaW1la
-ZX2ToaetsLGxsLKsq6+ws7aztLSxsLO0tLKwsLG1sbCwrauqrKelpqqrsbKysrO2
-tLW0ubq+vb7Bvr2+vb/BxMPFwcG5rpBnXFRQT0pHSUxSTVBLTUlHRUdCPjxAPkA/
-PT87PTw5PD1BRGGNrry+uramglVBOj1DRklOTkxJRUtOUFBTUVRZVVhYVFRVV1VU
-VFNRVFZWVlVSU1FRUVNVWFZWVFZXVVtVV1dVV1RQTUpKS0pIR0hLTEtLSkhJRURG
-REJBQ0NDRENBQD9AQUFCQj4/PT88OTc9Pz89PDg3NjQ1Nzc4ODg3OTo6OTk4ODs7
-Njk8PT06ODk7Ojo7PDs8PUVEQT4/QUNGR0ZGSE1QV15mZ2lrbm9paGRfXFZUU0xK
-SElDSEVAPT4+OjU0Njk4ODo4Ojo/RENAQEJFR0VKTE1RTUxMUFZZWVlXVVhcXF9j
-ZWZnZmdpcXNycHN1d3l5eXt6fH54c25sZV1TTVBQVVFKSktLT1FWWFteYmVocHFx
-b29xbmtraGdscHd6f4OGh4eFg4F/gHt6e4CBf4KHiYiKjI+NjYyMkJGWlpSVl5uW
-lJOYmZeWl5WVmJiSk5WWkpKWlJaXl5aVlJaRjIN1YE1GQkJCREZFSUlKSUpOVl9l
-cHV3dnZ3fH2AgH59f4GAg4OBgYOEhYmNkI2QkI6NioyRlZeWlZOQkI+Ki4uHg3Zw
-bm5tbWBdWVhWUlFRVFNST1BPUVBQVFNTUlBRUFBOT1RbWVNSUU5QTFBRUE9PUVFQ
-U1JRUlJST1FRUFFPUE9QUlFQUFJRUlJSUVFQU1RSUFJQUVVWV1JTVVNPTlBTVVVR
-VFVUU1dWVFZWVVNUUlNPUVNUV1dXVFJTUVJUVVhWXFhXV1dYWVRWVlZZWFZXVlVV
-VlhWXFtaVFZYV1RVV1tYUlJVVVdSVlZXV1hZWVldWllcX15cW1haXFtXVllaXFtZ
-WVtbWVlZWVxdWlxfWVpcXGBgYmJgYmJfX2FiZGBiYWJiYmJiY2ViYGJkYmVkYGNh
-YF5fX1xeXlxaWlxVV1tnlL/M1Nzh4+Xn6Onq63p4d3h4fHx9enp4e3t7e3p5eHh5
-e3p7eXp+fXl5e3p7fHx7enp6fHh0dnd2eXZ3dXFmVk5JTU5ISENDR0dERElKTU1L
-T0tISkpHSktNTkpLSUhLTVBOSkhJS09QTk5NT1FRU1FTVVBSTk9QUlNVV1ZUVFJV
-VldVVE9PTlBSU1JWWFVaWlxdW11XV15ZWlxdY11eY2FjY11gYFhZVVhaW2NgXmVj
-Wl5aX11ZXl5bYFlYUlFXU1VWU1hYXlVVWVVZWldbVldXWllTVlJbXFpgYmduYl1W
-VVdbWldYVFRfYmBeXF1WW19fV1hgZV9bXV5dX1tVVlZbZWBgXV9bV1hZVVJRT1FS
-VltcX2RmWlRWVltdYGZiXVhXV1hWVlJSU1VXWlpdWllZU1NXUUxMTEtLUE5HPzk6
-QUhKT1VTVlVRUVNUXFpZU01QUFBVVlNRTk5PVFRWV1NTUk9MVFdWS0hKTkpHTUpK
-T05LTE9MR0pITU5PTlBPVVdXWl9eWFpkbYScpaqurrOvsLCzs7O1ube1tbGwsrOz
-s66tsbOysbGurayqqaissLCysa+ysbS1t7W5ubm+v8C+wMHBv8HEwsbGw7mgdVxX
-UFJPTExFSklPUVBNTUlFSkVAPUE9PUJBPz06Ojw6Oz1ATnWdsry4uKaCVD08PD9F
-RUpKSEZCRkxPS09QVl1VVldXWFlbVldUVE5NVVZSUlBRUE9OUlNTUlFSVlVVUlNT
-VFBPUE5NTU5OTExJSEpGSklIRkdGRURCQT9AQUM+P0NAPz89PkFAPkE+Oj09Ozw/
-PTs7ODY2OTc5OTg7Njc5Ozw9Ojo6ODs+PDo0OTw5PDo9PTs9PTs6QENEQkVCQD9A
-REZJSU5RW15gZmtsb3JubGdfW1RRS0xJRUFFRERBPDg3ODk2NjY8OTo4OT5BQ0FB
[base64-encoded binary file contents (deleted lines of this diff) omitted; no human-readable content in this span]
-XF1fW1tZV1JRUVJXWWBiWFdUWV5fWldOTk9TVlZjaF9aU1FSU1tUSTs4OTk9QUhU
-VFdUVVlYW1pbXF5aWVteXV9WUFJZVllUWFZYWFdhXFBQUVFUU1FTUFVWUE9QVlpe
-U1hVU1dUV15VUlJRUFJTUU9ZXlpaUVRTVVJQTklLUk9MUlJTV1dTU1RZY19bWFpc
-XmVqbGtkYnGDmqattLS3ub66uLe3ubu7uLe0sLS5vL+/wL+1qa2vpIZqWFRRUElF
-RUQ7ODc7Ozw5Nzg8PDo7Ojo8Ojk5Oz9GRz87R0VERkZBPT1DSk1LRUZMWFRKRkpL
-RkE8ODY3NjYzMjU1NTQ3NDU6PDo8PURESklGSEhVVVJbW15YWV5dWFRPUE9RU05R
-VVJPS01UWVxYUlBMT05OS0pKSkpLTE5OT0hFRUZFQUBDRUdIRUZFRkNISERFREZF
-RUFDREFAQD47PUE/Oz87Ozg6Ojo2Nzg2ODw5OTcyMzQ0MzQyMjMyMzI0MjY3NjY3
-Ojg1NTc2NzY0NTY2NjU0NzQ2ODo3NjY4NzU2MzY5OUE7NjY6OzU4Nzc2Njc1Njk7
-ODk8PTw9OTs7OTg3NjU1NTY5QERIS09LT1FTVVVUUlVWV1hZWFxgYGFhYGBfX15h
-XVpbXVxbWFZZWVZZWlpZWl1gW1pcXF9cWVtZVlVWVVheYmZra3J0c3N2d3l8eXd2
-eHd5eXl3eHx6eXl6dXV5e3t5e3p9fnx9fX2AgH+BgYOAgYOAf4GBhYaCgIGAgHx6
-cmZbUU1PTk5PUlBOTlFXV1NTVFRVV1hbW1teXV5eXmBjZWRoa2dmZmRnaGppa2xt
-dHNxcm9saGVobXJwcXBzdXRzc3FnYmRkXVhaVlZbVlhYVVJWV1lbWFNQT1BPTU1M
-SkpJSkxMTExMSkxMTExLTE1PT0tKT1JMTE5OUVFOTk5LS05RUU9NTlBSU1JPS1FT
-Uk9MTU9QTktPT1BQT05PUVBMUVJRUVJQUFFSUk5OUU9QUFFQUlRTUVNST1FST05Q
-U1NQT1NUVlZSUFFRUlJSU1FRUlVUUlJUU1dVVlhYUlJTUlVVU1VVVVRWWVVVVFVY
-V1hVVVdZWVdZXl1YW1tbXVxZWFdTUlNWV1VUUVNTUlRTU1ZVVVZXW1dRUlVYWFZX
-WFdYV1dXWVlbXlpbWltfXl9hXmBhXl9iYF5gY2FdXFtYW1pWV1pZWVlYXF1gZYy8
-ytPa3+Ll5+fo6up4eXl5eHt8eX59fHt7e3p2dXV2d3h4enl3ent7fXl2bGhaTklG
-RkpERUhGQkNDQkpEQ0VFRUpJSUlJSUdEQT5BREdFRUlHSkdIR0dFQkdKTEdHSEpJ
-Sk1OS0lMSEhMS0hHSk1MTktKSkhHR0pKTkpLTUxMS0pJSktLSUhLSktMSk1NT05L
-SkdISUZJSEhMS0tFSEtRU1FQR0tNRUlKS05RUlNTV1ROVlNRV1FST1JSUltYXVpf
-X2BiX2BbVlVUVVRaWFBOTUtIRz9DRENCQENHSkRJSEVFQkhHTVBIT1BRU01NS0xa
-VlZaUFdOTk9TWlRWWldfUlBOU2BfYldYXl5VV1ZcVlJQVV1hYF9aWVRcYlxcVVFR
-UU5RVFteYF5XWFhYW1VORTo3ODk8Q05NUVtUVllWWVpcXFdWVVZbX1lVVlFQV1lU
-V1hWXGFYUFBTU1ZQTFBVVVJWVVBVV1dSVFVTVE1UWlVQUVRPVVRSVlxdV1lTVlRU
-VFFRUUxPTU1QVFdQVVRYVldbXlZWW11cXWFoZWBfZW+Ko6uvtLK4ur+7ubq2u7q4
-tbKnqbO1ur27t62rrrSniWdVUk9KRkRDRUA6Nzs7Pjk7PT0+PD47OThAOjhAQUdG
-PTpDRElJSERARkdHRkdGRUtQST5ARElNRkA5NTY3OTQ3ODk4NjY3NzZAQTs9Oz1I
-TExQSVVVWWFYXVVXW11bV09PUlJOTlBRUFBQU1RXV1NSUU9MSklNTEpISUpNSkpK
-R0RFR0ZGRERKTklFRkdGQ0hHSUdJR0dFQ0BBPj09Pjw8PUA+Pjw7Oz07ODg5ODc2
-NDU1MjIwMTI2NTQ1NDU2Nzg5Nzk5ODYzODQ0NTc5NzQ2NDY4Nzs5Pjk3NzU2Njk2
-NzQ0NTY3Nzo5NTY3ODk4NTM0NDg5NTc7OTo6PEBAPDw8OTk5Njg3Njo5P0VIT1JQ
-U1JUVlVVU1RWV1laWVtfYWFeYF5dX11bWFdXWFhXWFdaW1xdV1haXFxcW1xcWVpY
-WldaWVVWWl5iZ2hsbG9wc3N2eXd1dnp6d3d6eXt8e3t5e3x7fXp8eXd3eX2AgXt6
-fn9/fHh3fX5+gYGDgoKDgoKCg4SBfXlyaFhPTU9NTkxNTU1PUlVXV1VVVldYXGBf
-XWBhYWJgZGZjZWRlaGRjYmNhYWJhYmZvd3p7d3hzcGxwdXZ1c3Fzc29ucnFramlg
-XFxfX15eWFpYWl1gX1lWUlBMT1BQTEpJSE1OTUxNS0xKTEpLTE1NS0xMUk9MTU1M
-S1BRS01NT05JUE9NTU5RVFNRU1NPT05PT09MTk9QUE9PUFBSUU9QTE9SUlJSUlFO
-UE9SUFJQUVVWU1FTU1FRVE5QT1BQTlFQTlBQVFJTVFNSU1RVUlVTUFJTVVNTU1RU
-VVVVVlJTU1FTUVNQVFVUVlVTVVVTVVhXV1hZV1dUVVlZWVpYVllVWFlYV1hUUlNS
-U1BSUVFRUFJRU1RVV1lXVlVSVlhVWFhaWVhXWFlbWVtaXFxeXFxbX2BfYV5dW2Bi
-YGNeW1xdWVtZWVlYWVhZWVpcXF1gjr7L09re4eTm5+jp6nl5eHt5dnp7e3x7fH17
-e3x8fnx7eHd4d3l4eXh4d3JrZ1tPSUdLR0pES0lEQ0NJREVCQUFGRUNHR0lIRkFA
-PUJHSUNFR0xHR0tHRERISEhJSEtHSUpJSklKTElLSUZJSUtKS0lISklKSkpHR0ZJ
-TktMSklHSkhITFBOTEpMTU1JS09PTUxKTUpLSk1KS0lIRkRFSEtNTlVPTE1HS01M
-UVFQUk9XV1NXWF1aUFBPVF1WW1ZbYF1hXmJfXVRVVVRUVFpTU0tKSUlMQURAQD5A
-QEJGSkhHRUVDSEhJS0VKTE9XS0tRT1dSTlZRWFBQUE5UT1FfWV5WUFRSW2RmVlld
-XVNUUlhVUFBaX2FdWltYVVpeXV5YVVZTUFBUWlpZVFVfX15aWVVPPjY0NjhGSkZM
-VVNRVlNYWFdWV1VWWl9gWVVXUlVXWFdaXFtfXVVUU1VSUkxNUlRXWFxUUFRVTkxR
-T1FMTE9VWVVRUU9ZWVJVXFdRUk9WV1NSTUxQTUxLTVNSV1JQV1lYV1dbV1peWVpc
-X2RgX11cWnCUpayts7a3uLy8u7e2u7mysKalrraxtbW0r7Kwq6GKaFRPSkREQ0BE
-PDo7Oj1APjw7PT48PT07QEM7OTxAQkFCPT5HSUpHRD1AQ0NHTEtDRUhCPD5CS05F
-PTo6OTk5PTs3Njc3Nzc5O0BBPUFDQEpKTVBPV1ReX1tfVFZYXlpST1BQTEtOU1ZU
-UFFWWVhTUlRUUE5NSklOTktKSUtNS0lFQkFARUdGRkpMSkhIR0VBREdHSEZIQ0NC
-Pj9APj0/PEI/P0A7OTw8Ojo7Pzs6ODg4ODYyMzM0MjExMS8yMjU3OTs4ODc3MTY4
-OTY0Mzg5NTU0NTc2Nzg3Ozk2NDo4NTY1Njg3Njc4NjY3ODM3Nzg5NjY3OTk4NTc3
-NTY1ODk8PD0+Nzg3NzY2NDs/QUZMTlFSU1JVVlZVVVVYWFpcWlpeXVxbXFxcXFpY
-V1RaWFVZWF1eW1xZW11cXV1fXVlYZlNUV1pXWllbX2FkZGRrbXB3eHl6enl2eHh6
-ent3e3x5eHd9fHp8eXt7en5/gIB+eXh8fn99eXt8fH1/gYOAgIOAfnx+f398dnBk
-VU9NTU5OTU9QUlFRUlVVWVlaWVdbXl9eX11gYGJjZGNjZWdnZWRiYmBfYF5hYWdw
-eoKHg4F8eHl6eXdzdHFzbm5wcXJ1dXJybnBraGlnZGNdWlpWU1FPTlBNUE9OTE9O
-SktOTk5OTkxOTExLS05LTExNT05OUFBQTE1KSUtKSktNTE5OTVBQUk9OUFNSUU5O
-T01PUFBOTk9SVVNPTlBQUE5OUFBRUlFTUk5RUlJOUVRRUlNRTU9PUVBST1FSTVBR
-T1FQU1FUVVVSUlVVV1hUVVJSUFJRUlJXV1ZaVlVVUlJRVFVSU1JTVVhYWVdVWFVa
-V1VYVlhYWFdYVlhWWVhXVlZYV1RRUVNUVFJTUlVSUVNRVVVUUldaWllVV1pYWllY
-VldXV1ldW1pbXV1dXVtcX2BgXltcXl9eYVxdXmBbWVlbWlhXWFpZWFRYWWKYvsrT
-2t7i5Ofo6OvpfXx9e3p5fHx6eHx6fHl6e398fH16d3N3dHV0eHl1c29kV01HRUdI
-R0tLS0ZGSkZEREFDRkNFRkVFRUZBPDs/QUJORkVGRUlKSEhMTElGR0hGQ0ZJR0dH
-SElIRUdLSklKR0pHSEdKTU1OS0pHTEtLTEtOR0dJSkdKSkxMTEtIRkdKTEtKS0xK
-TExKTk9OTkZHRUZJSUtMUU5JTk1OT0xLTU9MT1hYT1pYVFdXWVZRXVdeXFtlXF1a
-YFxbVlRSVFVSWVhPRENKQ0RBQD8+Pj8/QEtISUZER0VJR0VKRUlGSlJMTFVPWFBO
-WVJaWVlTT0xOTVxUW1dQWFddXF9WWV5gYVdSXllTUldfZFxbWlhUWFpYXVlVVVJS
-VV1hYVlQUVdbXV5eWlJGQTo2OTxFR0tOVVZUVFZSUlZcWVZaYGBZWlxYX1tYW19d
-W1xcVlJUWlVQSktPVVNWWFZVVFJNTlVVTklMUFNXWldUVlNTT1BRTUxRUU1VWFJO
-TE9OUkxOVFFTVFVWW1NUWFxcXVtbVldaW19kXldUXniQnqKqtLe7vLu4t7m3sa2r
-paitr6+wsbO1sKanpZBpUklEQEFAQ0I4OD85PD49Ozw5PDs6PDk8Qjw7PkVCQkNA
-QUdIRklEP0BFS0tNSDlAQkA9P0xOS0Q8Ojo2Nzg2ODc0NDY5OTo7Ojo9Q0JERkZG
-TFBXUVliW2NYVlpaWFZRTk5MSU1RUk9QUVFRU1JQUlJSTlBPUVJOTEdISEpJRkNC
-RUJCREdFR0pOS0hHQ0JDQ0NFREFEQ0JDPz5BQkFCPj0+Pj08Pjw9PDo4OTYzNjg1
-NTY4NzU3NDM4OjQ1MzM0NjY1NTY3NTU1MzYzNjY0NDc6ODY4NTExMjQ2NTY0NTc5
-PDo2NTU0OTk4ODg7ODY7OTo6NzY2Njg4NTg3OTc3Nzk1Njc5ODQ0NjQ6RExPU1RT
-VlhYVVZYV1laW1lbXFxcWl9cXVxcW1lWVVZXWVpbWlZZW1tcW15eXlpcWVhZW1tZ
-WVpcX11gYmZpaWhucXR3d3l4eHx7e3t8fHh8e3h4d3d4eHx/eXx+foB+e3l7d3h7
-gn57en59f4F/f4CCf4CBgYKAfnt3cGVYUUtKSEpLT1NWVFNUVVhaW11bXl1fYGBd
-WllbXF5gY2NjZWdnZGJjYF5dXmFjZmhtdoKIioeFhIB9fHl3c3FubWtra2tucXR5
-enVwamZhX15UU09LSk1MTE1NTk9RUFBOTUxOTk9OTExLS09QT09OTU1KTEtMS0tO
-T0tOTE1OT0xLTlBQUVBPUE5NTVBRUU9PTk5PUVBPUVFPUFJTU1VTUUxNUVJUUk9R
-UFBUUVFRUU5QUE1OT1NTU01RUVBUT1NTU1BQUlJVVlRRT1FQU1dTU1BQVFNVVFVY
-VVhaWVlXWFZVVVNUU1RVVVhYVVhZV1paV1hWWV9bWVdZWFhWWVdYV1dXVFFQUVRS
-UlFQUVFSU1JRT1BWVllYU1NVV1lYVldaWllXWFlZYFpaWVtbW19gXWBeWltbXl5n
-YF9fXFpaV1pYWFhXWFpZWFZZXZW/zNPb3uPk5ujp6up9fHp4e319e3l3eXx4d3l6
-fn17dXl3eHZ4dnZ6eXh1bGBUT0xIS0lHTEpKR0VITUxERURGRUJESUpHRURCQ0FA
-QUdIR0VFRkRGR0hJSUdGRURDRUdGR0ZHR0hKS0tLTEpKSUpHR0hNTkxLSk5MTU9N
-S0lLSUtMSUhMS0hLS0lKRktMTEhKTUtOTUpJSEtPUE5JSE1MSUxNT0xMS0lOTlBO
-T1JTVlZQV1VTWFlYWlpfV1pZWGNbX1piZF1fWldWVlhcWFJHR0tGQjs9PDk6PUA9
-QEdKTEZKRkVJQ0hHSEtHUElLU1FYUlBbTllYWVFOUVBOWVddXVZhVVxaWVhXWllZ
-V1NVVl1WU1ZgXVpeWVVXW1xWVFdYVVNRVWBgVlBOUlVcXmNgVE1FPD45OTtETFFV
-VVZYWlZTV1tXWlpbXFlaWVpfV1hcW1ZXXVpWVl1ZV1dSUlNaV1VUUk5OUFJTU1FM
-T05OT09TWlpUVFFTUEtPTE5RTFBXWFBVVU9RS0pMUUxRV1pUVlhYXlxdWlpWU1le
-YWddV1paYXeKj5+ttLe5tbW1sa+rr6ienaaoqqepq7CtpKOklm5TSkI7PUJMRjs+
-Ojg5QDs6OTk4Ozc6Oz5APzk9P0BEQkBAQUhHRj4+P0NDRkhBPkZLSEFIUVRSRj8+
-OTo8NTU3NzUzNTY1ODk6PT5ERj9BQ0ZJT1hRV1taZFlaWldWXVpaUkxIS09TUkxM
-T1FSUE5WVFVQUVBPTE9OS0hJS0xLSURGREVCREhJSEpORkRCQkJERkI/PUFAQUND
-Q0RDQT48QTw9Pz4+Ojs8Ojw7Nzo4NTUzNjY3ODY1ODg3NzU0NTQ3MjMzMjIzNTcy
-MTEzNDQ3NzY3NDU2NTM0NDg4NTg2NTg6OTc3NDU2ODQ0NTY2MTQ1NDg7Nzc2NTg4
-OTc1ODk6Njc2NjUzNTQyNDo8Rk1NUlNSVFVYV1ZZWFhZWVpZWV5eXFpdXl5dWltZ
-XFlZV1lZVlZVXV5dXV5fX2BaX2NmYl5gYGNpZGNpam9vcHR4dHBxdHZ6enp5d3d2
-d3d5eXp3eHh5enl4e3x8foF7eHx6eXp9gICAfn+AgYOCgX9/hIqFg4SBe3l1aVxT
-S0ZFSE5QT09RU1FYXFxcXV1aW15eXlxXV1ZYXF9kY2RiY2JlZGBiYF1gY21wbm1u
-c3l9g4SBgYB9fnp1cnFwa2RfXV1dYWZpZWRjYmFbWFNPSklJSkxOTU1NTk5MTU1L
-TUxLTExMS0xNS01PTU9RT0xNTk9QTElKT05MTU9PTk9PUFBOTlBSVVJOT1FUUU9N
-TE9PT0xOTk1QU1FUVVROUFFSUFJSUVFQUExPTk9PUVBRT1JTUVRTVVNQVFNNUFFP
-T1FSUlFQVFFQUFFSUFRUUlNVWFVTUlRWVldZVldXVlNUVlZWV1ZUVVVWVVVXVlhb
-WVlcWVlZWVlcW1pVVlhXVldWVVBQUlNSUFJUVFVUVVdZWlhUU1VYUlVWV1RSVFRW
-WVZWVVdaWF1gXFlbXV5fXl1eXVteXmVlW15cXFtaWlhZV1hYWVxaWFddlb7K09rd
-4uTm6Ojq6np6eHh8fXx6eXt8eXx8fH9/fnl2eHh5e3l6eXd6eHJoW05KTElHR0FE
-R0VHSEZGRkRCQ0lIRURIR0hGRkNCQkJFSUlGREVGRkVGRkhFRUVDQkdHS0hDRERF
-SEdISUZJSkpKSUtJSkhJSkhKSUtMTk1LSEpOTE1LSktIQ0dJRUhJT0xKS05NTVBR
-TExKT1NMTkpNUU5QTE5PTFJQS1JPUVdVU1VYWVJXUExVWVpaVV1ZVlpRV1hcW15c
-VV9iYF1ZVlZZU05KTUVCPT09ODo9Q0FBSU1QR0hER05DSklHRkNOS05YVlpUTVhR
-V1pYWVNUUEtZWl5bVF5UXV9ZW1ZcV1VUVFVYYFpTVl9fW11gWlxcWlpXWVpdV1NX
-W1dVUFBRVFZaXVtRT0g/Ozk4O0ZJUFFVXFdfXFxbV1dZV1daXFxbWVhUW1pWUVRe
-X1pcXVdXV1lUWFhVUlVOS1FWVlZPTlBUUEtPUFFaXFFOU1JTU1RTTlBRU1NbU1NS
-VFFNTUxKTE9UVE5UWFdbXVlaWVhdX2FgY2NbWFZaXm9/lKevsrOusa+sq6qrpZiV
-n6GcnKGqq66pqquYbVJIQT5ATlVKQDw6Oz07Pj48PTo5OkE/QUE6PDY4PUE/PjtC
-RUNCQTw+P0RGRD9BRUpHRUFIVVVMQj07Ojc2OTY1NTg5Ojg3Ozs4PT1DPkBARkhO
-V1JaXVtdVVhZWVdZWVZUUlFOUVZSTEZJUVNRVFJVU1BLTUxNTkxMSklMTUxJRkRG
-RUVCSEdFR0RBQ0JERkdKRUA+PUBCQ0BCQj89PD1BPTs8QTs5OzpAOzs7OzY3Njc0
-NTY2MjU0MjUzODQxMzIzMTMzMzU0MjIwNTMwMzY0NzYzMzM1NzI1NjY5OTc2NTc2
-OTc3Ojc2NTc1MzQ1ODg4NTc5Njc5OTk4PDo6OTY3ODg5OTk4Ojg1NTg/RElQTk9S
-VlVYWF1bWVlYV1VZWVtcW1tbXFtaWV1fW1VWWVdXWlpeYF9fYGBeYmVmZ2tmamlo
-bGxtcHFzdXJ6dXJ0dHZ7dnh7fHp4eXh8eXt+f4B/fXx+fnp3enx6fHp1fH18fH2F
-gH9/f3+BgIKDhISCh4WCf3t6eHdsXVRNRUNERUhLSExOTVJXWVhTT1FTWV5hXltY
-WllcXmBjZGNmaGdlZWhkY2dnbnR0c3Jvb29wcnV4eHd5d3JzcnBuZV9fX11eXFpZ
-WVxbW1xZVk5QTU5MTEpLTE9NSUtMTExOTExLS01OSktLSUtKTEtPUE5MTktLRkhL
-S01QTk5OTU9OTE1PT09RT0tNT1FQTU5QS0xSUlBPUVBPT1FUUVNSU09QUVNSTk5P
-TU5PUFBQUVJUVFFSUlNXUlVQUFJRUFNRUU9QU1dQUFJPUlNRUlZWVFNVVFFRUVNT
-V1hWVVZWWFVVU1RVVlZVVVdVVFZWVlhYW1pbWVdXW1paWlZWVlVTVVVTVVRVU1VS
-UFVXVVRWVFZXWFRVVVVZV1hWW1hUVFVVV1VVVVpaWlpcXl5eXltcW1pgYVxcXWNc
-WlxbXVxaW1xbWVhXV1hYU12WvcrS2t3h5Ofm6Onpe3l5eHZ5e3t8f357enl3eXp6
-enp2dHh6eXl5eHd1b2NUTkdISEdIRUpFR0lFRkpNREJDQkdGQkNGRUFEREdFREZF
-SERDQkVGSUdHRERCQ0RFR0NGSUlFQkJFSEZHSUdIS0tHRUVJR0VFSElLTkxMTEtK
-TFJRT0xKR0pKRUlFRUdHTEhOSUxOUE5KS0tJT0xLTk9SU1JPUFJPUVdOVFRSVVJY
-WlRWUE5PS1BXVVlRVVZWWlJRV1hgYFdRW11aVltaU1ZUT0hHQz89Ozk4Ojo8QURJ
-Sk5AQEVKUEVPTENCQkdIUl1UWlxUWFBYW11eUk1OS1daXFxTWVNaXlVUU11ZWFVX
-VllZXFdcX15WXGBaXFxXW1hXWFdUUlpYU1NTWVNVVFhZVVZRSkNAOzY6OURQUlRf
-WVhbV1dSUlRVW1tdW1tZV2BdVlRQVWBnZGReVFZVWVlWV1JWWFJRWVdZVFBMUE9O
-TlBRUFRYUFFUVVNUVlFOTlRWVVVRT1BWUk5MS0tRUlBUVVJRVlhZV1tYWlxaXF9f
-Xl1aVVdXYnOLoLGsqq2vq6eopaWgl5KbnpqbnqWrrbCtq51wTkZCQ01ZUkM8Nzc6
-ODpCPz9BPTc7PkZRRUFAOzs9PUFCPTk8RUZJPzpCRUZCPzxAQT4+P0pXVUpCOTg4
-Nzc1NzU1ODg2NzU2ODo8PENDQ0JKSVNaXmFXUVlTWVxbVllWV1VSUlNXU09NS0tS
-WFVTVVNOTE9NTU1HR0tKS09UTktJRkRFRUdFRkVFREFARkVESUdHRUQ/QD5AQENF
-QUBBPzpAQj4/PT0/PDw9ODs8ODk6OTo2MzE0Nzg1OTc1Nzg2NjQzMzI3NzQzNDMx
-MTM1Nzc2NDU3Nzc0MzQ0NzU1ODk6Ojk7Ozg4Nzg5NTU0NjU3OTk3ODg3NDYzNjw9
-Ojs9Ojg4Ozo5Ojg0Nzo6ODo+RU1NTk9RUlZWVllWV1dbXlVWWFlaWVpZWllcW1pZ
-WVZXXV5aXV9gYGBjYGFjY2dpam5ubm9xc3N0d3h6fn2AeXh8f3x7fHt8gX56fHt9
-eXh3gIB5d3yAfnp8fnl6fH58eXl8e32AgYB/e3p9gYCBhYeHhYCAfHt4dG1nWlFN
-RkRITElERUZLT1BPTEtGRkxSXGJfV1NRWl9lYWBiY2ZnamtlZGRpa290dnp2b2tq
-ZmdobG1sbGtrbG5yc3NyamVjYWVjYGFfXl1aWFJPTVFOS0lKTkxMT09OSklJS0tO
-S01STk5OTk9MTU1MTU1MTU1MT05MTE1PTk1NTElJS0xNTU9QT01OTU9OTlFSUU9Q
-Tk1SVVNPTU1QUFFTVVZUU09QUFBQUFFRVlFOUFBRUlJTVVRTU1RWUk9SUlFSU1RR
-U1BQUVJSUlFQUVRTU1RSVVRSVVdSV1ZVWFpYVVNTVFZUU1VYVFVVU1JTVFdWVVRS
-VFpXWlxbV1lZV1dXVFdSWFhXV1VUVlJQUFNUVFJUWFdVU1RVWVlXWFlZV1hVVFZZ
-WldaWl5cXV9cWlxdXF5fYF9dX1tdXVxbWVRWWVZWVlhbWFZYWFVYXZW8yNHY3eLj
-5ubn6eh/fHp7enp6fHx8e3h4eXd2dXZ5enl7fHx7dnh4d3VuYVROS0VFQkNCRUVH
-R0RBQkdJRkNCRUVFRERDRkVGRkZGRUQ+PURFQ0VFSElGRUNDSEZGR0ZGSEdGRUVG
-RkRIRkVGRUhHRkhJSUdFSEtMS0pLS0tOTk9PTU1MSEhISEZFSEZKTE5LTUpKTE1I
-R0lNS05QTk9SUVBOU09NUktVU1FWUlNWUVRTT09NUFFQVlNRWlVYT1JZVmFhX1lW
-UVBUV1dWVlJORD0+QkI6Pjw3Oj8+REdEQzw9Q0ZLSE5GQEBAQkRLW1ZeX1NbTlhZ
-VWBVTlBLW2BfYFNfU1VeWFVRVlVVUllSWFVaW1lbW1hXXlVXYmFiYFpaVVFQVVpc
-VVlbVldXVlZYUk5KRUI3NDc6QkhUVlpcWVtXVVRVWltdWVlcWFlgY11cV1RXX2Jg
-YVlXVFNXW1VUWVpXVFRUV11YVVNNTFBQUUxMUFRRUlVYWFNVT01RUlVbVVBUUU9V
-UUxLT1BSUlhaVE1WWVdYVlZgYVhaXV1dYF1aX1hZbH2Zo6WopamnpaSkpaGWl52e
-mpyhqqurra+voXRTR0ZTXV5RRD47PDk6P0JFQT46ODo/RERBQDk2OT8+QUQ9OERF
-R0JAOz0/QT89PUNGP0FDTlBPTUA7NzU3NTQ4ODc2NjY3ODY4Oj5BQT1CR01OVlRW
-WVJUVVJZXGFfW1ZYWFJOTlJTU09NUFRZVlNRUE5PUU5QTEZITExNTk1MSUhJSUpK
-TERHRkVCPT49QkJCQkNHR0ZDQkNEQUJBQEJAQD8+Pz0+PDo6Nzs9Ozo6OTc4OTc1
-Mzc3NjU4ODc1NjU0MzIyNDY2NzU2NzM0NTYzNDMyNjg2Njg5OjU1NzY4ODU3Nzc5
-ODU1NDc3NTc4NjU2OTg0NDg1MTM2ODk/OzU3Nzs9PDg4ODc3ODg5ODlBS01OUlJU
-VFVYV1RVVVZVVFRTWFlWVlhaWFZZWVlYW1xgX11eYGJiX19kZWZobGtsbXF0dXZ4
-fH97fIGEhIKAfYCDf35+fX19e3p4d3t8dnZ5enh4ent6en1/fnt+fX15eXl+foOA
-gIKBf31/gISHiIWDgX9/fHdybGZiWVBJR0dJSUZDQkhKS0xJREVHSU9VW1RNSEpW
-YmhoXVheZm1wbmZiZWduc3d5eHFoYl9dXmRjZ2dnYl1eYmdvc3NybGxrbW9nYl9b
-V1NQTUxMTE1NTUxJS01PUE1MTU9QTk1MTE1NTE5KSk1LSUpLS0xOTUxNT1JMT0tL
-TU1NTUxMT1BQT09PT01KTVBOTlNRT1BOT1BRVFJST1BRTFBUUlFQUVBPUU5OUVJR
-UU9QUlNQUk9SVFFQUlFRUFVTUVJRUVNVUVFQUVNUVVVSUVNTUlRWU1RUVVVUVlVR
-VFRUT05VVFNSUlNVVVNRUVNVVVdUVlhWWFdYWlhYV1dXWFlYWVlWVldUVVNTUFNW
-U1NPUVJUVlZYXFhWVFZUV1hZWlhYV1haXFtbWVtbXl1cXWBiXWFiYmJgXFxZXFlZ
-WlhZW1lZWldcWlRVWF1jlbvI0Nfc4OPl5+jp6Xt6e3p5ent5eXZ3eHd2d3Z3d3Z7
-fHp7e3t5d3d6dm1hVU1MRUJFPEVFQ0JDPz0+R0VDQUFESEVFQUZGQkRHSklEQ0hG
-RUhIRElJRUVFREZGR0dFRUdFRUVGRUVGRkZIRURHR0hHRUdISUlKR0lKS0lLS0lL
-TE1MS0lJSkZISUhJR0ZHS0tJR0dOUEpHRkxKTlJNT1BMTUlOT01VTlRXVFhZVVNQ
-VFdQVVRVVU1TUU9aW2BaU1xZXWBdV1hXUVhYXV1YUk5BREdDR0E7P0A+QD89Qj5A
-O0BAQ0pGSEtCPz1DREpYVFZaVF1UWFlSX1lNTk1dYVxiVl1YVV9dWlZWWFdZV1FV
-Wl1YXVpeXF1eV1heYGRiW11YUU9RUlhaXFxRUlhVVltZUUxHQz44Ojs5QE5VUlxc
-VldWVVZYWltYWV9bXV9ZWF1XVVtcWVtgW1hXVVBZU1RgXVlVUFRaXVlYWFVSVFRS
-TktKTFBSVlVXT1FSTk9SUlZWTUpRUlVXUUlSU09TV1dVU1ZUWVpXWVxeWFhbXGJn
-X1pjW1VabJChrKijpaanpKKhoZmWnp6en6Slq66ko6uie1hMUWRnWE1FQUJFRTw9
-REU9Ojo5Oz5BRUQ8OTg4Pz9DS0FAQURERD48RUJBQUBAQ0JCQ0NIS0xJQz02NjY3
-MzIzNjY3Nzo2Njg6PUFDQERGT05ST1FZV1tVTldZYV5aVVRTVFRTUVdVUE5RVVZT
-UlRRUVNSVFNMSUtISkpNT09MSk5MSEdFS0tOR0BDQkJFREFAP0JEQEBEQ0JCQj0/
-P0FAQT9BPz89Pjg7Ojs7ODk5Njc7Ozg5ODY6NzQzNjU0Mzg1MzM2NzQ2OjY4OTk2
-MzMzMzQ1Mzg3NjU4NTY2NTY9NTU1ODg2NTY1NjY3Njg5NjY0Njg1MjQ2MjU4Qzw5
-PTs9PDs8Ozs5PDg2Nzk6OjpCTVBSVFVWV1hXWVNTV1dYWllUVFZYWFlXV1hcXV1c
-Xl1cXV1dX2BhY2dpbGttb3Fyc3h6enp9gYGCg4aIhYeFhIOCgn+AgoCBfX19ent8
-eoF+en9/eXh8fXt6enl7f3x7fH5/gYB9f4GDgIGChoaHhoKDgYB9eXVtamVhWlFM
-SEdDQkZFREVHR0hJS09NTlBMTk1NT1FZXFxVT1NcZ21qZmRlbG91eXdybWlkXFlY
-WmBlZ2VgXlxbXmNlZWlsaGhqaWRZUlJPTUxLT0xKTVBPTEpLS0xJSktLS09NSktO
-TU1KS0xLS0xMTU9OTU1NTE1NTkxPTEhLTExJS01MTE5NTExLTlBPTkxMUVBQT1NS
-UE9QU1RSTlFRUFFPUU5PT01RVFRUUVBUUU9QU1RVVFNRUlJSUU9TU1JQT1JUVVZT
-Tk1ST1FUUlNRUVJVVlRSVlZVVVFSUVJVVVNVUlZXVlRSUlJSVVdPU1VWVldaVlVW
-VlVXWFlWVldXVlVVWFhZVVRWVVVUVlRRUlJRUVFRUlNWV1ZUVFNZWVdZWVhYW1pd
-XV5fXVlZWF1dYWFgYWNgX11cW11dXVxdXFhZWVtZWFRTVFVYWWGZvcjR2dzg4+Xo
-6Onpd3l7eXl2dXl5fHl6eXl5eHl4d3h6eHp6d3l7fHpxZlxUS0VIQkFFR0dCQURB
-QkZJR0BGREdCREVDREZEQkJGQ0VCSkpFREZEQEFARkZFRUNHRUVGSUZCQUNFR0lF
-Q0NFRkRGSExJQ0RISExLSERESEhLTEpKTUxMS0lLSUpNS0hGRUpMTkpLSUxPTE1J
-R0hOUExJS01MSlJUTVBRU1dRWlpVU1JVWFVeW1RRTFNSTVldWllUXV1cW1pYVVpW
-WFxnZGFWUEM9QT5GQDw8Oz5AQDo+PD09OkBDR0JDSUJCPEBGSk9JTVhSWlVWVFBU
-T0tOTmBlW19QV1RSWVVUVVVWVlRWVV5jY1VbXl1dX1xVVV5pY15gYltTUFFUWFxa
-V1JQV1NVXFpYTUlIQT87OTY9Rk9RVFxXVlRRWVVUU1laWVtZV1NSWlhXV1pcW11d
-WFVPTlFUWF1bUlZXWFtWVlpbXVdWVFJRUE1SUlBSTlBQU1NRT1BOTlVSS1BRWFlW
-UVJVUFBRUE5TVVVeWVJWVllYW11aXmJdWF9bVFReeZWjpaanq6alo56ak5Ocn5+g
-oaKoq6SfqaWCYVZhcWZQR0pESUQ/OjtDQj06OjtBQ0JDQUI+Ozo7O0JGQ0FBR0VB
-Oz1AQkNARERDRUk/QERITEhFTTs2NTY2NTQzMzc4Ojc1ODg9P0RCRUpNSlJOT1lY
-XFROV1dgX1pVUFFUVlNPVVVRTk1UVlNZYVZQU1ZWUk1MTE5OUE9RUE5OTVBMRUFD
-R0lIREVHRUJBQkA7QkhJQ0NBREVDR0VBQz9BQUI/P0A8PDc4ODc4Nzg4NjQ3Ojs6
-OTY3NjE0Nzc2MzUyMjM3NDU3NDY4NDU0MjQ2OTc5NjczNDU2NzQ0MjY5NjQ0Njg2
-NzQzNTQ3Nzg2ODc0NTQ0MTc1ODc9PkBDQ0FAQj88Pz4/PDg5NTo9Pz5GTE9SU1RV
-VlpYW1tbV1dXV1ZVU1VXVlpZWFlbXV1cXlpeY2FlZmZoaGdqbnJzdXV1en59e3yB
-hYeIiYmLiYiGhYaEg4eHh4ODgYF9e31+gH95e4F7fH1+fXt1dHh7fH17fH1/f357
-foGCg4WIiYeGg4OCgX56dHFraWllXVNIRkRFSkhIRUJGSE9YVlJLSEdNVVZZUU5R
-VFdaWFRUWFtfYF5fZm96e3Vua2diWVpdZWZmYl5eXVlbXFtaXFtcWFhcXFhRTExL
-TkxMTkxMTU5NT0xLTUlKSUpMTUtMSUpLR0lOT0xLSUtQTktNS0xKSkxOSk1MS0hK
-T1FNTk1OTkxOTk9OTE1NUE1OTlJRUk9PTk9TUlBQUVNSU1NUU09QUlFRUVFTUVJT
-UFFUUlBQTlJSU1BST09QT09QT1JSUlVUV1RSUFVWWFZTU1BQUVVWVVhXVVRRVFRZ
-VlRUVVRUVVVXWFdZV1dUVVRTVVRVUlNUVVhYV1hXVldYV1daV1RXV1ZWVldWUlJS
-U1ZYV1VRU1dYWVdWV1VXV1dVU1ZYWVtdXlpZV1dbWVtcXl9hXmBgYF5cXlxgXV9e
-W1lbWFVXWldWVFVXaai/ydLa3uHj5ufo6Ol5e3l3d3d3eHh5e3p6eXh3eXp4e3h4
-d3h4e319eG5hVU1GSUdGQkZJRENAREdHR0RJSEdKQ0RIRUdER0hAQEFERkZFUUxA
-QUFCQkdGR0ZFREVGREZDRERERERFRERDRERFSEhIRkZGQ0VFR0ZFSUlFRklORkZI
-SEhKR0ZJSUxOSkFFR0xKSUhFS05HRUdJSUpMS0xMT01HTU9OU1JRVlJVUlJTU1VZ
-VFVWVFdRU1VQXWRhX1dhX1hWV1pSXV5bXWRiZF1TS0NAPUJEQjo+Ozo+PUA7Ozw8
-PkFCQkFEQ0dBSk1JSElNWE9aV1ZXUVJMSU5OXWFYV05VT0lOTFFZWl9fYFxZXWJk
-XVVZWVxaVldZXmdaVltjXVVRVFZaXllWVU9PUVJZW1pST0tHPjs4Njc9S1JSUVFS
-UVRWU1JVU1xgXVpYV1lZXFpXWFpbWl9bWFNRV1VUU1NSVldZWlhSV1tdWVJTV1dW
-T05QTlVNTlVUT1NRUEpQUFRRS01WVFNRVVRWUE9RTU5WXVVNT1NZVFhbWlZXWFhX
-XVpYW1Vmfo6cpaiqrKWepJyTk5+inJqdn6app6Opn39hXXB0Yk1LR0A8P0BAQEBD
-Pz46PUJEQUJAQz04Nzo8PUJCPT5CRkQ8OT5FR0lEQ0JLT0A9RklNS0NAOTY4NzY2
-NzY3NzY2Njg5Pz05Oj0/R0hKUlFVVFBaUlBYWV9bV1VVUVFWVlNST09NT1ZWVlZZ
-VlJUVlhWTExLSktNVFRMTFBLXGVGR0NDRkhIRkVCQkVCPT9BQkFFRERBQ0JDRUdF
-QkNDQUA+QTw7PDw6NzU6ODk1MzQ2NDYyNTk3NjIzNzg2MzU0MzY0NTY2NTY2NDU1
-NTg4Njc2MDYvNjc2NDQyNDQzNDU2ODo3NjYzNTg4NjY2MTQwMzY7NT47PT4/QUBB
-RURCQkA+PD8+Ojs5ODk5OkBJTU5SU1VWWFtbWldYWFhXVVNTVlJWV1dWV1pcXF1c
-YV9iZGVnamxra21ucnV3e3l8fn15dYaJi42Ji4+Jh4eKiYeGhoiHh4iEhoOAgYB/
-fn57fH57eXh4enx3eXt7enp4e318fH9+f4CEhYaHiIeKh4ODgH53b29ramhoYVdP
-SkpKS0hERURLUltcVExISlFbX1pQSUdOWF5aU05QXmFdUE1VZnBvZ2RrdHJoX2Np
-aGJhYVhZV1ZWVVZWV1hWUlNYWVZRTU1MTEpMSkhESk5PTU5NT1BLTE5KR0tMSkdM
-TExJTEtISEtMSkxJSk1PUE5NTktLSklMTExLS0pOUFFPTk5QTU1OUUxLTlNRTk1O
-T1FRU1JRU1JRVFNSUlFRUlNRUVFQTVFPTlBQT09PUFNTUlJQUE5RUlJSU1FTV1RT
-UVJTU1RXV1JQU1RUVFNSVFZVVVVWU1RVV1dVVVdWVlZWV1ZaWlhWVlZWWFlXVFZW
-V1dWWFpbWFpaW1ZWU1VWVVZYV1RSUFNYWlhYVFZWVldZW1lZV1laV1paWVhYV1ha
-XF1dWVlcWVpdYGNjY2NhXl5eX2BdXFxeXltZWFZYWVdWV1ljkr/L09re4ePn5ufp
-6Xp4eXh4e3h3dXV5e3h2fHl5eXd4eHt2dnN2eXd3cGRUSUZDQUVBQkZJRDtBQ0RF
-RkdGRUJAREZHREhGR0VEREVGR0ZGSURCQUZIRUZLTEpISUlEQ0VDRUVFREVEQkRH
-RENHREdGREA/Q0ZIRUVERERGREhDQ0dHR0ZGR0dGR0tKR0dGR0dERkdJS0dHSElK
-SEZERktOTUdFSkxPT09STlBUVVVUUFRRUVdUVlJSWldaYF9kWVxYU1ZSWlBUVVdY
-XVteW1JQSkI/QEM+PDw5Oz49QDw7OTs/RENDQURBR0hNV1JRT1VfUVVRUFNLV1BM
-UkxWWVFaUVVXT05PVFtYX11iXF1bXWBbV1lZWVpWWlRTVlNUV1pdWVFXWVxfXl1X
-TUxOU1NZWFdRTEtFOzk6OjxGTVJRUlZWVVhYVVVWWVdbYF9bWVlbXV1ZV1lYXVxW
-UldYUVBRUFNXVllXV1JQWVdXWVJaWVNSUlZYVE1VV09VT1JRUFZVUVtZTE5QTVJV
-UVJSUFFWUlRZVE9PWFdWWF1bU1NWW1lbVVVbU1ZlepGjp6enp6alpJibnqOhnaCg
-pqympaqegmRodXVkT0lKQz09PT08QkNAPDo+REdEREZDQz07Ozs+PT08PD1BRUA8
-PT9AQEJBQElORT5BSExKQz08OjQ0MjQ2NjQ1NDU3NTk9PUI8PkBARktVT1RZUVdV
-U1pUXVlWV1NTV1RSVFJLTU1OU1ZYVlVUT1JbWVRQUE1KR0pOT1ZVTklARkdIRkZH
-SUhFRENEQkVDQD9ERERESERGSEVFRURCQ0JBQD0+O0FDPzo5Ozc5ODg2Njc2Ozs5
-ODg4NTQ0NDYzNDU1NDk0MDQ1Mzc1NDMzMzQ1NTg6NTc0NTg8Njg0NDU0NTg3NzU3
-NjY2NjY7NjY3NTY2NjY7Ojg5Oj5BQkFCRUVAQD9AQD9APzo4Njc6OkBHTlBQVFla
-WVhXWFdaWlxXVlNUWFpeWFVZV1pZXFxcY2FiY2NqbW5ucXNydHt8e3t9gYSEiYmM
-ipCPj46LiouKiomHhoeFhYaFg4ODgYGCfXt6eHt7d3R4eXp8en18fH57eXuAf4GF
-hIeHiImLi4uHgoB+fXdzc3BvbGllYVpTT01OSkZFR0pRVFVQS0tPUVhZVU5HRUpV
-X1tTT1NaXVpSS1JbXV1cXGd4fnxxbGtqZ2BaVlFTUlRTUlNTU1ROTFJWWVNPS09M
-Sk5QTE1KS0pNTUxKUExMTEpISUtLS0tMSk1OSkhJSkpMTU9NTUxMTU1NS0xLSExK
-SEpLS09PTk1RUVJPSk1NT1BOUFFPT09NUVNSUVFQUk5OUVNTUlFRUVFRUFJPT09N
-TlBQT1FQUVJSUlBTVVNQUVNUUVFUU1NSUVJRUlNUUlBRU1VSU1NVVVZWVVRVU1NX
-VlVWVldYV1ZVVlZYWVpYWFVWWVdXV1hWVlpcW1tYVlhWWFdWVFRWVVNVVVVVVFZU
-VVZVV1ZXVVZcWlhZWlpaW1paWFhWVVhbWl1dW1pbWlpcXl9fX2BeXVteW1pbXVlZ
-WltbVlVXWV5bXmGQv8vU2t/i5Obn6ejpeXh2dnp6eHl3eHp3eHl4d3h4eXl8fXp2
-d3d4dnNtYFFKRkRHR0hDRklFPD08PD8+QEVFRkRCRERGR0ZHSkhFRD9CQkNFSUZE
-Q0RIRktMSklJRkhGR0ZFR0hGQ0JDQENDQEJCRklGRkRDQ0NFQ0JJR0NDREZHRkZI
-SEZFRUZIREdIRURERkdGR0NFRkhGSEpEQ0RKTUpLRkVQTVFMRk5LTFRTU1ZPVlBM
-WlRTVFReWVdVVFtVU1RQVVNYUE9SV1xeXFpaVE9QSkE9Qj89Ozs6OTo7Oj09Oj5F
-QkVERT9BQkxRUU9RU1pMVE9NVUpWU0tRSlBWTFRPVFBSW1VbX1RZWF5aXlVYWVRP
-WFxeXlpYVU9TVlFTWmJcVVpbXV5hX2FbUFBUVVZXXlNJUk1APTc5PD9EV1tUUldY
-WVdbV1NVVFZYXFhYV1daWVNWWVlaWFdVWVVVUlFRU1lXXFpXVFJUVVhXV1RRTU5R
-UlNPSU5LTlFMS1JSVVRSV1hNSE5QUFFPTk9UU1RWU1VTUEtWV1NbXVlZXFpWWVxU
-UVlVVldkhJujp6qsp6Whn52lo5+doKWmp6CcpqKKbHB/d2JPTU9DPj49PTs+QUA8
-Ojw+QEFDP0NDQTw7Ojw8PD88QkNBPj07PUBBQUJFQUlDPUBGSEdJRjw5ODY4Nzc0
-NDI0NTM0Nzk7PD5BQkVHTFFNUE9RWlRSU1NaVFdcWVpYVVZVU1BNTE5VWFdWVlVQ
-U1pcWFVRTktKTUxNT05NSkZLTEhHRERDRUNEQ0VHRktHREVHRkVIRkZERUJEQ0RE
-REdFQzw5Oj9BPjo2ODc2NjY1NDc0Njc3ODQ3MzQ1NzgzNzQ1NzY1NzY2NTIyNDEz
-MjQzNDU4OjQzNzg3OjY1Njg1MzM0NTc1NTU5Nzg2Njc4NzY2Njg5Nzo9P0JERERG
-RERGQkFAPT4+PDg3OTk+PUBGTVNUVlldW1tZWFlaWFhYVFVUWFpbWVtcW1pbXV9i
-YWJlY2dqa2lvcnZ3eHp7fYOEhoSHi4mIjJCMjI+QjIyNjIqIjIiGhYN/gYKAgICB
-fX1+enx6fXp2eHx5fXx+fXx3en6AgIWGhoaGhoeIhoiCfn13d3R0c3RxbWxpamBc
-WFdXUlNUWllZV1ZWVlZUU1NRT01NTlNVV1ZYVlNUVE9SUlJOT1hcZGlucHd7eHBl
-XlhWVVNSTkxMUFBSUlJPTlJZWFNMSktKS0xKTk9PS0lJSk1NTk1KT09OSkpOTEtM
-Sk5LTkxMSUtMSkxMS0xNS0xIS0xMS0lLTEtKTExPU1RSUE5PS1BNUVFQUFBOUU9R
-UVFNTlBSUVJQUFBPT09QVFBSUk9QT09QUFJOT09PT1FSU1ZSUE9QUVJSU1JSU1BP
-UlNSU1BRTlBQUVJRUlBSU1RSU1RSVFVSUlNWU1dWWFhVVVlVVVxYVlhZWVlVV1VX
-WVlYWVlYVldVWltaWlRVVFRXWVdWVVZWVFNUV1VVVVRZV1hWWFhYWVlZV1hYWVla
-WltdXltaV1xmX2BfXlxdXWBdXVxdX1hYVlZXWFpaXV1eYpG+y9Ta3+Hk5+fp6ep3
-dnZ4d3d4dnd4eXp6eXd4eH54eHd5eXl5end2cGlcUEdGRERESEVJRUA9Oz09PTw+
-Q0dCRERDREdHSERJSUZFQkRERkdKSEZFRUdGSEhHRUlGREZGSUlGR0hFRkJDQkRE
-RUNGRUVDQ0NBRUFEQkNAPkFERUVGSUdFRUNCQUVGSUZHRkdHQ0VFRUVGRkpJS0dE
-RkZLS0xHR0xNUVROS05LUU1LVVRXVU5VUU5UUVpUUlFOV1RUWlhbWFlVU1pcXl1h
-WlpXVFFPS0Q/Pjs3Ozs4Ojg/OTg8QEI/QUFEQ0ZDSU9LUFBRV0pRUVFYTFNPTUtL
-TVVLUUxPUFVjWF5iVVxZXVVfWlRUVlFVXGNhXFdVVFRXVVZcYllUU1VZWFhfZGBZ
-WVtcVVdYUk1PTEVAQDo2NkRPUlRUXFxWVFtdWFNSVVdYVlpcXGBdV1xeWFpdXVpW
-WVZTVlJQVldZYFpSVlRWXVxcWFFSS05TUlRSUFBPTktMTU5TTVJWUU1JUFRQSkxN
-TVJSUVBOUFVSTk9TUldXWV1cW1hdXlVRVlVWWF94kJyhpainpaKbkpykpaamqaus
-nJino4x3e4R2Y1FOUExEQkE/Qj9AQD08Ojs/QUNFQEJAPTo5PU9CQD1BRkREPjxB
-QENDRUJCQD5AQUhHTExEPjs2NTU2OTU1Ojc0MjM2NzU7PT0+QkNKUk5RU1NWT1dX
-VlpYWF1bYGBaV1NOTlBSU1ZWVVJSUk9RWVlYVlhOSklJTFFPS0tMSEVERUdEQT9B
-RElKSUdLSUpFQEBBR0ZGRUFARENDQ0JDRENBREFAQT08PDo5Nzg3OTo2Njc3ODc3
-Ojc2NTU0MzU2NzI1NTY0NzU0MjQ0MzEzNjQzMzQ1NzQ2NTM0Njc2Nzc2NDM0MzI2
-MzY3Njg6Nzo4PTg6NTc4Nzk8QUNDQ0RCQz08PDw8PT47NzY7Ozs7QEJFTVNUVlRU
-V1dXV1RVV1hXV1hWVlhaWltaWVtcYGBlZWZnZmtqbXFzdXJ1en+AhYKEhYaKiYyM
-jo2OkI+PjY+OjoyMjI2Kh4qHgoCAh4F9e3t8enh6e3d4eXt+enp8fXt9gIODhIaH
-iImJiYiHhoOAfHh2dnR0dHRxb2lraWdnY2JiXmFjZGRjZGZoZmFeW1laXF5cWVRY
-YGVhV1FRWVxVTklNWGRaVlpganWBeGhdWVVTUU9OUE1LT01QU1FST1RYWFNRTk5L
-SklKTE1KS0tJSkxLSkpMTEpKTE1MS0xKSk9PTk1LTEhISUpOS0hNTExJSUlLT0xL
-S01LTU9PTlJRVlJPTE5RTkxMTk5OUk9MTFJRUVJQUVFTT05OTU5RTlFRUVBRT01Q
-UVJVU1JQUVFTU1JPTlBST1JRUFRVVVJOT1FSU1JSUFBPUFFRUlJTUVFVVFZXVVdU
-VVVVVVZXVlZVVVRYX1pXWV5cWFZYV1dYV1pZWFZZWVlaWVhYVldXVVRVVFNRU1dV
-UlRVV1ZUV1tZWFpYWFlaW1paWFpZW1xbWlteYFtYW15dYWFfXWBeWl5bW1taWVZY
-WVhWV1ZYWVxkj77M1Nvf4uTn6Ojo6Xt9e3l5d3d5enZ3eHd3d3h5dnt+enh4eXZ3
-eHVtZFdLREdERkVISUZFQEA/Pzw9Ozw9Q0VEQ0dGRkZJSkNDQz5DQ0dFRUdISEhH
-RUhHR0hHR0tLR0hHSEdHR0ZEQ0REQ0JERkNCQkNEQ0FFQD0+P0JEQkJGR0pIRkVE
-QkVFQ0RFRUZJS0pIQ0RDRkdLSUdLSkZIS01GQkRFSUpPUUpJSEdNTE1PTVRXUFVT
-TVJRWFZXUk9YWFZhWVxaWllWW1lcW2NdX1dRT1RUTUQ9Pjg4Nzc3Nzk6Ozs5PT9D
-REhDREBESEZNTk9VTVdSUVhPUExHSkpNWE9VS09TUWFaX2ZYW1ZbVVxeWldYV1NY
-XGBbWV1WU1VYW15lXVpUWVpXWltfW1tfY2FVUVReUE5RSkM8ODQ1PEZMV1NUWFJU
-WlxZWFJPUlRWXGBfXmBZW1pWVlZVVVhZUFFcV1JSU1hcWFZdWFVaWmBcVlFSUFRV
-VVlUUVBPS09QUFRPUVRQSkpNT1NRTk1RUlhUSktTV1dSS1BQUlRVW15cXlpYUlRW
-WVtaWmh6kaGqpaSkoJGLmqOoqaquraSbmaWkiXuDg29YUFJRT0ZAO0BBQENBQ0I/
-PT9APkBAQj47NTg9REZJQUBFR0VBO0BCQ0pMQUJCQ0JCQkRQUkc/OjY3NjY0Nzc5
-NjQ1Nzc2ODs9PUFGSUtQUFdQTltOWFhcW1pWWFZcX1xXUVJRU1FRVFhWVFRTT01Q
-UlZRTEpPTUtNUFBNS0tGRERFQ0REQkNHR0hGSklJR0ZBQURDQ0VBPz0/QEJBPz1E
-Q0BCQD1AOzg8PDw6OTg7OTk3OTc0NDY0NDY2NTY1NTY3NTU2NzI2NzY0NDU1NjQ2
-OTY1NDI2ODIzMzQ3NTM1Nzg4NTU2NjM1NTY1NTY8ODY5ODs5NTc4ODtAQUNGQ0JB
-QD08Ojo7PDw6Njg7Ozo9REdKTlNUVlhTVlZYWFhZWldZWVRSVVZXVlhZWVpXXWFl
-ZmVnbGxxcXV2d3p+gX+ChYWGg4mQjJKRkJKSkpSUkpGPkI6PjoqKjo2EgoB9fX5+
-fn57eXd6fXl5eXx+fX59fYGFg4aHio2KiYiHioiDgYB6dHd4d3Z0cnBubm9wcGto
-ZmZnaGlrbW5ub3FydHJycHJ2dnRxbG1tdHNtZ2dpaWZaU1JbYldPVmBlZHOAdWBV
-VFJTU1FOUVBPTk9TU09NS09UUlBRTUlJS0pHSktLTUpKTE5MS0pPUFFNTk5NSk1K
-SktLR0hJSUlLS0xLTktLTk5PTktMTkpNTEtNTExSTE1QUE5NT09QUE5QT09QUlBQ
-T1BQUU5PT05NUVFNTVBPT09PT01OT09QT1JUVVBQUVBOVFNRTU5SUlNQUFNUU1FS
-U1VSVFFRU1FSVFRTU1BRU1RUUFRTVldTU1ZUVldXWFZUV1dZWFlXWFpaWlpYWFdV
-V1dZWVpZV1ZVV1hXVVdWV1pZVFNSVVVWVVJVVlhWWVlZVlVVVVNYWVlaWFlYW1lZ
-XFtcXF5bXV5fXmBeXF1aWWBdWVlZWFZaWVhYVldUV2KQvszU29/j5ebo6erqenl6
-eXl4e3l5d3h3eHl4d3Z1eHl0dXt4dXJxcW5eU09LSUhFSEhISUVCPUE+Pjs9PD1A
-SEhHR0ZGREVGSEhFQkJAQURHSU1ISEhKRUZHQ0VHRkZHRUVGR0dFQ0ZGRURCQ0FE
-Q0JHR0NCQkJBOz1AQ0JDREVFREVJRURDRUNCQ0JCQ0hIR0ZERkVCR0pMSk5OTEtM
-T0lKRkVGSUpPSEtNS1NPSkxOTlFNVFJOUkxWXF1XUFZYVV9XVlhcXVtYVVlbYWJj
-WVJQVFVNRTo6OTg7ODg4Ozs6PDo9Ozw/QD5CQ0JJRkdJTU9LU1BLUU9VT0tPSk5b
-VFtTUlJOWlZbZVVWVFtXX1tcVlZWUVVYV1pYXFVVVldbYmNdXFlaWFpWWV1XVlxf
-XFdZWllTWFROSEA2Nzk+QklSWFBRUlZdYFpWUkxQVFJcW11cW1lVVVdUV1dYXVpX
-VlxdVVFTW1lYXVxYWFVdXFdZUlVUT09RV1ZKSkxMTVVTUU1OUFFMS01KT1JUU1NU
-VlBKS1NdVVFOTlRVUlVYYF9YWVZWVlNUWV1YW2N+laOkpqOek5Kco6essLGsp5+j
-qaOOhIh7YlJOUFJQRDw+PTxARENBQEA8P0I6Oz5FQ0A7Oj1GRklEPURDQkJAPUFI
-S0M/QkNEQz4+QkZKS0Y8OTk1NzU2NzQ2NTQ0NjU2Oj1AQkpOUVZWWFFTU1BeWVhb
-XltYWl1fWVNRUFBWU09aVltYUE9LS1BRU1JMUlVVUk1MUU1NSEZEREZISERGSEdJ
-S01KR0ZGRENBR0lIR0tHQ0JEQkE/QD9BQT49Ozw8Pjw9Nzo5OTk5ODY2Njg3NTMy
-NjY3NTc3ODg2NzU2Njc0OTg2Nzc2NTQ3NDQ0NTU1ODc2Njk2Nzk6Ozs7Ojk4NzY2
-OTk4OT03NTY5OTU1ODc5PD1GSUdIQUJBQj49Pj49Ozc1ODg8P0JCQENITlRVVFNZ
-WFhYVVZWWVpYVFhUUlJWWVhcXFxZXWRlZGVla3B0dnV3e3t+fH6Cg4SGiIuPkZKP
-kJOUl5aUkY+RjoyLjoyMiYeHhISAf4F/eXx6eXh8e3x8eXp8fH5+f4KGhoeKjYqJ
-iouIhoR9fHp5eHl5d3d2dnRycnJzcm9tamtrbW1vcHFydHp7fHx7e4CDhYGBgoeJ
-hYmEg4KCfXZua2VjXlVYX11YYXd9b2BXUE9RUVVSVFJVUFFTVU9MTE1QUE9PUEpI
-SExLT05PUExPUE9NTEpJTU5OT0tKSUpLS0tLR0pKTUtKSUhKRkxNT09NTE1PUExJ
-S1BQTk9OUFJRT05NT1FTUlRTVU9OTFBPTk5PUFBQUVNQUFBPT1JQUk9QUVBOTk9R
-UFRUVVJQTk9RUE9RUFFUUVNSVFBSUlFTU1FUU1NPUFJTUlBTVVRTUlRVUlNUWFlZ
-WFZYV1ZWU1JYVFZWWFVZWVtZWVhYWFpZWFhaW1hXVllYWFZXVlZWV1haV1NVV1ZV
-VVVVWFdXVldWVVRVW1xZWFlZWVpZXVxbWl9fXVxbXl9eX15dXWBeXFxbWlpbWFdT
-U1hXVlhZYZW/zNPb3+Ll5+jo6up7enl3e32FfXx7dnd4eXl2dnl4dHZ2dHV0enRv
-ZVpOTk1IRkNIS09JSEQ7PUNAQDs8PT5CSEVERUNFQ0FHSkVFQ0RFRkpKSEdFSUlJ
-SUpHRkhGRklHR0ZIRURDR0dFQkFBQ0FCRENBQkFCPkBAPUNBQkBCQEBCRkdGRkFB
-QERDQkRCRUdERUZGRkRARUdJTlBNTU9PSUhHRkpMR05ISU1NU09LUk1NUUpKTElP
-Tk9RUFBSWl1TVlNOVVNYV1hWWV9gZmBXVlNUVVNIPjg5ODg5ODs5ODo7Oj06Ozs8
-PUFBQEhDR05RUk1OT0tSUFNOT1ROU19SW1VXU0tWVFZgWF5WWVthW1hWVFtaW1hW
-W1paWVZWWFdZW15YWltYYFtaWlZWV1lcXV5dVlFVVVNNRjs3ODw6QE1YT09WWGBg
-WFVZVVFQVFxdXFxYWVVVWFlcX1xdXFteWldTUFRWU1VZWFlfWlpZWlhUUVFVVVRQ
-UExMS05NUVVPS0pMTlFMTUpNTlpUTVBUUFRRUlxVUVlZVFZZWFpiY11aX1lTUVJa
-Y1dXWWqEmaKnp6Sem6Omqa6xta+rpquro5KLinJeT09MTktEQj09P0BAPT47Ojo8
-PT88Pj1CPTk7N0A+P0A8QkM/R0E+O0U/PT08Pj4/Oz1FRkVIRT05NzUzNDYyMjU0
-NDc3Njc8PDw+SFBaUEtPTlVTUFpUVlZVUVNVXGJZUU5MUVZTT09RVFJPSktPUlFP
-UFNWXFpTTk5RTk1LRkVCRklKRkRCQ0ZMTEtJRUZFREFBQkRER0NBQUNDQD0+QUFB
-QT87OTw7Oz07Ojs3NjU0NjY3ODk2NjI0NDM4NDY1NTU2NDQ1NDU2OTY1NjczMTM3
-NjU4MzEzNjc1Njk8QUlLS0dEQT89Ojs8PkA8Oz88PD09Ozo4Oz0+RUhKS0ZDRERC
-QUBAPTs6OTs2ODo6Oz1CRUVLUVJUU1VZWlpZWV1ZWlhYWFZWV1dZXFxbYGJfYWNk
-Z2hpcXN2d3x8eXp8gIGBgoKEiIuOkZGPkZKSko+QjIyOjo2MioqHi4SAgIGChIB6
-eXp7e319fHt5ent7fICBhYaIiYuKioqJi4uGgn57enp6fHp4eXd1dXR0cXN1c3Z0
-cW1sb3FycnJ1eHp7e3x+foKEh4uMj5CQkpGPj5GRioeDfnZxaWVjW1RabYeHc2BX
-U1JSUVBQU1NRUVhcWE1KS01PT1BQT0xQTkxLTEpKUE5PUU9PTklLTkxKTE1NT1BK
-SUpJS0xLSUhNS0pNSktKTExOTE1OUFBOTU9RTk1QTU5NTExOUFNQUVJRUlBQTU5O
-TU9SUE5NUU9OT09PT1FRUU9PT1FOUE1OUVNSUlBSUVFRUlBQT1NWVFNQVFBRVFVS
-UlNWUFNTU1NUU1FSUlFVWFhcVlRVVldUU1VUVFVVVlVUVlZUVVZWWVtZWldaW1lb
-XFxeXFpaV1lYV1VUVVVWV1hXVlVTVFdVVldVV1dYWltZW1pZWFZZWVZaXFpcW1tb
-XVxdXVxcX11bXV5eXF1ZWVxaWVtaV1dZWFhXWVdekb/N1Nve4uTn5+nq63l3d3d3
-e4F5enl4d3Z4d3l2dXRzdXV2dnh5d21mVU1JR0hGRkVHREhCPTw5PDs9Oz07O0JF
-RkVAQUBCQ0VEREVFRktKS0pLSUhITEpHSElMSElLSElHR0lJR0dJR0VEREFCRkRG
-RkNBPz9CQkRDQ0A9QEFDQERCRkVERkRBPD4+QUBBQ0RHSEhHQ0RCRkdJSE1NRURD
-RUdGS05FSU1MUktOUUlJSUZMSktPSktOTE9MUVRWXVZRUE1SUFpVU1VcX2ZkXVtX
-VlJSUExDQTs9Ozs6ODU2NDg6OzpAQj1APz07P0FJTE5YTFBJSU1KUlBPUFBXYE9T
-UldWUFpeV19UXFlgXl5aVldVWVxhXVxXVmBcVlhYUFBaWFdVWFldWltcWldUUVJV
-XlxVUU9SUlJKPkA8OTY6SlJZV1ZZWltYW1lTVlFSXFlcXVtcWl1cXFlZXFtcXl5a
-V1RXW1hRV1dWXGBeV1dhWk9MTVlVUlJQUUpIS0tMTVJNT0xRVlZNTlNSVE9QTVFR
-UVdRUFBPVl9cW1pXVlVWXFldXFhXVVljVlRYXnCMnKSpqJ+boKWnrK+zr6ijpayl
-k42LeFtSUE1KR0RCPTc6PDs5Njw5Ojs7PDw8PTs8PTg8PUFBQkA9P0JERD48P0A9
-QEA+Q0I+QEFGREpHQDo1NjQ1NzYzMzQ3Njc5ODk7Oj5HSE5QT1VQU1BRWlNdVlNV
-UlJfY1pTS0tTVVRPTFJVVUtMTk1SVVBPUlFWWFRPT1FQTU5KRkNGSUdJRkVESktI
-SEVDQUFCPz9AQ0REQ0BAQUA+QUFBQkJAPjs7Ozo6Ozk6Ozo3Nzg5Nzk3Njg5OTQ2
-NTMzNTE1NTU3NjU1NTY3NTY0Mzc0NTQ0NTQ1Njc6Ozk5PkZPVFNUVFVRT01PSkpL
-S05MS0xKSkdFRkVERUlISElJSkZERUZFRUJCPjs6OTs5ODs+Q0BDQURLVVhYVVtb
-WlpaW1laXFlZV1taWFxdWVheYWNlZ2doaWxxcnN2enl4e3x8f4KBgIGIiomLjpKT
-lJGSkZKVko+Pjo2Oi4yKiIOAfnl6en18e3x8eXx7e318fHx/gIKDhouLjYyKio2J
-iYmEgn55ent7enp4dnV0cXh1c3Z1dHZ0cnBxcnJ1dnZ4e3t8fX59gIOGiIyMjo6O
-jpCRlJKYlJKOi4iDfXRqYmBrg5aIallWUVFRUE9SU1FPUlddVk5KTE1STU5MSk9M
-S0xNTUpMTU1NTExQUE5LS0tNTExLTE1NTVBNTkxLS05MSEtMTUxMSkxNTU1NTk1N
-TE5NTU5NTFBRTU1MTk9SUVJQUFVQTlBUUVNRUlBQT09QT01QUVBQTU1MT1ZST09R
-UE9RUVBRU1FQUVBQUVJUU1FQUE5QUFFSU1NUUlNUT1RUVFNTVFNVV1VYU1FbWVdV
-UlNUVlpYVlJUVVhVWFhZWVxdWFlcWVxfW1lZWlxbWFhYVlZWVVZYWFZTV1VUU1JU
-VVRUVFdYW1pYWVtXWlpYWlpbXV1ZXV1bXVxfX1xbW1tcW1xdXFlXWFhYWllUVVdV
-VFNVWV+Wv8vU2t/i5Obo6ejqd3d4d3V1eHd5fHl4eHh4eHl3eXh0dXl3eXh0bF9X
-S0lER0dGRkRIREdBPT06Pj49PD09Q0RFREJCRERFQkBEQEVERklJSUlJSUlISUZH
-S0xLS0lLS0ZGSUtHSUpKSEVDQkNFQ0dFQUFFQURAQUM/Q0JCPkBCQEFCQUVGQT8+
-QEFAQT5AQ0ZESElFRENERkVFR01KSEVFQkFHTUtOTUlOT0tOSkhPS0xLSlNOTE9L
-TExQVlhcWFBOSktLWFpbXVliYWNaWlZWUEpOU1BCQkE7Ojk3Njg2OTY4OT8+Pjo7
-QT5APkRKSVVGR0VGT0pXTEVOUlhhUk9NVVdLWV5WXFZbWV9cWl1aXFhYWWNaW1dW
-XVNTW15YWV1ZWVVTVV5dXl9YVVNMTlFbW1RTTlFSU1NGPTg3Ojg8Q1VdU1ZXU1Za
-WVZXVVNTV1VSVFtcXWBhW1taWl1cWFpXVVtZVlJTVlNWWFtcXFlaU09SVVZXU1NS
-SUlKSEpJTE9PTFVUVE9MUlNKS0hJS1FOT09ST1FZW11fVltZUlZZV1ZUU1RRVV1a
-V1dTW3eUoaWloZSao6mtsLCxq6Ckq6eRi4t8Y1VUTktJR0E7PDc4Ozo7PDo2OT9D
-Q0A/QUFATUxAP0FAPjw+RkpHPT9CQz89Pz9CQ0FASkVGSkg/PDc3NTY0NDQzNDU5
-Nzg8Ojw5OkFFTU1MVk5TVFVbV2BaVlRQVFtbVlRSUFJUVlZRUFNWVVJTUVRYVVFM
-UVFPUEpKTExPT1BJQ0hEQUJAREZGR0hJRUVBQUNCQEJDRUZDQT4/QkBAPT09QEBB
-Ozw7PDs8Ojk4ODc3NTY0NTQ1NzU2NzU0NDQ0ODU3NTc0NzU1NTg5NTMyNjQ1Njk2
-NDQ1Njs6PENLVlxYVVhcWlpdX19gYF9gXl9cXl9aV1ZUUUxLTE1LSEZKSUlIS0lG
-REA9OTY2OTk9PD1CRUJCQUZLUlhaWVtbXF1cWVhXW1hVWVxZWllcYWBfYF9jZ21r
-bnB0c3h7fnx7fH2BhISCh4iKioyNkZOQj4+RlJGQkI6Oio2Mi4uLhIJ/fXt5eXl5
-fn18fH57fH1/fn9/gYWGiIyKjouLiYmKh4WFgX17fX5+fHh1dXR1d3l5eHZ1eHp4
-dnV0dXR3d3h5e317f4OFhYWIiouJiI2Ojo+RlJeVlZSUk5KRjYd9d3qHjohvWVNQ
-T09RUlFQUE9PU1piX1NMTk9OSUlJS0pKTlBOTEpKSEdNV0tLSEtLTk1MTk5MTE1N
-TUtMTU9NTEpLTE1OTk1OTU1QTkxNTkxOTU1KS1FRT1BQTk5JTU5QUFBRT1BPT1JR
-U1JSU1RQUE1NTkxOUlBQUVNQUlVQUE1PUVJQTlFQT1BPUlJSVFFRUVVTUU9TUlRV
-U1JUUlRTUVFSVFNUU1VVVFdYV1RVV1RUVVVVVlVVVFdYW1lZXFlZW1tZXVpbWl1c
-WFtcWlhXWVdXWVhVVVRUVldXV1ZSVVRWWFlYV1RZWVpXVllYXVpbWVpcXFpaW1tb
-XF1bXFtbXl9eXF9gX1tYWVhXWFpWVFZZWlZYX5O/y9Pa3+Hk5efn6el1enh5eXZ4
-enp6e3h3d3d3eXt6f312dHh4dXBrXlVLRT1BQ0RDR0RFS0VEPzo4Oj08Pz1AQUVE
-SUpCQ0RFRUFDRUlHSElMTUlLS0tKSEpJS05NSktKSElJS0tISUlHRkZHR0dDRkZE
-P0JCQUA+QD49QUNAPT5CPz9AP0JAQkE/QkFBQEBDRkVGRURGRUJGRUZHS0tHSkhE
-Q0FHSUlNTE9OUFJNTU9ITU9JUU9KS01TTVZaVlpXU1pPTU9YXV5bV1lWWVdVVV5c
-T0xPU04/ODk6OTs9OTw5PDs5Ojg6NjY5OT49QUlKT0hKREVLTVVOSE1PVF5PTk9S
-UE5aWFNWUldXWVZZYFpZVFxZXFVcWlpbWlpdYFldXllTVVFVVFddXVpVVFRPT1tb
-VVRQT05WWkxDPDY3OTc+TFVPUVxVV19iW1lVT1JUVlZZXVtcZWNaW1hXYl9YXWBe
-W1xWVVNPTldYWFtaVlhYUlJTV1xcWFdRT1JKS0hKTlFMV1ZQVlVRUElMSEpLSk9O
-TFZVWFpTWWJbW1dSU1VYV1JTUk5LUVVaVExUYYSeoqKakJqlqayxsrCspKiuoYqI
-i4dyWlVRTktEPjs7ODg5Pj09PTc3QEVEQEI/Q0E9PDs8PUE+PT9CRklAOkA+QEBB
-QURDQD0/QEdLR0Q/OTU2ODg4NTIzNTY4ODg7Ozs6Pz9ESE9NSlZUWl1bX1pWU1RX
-VlVST09STU5VWlRQVFdUUFJUVVNQS01NT1BOSUZGTE9RUUlISUZFRkZIR0NESERB
-QUJCSEpISEVIQ0JDQT1AQUI8Ojk8PD0/PDs6OTk2Nzk3ODQ3NjUzMzU5OTU6Ojo3
-MzQ2Nzc2NTk0NTY1NDU2NzY3NjY1NjY2Nzc2OEBHVl1hX1pZW19eZGhpa2lucHBt
-bGlnZGRhX1xaVE9RT0hMTUpLTElKSkpGPzk3ODw7PDk5PkJBP0BBQEVJUlpdWllb
-XFtbWlpaWVdXW1xaWV1hYmNiYWRna2xsb290d3h8fH5/foCBf4KEgoaKj5GOjY6P
-kI+OjY2QjY6Mi4mIiIWBfnl3ent6enh4en17e3x+fICBg4OEhoqPjo2NjoyNi4qI
-hoSAfXx+fX1+fXt2eXhzcHV1dnh6fHp4d3Z3dXh1eHh5e358f4ODhYSGh4mJi46Q
-jo6QlJOVl5aUlpmZl5aSjIyHgHJbV1VTUFFRUE9MT05PUVhfXVJKTk5QUk1OUU1O
-TU1MTUpJSUpMSU1LSkxMSkxOTkxLSk5NTEtMTE1MS0tOTktNTk5NTUxOTU1KSU1M
-S0xKSUtPUk9NTEtPT09SUVBRUVJTUFJTVVVUUlNUUE1RUVFSU1NSUU9OUVFRUE9O
-TlBVUVRRUFBOUFFQVFNSVFRUU09QT1FSU1VUUlJRUFJTU1BTU1NSU1ZVVFNSVVlY
-VVNWWldXV1lXV1ZXV1tZW1haW1tZWVpcWFlZWFlYW1paV1dWVlVUVVVXWFdWVFRV
-VFRUV1hXXFhYWlhcW1xYWVpZXVtbXl5aXV5dXV9bWltbXF9dXltaWFdXW1lWV1pb
-WVhfkr/K1Nre4eTm5+jo6XN4eXp4dnZ6en15eXh3dnd2dnl8fnp9eXd1cWheTEtI
-Q0JDSElERERESUdBPkZGPT9BRERFSklHTEdGRkRFQURFR0hHSEtLS0hLTUtOTE9O
-VE5NUFJLSUlLSUlJRkdLSklHRUREQkJDQUE+Pz5FRUA+P0JAPj9DQkI/QUFFQ0BA
-QUJFRERCREZGQUBBRkdHR0VITEhKRkpKRUdIRklLS1BPUUhFSktUV1BTTUdNTlZb
-WltWVlJPVlJUWFVXUlBKT1RZXFtYXl5XT1JRUUZAPDw8Ojo6OTs5Njw8PTo5Ojg9
-Qj1ARkNLQ0ZJRENHU09GSEpLWFBUVVVVSlZUUFpUVldWWFpbVFlWWldUUlhcXV1g
-Yl9eWltYWFBTUVRUWVlbWVdYXFNRV1lXV1JOUFlWUEM8ODY2OEBITFRUWlpaYWFe
-WVNLUE5SU1ddY19mYmJaWVleYF1bYWFVVldTUE1QVFRZVVxaVlVWUlVWV2FaVU5M
-U1BNTU1MUlFMTldVWFVIUVRQU0xLTlFQU1ldWVJSXmBbUFFQVFlZVVNUUE1RUVZS
-Sk9XbomeoJmWnqmwsrCyr6qmrKueiYeOjHVYU1hYTktFPjk4Nzc6Oz08Ojo6Pz1B
-P0M+PT5APEBBPT87QEFFRkI9PDxBQUE9P0BAPEBGSEREREI9Nzc3Nzk4MzM0MjE2
-Ojs2OUE9QURARUlKVU9TWVdbVVlVVFRWU1BPUFFQTlNUT09WWFNTUlBSUUxNS1BP
-UE9KSUtISUxNS0pIRkRGSEhGRUFBQ0RFSk1HSUhDQ0NCP0FBQUBAPkE/Ojw9QT88
-PTo8OTk4ODs5NTM0MzM2NzU1NzMzODk7NjU2NjU3NjU1NDI1Njc1NTQ5OTczNzc3
-OjpBT1tnbGVjYV1dYmRobW5wcHFwcG9sbG1qa2dmY1xbWFZPT05PTExNTElFREE+
-Pzo5Nzw5Oz1BPUFCQ0NGRUdOUlVYV1ZZXVxZXFxeYFpbW1lZW15fYmJiYWVqbWpt
-cG5xdXV3enx6eXx/goGBhYqHiY2NjouOj42QjoyNjYuHiIeFg4R9eXh4eHl1eHZ2
-eXt7e319foCChoeJi42Njo6QjY2Mi4eEgn9/fH59foOEgn99e3h0dnZ0eHt+fHp5
-e3x5dXZ3e3x8f4SGhYOFhoWJioiLi4qNio2OkJSYl5eXmZucnJqTkpSCeWlZU09S
-UFFRUlJQT01RVlZaWlZPUFFPTkxISk1PTk9RTUxIRUlOTk1MTExLT01OSkhLSk1J
-S0pKS0xKSUpLTEtNTE1MS01MTEpLTk5NTUtNSk1NUVBPT0xPUlBRU1FSUlJVUlBR
-UlRWW1VQUE1NT05QTktPT09TU1JRUE9OUVROT1FNT09OT1FSUVJRVVVSUVJRU1VT
-U1NRU1JRUFFQVFVVVlRTVVZVU1NUVVZTU1NUWFhZWVpYWFlYWFhWWFlcW19dXl9c
-W1peW1laW1pWVVZZWltZWlpXV1lWVFVUVFVYVlVWWFVWWl1cXFxcW1tbW11iYWBc
-XlxdYF9eWVpaXFxdXVhWWFlaWVlXV1ZZWF6TvsvT2t7h5OXn6Ojoc3V2eXp3eXx5
-eHt7end2eHl5d3h6e3h0c3VvZllTSkxCQT9CSUVHREdIQkJBRkI9PkVEQ0RKR0dI
-SkZGREVESExNR0lMS0xJSUpMTExNUVBOUVFUbmFNTUdHRUlLSkZJSUdKSEpIREdF
-RkRAPTw+QUE/Pj9APkE/PkE/PUBAPkBAQUFBQUJFRENDREE9R0ZFQkRHRkVHSUhE
-SktHTkhLT0tMSElKTVFWTE9NSU9MUVVSVlNOTEtVUVFTUldNUE1NU1lhV1xaWlRQ
-U1ZQRkI6ODc4OTs7NjUwNTo+Pj85OzxBQEBBQ0tHSkhDR0pRT0ZNTUtYUVJQVFtN
-VFFNVFNYWFRaV1xRVlNZUlFZXmReWWZgWVtbW1lZV1RaWFZZVldbWVZfV1ZXVVZU
-UlRXWVJNRkRAOjU5Pz9EUFVUXl9bXFdWUVJPT1JUVVheW1pfYVxaWlpcW1pZWlVX
-WFJPVFJUVVdUWlxXV19aVVNSV1pVT09XVlFKT0tLTElKT1dXUkxLU1ZWTFJRT1JR
-U1dQTlFTWFZTVlJZW1pVVlNRTE9NUE9LTVFbc42Ympyip66wr6+rpqGkppqLhI+N
-eWVZVlNQTkc/Ojs7PT0/Pj46OTo7PDo9Pzo7OTc3PEBBPz89QERGRj89OzxAP0Y9
-QkZEPT9ERUFBQjw5NjU1MjQ0Njc3Ozk5Oz47Ojw9QkBISkxTSlJZVVZTVlRUUVNT
-U05QTU1PVFRPUVJTUlBLSkxRUVFTVFJQVE9PT0lISk5NSkhGRkZGS01IR0dJTE5L
-RkRHRkFCQT9CQ0NEQ0RCQkI9OD1AQUA5OTs6Nzg5ODc7ODc1NjQ0NDM1Ojc6Nzg5
-Nzk3Ozg2OTY0MjI0MjQ5Nzg4NTY1Nzk8RlVgbW1wamZjXVpiZ2dmbHNzdHV2c3Ny
-cXBubGlmYmBfWlZUUlNQT05NSEhJSEM7OTg5Njs7PTs/QD9ERkZGRUZITlNYV1lZ
-W15eXF9hXFxbXVtcXmNjZmNlZmdrbGttcnF2c3V3eXh6fHx9fn+Bh4mKiIuOjoqM
-jY2NjIqGhoWFhIGAgXx8fHp5eHl5dnZ1eHh4eXuAg4SJiYuNi5CQkIyNjYuJiYOA
-f4CBg4aGg4SEhYN/fXx8fHx6fH5+fX9+fn99e3yAgIB+gIGChYSEhomMioqNi4+R
-kY+OjJOSk5eanJyamZGOlo+LeGJYVlJRU1FQT1FPTlBQUFJXWFRQTk5PT0tLTE5O
-T09MTk1LSElITE1LSktOTUpJSEdJSktKTE1LSUxLSklMS0pMTUxMS0pLTk1LTEtL
-TE1OTVBRUVBPUEtKTU9PUFNSUE1OUFJSUlNSU1VST1FTVFFOTU1RU1JQTk9RUlFR
-UU9MTk9QUlFSUFNSUlJTVVJRUlNSU1NPUVJSVVVUU1RWU1hZU1VXVlVZWVNSWFNS
-VFRWV1ZbWFlcWVlXV1tbXlxdW11eXFlcXl1cXFtYWllbWVdYWVZXWVpYWVtaVlZY
-VlZVVVhZWlhYWVpZWFpeXV9cXl1cXl1eYGFeXl9cWl1bW1lbW1hXWFhZWFRXVVdZ
-YZq+y9La3uHk5efp6el1eXh5enp8eHd3fH59fHh4d3l3eHl5d3l0c2xfU05IR0lD
-SEhFSUdIRkVGRkZIRkBDRERDRkVIR0hIREZIRkVFS0xJSUhLSUpMTU9NTk1QUE5N
-UlBNUk1NS0pMS0VGSUlITUtJSUZGRkRBQEFAPTk6PT8/PDw7Pj8+QUNFQT07Pjw9
-PUBBP0A/QD49P0NERUdGQUZISENCREBDRkZOTEtSTU1MSEpNTU9JT1FKT0xPVFJY
-VUxJSlZZUlRSVUxOTE1QVVlYXlRSTk9TVVBFPz06ODY5NzY2OTw3Njc5Ozk3NztB
-P0BATkxNQUNLSVBQSVZTUlpRU1JSVk1UWE1PTVpZVmFbYFNTU1BPUl1jaWZeXlZU
-VltbXF1ZVlZUUllZWVdUVF1bW19bV1RUVFlfU1FMSUA6Ozk5OTxGUFJeYFhWUlVV
-V1lYWVlXVlxcV2BbWFdUVFlWV1lVVlZWUlVYU1RZVlJXW1tYV1dXVVRTVlFQVVVR
-VFFMT1FQTkxPV1hVVlRVWVZOT09OTlNSU1NPUFJUV1ZWV2JcWFhZV1NJS0tPT0hO
-TlRfdY6Tl5yjq6+ur6qel6SpnIWBio1/ZVhWVk9KRUI8PDs6Ozw6PT04ODs8Oz08
-Oj08OjY3PDw8PzlAREVDQTs5PTtAPj5ESUA/PkJEQ0VAPTc1NjQyMzc2NTg4ODs6
-Oj48Pj9CQUZITE9MU1RRWFhbWVlTUVNUSkpOTE5QT09QUVJTUUtHSlBTVlJPUE5O
-TlJRTUlMUE5PSkpLRkhLTUZISk9OS0xKRkhIREFDQEBDQkNBQ0VEQT86Oj07Oj0/
-PDk4ODo6ODc8OTc3Nzg4NTY4ODUyNDU3NjY6OTU1NzUzMzIzNDU0NTc5OjxARlNc
-Zm5xc3BuaWdcWV5iZm1wdnd3fHt7enh1cnNsa2lnZ2JhXlpWVFJQTktNS0pGPzk4
-Nzc5PT86Oj04P0JEQ0ZERERESVFTVVZZWlxbWl5fXV9gY15fYGNnYmRnZmdrbHBy
-dHV2dnd3eXh3e3p8fX+DhYWIiomJi4yNi4qJiISFhIB9fXp6fHt6enl6eXd2dHd2
-dnh8foGGiYmKi4+Li4uLi46KiIiFgIGBhIaIiYiHh4aHgoGAfn56fn9+fX9/goGD
-goF+gX9/fH2Ag4aGhIaKi42LipCRkZORlJKQk5OSlpeZm5uZlpGXmpyagGZYVVNR
-VFNVUlFRT1JPTk1OUVRWVVJRTU5NT1BNTExLS0xLSEhLTElLSkxNSk1MSEhISUdJ
-S05PTEtMSkhJSk5MSkxOS0xMTU1MTE9NSkxKS01PT1NQSk9RTlJUU1JTU1VSUE5P
-T09PUlFQTE5PTVBTT1BQUFFQT1BSU1NTUlFRUlNST0xOUVFRVlNTUlFSUVNUU1VT
-UVJTUFJUU1NUVFZYVlRVU1VWWVRWV1ZWV1hWUlZaWFlZWFtaXF9iX11ZW11dXFtc
-XVtcW1lZV1daWFZVV1dVVlhXWVhVWFhYVlVYWFlbV1VXWVhYV1tbXl1cXFxdX2Jg
-XlxbYWBbXFtbWVlZWFpaWllXVlRWW1timL/L09rd4uTl6Ojp6Xh4d3l6fHh4d3V3
-eXh5eXh5dnh2eXZ1d3VxaFpRTURDREZJRkZHSkpKRENDQkdDPD1AREZFRUZGR0VI
-RkdISUlGSkpJS0pKSEhKT1BPUFJQT09NS0xMUVBPT05KS0pKS0lKSElHR0VEQUFB
-QUNCRERCQD4/QD09Pj5AQj89Pj1APjxAPD0+QkBBPj5BPkA9PD9ARUZHS0dEQkFD
-REpQTVFKS1BOT1JLTUtNUktPR0tSUFRXTEtGUl1RVFFQUE9UUFFOVFBZWVJUVFdU
-UEQ/Pjw6ODc4Ozw3Ojs6ODs7Ozg4ODk4PT1JS0RDQkdNUVBLU1FQU0xRUFRaTVZb
-U1ZQVllXXllbWFtYVFNRXF9lY1tXVFBUYFhYXFdXVFJSVllYU1RWV1xeZGZjXFlX
-Wl9YWFFLRTs7PDY3QURMVFtfVFBVV1VWXmVhWFVWW2BfX1tWU1JWVFJVWVZVUlZU
-VFNUUVVQT1VcWFFOUFteWFRKSE1RVFJYVFBUWVNUTktUUk5TUlNUUk1NTVBOTlNR
-TEtNUVBQWFhaYlZSV1xfWE1LS0pISlBOTlFjeIOHl6GmrK+trJ+brrGeiX+Din9o
-XltVU05JRUJAPjg3Oz08PT48PD08OTk8PD49Nzk+PjxANzhISUZDOzw6PUA9PUBC
-QD1AQDw/R0k9NjU1MzY1OTU4ODc4ODY3NzY9QERES0pPUEtYVFZaV1pZWlBQU1BQ
-Uk5LTE9SUlFXWFNKTEtJTlNSUU5OSUpOUVNTSUhMTU5PS0lKSkxJRkRESExLTktF
-RUNGREFFRUJDREE/Pjw8QEA+QD08Ozs6OTk8OTo8PDo5Ozc0MzQ1NDU1Nzo3NzY2
-NTc2NjQ0NjQ0NTMzNTM0NDc9RlJZYm1xc3Jyc3JsY11YW2BnbnV1e3p6ent8d3Vy
-b21xamdoaWZlXVlVVVNQUFFLRD85NzU2NztCPz5AP0FAQ0FEQ0lEQ0RFSU1WWlpc
-WVpcXWBkZWFjYmBhYWdmZ2loZ2dsbXFwc3Fzc3d1dXZ2ent+fX2Ah4eIiIeGiIeI
-iYmGiImBfXt7enZ2dnh2dnl7dnd3dHV2fIKGhIaJioyMjYyLi4qMjoqHhoWCgYKF
-hYeKioiHhISFhIGBf4B9foCAgYGEhoaEhIF+f319gICChoiHiImKjo2Oj46QkJKU
-kpCSkpOWlpaamaCgmpqkqamfhWpbVFFUVFZVT1JPTU9OTU5OUVJRT1BPTE1NTElL
-SklJS0pKS0xMSUpNTk1LTU1MSElMTEtKSktJS0tGSUtOTEtJSkxNTE1OT09NT0tJ
-S0pMT1JRTU5RUVJRUFJQUk9OUFBQTUxNUFJRUVBOT09OTVNTU1FTUlFOUFBTVE9Q
-UVFST09SUk5OUVJVU1BRUlFTU1NUV1ZVUlJUT1FRUVJSV1RUVVVUUVZWVFJSVVdW
-VlZVVFdWWFxaWFxgYFxdXFpdW1paW1paWl1dW1hXVlhVU1hZV1hXWFlYWlhYW1hY
-VlhcW1tcWldWVllaXVpaWVtdXFxeXl9eXl9eYFtcX11bWVhXVVpaWVZXVVlVV16a
-v8vT2t/i5Obo5+nqeHh3d3l4eXZ3dXZ0dHZ1dXN0dXl3dnZ1dG9nWU1JSkVHSUJD
-R0hDSEVDSERAQkE7Oz4+QEJGRURGR0lKSkhKSUlISEtMTEtNTExMTk1QTk9PUE5M
-TExOTU1MT09LT05LTEtUSUhIREVEQ0NCQkNCQkA/QD8/PjpAPjs8Pz48PUE/PUQ+
-PD4/QEBAPTo8PkA8O0BBP0JGSUlEQEJDSUxISUlMUUlLUU9LR0hOTU9QTVJMTU9K
-TlBVXE9RU1NUUlVUU1BXTVBTUlVYXFpTRkI9OTs5OTo3Ojs4OkE9Njk8OTo6PEBB
-QktJQ0NBSk5UTlBUTUtVUFRPVFlKU1xZXFNUVlpeWGBaZmFVXVhbVllXVlFTW11f
-XV1ZV15aVlhYV1NTWVdVWFtcXmJhW1daW1ZcWFNGPDs3ODg/RUlPXVxQUFVWUVZb
-YV5ZV1tcYl1hYFZUWFlXWVdXWldTUlZcXVNPT05TVlpWV1hTWF1bVU5HSVNTVlVU
-V1lRVFJJS09KS01RVU1QVVBQUE1NT01KSk1QS09UWllYWFpYV11bUU5ISUlKUUxM
-SE5gboOZoqisrKqlp6mwrZ6Mg4uRhXJiXFhRTEtIQzw7Ozg3Nzo7P0A8OTo3OTk6
-Ozw4ODpAP0I9PEBCR0VCOjY6Pz49QkE/Pjw6OT1HSkE6NDQ3NDQ0NDk4OTc2Njg5
-PD9CREdMTVBUTVlYY11cV1FTU1laUVRVUUxLUVFPUVVWUU5MSUtRVFNSUE5KSEtL
-TlBQSUhIS0pJRklNSUdFS0dJR0lLSkhDQkFBQEVGSEhFQT5AQ0VGRUdAQUFAOjk6
-PDo6Ozk5OTk4NzU1NDQ2NzUyNTY0NTU0MjEzMzQ0NTMzNTc1ODo+QU1ZYWtwdXV0
-cXJvbmpjXVldY2dtcHJ5enp7enl4dXNyb29ramtra2ViXldUVlhVUU1DOzc3MzQ2
-OT9BQEA/PUFAPz5BREZFQ0RHTFBYW1xaXGBhYGFhY2ZnY2ZjZ2lpamVoaXBtamtw
-c3JzdHVzdXZ2dnh5en+DhYSCg4OEg4OGhoWEgX97e3h4eHZ5d3h4d3l2dXFwdnl6
-gIWJi4qLiI2NjpCNjIqLiYiIhYGBf3+EhoaHiIqGgoSAe4KCgYWEg4SFiIqHiIqG
-g4F9f3x/gYKBhIiHh4qMjoyLj5CRkpSUkpSWlZeVj46OnaWalKGrsK6jgGZfW1dW
-VFFTUlBRT09RUE1MUU5NTk9QT01LS0tMSktKTkxMS0dKTU5NTExOS05MTEpPTUxJ
-SExKTE1NTUxMTUtLTE1HS0xNTlBOTk1KTk9OT1BQTU1OUFJSUFFST05OUE5QUE5N
-UE1MTU9SUFBRUlZUUVBTVFJPUFBTVVNUUE9OTlBRUVBSUlFTVVNTTU5RVFZXV1NU
-VFRUUVBQU1VUVlVYVldaV1lWWFVXVVVWVlZVWFpaWltbW1xdXVpZW1lbWVxYXF5e
-Wl1cV1tcWVRRVVhZWFpaWFlXWFpXWVldWlhZWltbWFlaW1taXF1cXF1aW15bXF9g
-YGBhXVtcW1tbWFdVV1lWVlpYVlhWXJbAy9LZ3eHk5ejo6ep6eHd1dXd4eHh3d3d3
-dHR3dHd1dXVxdXZya2JVTEZHSkpGQkJDQkc+QUVFRUM/Qjs8RUlHR0NFR0dHSkpL
-SUNHSUtKTE5NTU1MTk9QT1BPT1NWU1FOTU5MS0xNUExMTE1MT1dJSEpJRkdGRklK
-S0VDPz0/Pz8/Qz08Oj1APT1DQD4/PTw8PDo9Pj4/QUFAPj49PkA/QERDQkJAQERI
-UUtMTkxTTEpQSktLRU5NSlFMTUpNVU9SVFJcUU9QU1RNVFNYVVZSTlVVVVZZV1NM
-Pzo5OTc2Njc8OzQ3Ojo6OD06Njs5OkJCTUlDR0JKSlVVTlVLTFVQWFFRXVBRV1Va
-UlJZWGFaX11hYlthWFleXlpdVFpdY2ViYltZYVlUVVZZUVNaVE5TV1haW1ZVU1xc
-WlpaVkZBOzU3ODlAR01bWFNYW1dWVVhdY19aV1ReXmFhWlZYXV1hWVZaWlNXWlxZ
-VFBTUFJWW15VWVpYW11ZUExMUVJOU1JZXFNXUkpNTk9LU1ZTUlNWV1dKTUpLUEpM
-Tk5RUlRUVldXVFJWW1lXTEZIUExTTE5NS1JmgJSdpaqqrK2mqKusn4+FiI6OhHJi
-VlRTTkhBOzo9OzQ0ODo8Ojk3Nzg4Nzk6Ozo7NjlBQ0FAQUJER0U8OTk/QUNEQUE/
-Pjk3QEZHQz42NjgzMjQzNzc1Njg2NTs7P0JBQUlFTVJQWlhiXV1bUlBSWlhRUU1U
-T05PU05UWFpXVk9LT1JYW1lWVk5MTExOTEtKSkhCRUVHRENJS0lJSkdHSEZER0tI
-RUNBQENGRkFAQkJESEZEQkNBQEFAOjs6OTs6PDo8NjU4Njc4ODc0NTQ0NTI0MDEz
-MS8xMTc6Nzc1NDQ2OkhVYWdudHR0d3d1dnFvbGViXl9haHB2fX18foB/fnp3dXFx
-bW1tb3FvbGhiXFhaW1ZSTEY6NTQ7Nzg6PUA+PT5APTpAQURERkdJRkpKSU1VV1pe
-YF1gYWNlZ2pnZWZnaWhoa2xtbmtqa211c3V2d3dzc3d2dnN3fH+CgoSGgYCBfYF/
-foB8enl1dHZ2dnd5dnV1c3Nyc3Z4fIGDh4iIio6PkY+NjI6OjIyIiYiGg4OEgoWJ
-hoyLi4iHhoSFh4aKi4qIiImLi4mJjIqHhYaEg3+BgICFh4iHioyNjYyNkJGRlJSW
-lpqamJOHf4OTm42ImaextLGhgmJZVlhUUlNSUU9OTk9QUVBOS01OUFNQTkxMTkxM
-TkxLTEdISk5NTUtLTUtKSk5OTUpISktMS0pNTkxKTEtKS09MQzxJSkxOT0tOUk9O
-TE1PTVFPTk9QUlBQUFFSVVNRTlJQUFFPTU9PTk1PUlNRUFBQUFFSWFJQUFNUVVRU
-U1NTUE5SU1NRUVNUUlFRTk1RVlVUU1VTUVJQUFJRUlRUVFRUV1ZVWFdXWFZaV1VV
-WVdWWFhWV1hZWVtaXl9eXlxeX15dW1tbWlhZW1xXV1lXVlZWWFhYV1VVVVlaWllX
-VlhaW1pbW1tZWllZXFxbXmNeWlxcXVpcXWFdXFtYWlpYWFZWWFZYV1VVVFlgksDM
-09ne4eTm6Ofq6Xd2d3Z2d3R1enl4dXR3d3h2d3Z1c3h2cmxnXE9GSUhEREZFQj5G
-RURLSEA+Qz9EQUJBSEpKSEdISUtLS0lGSUlJSklOTk9NSE1NUVNSUE5RUlZUT05N
-T05OUVJQTU1KTlFNS0xJSUpKSUhJSEhTRkVCPT08PT9BQkNAQD5AQDw9Pjw7QD08
-QDxAP0E/Pz4+Pj9BQT4/QEE+Q0JGRUZSSUdIS01MSUtKSUhGTUxLUEdLTEpUVlJV
-UVpWUFJQVVFTV1lUUVJSVlRaVFNRUUxBOUNANTc5Pzo2NzQ4OD45Njg5N0A+Q0FK
-SURMRU1JTlpaVVFOUkhQT01XTVBVUl9WUlhTYFpeVl1XWF1XXVtiXF5XXV5lY2Ne
-WVxdV1JQUlVSVlxWUVJZW1ZXWVRTXWJbVVVSSkg/NzIzND5HS1JVV1ZfW1xcV1lf
-XmBWVWBgYmdlYFtfYWJeWVhYVlhbXVxhXFdSVFRTVFRWYF1YWVpOUFFWVVFRUVda
-WFNOUVRTVVFSVlRPVFJRVUtGSVJNTE5PUVJPU1BUXVtSW1hgZ1lPR0pOTVBOTkxI
-UWJ1ipShqq+yr6igqKqhlIqFiIyKf3NgWVRRTUQ/QUdAOTY2Ozw6ODo7OTk5Nzg8
-Qz85Pj89PT87QkFIQzw6PT09Pz5BP0FBQDxCRkdFPDo2NDc0MzM0NDYzNDc3NjY9
-Q0JDS0lTVlZeWGBYXlxUU1dYWFZOTFNSTU5QUlJVVVZVUUxNTVRaWlpSS0pPTE1N
-R0pJSEdHRD9BREVFSUtKSklGRUpLSEhGRERGSEVDREZEQD9CRUVDQEA/Pzs7Njs+
-Pjs6OTk3OTo4ODc6Nzk0NTc4NDQzNDEzNjQ0MjU2NzY2Njg/UGFqb3N0dHR0dnZ0
-dnJsY19bWmFsdnd6hIODgoJ/gH54dXBwcm9rcHRxcWdiXlpZV1JQRDo2Njg6QUFA
-QD89PT1CQkNEQ0JBRUdFRUlKSUxPVV9dXWJiY2NkZmdoaGhoaWlqaGlta2xrb25v
-b3J1d3d4dHZ5d3Z6fH59foGAfnx8enp6d3d2dHV2eHZ1cnN2d3NucHN4e4CBhYWJ
-ioqNjY6Qj4+Oi4mJioiGhoaFhIWGhoeIiouNi4mHh4iJiouNjIqLjIyKi4iJiImL
-iomLhIF/hIeIiYqIh4iMj4+QkpCVmZucnJ2fnIp5eo2Th4uesrm8uqmbf2ZaWVZY
-WldTUlNQT1RRUFJQTUtLS0xOTk5OT0tKTExKRklNTExMS0pGSEpISUlMSUpLTUxK
-S0xPS0xMSkxLSkhCREhJSUxLS09NUk1KTk1OUFJRUE9RVFNPT09PUFBNUVFRT1BR
-VlBQUFNQUFBPUVJPUVFOT09PUVBQUVRTVFRUUlNSUVNRUFFRU1BOUlNSUlJRUVRT
-U1NTUlRUVlRWUlNWVVZWWVlYU1ZTVFdYVldYWVhWWFtXWVxdXF1eXl1dXFpZWFhY
-XFtZWV9WVlRWVVZWWFlWVFJVV1lZV1ZaW1dXWFVWV1hXWVhZW1paXWFdXV1gYF5b
-XV1aWlxZWFhWUVRTVFRXVlhUV2ORv8rS2d3g5OXn6OnqdXV0d3h3d3h5d3Z4enl5
-d3d2d3l4dnR0cWZYUUpGR0dFRkRHSUJIRkVCSEVCP0BBQ0VEREVIRUVFSUhJRERG
-RUdJSkxMS0xOS05QT1FQTk9PUE5LTE5QT1NTUlFSTU1OS0tNTExJSUtJSEdISk9G
-QkBCPj0+Pjs9QEBAQD48Pzs9PEE/PDtAPj4+Pjs6Ojw6Oz09PkJFRERFRURGRkpJ
-RElHSkxNUlBKS0lRVk5WT09OR1VZU1VSWllUV1RcUk9TU1VNU1dZV1lQUFRVT0U9
-Ozk3Nzc4NDY2MzY4OTg4OTo2NjtAQElLRUhATUtKVVZWVEtTSVJPS1NOVlJTXltY
-XFVgW19ZVFddYFVYVmBcW1ZZVlhVW1VeXVdXVk5MUlhaZmJVUlVZUlFTU1VeXVxQ
-TFBQUEo/OTc6QEFKU1dcWFteWFhXVlVXX1daYl1eZGJZWFtgYGFYVFhXVFZaXl1Y
-UE1RU1FPT1ZZWVVWWVZVVlVWVlFSVVJSUVFWV1RSU1JUVkhMTExSU01JU09OTk5P
-UlFSVFNZWFhfWV1dVlNMRktITktHSEdQWWZ5jp2rtLOxpJynrKaZkYaAgoqJemda
-VFJOSUM9Ozw4Nzg5PkA9QD09PDk8PkFFPDk9PT4/QEE9QEVHQTo6Oz4+Q0NAQERF
-Pjs8QEI6NzU0NDg0NTY4NTQzNzY4NztAQURJSVFTV11UXVVdWFRSVlhXV1FRVE9R
-UlVUTk9QVVZRU1VTVVlaV01ITUxPU1FNR0hISklLRkJDRkdIS0lHSkhHTEhISklJ
-TUxISEdGRkRAQEZFQUNCPz0/PT4/PEA9Ojk4Njk6NjU2OTY1OTk5NzY7OTY3NTc3
-ODg5NzY1NTk7Q0xgcHh3d3h5d3d3dHRzcG1mY11cYWx1gICEh4SEg4KAfn59dnVy
-cXJ2eHVwbGZgW1pYUUo+Ozg3Nzk/R0VDQUA9OztAQUFARUZBREdHREhJRkhKU1hc
-X19iYGJiZWdoaGlpaGhpam5wcW1rbmtucXZ0cnN1dnl4eHl6e316e3t7enh4d3Ny
-cnd3d3h3dXJvbm5sbW9zeX2BhoqKjIuMjIqIiYyNjI+Jh4aEhYeIh4aGiImGiIqH
-iYiJioqGhoiLioyLjYqOjo+OjYqMi4yNi4uGiIqHiYmIiIiFh4uQkZCUlp2lpaOf
-oamlk4KDkpWQma64vb26s62YgGpgXVpZV1dVU1RVVFRTU1NMTlBPTEtNT05MS0pL
-TkhOS0xKS0tJSUtLS01LTE1OSkxNTEtMTExLSUpHTEtHSUxMSktLSklNTk5RTk5N
-T09QTElPU1BRUUxUUk9RU1hRUlFUUVFSVFFTU1JST09RUFBQUE9QT01RUE5RUlJT
-U1BQUU9QT1NSUVJUVFFQUFBQU1FQUFFTVFVYVFVUU1NTVFRUVlRWVlZWVlNUVVRV
-WFhVVldZWlpbXVtcW1lYW11cWltdYF9dWllYWFpXVlhXWFhaWlhYWlhZV1paW1lY
-WFlZWVdYV1pcXFtbXV9fX19dXl9fXVxcXFxbWVxYVVdaV1RYV1pXV1dUXpO+y9LY
-3eDj5efn6el2eHh3dXZ2eXh3dHR6eXl4eXh6enx3dnVxaFlQS09GSEVHREhHRENJ
-SElGRkVCQUJDRkVFSUpGSEdKRUVAQUZISUlISktHSkxLTFFQUVBQU1BPUE9NUE1N
-TlRYUlJRTEhNUU9OTk9KSktJSUlJSEpFQENAQkA+Oz09Pj1APUE8Ozo9P0I/Pz5C
-PTw8Ozs6PT47QUBFQ0BAQ0VHRElHRUBBRURKT0xNS0pOS1BVUFJLSk9QVllRVlJW
-V1NYVVxVWV9XWFNZV2FjXl5YV1hRQzk5ODo4NjM2ODg4NzU0NTY1NTY1OTxASEdC
-QT9HS01SS1FRSVNOUE9OUU9cXFZhV1VXUV5fYltbWV5mXltVW1tWU1dUTk5YWV5e
-XlxYUVRYXWFiYVxWVlhZUlhbW1dXXFRPUFFTTEM7PTg3OERRUlRcWmBgW1RWVlhe
-W2NiXF1hXltYVVlhX15WXFlXWVxiW1hSUVNRTUxRXlxXWFlaYl1ZXFhUVVBRTk5P
-UlFWVlJJT1VQTEdJS09YVVFSTlFMTUxLVVJRW1BVVldTWFxcWk5ITUhJSUxRUFVP
-VGZ9m6mvs7OopK6wqZ6WioGDhIV9a1tUUU5JQjw5Ozs5Oz9CP0BDOzc7P0JDRkE9
-OTc6PkFBP0M/O0I8Ojk7PkBGQDxAQUNAOjo+RD84MzY0NTc1OTQ1NDY2NjY2OzxD
-SEhHSUlVWFZYUVVVWVdWVFRWVU5OUFNTUVFSUlRXV1lYWFdZV1NQTExMSk5QU05K
-TExKS09MRUFESkhHRkNHSEpJS0xQT1BPTEhFQUdERURFRD4+PT0+PT0/PkNBPDo5
-NTU1NTc3NjY2NTk4NzU0NjY3NzY2NTg3Pjs3NTg1OD9LXW13fIB9fXt4eHp1dXJv
-bWhkYGBianR8gISFhIWChoB+ent4c3BzeXx9enVwamNdWVxSR0I+PTs2Oj9GSURF
-QkFAPTw/QUJCRkhFREZGRkhLSU1LUFVZYWJkYmNlZmVnaWpsbWtrbm5tbm1wcHFz
-c3BwcnJ2dnR0dnZ4enp4d3d4dnd1dXRzd3h2dXNwb2xra2tudnt7gIOGiIqNj5CR
-jYuMiYyPkIqGh4WFg4WHiIiIh4aKjIyKi46Mi4uLiYyMi4yOj5CPj5GPjYuLio2Q
-joqJi4qJjImJi4eJiY6UmZ2jpqmqnJSZpJ+Yk5qgnpymr7Gyt7e8s6meiHBoYl1b
-WldYVFdYWFZSVFNRT1BRTExOTU1RTk5OTU1MSkpISUpLSktMSktKSktKS0lJSEdK
-SklLTExKSk1NTk1NSElLTExNTk5PTUxPUE9SUlBRTlFOT01OUFBQUlRUUVBSUVBQ
-UFFST1FQUVFQT1BUU1FPTkxPUE5PT1BQUE9RVlNRUFJTUVNWVFFTUE9RVFRUU1NT
-UVFUUlRTVFNRUlRXVFRWVllYVVVVVVZVVVZYVVdbW15aXVpbW1pcX19dX19cW1tY
-XFpYW11XV1dXV1lYXFtbWFdWV1pZWFZZWVlYV1hZW15bW1xdXl9hYWJiXlxdX15d
-XF1cW1tZV1ZWVVZYV1NXWFlfjr7K0trd4ePl5ufp6Xp6e3d2dnV3eXh4enh2dXR3
-d3l3fXp4dGxjWU1HRUhHRUdGRERFSkZHSURGRURDP0NERUNDQkZIRkRERkVFRkhF
-R0tLSUhKSUlJS09QUlBSUFJUUlJTU05RUVtcVU5MSUpSVFNSTE1NS0tLS0lIRklF
-QkBCQkE+Pz87PDs7P0c6Oz48PTw6Oz07Ozw5OTY4OTk8PEBAPTk8QEFBR0REQj9B
-RUVLS0pIR0tLUFZJSkdITkxTW1JXWldbV15XXFhcW1NTUVZYW2FjX1dYXVZJPjk4
-Nzk4Nzc6NjczNDg3Ojg4ODo2ODtARkFDREpOTEpGT1RFUE9QT0pQTVpbVWFYUVNO
-WVxfWVdcV2FiVlFRUVFPVFVTVVdWXGBiVVZUVVpfW1ddXlRWWFlVWVhUVldZWE9O
-UVJORD47ODU1PUpIS1xbXWNcVFRUW15cXV1YWV1cXlxWWWVhY1xaWVdVVWBgWlRU
-V1VMTlRVVlVUWFhiWltfWlJSU09PU1FSWVlUUE9RWE9JSUVLVlFQUU9IUU9PT1BR
-UVNaWVRTU1RbV1hXTkdKSUhHSU1NUU1NV2uNpbGzsKqusK+oo5yQgn+DhX5vXVJQ
-TktFPTw+PT08QEBBQT08Oj5EREdFPjs7OTg9QEJDPz49PUQ9ODw8PUBBPkE+Q0ZB
-PkRJRDk2NDQ1ODk3NjQ0NjM1NTk4PEFCQkRISVJTUVVNVFVUVVNTVlZTT05QS05R
-UFJQVVlaWlpUT1JRTU9RUUxPUFFQTkdJTEdLUk9LRUVDSEZGRERKS0lHR0hNTU1M
-R0dISEFBQUBAQT5APD1BQD4+PT09PTk2NTUzMzc1NzU1ODg0NTU1ODc2Njc2ODU1
-Nzk1NjY7SltpdHuAg4OAfXh3eXl3dHJtaGJhYmRodn6AhYWDhYSDfnx5eXpzcnN5
-fn18eHNrZGJcVVBDOTk8OTs+QUZKSkZFQ0E/PkFCQ0NESEdHRkdHSElKR0hITFBV
-WV5iZGVnZ2VlZ2dtbm5ubWtrbW9xbm5ubm5wdXZxc3d3eXd4eHN1c3N1dHN1dHJ0
-cXFvbW1oZ2lqcHZ8gIOFh4mOj42Nj46Pj4uMiYiGhoqLhoKAhImIiYiIjYyJioqO
-j4+Qj4yNjouMkJCQj5CQkJGQjYyRjo2Oj4+Ki4uOjI2Pj42LjZeiqauqp5+NhI2S
-kJOcoaSlqa+3urm5trW1urGhmIx9cWdfW1pVVlVXV1RTU1BQUVBQTktNTU5NTlJT
-TUtISEZFRkpKTEtNS0hISUtMTExNSktKSUZJSkpISExNTk1OTE1NTU1OTE1PTk5O
-T1BRUU9NUFdTUk9OUVJTUVFQT09QU1RPT1FSUlJPT0tOTlFTUVFQUVBPT05OT1BP
-UVJSUVFTUlNTV1VVU09QUFBUU1BSVFNRTlNWVVNUVVRVVlRUVlRTVVRSV1VUV1dT
-VVdWVVhbWlxgYFtYV1daYF9eYFtZWl1dWV5ZXFhVWFZZV1hXVVNVV1ZYV1paWFlX
-WVpYW11aWlpaXF1gX1xdXmVfX1xeYV9cW1lZW1pWVlZWVFdXVlRWVl6XwMvS2d7h
-4uXn5+nqeHp4dnV4dnp4dnd2dnZ1dnh3dHV1dnd0bGBUS0hHRUdHQ0RFSUtLR0JM
-R0JBP0A7REBCREFBQT9BQUNFRkZFQ0hHSUhIS01LS0pSUFJUVE9QUlNUVVRUUlBP
-T05PT05LTFFVVlJLTE9NTktMSkZGRUVCQ0VEQD89PDs8PD8+QD06Pjs7Ozo4ODk6
-NzhAPTo6Ojo9PD49QDs9PDtAQD1KRUlJR0pISEdHS0xIS0NFS0hPT1FcUFBTUFVV
-WFhaV1RXU1JLVFdXWVlbWFpdWUo8Ojw6Nzc2Ojc4Nzc2NjU4Ojw8Ozg3OkFFQkJE
-R0pERUNOUUhPSVBQTEtKUlRSYltPU0xRUVZUU1lTXVlWVVFUUlFPVVpcVldbW1lT
-VFRZX15aWl5hWVhYWlZcXVJQV11aUFFVT0pHPjk3ODY4PUNFUVRdXV9ZVlZaXFxZ
-Xl9fXV9hYWBiYWNdXFxXUVJTXWZkWVxZV1NKT09QVFFWWVdRX11XUVNWUVBPUk5R
-XFNVV1NTVU1ISU1SUE1QUk1NSkhKTlBRU1JTV1FXXVtbVVhVSkZCREZFR0pMTVJV
-XX2WqrCupqWqsaqln5WOgH5/gHhpWE5LSklBPDo9Ozw+QEJBQzxBRUVAQkJDQjw5
-Oj1CQUE/PkBFQz03Njk7QUVBOztGS0c+RUxDPDg3OTc3NTU2NTU2Nzo4Njg9QENG
-SVBLU1BTWE9WVVRUW1ZQUFJPT01KTk5PTVBUVVRQTk1PUVFMTk1NT09PTk9MSEpK
-S0tNTk1JRkRDQ0RFQkZGRkRFS0tMTUpJRUZFQD9AQT87PT09PEBAOzo6Oj47OTs4
-NjU1NTc1Ojo1NDQ0NjU3MzUyODU3Nzg5Nzo4O0dXbXR4e36AhIGAfXl3d3Z3cm1m
-Y2RiZWx1foGDhomIioOCgnt5d3R0fICBf3x6dW5oZF1TRDw5OTk6PD1DSktLS0lH
-Q0E9PD5BQENER0RJRUZISktKRkhHR0pPUlleX2FlZ2dnamdrbW1tbWtsbW1sbnBx
-b3B0dXByc3V1dXh0cnNxcm9wcnFxcW9vbGtlY2ZpbHN4foKDiIqHiIuOj42Mjo6P
-i4uKiomKioaDhIKChIiLiomLjY6Njo2Ni4+Nj46OkI6LkJGRjo+RlpKTkpKRkpWS
-kpGOjo+PkpCQk5KYoKmtrKmomYaEioSJmqiqqq23vLi7vLm7uri9t7Krp6GVhHZs
-ZWNbWlhXVlVXVlVSUlNPTlBOTk1MTU1PTEtKSkhKRkhKSUpIR0lKTU9LTElJSUlJ
-SEdNS0xMSkxMTVBMTk5MTU1KS05PT05SVVNRUFFSUVJQUVRUUUxNTVFRUFJTUVFQ
-TU9RUVJQUVBQUk9SUlFSUVFQTE9NTVJQUFNQUVNVVVRUVVJTUVFQUFBST09PUVJQ
-TFNVU1NVVVRRU1RVVlVSVFhYV1hYWVdWV1dWVFZXV1lZXFtZXlxcXl9fXl5aWllb
-W1haWllaWldXVlZWU1RTU1VUVVlbV1laVldZWVdXWlxcWlxeW1xbX19dXlxdX15a
-W1lYWVpXVVZUVVZYVVVWXZW/ytLZ3uHj5Ofn6Oh4eHd4dXh2dXJ2enR0cXR0dXl0
-dHJzcm5oXVFIQ0RERUNBRUNDRkZGQkhIRkFEQUNHREJCPEBBQ0JDQ0RFSkZHSUlK
-SUdKSk9NTlBQUVBTUlFRVFRVV1pWUlBST09RUU9TV1ZVVFNTUlJUUVBNSk1KRENF
-Q0RFQD4+PD0+P0BBPjw8PUM+ODY2ODU3ODs8Nzg6Ojs3PT09PDs6Oj5AQURGR0lI
-TEpISktQTElPRkVNSlJVVVdRT1JPWlZWVFRUTFRWVlRWW1RbWl1cXFdVTkI8OjY4
-ODs4NTQ2Nzg5Ojs5PTs3OjlBPkI/QT9CREJFREdLRk9HTVBKUUtQTVBeWk5TTldR
-UlNRV1JbV1JXV11WV1JWWlhXXFZXXFZWWlZcXF1XXGBcW1lZV1hYUFRaXFlVU1FN
-TUxEOzk4Njc3O0ZPUltbXl1UWFhZWFdaX2FhX1xiX1RaY1peXFRPTFReW2FeW1VU
-VE5KTE1NTE5SVVNfX1JQV1JRUU9PT0lUVVVUUk9WUUtYVVFSUk5QTUpNTVBWVlFP
-UVFVWFhcWFlYWltSRkFHSERKS0xPTlJTZ4GaqqWao7CsqKiinZSGfXx8e3JfUk9K
-R0ZCPTk3OTo9PUBBOzo+QD5GREdEPj0/P0RGQj4/QkNAQTk5Oj5AQDs5OjtCPjk9
-QkdCOzc4NDU0NDg3Nzc3NTU1NTs/Q0VHSkVQT1FSUFhUVU5SU1NSUU9PT09NTU5Q
-UE9TVVNPTVFTUE9QUE1NTUxMT1BPTkxISk5MSkRFRkRERUVHR0ZGQ0pPUE9LSUlI
-REJHR0I/Pz0+PDs7Ojw8Ozs5ODo5OTs5Ozo1MjQzMzg5NTc3NTE1Mzk2ODk5NzY3
-ODtHWWdwd3d4foKCg39/e3l2dXZxb2plYGJlcXmCh4aHioqGg4SAfnd2eHuBhomC
-f314cm1lW1BBOjs3OTo7PkZKTUpLR0dGQj08Pz8+Q0NCQ0hISElJRUZFRUdHSUpN
-T1NXX2JkY2RlZ2ppampqamtubGptbm9wb3N0c3Bub29vcnJvbm1wcG5va21sZGJg
-YmNmbW5ydnl+goSDiIqJi4iJj42OjIuKjI6MjYmIiIaHhoWHhoiJioqLjI6KiIuQ
-jo6Ok5aVkJCRj4+Qko+UlpaTlJOUkZGTkZCQkZOTk5Wan6Soq6qnpqigkJiblqGs
-tLe4t7u8trq6uru7u7i4ubaysKuknJWKf3BlXl5bWlhXV1RSU1JRTU9PT09QT09Q
-TElJSUlMSkxLSktNTktLSUtOTUlHR0dLSElMTkxLTEpLTU9NTEpMSkxNTU5LTlBS
-UVFTVFNTUFBQUVBQVVNTVFFXVFJQUlRSUVFSUlRRUFBQUk9QT09TUlBQUU5RUFFT
-U05MT1FRVFJSUlJSUk5QTlBOTlFUVlZVUlJSVFJQUVRUVFZWVFJWV1dWWltbWlpZ
-Wl5hV1dZWFlaXV5eXlxaW1taXFlYWVpZWFlZW1pXV1dWV1RUUlJUUldWVFZYXl1Y
-VlZbW1pcXmBfXlxdXWJeW11eXV1cXFtdV1pYWVdWVVRSU1dXWVhclr7K0tne4OTl
-5+np6Xp7gHx4enp0dHR1dHN0c3NxcHNxc3NvbGRYTUtGRUFGSD9FSEZJSEdMRkVG
-REZEQUFBR0ZBRUNDQkRGRENCRkNISEpHSUtOTE1NTUxQU09PT1JVVFdWU1FTVFRQ
-TlBRUU5UVFVWWFZVUVFPUlBMTUtDQ0NGRkVIQ0REQjs9QT4+PTxDTTg3OTo4OTw8
-PD0/PDs3OTs7Pz89OTk5PUE+Pj1CQUVIR0dOS05OR1BLSlFNVFJRW1RQV09WVVNU
-UU5NVVdXWFdbUlRRUk9VVFRTTkM8OTc1NTU2NTg3Nzg2Nzg5ODc4Nzw6PTs8P0FA
-RElDR0RIUExMUk1PT1NPSlhYTlZPUk1NUE9VWl1bV19jZl1bVlhVWVZYVVhYWF1c
-VlxhWldVXWBaWV9XXFtVU1dZWVVTUU9PTEpCOzs3Njk7Q0xMU1tdWVNZV1FTWFpa
-X19dWFteV15eWVxYUEtOVF1YV1tZUVNRT05LUk1NTk5YWlVZXVpaWFNSVE9PUFRZ
-VE9QTVJSUVJTUk5QTE1RT0xMV1tSTk1QVVldWlhbVlRaXVdOR0VIRkpJS05OUk1Z
-aIOcpZ2nsLGtqKajmY2BfH5/eHBiU0lGRkM9Ozg8QDo+QUE7OTxCQ0JGRkdCPT48
-PD49Q0RBQUJCPzs5OTk7QUI/PT0/PT9DR0A3ODU0MzUwMzM2Njo2MjQ4PEBCRUdG
-RVBLUlFQVVFWUU1PVVZTTExOTlFRUVBLSUxQVVVSVFNWUlFLT0xLSU5TUk9MTEpL
-S0tHRkhHREJGRkVFSUdHSUpOUE1NSkdCQUVFRURBPj8/PTo7Ozw7PDs6Ojo6ODs6
-ODs2NDY8Ojk2NzY0NTQzNjY3ODU1Nzc5RFVncnp8fn2BgoSDg4N8eXp2dnNuamdh
-YmhwdX6DhoeIioeDg4N/fHt+g4iIhYaEf3p3b2VXRz46OTg5ODpBSElMTE1LSUlH
-Qz89P0A+PURHRkZFQ0ZER0hJSUhHSUpLSk5RV15bXWJla25rZ2ZobGxpampqaW1t
-b3BwbW1taWptbm9wb29uaGdnX2BeX19hZWlyenp8fYOFg4iIiYmHiYqMioqJjI2O
-jouKioiHiIeIh4iJjI2Pj46KiIuLi4+Sk5OTkZOSkJGSlJKTlJSSlJOUlJOUk5KT
-k5OSkJGSmqGqq6ynnpaanqOnra+xtri4uLi7urq6ubm6urm3uLe3t7e3s7Gvrqeh
-loV4a2FaWlhZVlZUVlZZT01NTU1PT01JSEpMT01NSUpHS0xKSU1MSk1NTE9LTEtI
-R0hGS0lKT1JNS0xMTktPUU9PTExOUVFSVVNST1JUVVNQUlJRVFJRVVNQU1VTUVNT
-UE9SU1JSUFNRUU5QU1JSUFBNTlJTU1BQT1RQT1FTUVFSUlNTUVBSTlFQUVNUU1RS
-T09XVFJQUFJTVFRVU1dXWFhYV1haWVdXW11bXFxcWltbXF5eXltZWFdZWVZUWFpa
-XFtZWllXV1dXV1dYVFVXVlZZV1ZUVVpWVFlaXFxaXV5aWl5fYF1fYF9gXFlcWVlY
-WVpbWlZWVFNSVFdYXVyUv8rS2t3h4+Xn6OnpgH98enp5eXh1dXV0cnV6dXJ1dnVy
-cnBvZFVJR0dGR0lDREVFQ0dJRkNBREVCQkJFRURESEZDQkFBQ0hIRUNJR0hIRkhJ
-S01RUUpLSUtPUFJWVVNSU1VUVlhaU1JTUVJSUlFRUVVVVVFTUlBRUE9MTE9OTEhF
-RUZERUQ+PTs8PT89PD1APTs8Ojs7PTo9PD08ODo6PD0/Pjw7OTs6REdGQD0/Q0ZI
-TFBMS05GSE5LU1BWVE1SVFVWUFVTUVJOTUhNUlNYVllRUEtPUVVXVlVSSkE4MzU0
-Njg6NzU1NzU0NTU2NTM0NjxBQDo+Q0RGTkZGTEZPSElLSk1JS0xLUlFKUE9SS09T
-UFpUWFpdbGJnYWBZXlhZWFlSUF1fXFlZWF1cWlpgYFlYWllaWllWWFpcWVdTUUtL
-SUc8NzY4Nzo5RE5SXVtXU1hSUVRYWl1aVFVSVFlfXllaXlVWVVNTWVZTVFlPTlFP
-Tk5SUElKUFhZWVdYXV1aVlNWW09VV1tbVUxLUU1PVk9TUFVTTk9VVVFcYU5NU05R
-U1FcWVpbXVxfXlRFRENBRUNJU0xRT1VZbIudpa2zsK6pqaacj4d+fHp7d3ZsTkZD
-QD08Ojs9QUBDRj86PUFGQkA/Q0A9Oj4/QENDRUZAQUNEOzg8O0NDSDw8QUFCQUVH
-Pjk4ODY4ODUyMjM0NTc1ODs8QEFCQ0VESkpTTVRQTlRWWlpWVlFQUU5OSk9UVlJM
-Sk1TVVFRWFRSTkxRUkxLUE9QSU5OS0lMTUtKR0NBRERGR0hJS0hFSElMT0xHRkdD
-QUJDRUNFRkY+Pjw7PDw7Ozo7Ozs7Ozk2OTk3Njc3NTU2NDg2NzUxNDQ1NjY2OEFP
-YnN8fIGEgoWGgoKCgIN8eHl2cWtpZGFjaHGBhIaHh4WGhYWFhIOBhIOFh4iHhIF7
-dHBqXU5BOzw5Ojo9OkFITExNTU5KSkpGRUNBQT1DQUVERURHRUZGR0ZHR0hJRklJ
-SU5MT1VWXV5hZmZmaWlpaGtmaWhnaGptbW9raGxrbGxta2tpaWZiX11eX19kYmZo
-b3d9gX9/g4aFiYmLh4iHiYqMjIuNjIyMjI2KiIuKh4qHi46QjY+Pj46Ljo2OjY6Q
-k5KUlJWUlpWUlZOZl5aXlZORlJmVlJOTk5GRkpuip66qpJyWjpSjrri3u7u2trm7
-vLq2tLi7uLi3uLa1t7m6uLe1trazsbKtpZqPfXBkW1hYXVdVWVhRUE9NS0pLTUxI
-Sk9QSktKSE1KTE1LSklJS05NTFFOTU5MSUpKS0xNSk1NTE1PTktOT0tNT01OTU5P
-UlFQUFFQUVJTVFZUU1JRUlJQVFJSUlJSTU5RUFFQVFJRUlFQT1BTU1FTT1VSUVFQ
-UFNSUVBTVFJRUlFQUlJPUFNQUFFSU1RTUlJTU1ZUUVNSU1RXVldYVVZUUlNXV1dY
-WltbXFtcX11cXFlcXl1aV1ldW1hVU1ZXW1paV1ZWWFlYWFdVV1ZWVlZVVlpWVltY
-WVpaW1taXFxbW15cXGBfXl1fYF1aWlpXWlhYVlZVVlZXV1dYXpG/ytPZ3uHj5efo
-6el8eHd4eHR1dnd2dXd3dnh4d3V0d3V1dG1hUUhDRkVCQ0hGREZFQkRBQUFBRD8+
-QkJCQ0FDQ0dFQkFFSUpHRkZHRkhISElKT01KSktOTEtOUlZTU1NRVFZVU2hVTk9R
-U1JTU1JSUE9RTk5PUVNPTVBMT1FMSkpISEZGQ0RCPz9APj4+QD88QDw9PDs5Ojk8
-ODo7Ojw7Ozw6PDw3OkA/P0FAPj1DQkNFS01NUUhPUU5UUE5QSUlPUVhTUFFRUExO
-S0xOTlNRV09TTVFUWFtaVU9PSDw2NTQ1NzY3ODk6NzU1NDI1OTg2OT5APj1BQ0JM
-S0hJRUlFSklKSUZHS05KS0ZPTU9JTlFPVlBSW1xmWl5eXFhgXltZX1dZW1tbWllZ
-WlpbW1dbW1pVU1hWVlVUVltTVVVMS0xLS0I5NTMzNzs/TFFTWVxSVUxQV1tcWlFS
-VFZYV15bV1xZTlJVVFVTUlFUVVZTVU5KUFFJT0pQVlRaV1VgXVhWWlVVVU9UVFNX
-VVRXUFFXVGBTTlNRT1JaWVNWTk9WVU9SSlNVVFtfW15hXUxEQD1GRUZLSlRNT1Ne
-epWgqrCvqaaloZuViX11c3h2c2NMRD0+PDw6OTk9QkRGQkI9RUVDP0A8Qj88PTs7
-QkJBPjtAQ0Q/Pz8+RUtIQz9CQ0FAREVDOjc2NzU2Ozg4NTY2Njc0Nz0/QT1CRElP
-TllNT1FSVlhgWVdWUlFQTVBNTVJUUk5MTE9TVVVTVFZUVVBQTk5NTk1PUVJOT01L
-TE1JREVIRkZER0pKR0dGRkdJRkVGSkRAQEVHR0dHREE9PTs9PDo6OT1AQjw6Ojg8
-OTg3Nzc4Nzc5OTg6Ojs4ODk7OTtCUGFvdHh+goSFh4aEf4GEhH98enVxb2tpZGRt
-dH6JiYqJiIiGiIeDhIaFi42LioaGgXp0bmNTSDw7Ozk8PD1BRUhMUE9QTE5LSUZG
-RkI+PkBCQUZFRElFSUhHR0ZHRUhKSUhJS01LTk9NUVJSWFtbX2FjZGVnZmZnZWln
-amhnZWJiYV9eXF5dXWBfXV5fY2dsbXJ3foCAhYeFhoaIiYuJiY2MjIqOjomKi4yM
-jI2KjYmIiYyQlY+MkJGPkI6Ljo+NkJCQlJOUlJWWlZaXkpSWlpiVl5WUl5qYmJaW
-k5WcpKiqpJmWm5+ep7K3uLm8ube2tre4uLi3ube6uLi6ubm6ub3Au7i5uLa1trSu
-raikl4VxZFtkWFdWVVVWVE9OSktPTk1NS0tNS01MTExMTE5MS0hGR0pLTEtRUU1M
-SklKTEtOTUxNTUxQUUxNTlBQUE9OTlJTVFBRVFNRUE1PUlFQU1NTUlBPT1JRUU9R
-UlFPUVFQUlJPUE1QUlNTVFNQUE5SVFNQUlBSU1BRT09RUE9PUFVRUFBTVFNTVlRS
-UFNSUlZTVFVVWVdUVVRUVVlZVldWVlpYWFdaV1hdXl9bWl1cXV1aW1lbWVlVVFVX
-WFdXV1lYWVtZWFhWVldXVVVWV1tbV1ZXWllfXV9eXVtZXV9eXFxaXV9sXmBdW1pY
-V1ZUU1ZYVlVVU1dgk8DL0tnd4OLm6Ofq6Xp7e3t4c3Z3dnZ2dHl5d3l4enp3d3Zy
-aV9QRUVHQ0JCQUA/QUZERUNFRkRCQUFGRD9BPT9CQ0RGSURFRERHQ0FCRkpOSUpO
-UUtHSk5QUlBQUlBUUlBRVFlWWVNVU1RWVFBRUldUUFJRTE5PU1JRUVFRUEpJR0VJ
-SEpFQUU+PUBBPkFBQUA6Oz8+Pjs8Oz1BPT03OTg5Ojk7Ojo6Ojw7PDo8Oz1AQkVJ
-RkdMSU1TS1FNTlVMS09NUk9UWExOTVJST1VQTk5ZVVNTUVhWW1tXVVVMRT86OTU3
-NzczNjU3NjY1NjM0NDc3OT1EQkBBQkhMTERGSEhLS0hPSExQTVFISlNMT01SVU1P
-TlNXUltXYl1eV2FiXVlaWltdW1lZVlZYWVlXWFtYWVheXVpXV1RVVVNNTUlMTk9L
-SkQ6NTg2ODdGTk1YVlJSUFRdXlpSUVNYWl1gYFlYXFZVVldcWldSUVNXV1lUTFBP
-VkhES0xUW19gX2BbX1laVVRbWE9LT1pYWE5SVFJWXk9WVFFPTFJZVE5TUVFVTk1I
-TVRTV1xWWmJfUkdIQERDSkpIUk9RTVRpgpOlsLGmn52enZeNfXRwcXZyZE5CPDpB
-Oj08PDw+REM+O0tGRDw+PjxCQz4/Oz47PD88PD5AQ0E5ODc/QUJCPDxBQkFFSEU6
-Njc2NTUzNTc2NTM1NzY3PT07PkJGTVRUVU1WVlJST1pWVFFZU1BSU09PU1NQTU5R
-VFNTUlBVWFdVUEpHSExNU1dYVVVOSkpLTEhJSElIRElHSEdJR0ZESEhGRkdISERA
-QUVFQUJBQUE+Ojk5ODs8Oj0/PT48Ozc3Nzg5Nzc2Nzk6Ojc7Ozs9QkRJT1Vgb3p8
-fH2Ag4WGiIiFhIOEhIF6dW9rZ2Vpa252fYWLj46OjYqIhIWIiY6OjYuLioWAenRq
-WEc+PDk6Oz09QENHSUpQUlFOTk1OTklHRURAPT8/PUBFRkNFRENFR0dIREVHSkZG
-SUxNSEpKTE1PUFNTU1dXWFtcX15bW11bWltYV1VXVFZVWFdaWlpfYGRmam9yeHt/
-foCGhoGChIWJiImNj46MioyOi4yNi4yMi4yNkYyMjZCSkZOPkJGTkI2MjY2PkJCU
-k5GSkZSUmJWTlZeYmZeVlpWUlZiWlJaZnKSqqZ+Ogo2iq621ubu9urm5tLGztLa4
-urm4trm6vLu+u7y7vsC+ubq3trW1tbOwsK+pn5aHdmxfWVVUVlVUUk9QTUlLTElK
-SUhJTE1MS0tISElLS0tKSkZITk5OTUlMTEZIR0xQTk9QTk1PUE1PTlBPUE5NTlBS
-UE9PT0xOUE9OUFNTVFFQTktMTlBSUlBSUVJUUFVRUlRPUVBRU1BQUFFQUFBRUlRR
-UVFQUU9SVFNTUVFSU1dWU1RRUk5PUlRST09UVVZVVVZVUlFUVlRVVVhVVlhVV1dX
-W1pYW1haW11bW1paWV5eXVpbXl1ZV1lZWVlaXllXWlhaVlRWWVlYV1dXV1hWVVpW
-W1tfXlxfXVxeXFlYWVlbXF5dXl5cWFZVVlZXVldXVlZVV1yMv8vT2N3h4uXm5+nq
-eHl2d3t7eHl7eXh5eHl5eHt5dnV1dHBoWlFHRUFBQzw/Qj1CQkdEREJFQkJAPkFC
-P0E+PT5BRkRHRUNCQUlGRUVER0hHSUlPUk5NTk9PUVZWVVRTUlJUWVlZWlVSUlFW
-VVZWWFdSUU9QUU1TUlFYVFBNTEtLS0hFRkRERUZCQT4+Pj9CPTw8PT9BPjo3NTk4
-Ozk3ODs7OTg5PDo6Ojs8PDw/PD5CQ0RGRUZFSFBMTEtMU01LTUpQVFRUSk1LS09Q
-T09UUlhaUVNWXFZUUlRQU1NVQzo5ODc6NTE0NjU1NDc1MzQ0NjU3QD49P0FESERG
-REZMSEdHSlNMSk5MUkpKVU9RTFFZT1RPUllUWFZkYV9UXF1cWVleYl5bV1hUWFZb
-WFVYV1lXW2VmX1lZV1lTVlZTUEtKUlFSR0A5NjI3Oj1KUFRUX1dTUlxdVFVYVVhe
-Xl9bWmBcXFVTWVxbV1VWVlZYWlVLT1FTUUpMTVNcYmFdX1hiXlhcVlZVSURQTlZb
-UVJYUktKS0tMTElKS1RaVU1RUUtPS0hOU1RZWFZVXVxUT0c+RkRJSUtNSVBPU1hh
-gJyrsKujnJqfl4+CdW1sbnRrXUlAPTs6OTc6P0BAQTo3Pjo8O0E9PD5AOzw8OT09
-Pz09PkNCQT06Pjo5PEJAPUBJQkBHRDs1NDY0NjU2ODY0Nzg2ODo8OzxCP0BKT0pO
-SlVXWFhQV1NYV1lXUVVUTk9RUFBLTlJUV1FOUlhYVFVQSEhITVJUVFBQUEtKTExL
-SkpQS0dCRERGRkdDRktFRkhJSERFRkRFRURDQkNDQUNAPTs5Ojw/PTs6Ozw6ODQ4
-OTk4OTY2NjY3OzxCSE5QU1liZm92eHh6fYKDhISHiYqFiIeFgX15c2tpZ2NmbniD
-iI2PjouLioWBhImMjZCQjImJhX55cGFRQjw5Ozk5OTtARUhITVJSVVJQUFNOS0ZG
-RUVDQEA/QEBAQUNDQkZFR0VFREZJSUlKSkpKTEhMS0tJTFBQTU1RVFRUVVNSUlNS
-UlVSU1hTU1RYWVteYGVla3B2dnl6e4GAgYODh4SEhoiHh4uMi42LiIqKjI2Ni4qL
-jYyOkJCUko+QkI6MkpGSko+Oj5CQlJaUlZeVlpeVlZWSkpSYmZqWmJiWlpaUmZ+i
-qaigloWEjqSxtbi5vLy9vLe1sbOytbi6ubm7u7i8vL7Avr++vr68ury6t7a1tLSv
-trKtq6Objn5uX1xYVFRWUlBNUUxNTk5MR0lJS0pIR0pJSUlMS0xKSkxGSktKTk5P
-UEpISEtKSk1MTUtNTE5OTE1PUE9OUFJSUFFOT1FTUVNSUVBRUFNTUk5QUlJUVlVU
-UVJQTUtOU1JRUlFRUU9QU1BRUlNSUVFSUFFQUVRaXVhTUlVUU1NQTlFQUU9OUFFR
-VFNUVFRWVVZSU1FSUldXWFZaWlhVV1lWWFpZW1taX1xaWVpiWlpbXlpbXFlZVlZZ
-WFZYWlpYWFZXVVdWWFlbW1pbXF5aWVxbW1xeXV1dW1xdX15cWlxcXl5cX1xYWlNV
-VVdXVVdYWVhYXIu+y9PY3eHj5ebo6Op1d3h3eXt9enl5fH17e3p3dXd2dXFybWRV
-TUxDQ0E9Pj9CPz4/QTxDQz4/QURFQkFAQ0BCQEBAQ0hEQUJFRUVEREZFRkdHR05N
-S0pRUE5OUVdWUk5QVVVYXFlYVFVRT1BQVVVUU1JUU05RVlZWUVNTT09MTUxMT0pH
-SEpHRkVBQD1AQEZBP0JBQTw8OTg4ODk6PDk3PDw8Nzk3NzY8PDs6PT08PkZCREdG
-RUVFUUhDSElNTUhPT1JXTlNPTlJNV1laVFlXV1xPVFRZUlRTVlBWVU5GPTkzNDQz
-NjUzNTY1MDI2ODc1ODY5OTw6OjxFQUdERUxIRkZHUExNSUhSVE9TTFJOUFVQVE9T
-YFZVUF1gXFZUV1lWWVtdXldUV1pVWFheWldYWFdZXmJhWlZVW1lcXlpWUlFSUFRO
-RTk2NDM2OUBOU1dfW09RW2BYWlpYXWBYWVhbXVtWVFVZWFtaWF9YVlhdXFJTT1FU
-U1dXWVldYFVVV1hWWFxeVk9NSFFOS1ZWVFRVUU1NSUxQS0xNUVNVUElPSktLS1BW
-V1ZTXFZaW1ZNSUZIR0xMRUlGTlBUU1BrkKqvrKOcn56aj4R4bmdqcXNqWUlCQDw6
-NTg/PUBDPTs5Oz4/QD0+ODc7Ojs6OThBQT0/Q0JHRTo6OT1BQkQ+PUJBQUhHQTc2
-MzY3NTMxNDU0NTc6OTk7PDxBRktNTE9IT1FUWVNWVlhZWFRUVlNOUVNUUUxNUlNU
-UFNOUFVVUUtLS0pHTVhWT0tLSUhKTU1LSExIRkZFR0tNRkhHR0dHR0VERkVHSERB
-RkVEQURCQUBCQUA+Pzo8OTk4OTw4Njk3NjM1Nzk7Oz1AR1JYX19hZWtxdn17eXyD
-goSFh4eIiIeHh4V/fHVybmpnaGdrdH+Hio2OiYaEhYaHjpKTlJOPiYeDfXRpV0dD
-Pzw5Ozs9PUJJSkxNUFFUUlJRTk1OSkdHR0dCPz89Pj5CQUJEQkFFR0ZKSElKSEdJ
-S01MSkhHS0xPTktNTVBOUFJRT09PUFBTVlVXWldWXFtcXGJjZ2pxdnl7fHt7fn6A
-g4aHh4SDhYaGhoiIiYeJiYiJjIyKiYqNj5GSkpCPjJGUkJCNjo+Tk5KUlZOUlZSW
-l5mUl5eXmJaVlpmbmZuXlZWWk5qgpqunnpeXlJCUqbW4u7q3urm3srGxsrS3uLm6
-ubm5uru9vb2+v728vbu6vbu6uLe2ure7vLm1s7CtopGBbmFaVFVWU1FSUFJRTk1Q
-TktMS0tNTEtKSktKS0pLTElHSUlMSktKSElKTU5OTUlLT1BMTk1PUU9QTlBNT1BN
-UlNTUVFPUFBPU1JSUFNVVVFTUlJTU1JRTU5QUFBQUlVUUVFSVFNQUVRSUVNRU1RS
-UlJRUlZXWFRQVFFPUFBPUVRTUE5NTlNUVVZYV1NTVVRQVFZVVVZWWVpZWVtXWFZY
-WlxaW1paWlpYV1haWlxcXVlYWVpaVlZXWllZWFhbWllZWFdYWVpaWlldXFteW1xa
-WlpbXWBhXVtfX2BcW19gX19dWVtZVVVYWFhZVlhXV1Vdhr7L09rd4ePl6Ojq6Xt6
-enl4d3h6eHl6eHh6eXd6dXZ1dHJtX1VMSUhDQ0NAPEFHQ0A9PTw+QD9DRD88QURB
-RUM9REFDRUNFRURCREdGRkpHSEZITk5LSU5NUVFQUVRQUlRVVFlWV1VUU1NRT1JS
-VFRTUVNQUFBUVlVRUlJST01OT0tMSUhJSkdEQT48Oj1BQj89Pj86Ojw9PTo7Ozo7
-ODk4OTs7Ojo5Ozg7ODg8Ojk8RUdER0ZERERMS0pMSEpKS1VRTFRNVlBSWVFTUlRY
-WFxWW1NTVVZUU1NbV1JQUEg/Ozc3MzU1NTY1NTg0MjY5Ojo5Ozo4Ojw8Ozw9QUNF
-S0hNTktPSkpJSVFTVVVNTk1JUk9VUlJfUVJQWltfV1BTUldRU1tjW1dYVlVWV1dY
-WlpVVlZZWVdZV1JXW11XWlpYW1VPUE9NQTk3Nzk3OkpVU1NVUlFXXV5dZF5gWlhV
-U1hcX11YVllbWlhXYGBYVlVZWVZQUFpeYFpbW1RWVE9TUFJZW1hWS0pITEtRU1VU
-U1pWTkxOSUtOTE9STlJPSkhLR1FRT1JPUFVVWVtZVlJMREpLTE5CSUhTSE5QTGOE
-orGupJ+bm5aThn1uaGhqb3BjUEVAOjc3OTw+PkFCPTo9PkRAP0JEQkBBOTc4PDs8
-PDs7PENDRTo5QD1CQENCQUJBQEM/OzczMzY0Mzc0NTg0ODY3ODk6PUZIUktKTU5T
-U1dZVV1XVVhaWlZXUk5OUlZQS09RVVVSUlJUWVxTTlBOS0xSVlNNTU9ISE5OS0hI
-S0hHRkdHSkpGSUpGRkZGSktKTEZEQkBCQ0ZGQkVHRkJBPjs7Ojo5Nzg2NDU6NjU2
-Nzc2Oj5BSVJZXmNmamduc3t9fXt9gICFhoeHhoiJh4eGg398dnBrZ2lnaGt3gIiM
-kY2MiYSFi4yOk5WVkY2JhIF7b1xNREFCPj06OTxDRkpKS05TVVdWVlVUUlBQTEtJ
-R0M+Oz5AQUNCQEJDQUJHSEdHSElHSEtLTEtKSUlJTUxMS01MTU5NUE9OT1FQVFVY
-WFRWWFpfYGJjZmhscXZ3eXt/fXp8foCChIOHhoiGiIeHhoaJjYuJiYmHiYmKjI2N
-kJGSlJaUkY+RlJCRkpGQkpSTlZeWmZuXl5uYlpiZmZqZmpqcnJuamJmdnqewq56S
-kJiXm6Syur28ube3ubSxrbGys7e4u7y8vLq6vL29vb++vr26vby7u7y6ubi3uLm4
-ubaysrKysKWSfWtcWFVUVFFQT09PTk9NTExMTktKSklKTEtMS05LS0hISkxLR0hJ
-SUpJTE1MTEpLS01QT05OTk1NTE5NUE9OT01QT1BNS01PUVNSUlRVUU9SVlNTT1BR
-UFNQUFRWVlFRUU9RVVdWVVNUUFBRUlNST1JVVFRRU1JOUFFRUlFRVVVTUlNQUVJU
-XVtWVFNTVlhXWldWWFZVV1hYV1lYV1haWFhaXl9aWl1eW1hYWlpYWFlXWVpaVVhZ
-WlhaWFlbWlxXWFpZWVpbWllfWltbWltaXFteX11bX15eX2JdXV5eXVtbXFtaV1ZV
-VlhYV1lYWV6EvsrS2d3g4+bn5+npeXh6fHl5dnd4dXd3dXd7enp5eXh1b2pdUUtJ
-R0FAQT0+Q0RFQTg6Pjs/PkFBPkNFQj5BP0REREM9PD49QEA+QkNEREtKS0xLS01O
-TUxOT1JVUFNUVlVRT1VYWVdXUlJUT1FPU1VPT01PUE9WVlFSUFJRTkxKTU1PTEtJ
-R0JDR0RDQEBBQD8/PDs/PD0+Pz44NzY1NjY5ODw5OT08Nzk5ODs8QUNBQj9BQUND
-QklLSk5KSU9TXlhLT0xWUlFaVFdMTVdVW1VgWVdWXV9dWFtWUlFSTkVERTk1NTUz
-MzUzNjU0ODg1ODY2Nzk6PTo6P0NETE1JR1RSTFBISkpKVVVLSUpUTU5STVZRVFpT
-Vk5XXGBYUFhVWFJTV19kXVdVUFdVVlZUW1xbWlhZV1pcWE9TWlRWWVlXUE1MTlJD
-PDo8Ozo+P0pOTVhWVFJWW15oYFxbXlpUVlpZXFhYVVdXUFlcYFhUVFdaW1RQXVxc
-XltXUVBSTU1RUVRYVVNNSU1RUFFWVFFST05QUk9OSU5QTk1KS1BVSkxLTE5LT1RO
-VlRZWlZYVE1FRkVHR0ZLSU9LTlFOXWaJo7CspZ2WlZOJfXFmYmFqdG9eS0E+OTk4
-ODtBQTw9Ojg5PD5EQT8/P0BAPDc6OEE+Pzo+QENAPDtBQENDRkNDR0A6QEQ8ODU0
-MTQ3NTg3Mzc2NDg0OTs/RUpTS05JT1xYWVlUXFZWVVtdWVlXUE5TUlJQT1BUVlZR
-VFVYW1NRTkxMUlZUUEtMTk9PTE1MSERESUpLSkxJRklJSUtLRkRJSEVCRENERUZE
-QkNBQ0JEQ0FAPjk6PDo3OTc3ODU2ODY2NTc+Rk9YYmdqaGhna3B3e3x+gYGBgYaH
-iYuKi4uKioWCfXdwbmpnaWlsb3eCiIuMjYmJjo6SkJCTl5ORjIaBfnZnVEc/Ojo6
-ODo7P0RJSkxPUlVWWVhWVlVSVFFRT05JRkVARD5BPkBCRERDQ0ZGRUdFSUpKSkpK
-SklOS0xNTkxOT05MS0tMTk9NT1ZWU1dZWl1eXWJkZ2hqbm91dXV4fH5/gYB/gYOC
-hYmGiIiHh4iGh4qLi4qMjY2Li4uLjZKTlpeWlpSPjo+SkZOQkpGRlZiXlpaZmZuZ
-mZibl5mZmJiampual5aYnKCpr7KqmImGiJWosbzAvLu5u7i1tbGwsbKzt7a8vcC/
-vL29vr6/v7q8vLq8vrq4ube1tLe3t7S0ta2ysrK1tKyginBfXVhTUlBOTVBMTU9M
-S0tLSUpLSkhJSkdPT05KSUdNS0lJSE1LSkxKTk9MTUtNSk9OTUxPUU5NUE9OT0xM
-Tk5QUVFQUU9QUVJSVFVSUlNTU1JRTlJTUFBSVFRSUlFTT09RUlBSUVNTUU9QUVNV
-VVVVUVFSU1FVU1JSUFFSU1hUUVFPUVRQUVJRU1NUVVZYWFZWWlhYVFFVWFhXVlhW
-WFhaWltbWF5bV1dYVlhbWFZYWVlaWVlYV1laWVpZV1taWVlaWlpaWVtcXV1cXF1d
-YF9dYF5dXV1dYF5bXV1cWlxWV1dYV1ZWWFdYV1ZZXIO9ydPa3uHk5efo6el7enh5
-ent8eHd1dnl2dnh6enl4eXdwZ1pMR0VFSkVEQz09Q0hKQ0A/P0I/PEJCQT4+PTpA
-RURFQ0RHQ0RBPT5ARkhJSkxMTk1JS05QTklKTk5QUVNUU1FXU1ddWVVRUFFTU1JQ
-UlBQT0tRVVVVU1JSUFJNSktPT0tJSUhHREVFQ0dEQkNAPj0+PTw7Ozs6OTw2Nzk4
-ODg6PDs5OTg3Nzs4Ojw7Ozw9PURBQkQ/RktGS0tISUpRVU5RTUpTT1lTV1VOVlNc
-WV1cWVddY1xdXllPT1JVT0pAODc1MzU5Njc6OTs3Njg2NTc7ODg4OTg+Q0dPUEpH
-VVRMUUxRSkpUTklOSVNSTU1KV1RSWlFYVFRVWVdNUFJeVFRVYWFdXVZQUVVYV1VZ
-VFdYWVhZXGVhVlRbW1pXUU1LTFBMTkhAOTg6OTk7Q1BSVVldWFpeXmFhY2VjXlNR
-U1BXWFVVUVZYWFdWU1NTVVZaV09WWVlaWlNQTUtOUE1OUlhZVFFXUE1NUFBQTlFS
-UFJWTU1OS0pRSkVJUFBNSkpNUEtPVFVXV1lXVldXU0hGRUdJSVBJSUhOVE9RWnWU
-qauroZiVkYl8c2xmY2VrdWlSQTs6Ozg7OTw8Oz08PDw7Ozw7PTtAPj09Pj1BQUJA
-OTo+QUQ9OD1FQUM+RUJCOzY9Q0E4ODU0NDY2NjQ0MzI1Njg4Oz1ESkpISkpTWlhY
-W1ZZV1hTVldOUVNOTE5OUlFQVFlYU01XV1dWU09MSktOUlRTUU1PT0xQTklFR0ZL
-TU1LS0dHR0dJSkpGRUNGSExHQ0RDREVBQD9APz5AOzs+QD88Pjw4NjY0Njc5Nzg7
-QUpTXmRmaGdoaGxwdHp7fXmBh4eHhoyQj5CNjYuJhYJ8dnN1b2xtbGtze4SJjI6M
-iImOj5KUlpWVk5GQi4B3a11RRj5APkFAQUFCSEpLTVBQU1hYWFZWV1NRUlBQTkxJ
-RkdDQT5AP0NDQkNARERJR0lJRkhHSUtQUE1MS0tNTExNTE1NT1FSUFBVVVVXWF1f
-XmNjZGdsbG5zdXZ1dnt9f4CDg4F/goOFhYaGh4uHhoWJi4mKi4mIiouNjIuMkZGR
-lpSUj5GQkpKSkpWTkpaXl5OVlpmbmpiYmZmZmJqWlZeYl5iZmp+jqrO5s6aTgXeB
-mK64vby7urq9ure0srSztba1t7m9vsDDwr69vb+9uru+wL69vbi3tbSztbq1tLe4
-rrOztrW1saylknplXFZUVVNPTU9PTk5NTU1NSkhISkpJR0lKS0lKUEpKS0xPTVBL
-TUxKS01KTExMTkpPTktQUE5PT09QTU5NUFBSUlNST1BPT05ST05PUVFTVlJSU1NS
-UlRTUVJTUVBRUU5RTlFSU1NQTk5NUFVTUlBSUVBRVFRTU1NSVFJQUlVUVlNUVFFS
-UFNVU1JUVVVWVVZXV1VWV1dWVVVYWVhZW1pYWVtdXF5aW1hZWFZZWVhYWlpYV1dY
-WFZXW1tZWFlYWVhWWVlZXFxcW11cXltcXltdXlxdX19dXl5bXVtbWFlYV1VXV1ZU
-VlVVVVdgfLvJ0dre4eTm5+jp6nl7e3x7eHp6dnh5dnd4dXd4dnZ5cmlgUkhCQEJH
-QEM/PkFCRERBQERDQz4/QkU7PD1CPkBBQUFAQEFDR0I9PENCR0dISEdJTklMT01P
-UlFNUFBRUlJZVVVWV1ZUU1hSVFZVUlJTUlBRUVNTVFVSUFFOUE5NTExPTUtIREhH
-RENDQkJFQj5APkE/Pz87Oj08Oz04Nzc4NzxBODQ1Nzc4Ozo6Ojg2OTs7Ozs+QT9D
-SUdISEZGRkpQS01LRElHTEpLVVRaWVhZXFtZVlhgX1xXV1BVblVUSkA5NzQ1NDc1
-NTQzNDQ0NDg1Njc2NTg7PD8+PkpJTUZNT0dPS1JNSFBLRkxIS0hLSk1UVFlgVl5X
-UVJUVUxRT1ZVUlFZXVpWVFJTVllZV1dUVFtaWltfZl1VWFtcVlZVT0tTUElIR0Q5
-NzY3Njk/SVJWWmxlWlxdWWBpYV9kV1JUUVJcWlhTT1NSV1pTVFNTVlVXVlZVWFJU
-WlNPS01VVVpaWVVYV1VWUU5QTk5PUFVRTE1LS0xIRU5KSEpOTk1JR01MSVBVWlxT
-VFhcWFpZTkdDQEFFS0VISElTUU9WYniWpqiooJeOhn92a2NgYGZwbV5JOzs/QkI7
-ODs9OkA+PDw6OT1APj4+SDw6Oz4/QkA7O0A9Pz5CPDs8QTxDQkJJPzpBPzo8Ozg2
-NzQ3NjczMjQ2Nzg7PT9HS0RJR01SUldSUE9OT01VWFJWVE5LTlFTUlBSVldSVFhW
-VFJWUVBNTEhLTFJUTExNS05OSUZITE1LS0pKT0pGREVGRkdIRklKSUlERURCQ0NC
-Q0E+PDk9QD9BPzw6ODo4OTo9Ojg5PUdQW2VqamlrbWttcnd6fH19fYKDiI2Ojo2N
-jo6LjIiIhX58eXVzcW9sb3d/hIyOkI+Lio6QkZOYlpSTjYuHgXJkWE1IRD89QEJF
-RUZMT0xPUVNUVlhaW1lXVVNSUlJPTUtNS0dDQUJEQkJDRUZGR0hFR0dHQ0dHSEtI
-SEtJS05LS0xSUVNQTlFUUlVZWFlcXWFlZ2hsbW9zcnV8fH9+fX+AgYGBg4SEg4KD
-hYOCh4eHh4eJi4qKi4uOjI2KjpGSlZWUmZaSkpeWkI6TmJWYmpiXmJiampuXnJmY
-lpaZmJqYmZmbnJ2jrLGwsbSwopSDfIeXrLu/vr28urq4s7OzsrK0trW3ubu8v8G/
-v7++vr27ur27vLy7ubW2uby7t7e1ube1tbe2tLW0sa+mmIFmWldXUlBRT0xOS0xO
-TVJPTUtKR0lKTEpKTUpJSUlLTUxJTE1PUExJS0tNS0xOTU5MS0xLS09PUVFTUE9R
-UFNSU1FRVFJNT09OTlBQUlNSUVBRT1JUVFFPUVRUUVNRUE5QUVFSUFFPSk5RUE9P
-UVNTVlFRTFBPUVJSUlJRT09SUlBQUE9RUlJTVVVTVFVWV1ZYWFZYV1hVVVRTV1ta
-W11bWVxaXV1aWlpcWllZWllaXV5bV1dXWFpZWldXWVpZVlVXWVlbXFxaXF9cX11f
-XFxcX15iYWBdXFhaW1pfXFtZV1RWVllXU1NXW2Z9usjT2d/h4+bn6OnpeXp7enl7
-d3t7enl6eHp4eHl1dG9rZFtRSkdDQkBAQUY/PEVESUREREM9QUFEQUVBPT9BPT8/
-Pz8+QT5BRENHQkNFRkhJS0xNTkxQTE1SU1NQUlNRUlVWVVVZWlZVU1VWU1ZWVFJQ
-VlJPUFNXWFZVU1RUUFFPTkxISEdJTEpGREJCQUBAQENHQj07PTo8OzU5Nzc4ODg3
-Nzg1ODk4ODc5Ozg6ODk3OTk4PD5FQj5AQEBFREU/QlFPSklIR0dHSkxUU1xZUVVZ
-YFhPU2BaV1RYUU9UWFZKQDo3NzI0NTAyNjQ0Ozg4Njc2Nzk5ODw6Ozo+S0ZLRUdF
-Q1RMWlVMUU9KT0pPTFVWSlVTVFtTWlNUVlJSTFNOUlRQSlRUWVdaWVVSUldZWlVT
-U1dUU1pdXFZXW15VVVVOTldSTUdHRzo2NzU2OTk+TV1eYmJYX11aYGleX2lYTVJR
-Vl5gXFhRUVNYXFJSWl1ZVlFWWlJSU1JcVVhTTlhYWFNTU1lYVVhSTU9RUVRTT05F
-SEhJRk9NS01OTU9KS1FJTFFRUlNUV1FTV1xdX1lRSERBQkJHRkpJRVJRU1dVYnqW
-pKekno+Jg3pybWhkZW1yZ1BEPDw+PTs3PDpEQ0E9Ozs7PDs/PEdCOjk9Oz1AQkA9
-QEFEQj06Ojw7PT9ASUhAPEFCPDg4OTczNDM2NDQ1MzU2ODk6PD5FRk1JUE5WXlJU
-UkxMTFVVT1BSUFRVVk5ISlNVVVRWWFRUVltYWE9LTUtKSkxJSUhLTU9NSUlMTkxI
-TU9NR0NHSElLS0xISkhKRkNBQ0RCREJCQz49PDtAPjw6ODg5PDs5Ozg2OkBLWGNo
-am1tbWtubXB1enp7e36Bg4aGiZKRjo2NjY2LiIiFhoKCd3Vua25vdH2DjI6NjIyO
-kpGQkpOVko+NiYN4aV5XU01LSUI+PUNHTFFSU05RUlVUWVtcXFZUUlFUU1FPTkxK
-R0ZCQUBAQUFEQz5CR0pIR0hESUhHRElKSklLS05MUFJVU1JRU1VaWFxdX19iY2lr
-bW9ycnR3enp5e318fX9/gH6Cg4SFgIOFhoeGh4mMiouOjo6PjY+MjY2RlJKSlJuX
-lZSUlZaQkpKWl5qTk5aYmpqbm5yampyam5mWl5uenZ+lqK2ytby9tqmcmJiRjJit
-uL6/vru6uLKuq62vs7a2t7e6u73BwcHAwcG/vcC9vLm4u7e4t7i4ur29urq4tre6
-uLa5trWzs62onohtXFVUVFJRT01NS0tKS05RS0tKS0xPTklJS0pJSUdLTEtJTEpO
-S0tHSUpNT05QTk5LS0xQTlBPUFJRTVBRTk9OT05PUlJPUlBQTk9QUFFRT1FRVVNU
-VFRTUlFWU1JQU1JRUFNVV1RPT1BRT1FSUVNUU1JOTlBPUFBQU1RSUVBSUVNSUVNR
-UVNVVFRTVVRWVFJTVlhXWVdXU1NTWFtbWl5dXFlZWFhZWFxYVlpYWFlbWVpZWVZX
-V1dYWVhYWFdXVVRZW1pZWlxdW1peXl1cXGBgX19gYV9fW1pZW1lcW1hWVFVXVVlW
-U1NVaoS8ytPa3uLk5+jo6el4eXh1eXh9eHt7e3t4eHZ1eHh0cm5jWEtLTEY/PTs8
-P0NGSklGREVBOz0+RD9BRENAPkE/Oj0+P0A9Pz0+Q0NCQkRJSUlKSUtMTEpLTFBU
-VlVUVlZSWVteXFpYU1RXWFlVWFlXV1VTVVJPUlZaVldTUVJRUVFOUE1KTE1LSElF
-QUVEQkVCRUNERD8/Pjs8QDw6Ojo6Ozw5Nzk5PDc2NDQ4Ojg6OTk4Ojc4PEQ7Oj0/
-PUNCRkNBSUtHSERJTEdKR1BLUVlVV1ZiV1BLU1JVVVZWU1RbWVBJQzo9NzY3NzYy
-Mjc3ODg0MzU3OTk/Pjo8Oz1ERk9FRkVJSklXUkxWUVBRSlFPV1ZHTU9SXlVdVVdc
-VlZMUk1QWVJRUFNZWF5eWU9PU1VWVFRSWFBPVllbWlZYWVdUVFFYXFhTUkxKQTo0
-MjQ1OT5DVFhPV1paYFtgX15gX1FPUlRUWltbVlNPUVdWUlhXXVZVVldaVlNWVllZ
-VFNPTVNRTVFPUlNUUVVUUlhVU1ZRS0lLRk1LSFFPTVFNTUpLT01LUlRMTVNVU1BU
-V1tiWFVQSUNDQEVDSEhHVVJYWlBYXn6Ypqiil4qBfHp4cGttcHNtYFBDOz47Ojo3
-OT07ODo7Oz88OjxAREA8Pj07OjxBQTw4PUFEQTxAPTtBRUVGR0U+QEI+ODUzMzc2
-Njc4ODQxNDY3OTs8P0RKR0hSTlVfVVtRT09RU1VSUVJTU1JQS05OTVJXVVZUUVNY
-VVRSTUxOSUtMSktKTEpJTEpKRkdLTE1NT01NSklJSktOTkpGREdGQUFERUREREI9
-Pj4/Ozc5ODo5Njc6OTs5NzpBSlhja21vb3RxcHBucnp9eXl6fH+EhIWGjo2Njo+N
-j4+MiYuLhH54cm5tb3BzfYaLjo6MjpKSkpORlJeWkpCHfnNrYVdWVVRMQj0/RE9V
-UVJQUVFRVFtcXF1dXFlWVlZXV1FRTk9NS0VDQ0FBQUFAP0FER0dHR0dHSUhKSkxL
-TEpJTlJOUFFUVlNWVltcYGBfZGhnaW5wcnR2dXh6eXt8f4GBgoKAg4GBgoODgoSE
-iIiHio6Ni4+Sk46KjY6Qk5WVkpGUlJOSlpKSlJWTkpOYmJiYmZial5iZm5mZmpye
-nZ+foKGip7Cyt7e5vLyznpKcn5aVorO5wcC/wLm2sa2rrK+zuLa4uL2/v768vMDA
-wsG+wL68vra1tLK1tri8vbi6ubi2tre3tra1tbW4trGroY1tW1hUVFNNTk5KSktP
-T0tLSkpMSktNTExLS0tLSkxKSUlMTUtLSk5JSE1OTk9MTE9RT0xNUFBRU1BPTU1P
-UFBTUlFPUlFOUVNRUFFRUU9OUFFTU1NTUlNUVFJTUU5QUlJUVVRUU1ZST1BPUFFS
-UVBOUVJPUFJSUFJRU1NRUFFSVFNSVFNUVFFRU1RXVlVXVFlYWFhZWFhVWFZUWFdX
-XV1cWFdYWVdaWFdaVVVaWVlaWVdaWVlYVVdaWVlXWFlbWFdYWVpdW1xcXlpbX2Fj
-YmBhYV9fX15cXl9aWFlVUFZWV1lYWVlXU1VvorzJ0tre4eTm5+jq6nl3d3l4d3h6
-ent8fHh2dXN1d3NxbGFUSkdGSEM7Pj89QUhJRkZFQ0RFQ0JCQj09PTg8PUE7Oj4+
-Qz0/QEFDQkVHR0pJSUxLTU1MTExLT1FSVFNTVVRVWFdXV1ZRUlNWVlVXWFlXWlta
-VFZXWlxZV1VTVVBTUU1NTEtNS05MR0pMSEVER0ZISEVERD4+Ozs6Ozs6OTk6Ojc1
-NTY5ODY2Njg2NzY4ODk3PDo4Pjw8PD9CRUFBPT5GRkVLTEZLTVFKSkxSW1daV11Z
-SlJWVlNSV1pUU1dXVlZQQjo7OTU1NDMyNjc0NTZINzc3OTo8PT07QUJDUkpOS0hN
-R1RRTlRQTlFIUFJVVEhSUVJgV1ZSWmFeYFJVTk5VUlJQV1lUVVZQSk5OUVZYVFBP
-UFNUWV1XUlRZWFlXVFVcXlpWWFBCOTIyMzc2PUNNV01TWlpbW1paXl1VVFZXVVZZ
-V1lcUk1OU1BVXVlaVlNTUVNYVVVbUk9VT05JTE9SUU9QUVZVUlNVWFlUS05LTFFU
-V09SUFNSUVRNSkpNUk5OV1FNVFpSTlRcYVxYW1FKQ0JER0NFR0hTTFFUTlVWbYma
-paOelImBfXx7dnRxcXBpWkY9Oj1COTQ5Ozs7QTk6Pzw7Ozs/QEJDPDo6PkBFPjtA
-QkRFQjk6PUJGREZEQkFCRj04MzQ1ODk1NTM5NzQ0NTk6Oj49P0JFSFRRWFlSXFNS
-VlZXVVRTU1ZTUFNNTU5PUFRUV1lRTk5SU09MTU5PUVBKS01MSkhIS0pJR0hDR0tL
-TUtFSk1OS0tIR0VHRkhEQ0NGRENBPj89PT87PDs4Ojw4OTo4Njk9QUxZZG1vb3By
-cnFzcnR2fH99eHh7gISHjJKJjpGRk5WSkpKOiomIgHx0cm5vcXV+hIeLjo+SlZiY
-mJWUlZmSjIJ4cG1jVE5NTkxIQ0ZMVFhXVldUVldYXWBiX19gXFxbWlhXU1FPTk1L
-SkNBRENDQT5DQkRFRUdJSUhHSE1OT0tPSk1NS1JRVFZYWlxcXF9jYmZnbGxucXJy
-dnZ4d3p7f359fX+BgYOEhICAhYaDgoWFh4qLjI+NjY6MjY6PlZOVlpSSk5GRk5KU
-lJaVlZWXlpeZmJeXlJWam5ydlpiZmpqfn6Omp6qvsre8u7y9uKmQjZGQkpuoucDA
-u7u+u7GvqqmusLK0tbq8v8G/wL+6u7+/wL29vcC/ubW2sLK3tra3tba0tbOwt7a0
-s7S1tLe2sa+roIhpW1dVUVBQTk5JS0lMTUxKSElJSElJS0pISktNTUxMTktNTEtK
-SktKTk1NSktKS05MT01OT1BUUlFRTk9QUFBRUFBQUVFUU1FRUVBRVVJQUk9SUlJS
-T09OUFFTUlJRUlNVU1RTU1FQTk5QT1JRUU5TVFNSUVRSU1JSU1NSU1JVVVdTVFVX
-VFJSVVdXVFFUWFxYVVVXWFhYWlZVVllZWlpZWVhaWlhXV1hYWFlZWlpYWVdXV1dW
-V1hYWlhXWlxdW15cXVpcXVxcXF5dX19iYF5gXl5eXl5eXltWVltcV1RWV1hYWlpa
-WWqkvsnT2d7i5Obn6Orpd318enh3en1/fXt8eXZ0dnh2dHBnWE9HR0RISUVBREVF
-QEhKRUNAPjw9PTs6Oz09Pz9BQEA9QD48QENEQ0I9PkFCREdHSEhJS0lMT01OUVFO
-U1JTVFVUUlVXVFRTVldVVVlYWVxbWldZXFlcXFtZV1ZXUlFRT1FLTFFOTU1MTEpJ
-SUpJRkhISUxHREE6PD08PT5AOTU0NDU2ODg1MzE1Nzg4Ojs5Njk6OjY5Ojk5PT9K
-R0RHQ0lJREtOSExIUUtKSkpTUlhYXWFZVFdXU1ZXXVdUVVpbV009NjU1NTc0NDQ3
-NjU0OTg4OTg6Ozs6Ozk8PUBOSUtHR0pJVVBNUExKUExYVlxYUFdOT1RRUlBRWV1e
-UVdRUldUVlJVV1VSUVNUTE5RVE5OSU1TUlVVVVRRU1ddYF1cWVhdWFhdV0w9OTQ2
-NTU8PUBLUk5NVldYWlRYXVhXWlNRV1tXV1xXTk9PUlZaW1ZXVk5PTlZZV1ZQT1ZU
-VUxMUFVYVlZWUFJTTVVWUFRRTUxPVVpVU1VKUlZRVlBJSUtQUFJZWE9PVU1OWF1f
-XltYUUxERUhKREJHRktISlBOT1VZcZCjqaibkYmCgH51cm5tbm9hUEE5Ojs6Nzg6
-OUBCOz0/OTw7O0BHRUY7PD07QURDOzpAPkdBPjo9QkFCQ0BCP0NGQTg0NDg3NjYy
-NzQ0NzY2Nzk6O0RIQ0VIUU1TV09XT1ZTV1hXUFNTU1JSVVNPTk5PUlRSWFJNTUxN
-UVBPVlRQUlBPTU5LSUhKTUlKTUtNTktJTk1MTEtNTEpJSUZGR0lHR0ZDQkNBPj0/
-P0A+PTo7Ozs6Pj84OkNKW2hyc3NycnV1d3FzeHp8e3t9fH6DhomMjpORkZKRlJWW
-kZCKioiDfHNwbWttd36DhomNkJaYmZqanJSSkouDe3RyaF1ORERIS01QUFdbXV1Z
-WVtaWl1eYmVmaGZmYV5cWldVVFRRTkxLRkJDQkJCP0RERkVHR0pGR0lJSkxLTUtN
-TE1PT1JVWF1dXF1gZWhsbWpydXR0dnh3enl6e3l9gH2BgYB/gYGBgoKFhYaGiIiL
-i4+OjYyPjoyNj5STlZSVlpSTk5STlJSVlpeXlZKUmJaYmJeVlpmfnJuYmJean6Kk
-qKytsbe3ubu7vLy0oo2Ee3qMorC3u7u9uriwrq6srKyus7S3u7u8v77AxMO+v76+
-vLy9u7y3tLOzsrW2t7e2tLO0trS3uLS4uLS1s7Czsq+rn4VoWVVTUVFQUlBLSkpN
-TUpISElGSEpIR0xMSklISklKTEtMSktNSktOTk1OSkpMTE1NTkxOUVBQTlFPT0xL
-T1JTUlBSU1NTVFFQUVRUVVVUUVFPUlBRUk9RT1FWVFNRT1BTU1VTVlFPU1JPT1JT
-VlBRVFVRUFNTUFJVU1FQVVNRUlVVUVRWVVVSVFNVVFVXV1VUUlVYWllXVVRYVVhW
-V1haWldVVFZWVllaV1daWVdaWVdVVFdWVllaWFpaXF5dXFtcXVxeXFxeX19fXl9e
-X15gXmJiXl5dXF1dW1lVVlVUVVdYWlhXa6K9ytPa3uHj5ujo6up8fHt5e319enp7
-enh5dnd4d3ZxbWBUS0pJSklJRUhERERFQ0VHRj09QEFDOzk4PD1AQkA6PT9CQUVF
-QT1DRENCQ0dIRkZGR0dISUpKTlBMTU9OUFBUVVZUVFNSUlFPUFRUWFlaWllXVlda
-W1tbWVlXVVhVVFZSVlRQUE9RT05KSEhISUtJTEpHSkNAPjs6PkJCQDo8Ozo7Nzg2
-ODk7OTY1Njc5OTg4NTU5ODc5PTw8Pjw9PEVGSEtGTExETlNSTEpQS1BVWFpaY1lZ
-XF9ZU1FaV1JWX2BTR0I/NjY1Njk6OjQzNjk1ODg1Njo3PDs9OTo8PUJETUVHTkhN
-TUxUUE9UTVpYWVdOVk9LU1FZVlRWVlpUWlFUWVpaVFJVVVRTU05NUFFTRlNOT1JW
-V1VVT1FWW2BdWlhWVFZWXV1dUkc/NzQ2OTk4PERPU01WXV1eWFhcXFZSTlVYW1hW
-WV9WWFZcXFlaU1FRTkxVWFhcUlBUV1FXVFJQUFRWWFdSUVBMTlNRU1dQTE9RWFVU
-V09LUFJQUUtSUktLVVdPUUtMUUxOVFhcWVVSS0RDRUZFQkhHSUZPWFBQSkxgfZin
-paKbkol/fHNraGZrcGlYRTw5NjY2Ozg/Q0A8PT06Ojw8P0RHQTo6ODk8RkQ/PD5C
-R0dEPz1CP0I9QkU8QkU/Pjc4Nzc0MzY0Njk3OTg5ODc6QkNHREtMTFZWT1hSVVBV
-VVZSVldVT1RXVlBNT01LUVNPTE5TWlRTT09VV1RUUU5MTE5OTExNTE1LUFJQTEpP
-V1FIT1BNTE1OTklLTUpHRERGQkBCQkBAP0A7OTk5PDg4Ojw/SlhkbnZ4fXh6d3Z2
-cXZ8f315eX+ChISIiYuNkpSUkpGTk5WSkIyLiId+dHBuaWt0f4eKi5CTlpeYmpqX
-k5GKhHxybmpiWFBIR09WXWFhX2JhYWNbW1xeXmBiZWhpampkYmJdWllXVVJQTk5M
-Rz9BQUZNSERDQUNFRkZHSUpKSk1NTE1PUVNTU1VYXGJhYGhpamxvb3J2dXN5enp7
-enp7eH18gIGBf39/foGDgoSGhYaIioqKioqNjIqOj5CQlZeZlZeWk5SVlpaXl5iX
-l5aXlpaXl5eYmJubmpydmpmdnKGmqquvsrK2tre4vby/tK6ijnhwd4ynt7m7tbW6
-ubO0sLCxsbO1tbe4vLy8vL7CxcTBv767uLe4tbOys7i4tbm7ube6t7W3tbe4ubi1
-tbS0srS1sbGtnYJlWFVVVE9QTlFOTElMTkpJSUlGQ0hOS0tMS0lLSkdKSklLS0lM
-TlBOT09OTk1KTk9MUE5NTlBPTVFRU09NUFNSVFRSU1NQT09QUVRTUk9QUVFSVVJR
-U1JSUFNWVFJSUVJRT05QUVFPU1VWU1RUVVJSUVFRU1RRUFBSUFNUU1ZVUVBUVFVY
-WFZVUVNUVFVXV1lZVllZVVZXVlhaWFpXWFtaW1xbWllWWFVXWVhXWFlaWllWVlZW
-WFlaWFhhXVxYWVhdXVdZWl5eX19eX2JhYGFfX15fX15gZWNdXFlaV1VVVVdaWVpl
-oLzL09rd4eTm5+jp6X17eYF+e3t5enh5eHd3eHd3dG9mXlNISEdGQ0NISEdGQ0ND
-QElEREdFRUM8Ozg3Ojw5QUQ+QkRBQj45P0BCRUJEQkZGRUdGRkVHR0xMTE1NTUtO
-UlNYWFVUU1JTVFRTU1VeYFtXVlVWVltcW1xYVFRVVlFTVFVWU1NTU1NOSU5KSklL
-S0lLRkRGRUVDQD4/PT48Pjw+Pjk6PDg5ODc3NTM2Ojc2MzQ1NTQ1NjpAPz47PTs8
-QkRGUE5KTEpNTFJSTldUV1ZQV1dmWV1hYFlWVFddU1FZWVRJRT82Njc7Pjc4NzM0
-NzY1NDc1Nzk7PUI5PEA+RkVLSElMR09MS1tTTlFHTExUVktORU5ZVFxZVVpZXlVc
-WFdWWVtUWVVST1FQTk1QU09SVFNRUE9RVlhRT1FUW1paV1VZXVhaWl1aUUk9NzU5
-ODo7PURRVE9ZXFpdXmFcWVZUXF1VV1RcYV1ZV1dVXFdTUExPT1dUU1xUTlFUTlRa
-WFJSUFJaUlNMRUlMUVFPUVVSUFZUVVBRU05QUlBOT1JTT0lPUU5PS0tMUU5RUFdW
-V1ZPSkJEQERERD9JSlJWUlNJR1JghKCnpJ+WjYZ6a2FkYGNpaGNRQTs5Nzo5OT5E
-Qz4+Ozo6ODw6QUdCPDo5OzxCQD83N0NER0I8PT07PEBEPjlBSERBOjU0NDIyNDY1
-Nzo5OTg5ODpAQkNBRkZJVFRTV1FZUlNZX1FNVVNQUVBST05LUlRTUkxMT1RYVlNR
-TlFSVFRRTktMTElRTEhPU01RVFJQTU9WUk1NT1BKSlJUTkxRSkVCQERFR0RHQj4/
-PDw8Ozo7Nzc5PkVSYm9ydXl8gXp2d3d4fIGCgHt9f4OGiIuMj4+RlJKVlpaSkpGS
-i4uIhH13cXBub3aAiI2SkpSYmZqcm5iXjoeBeHNsZmVkX1hSUlpla2xqZ2ZoZ2Rf
-X2FkZmVrb29ubGloaGZlX1pbWlRSUU5KRUVER0pKQkNHRUVISEpJT0pMTExPTlBV
-V1lZWl9lZGZqamtub29zdHV2end4d3l5e36Bf357e31/f36AhYKEhIiHh4yIiIyN
-jpCOj5CQkZGTk5KRk5GSk5WXmpmXmZiXl5iWmJebnZycm5+foKCfn6SkrbCws7O0
-vLq3tra8urWzq6GPfHd9kKSzuru4t7ezsrKxtbOztLa5uLi4ur+9u7y/vr/Cv7u2
-trS0sbK3u8C+vbq7urq3trS2tLe1tLi3trm5t7W2tLOunoFkV1hSVFZVUlFQTktK
-SkpGRUlLS0pJSU1MSkpGR0lMTktISExNTk5NTE5MTE5MTEtMS0xOTk5NTk9PTk9O
-Tk9SUE9RUVBQUE5QT1FSUVRSUlFRUVNRU1NPUVNSUVNTUFBQUlNQUVJQUVJTUlFT
-UVNSUVJSUlVVUlRVVFVTVFRUV1RQUVJUU1JRU1NVVVVXWFdXWFdVWVhZV1lXWFhY
-V1dXWlxdWllYV1ZXWlpXWFhaV1dVV1dXWFxeXmddXFdXWVdXWVpdXV5eX2BfYF1d
-XV9gZGRfXltZXl5eWldYV1VVVVdZWW6ovcvT2t3h5ebn6Onqfnl9en19eHl3eXl6
-eXl3d3h0bWNXTEhGQT9BRUZFQ0FARUhGRUlDREJHRUI/PDtCPjw6QkVBQUFDQz49
-Pz5BP0JHRkdFRkdGR0hKSkpKTExNTEtQVVZYVVdVVVZXV1VVVlpbW1pTU1ZVWFhb
-XFhYVVRVUVBQUlZPTFJWU09PUE1LSklOR0pNS0tGQ0VIQ0JAQjo7Ozs8Pzw8OTg5
-Ojc2Ojs3Nzg6ODc4PDk3NTk9QDw+Qjw9QEVPUUxPS1JPUFhPVVFRVU5XWltZWmBh
-XV5WVFpSTlldWExJQjw3NzY0NTQzNTM0MjM2ODg3Oj45PEBCREJFRlBMSkpKWVVQ
-V0tIRUJFR0tPSElMUFVSXldSWlhiVFtZWFlZWVJdWF1VVE5RU1ZQUFFUUVJUVVZS
-WFVTT1NbW1lZVVlfXVldXVtTTklANzQ2N1NNQUtVVFZeX1xcXlteWFlaWk9SVFte
-WmJaWllcXFFRTVJcU1JSTlJUTFNLTFRWVE9LTVhRTlJJSlBTVFdTVlRRWFhVV1VY
-V05NUU9MUFBTUEpST0pJTk1QVE1OUFRXV1RRRkQ+R0M9PUhJT1ZSUkhGTVZ1kqCj
-oZuUiHtqWltcYGJlZFlMPzs5PDs9PUBBQz89QD5COz1BREFBPT4/PTw/Pzk5RU1F
-QTo7REVCQUVBPENEQT47NTUxMTIxMzM1NTc2OTs7OkJBREJIREZQS1BSVFtOU1hY
-UFBTWFdWT09WUExNVFlPTE1OUVJUU01LTk1RTU5NSkxKTE1NTVJNSk1RUE9MUVNU
-TUpPTEtMUU5NTUtKRD49QUVHQUJDPz07Ozs7Ojk6Ojo/TV5pcXR1eXt6fHl2dnqA
-fn5+e3p5foOJjo+NkZecmJmZl5WUlZCMi4eBfXVvcXBzeYGHjpCTlpmcmpmYl5SN
-gH17dGlfYmhqZVlSWWNqbWxsbW1qaWloZmRnaWxvcG9vcXFwb2xrZmNjXVxZWFNM
-RkRGR0dHRkhKSEdGR0hJTEtMV1BTVFlcXmFkZWhqbnBtb3JydHd2cnR1dXZ4eXt6
-fH16en57en1/f4CDhoeGhoiGiYuLjZKQjZCUkJKUlZSRj5GRlJWVlpSXlpiZm5ye
-nJiZmpqdnZqXnJydn6Opra2xsLG3t7S4uLi4vb6/s6yglZGOj5GXprW9vbq5sKyu
-r6+xtLS3ubm3t7q8vby9v7y7vLzCwbq3s7Szs7a8vby7ubq8ube3t7m7ury5tra1
-tra1uLm1tbKsmHVeVFZUVFNSU1RQTklLSEdJS0pMS0tJSkhHR0xMSElMSkpJS0xP
-TEtQUUtOTk5NTk5NTExPUlFTT05OTk5MTlNRUVJTVFFQTlBQUVNVU09TVVNRUVFR
-U1NSVFJWVFNTVlRWVVNUUVRXUk9RUVJSVFJQU1NSU1NTU1RTVFJQUlNWVlRSU1VV
-VlJSU1RVVlZUVVZWVldZVlVXVllaWFhVWVdWVldbWldYVlZZWFlZV1hZWVZWWFhX
-VVZZXVdYWlxaWVdYWFZXWl1eXV5gYV9cXF5cW1teW1dYWlpZVldXVlRUV1habai+
-ydPZ3eDj5ufo6el4enx8e3p4ent3eXt7eXh2c3FsYlNKRkJFRUVHREdDQEJFQUZF
-RkRDRD9FQ0A+PkBAQEBCRURGPj1CREI9Oj5FQEBERkVER0tKRkpHSk9NTUtLS09T
-VVRUVFZXV1pcV1hWWFlYWFdVVVZcXlxbW1dRU1VRUFBSVlJNT1RTTE5RTExLTVBN
-TEtKSkdGRkREQkFCQTw9Pj47PDg5Ozs9OTw4OTc4ODo5PDo5OTY3NzpAPD0+PkRH
-SU5OSE9OUVJOVlFRUVFcWl1eVFdUXGBiW1RYXFRWVlhWTk9FPDc3Njc4NDQ3NzU1
-MzQ0NTg4Nzk9PERDQkxESEtPTEtUVVNaTUdCQEVHRklNVFFXVk5bU1NaVF1aXltX
-WlhcWFxbW1VTUVVQVFFRUlNUWFtYVFdXVFNUVlhYV1pUVltVVlpbXlFNSD46Ojg3
-PD08Q1FYV1RgW1lbZF5bV1teVlJXVlxbYWBcXFtWU09VWFpZWVNTUVZRT0pPUVRX
-T0pNTlJOUlVST1RWX15VVVBSVldXVlVZUUtNUEtSVlRSS1FUUE5TUFFTUVFSUVFV
-WFNIQURFPD8+R0ZLTktRTExMTmJ2kJ2fn5aMf3BgVE9XXWFnaltMPjw9PD88RUI7
-PT47Ozw+PURDQUA/PTo6PkNBQTxJSUJCPT1CQ0FCQkJEQkFCQDw1NTExMTQ1NjAy
-NDk3ODw/PEA+PUJER1FKS05UXFJUU1ZOT1dYU1NUU1ZWU1RWVU1ITFBQU1VWUlBO
-T05JTExKS01OTlBOT1FPSkpOUE9MUFJNS05MSkxMS05MS0hGQD5CR0VEQEBAPj09
-Ozs5Ozw8O0VXZnN0dnl4enl5enl/gYSFgH9/e3t9g4mOj4+Vk5eWmJuYlpWVlI+J
-h4R7dnJxdHV3foaNj5SWm5SUlpKQjIZ+eHNsZFlYYGdkWFJWZGtwcXFtb3BtbGpn
-ZGhtcHBwcnJ0dHRwcXBva2dkYV9fWVBLSEdHSEpHSEhJSkxKS0xPTk5PUVNWWF1i
-ZWhlZ2xucXBvb29zdHRydXV2dnd3eXh6fX1/f398en2BgoGFioiHiYWIjI+OjIyN
-kI+Pk5SWlpOUkpGVlpaXmJeZmJmZnZiWlJeYnJ+cmZ2bn6Knq7Cytre3tbi7ury6
-vLu6t7e0rJGCjpmfn5+otbi4tbSurauqq6mtsbO6ubi5vb68vL/AwL26vsDCure2
-tra1trm4ubq7t7i2trm8uLm4uLm4trW6uLi3uLa2s7CqkW5bV1hWVFBPUE9OSUxI
-TEpKSUlMTUpMSklJS01LS0xKSktKSktKSlBOTk5NTlBSUE9OTU1OTk5NT01LTlNS
-UE9QUFJPUlFPT1FOUlFTUlFQUFJTUlBSVVVVVFVSUlJSVlVVWFNTVFRSU09QUFFR
-U1JSUlFTUlFRU1NSVFRTUVJVVVVWVVRWU1ZWVVNUV1RUVVZbWldWV1ZZWltYWltY
-WVdYWlZXVVZXWFdWVFVWWVhcWlZZV1lYWVdYW1lZXFtbX11dXVhZXF9fX19dXV1b
-W1lXV1paWFdYXFlXU1NVVVRWWFhhnb7K09rd4OPm5ujp6Hp6fHx5en18fXp4enp6
-eXV1c2lhVEpHRklFSEpIRkZGRUNBQURERUY+P0Q/Qj9AQT05OjxAQUFGPjw/QD1C
-PT89REVISUZFQkRGSEZNTU1LS0xOUVJUVVZVU1dWWlhaWllXW1haVldXVVlcXVxX
-VVVVV1JPTVNVUVBPT1BNS01NTktJTktOT01PTklIRkdCQ0RERkA7PDw9Ojo5Ojg5
-Ozs7ODY1NDc/Ojc2Ozk2ODs+ODtCQ0JBSU5HTElNTUhSU09MTFdYW2BYWFRdXmBa
-Ul9dWFRTU1BPTUpDPjc5Nzc0NjM0NDQyNDU1Njc5OTo4P0xCSUNHRUlGSVhQTlhN
-TEFBRUFGSlFbUFJVUldSU1dTWVZlY2NdVF5VW1dXUVZVVE5RVFVVVFJYVFRQUVJT
-Uk9UVlRSV1ZVVlpUWGBhVVFJRTs4Njc5Ozk7RVJYUl5cVlhcWFxfXl1XWVZWWWBg
-YVlTVVRXUllfWVpeU1VWVlFNSU5PUlZPTlZRUFVUVVNOU1ZmZ1hQU09SWVZUUlZU
-UVFSTEdOUU5RWVVPUU9WUU5WW1dOTVRYVEpEQENAREFEQkZHTVJIR0ZLVlp5kp6c
-mIyBdmdWVFZYX2VqZllIPjw7Nzg5QEFCQj06PUFAQUNCP0FCPD1CP0dBOTxAQ0RC
-OzxDS0lJQ0BDREFEPzY1NjQ2NjIyODQ1Nzg7Ozs+Pj9CREVQVlBQSlBXVllXVVBW
-WlZLTlZcXV1aVVNTTEtQUExXWFlYVldVU1VPTk1PTU1RT0xKTk1JSE9MR0dJTEtO
-TlFPTUlMTUxKSEpHRUVEREVBPT1BPz5APDg3ODtDUWNrdHV6fX19fXt6f4KHioqE
-gX9/gH6Dio2PkZSTk5WWmZeWlJWTj4uIg3p3dXRwcHd/iI6Uk5uUmZmXj4qGgXdu
-aGdcUk1VXV5WUFhncnd1dHNycXFua2loa21tcHR1eHZ1d3Jzc3Nybm1uaWNjWVBM
-SUVHSEtIR0lPT05MTlBRUVJTU1deX19jZWZpa21wcXFwcHJ0d3t5d3V4eXp8e357
-f35+foCFgYKEhISGh4qJh4aMi42Nj5CSj5CSkpCSl5ORlJSam5qam5uZmpyenZiZ
-m52cop6foaKiqK6ys7S2uLq4t7m6vby9ure4tLCni3+PlZWXnqyzs7Kyrq2tq62v
-rq+usre8v73Bv7+8vbu9u76+wcPCt7i2uLq5t7a1t7e4t7q3trm4uLm5uLq3uLe4
-t7e5t7Owta6ijmxcVVJVU1NPUExKS01MTUxLS0xQT1FLSUpJSUpMTU5NT05NSUtN
-Uk5PTU5OTk9OTU5LSkpLS05NTU5QTk9PUE5OTk9SU1BNUlBTUlJSVFNRU1NTUVFS
-UU9TUFJVU1JRUlRUUVJSUFRSUFFRU1NSU1NUU1JUUlRTUVJWVVFPUVRUVFRTVFRY
-WVpUVFRVV1dXVVdWVVdYV1haWFVVV1pZWVhYV1hVUlZZWlhYVlVYXVxcV1ZWWVlY
-WFdaW1pcWllaX2FhXlpcXF1eYmBeYV1dXl5dXVpZW1pXW1lVU1JUVFdXVmKUu8rS
-2d7g4uXm5+npfHx8enp7fX18fHt6e3p5d3VxZ1tSTEVFQUVFREpKRURCRUhCREVF
-QkJFREE+PEA+PDk5ODlAPUNCPj06Ozk9PUA/QkFEQT1AQERHTEtKSkxKS05SUVNS
-UVNVV1lZW1hVVlpdWVhVVFRRVFdXXFdaV1dYUFFOTVRUVFNUU1BQUVBQUlBPT1BN
-TU5MSUpHRkRCRERFPz9BPTs5O0M9Ojk7OTk4ODw5Njc4Nz05NTM1OD05PD9CQ0JG
-SUJFR0ZJRUtPTFJOUVRSWlRUUlVbXV9ZWllgWFNKS0tHTkc6Njc1NTQ0NDMvMDQ2
-NTM1NjU2Nzk9REFEQEhJSkxGU0pKVU9MSENKSU1ITV1SUlVOUU5SVVBTT1tiXVlR
-WFVcVlVRXFlfU1FVWlRUV1VSU1FMUFFQTlFWVFZZW1dWUlRZXmFZVUtHPzo3OT49
-PTxAS1lVWFpVVFZYX2RgXVxeW1ZUX15cWlNWVlJTVFxbVlhQT1FUUVJSS0xPS01P
-UFRQVFtUVFFNVGNiXlRSUU9VVlFUVk5NTkxMR0VNT1NaVk5QUVROS1BZV1JRU1RZ
-U0hAP0BEQDw9RUdMUUhHR0dPT196jZOUi4B1b2BXVllcXWJpYUk+PDg4NTk/QEVH
-QTg9RENBQkFCQT8+QD1AQ0Y8OT0+PkE/QEFDSkc/PkBBQkNDOjQ0NzY1MzA2Njc4
-Ojs6Oj09PkJFRk9PUFBKUFBSX1pWTlBZV1BRVl5jXVZUU1FOTU1JTVNYXFhVUlBR
-VFVMSk5QUVBRUktOR05QSUVKTkxKSU5OUU5MS05MS0hKSUVERUZJQj4+PDs8Ozo6
-Ozo5P05gbHR3eHx9gH98fICEh4eFhomIh4OBg4aKjpCSk5OVmJiVlpiYmJOOkIuC
-fHVybm9zdn6IkJaXlZeTlo6Nh350b2VhZmVbUlVeZ2Vla3R4eXx2dnR0dHFvbW1s
-bW90dXV1dnd4d3d3d3p3d3dzbm5pWkxLSEZHRUdKTU9PUFJQT1JWVllbXF1hZWdn
-aGptcXN1cnBucHJ4eHh1cnh7foGAfHx+f4GBgYCDf4CCgYSIjIqFiIuNkY6OkpKU
-k5GRkI6VmJiZl5iYmZuampuamJqcmpycm5uanqOipaaosbe4ubWztbm5ube4u7y8
-t7ayr6OQkJGKf4OUqbCxsK+sq6unqamorK+ytbi6ub/BwL28vbvAwLy7t7i4tri7
-vLu2uri3tru6vbS1sre3ubm2tri4tre3trOxra2ysK2gg2hdV1VUUVJRUVJRTUtK
-SUtKTEtJTVFNTU1LSlFSTUpJTE5MTUxMT09PS0lLTk5PTk1MS0hJTU5NTU1QT0xM
-Tk9PUVBPT1NQUFJTU1NWVFRST1JRUE9PT1FTUE9RUVJQUFJUUlBSU1JTUVNSUVBQ
-UlFSU1VVUFJSVlNSUlFQU1ZTVFNSVVhZWFhXV1lXUlVWVFVUVVlYV1hXVFRVVlhW
-V1VZW11bV1dWXVRWVlRWWFxaVldXWVdWV1haWllcXWFdXV1dXVxdXV5fYGFeXlxb
-WllZWVtZWVpZWFZWUVRVVldZX5G8ytHZ3eHj5efn6ep6ent7fn54e3t7fXt6d3d6
-dG5mWFNNSEhDQkFGTEVFREBASERFREVHP0JBQUI8O0A/OTY4NjpDQ0E8P0I8PT09
-Pj8/RUJCQD1BRklJSkhGRklJSE1PUVNPTlNUVllWVFlZXlpWVlhYU05RUFBUVVlX
-U1VVVFBSVVZWU1VVVFNTT1NTUlNNUk5KTUlLS0NHSUREREVFRUFAPDs8QkJCPDo7
-ODc6OjQ1Nzc6OTY3PjQ2ODo6OTxAPUBBREVJQkdGSk5KUlRWWFFVV1RUU1tZX1JV
-WV5dV05OSk1OUEE2NjUyNDYyMzY4Nzc3Njc3Njc9Ojk/PEM8REVFTUhJRUdOR0xN
-R01NVE9NV09MT09RU1BYU1VOWltWWFRaU1ZVWFNbV1lTVVVWWVhbV1NUUlNNU1FQ
-U1dZV1hYVlNNU11iYlhTUU1APDg3OTk4PD9FUFJQVlVTVlhiXlxbWlxhVlZXWlZZ
-XFhYVFZXWFtXTlFaUFFMS1NPSkpKUFJOUFJQWFxVU1BSWl5cXFNQUk1TUU1RS0lJ
-REVGRUZMUlFUTkxMS0tMT1hWUE5QVVlVRz5AOz48OztDSElQSUZLSUxKUGd5iYyI
-e3JtZVhUWFpXWGJhUkA5ODk5PUFDSEU7PUE/RUZCRERDQD1EPD9FPjw4O0FAQz5C
-PD09QkNBPj88PDw3NzMyNTY2ODY1Nzk3Nzk7PD9FRUhETVBOUEhUU1ZcVlZNU1hR
-UVNWWF9aVFNSVFJMTkxMT1JVWFRTUE5VU01OS0tOUlhUTEZMUVBMSkxMSUhHSkpJ
-SUVIUE5KSlFLRUNAP0NAQEFAQDw6Ojk5OD5LWWZxdXh5eHt+gX59goeJhYeHhoaH
-iIWEh4eMk5GXl5mZlZWWmJeVk5GOioZ/eHNxbXJ6goqSk5WVmJaUjYSAeW5rZWFo
-bWZcWWdzeHd6e317e3l1dXd1c3Jwb2pvdXV2eXp6fH18fXx7fXt7end0dXNmVk1N
-SEhMT05OTk5PTk9OVFlaW11eYGVna2pqbW5xc3JzdnNzdnN5eXh6fXl4fnp6fX19
-fYCCgH6AgoCEhYWIioyLjI2Pj4+PkpWSj5KRkZCRl5eWmJiWl5qaoJ+bmp6foJuc
-m5uhpqqsq66yt7i7u7e6u7e2t7i4tba3tbOxrKGakH9ydounsbCyr62rqKenpqeo
-rq20uL+6t7q9vb2/vb++ubq0tbW4uru8u7i7ubi5vry4trS3uLe6uri3uLi5urm4
-tbCysLGzsKuagWRaWFRST09RVEtLSUtOTkxLSkxKSkxLSklKVU1KSUtMTU1OSkxO
-TEtLTE5NT09LS1JOT0tOT1BQT05OUFBOTlBRT1BPTk9PU1BOT1FRUVFPTlJUUlBP
-UE5PTk9NT09QT09RUVFUUlBPUVNSVFBNUU9OUU9QUU9QU1FTU1JSVFdVVVZVVVVX
-VVdZVlZbVVdaVVdZV1hXVlhYXFpXWVZVVVdXWVhbWFlbW1taV1ZYW1tYWFhZWVlY
-V1hWWFtZWFlbXV1cWltdXFxdXVxbXFtYXllbWVpZWVdVUlBPWFhXV1ZelLzJ0tnc
-4OLl5+jp6Xt7fXt7enh6e3l9enh2c3JvbF5TSUdFQUVKTEZESEVGQkNGS0hFR0dF
-RUM+QD06Ozw8PDk9PD06PD5AQ0E+QTw5PD0+QkJCQ0NFRkdDRUVFRkhLSkxPUE1P
-U1RXWFRXWFhaWlhWVVZUU1NSUVFUWFlWU1NQVVZYV1laWVxSU1dTU1NVV1RUT0xL
-SkhJSElGRkpGSERBQkA+Pz0/P0BAOzw4NzY3ODg6Njc0NTc3NTc1NzM0OT85O0JI
-SkdER0ZHSklVVlldVVlbWVlSYVpbV1dbWVpXVVVXVFFNRjg/NjMzNTQ1NTQ1NjY1
-NTc1NTc5PD85QUBARkZFRVBIRU1JSkdFSUhPTU1QSUxUTldOUlhUX1hdW1JaWFtR
-VlZbWV1YVVJVV1dXV11ZV1VPTU9SUk9RVllVU1VUUlBSWVpdWVRTS0VBNzg2NzM2
-QEVIVVRWVFVQWmRdXFRXV1lUU1ZTWFpaWFdYUFNcXVVTUFhTTU5NUE1GRE9US09R
-T01QXFxTUVJSX1dZV01OUE1LS1BQR0RHRUZMSk1QTk1KRUlOTFNRVFZRUFBXVk1J
-QEE8PTw/P0hIRUxLRk5ITlFMV2F5iIR6cW5pX1dWV1VVVllPPjo3Ojs+RUE/PT08
-QEVHREQ/RUVDQEFBPUpEQDo7PkJFRUBAPTxAREQ9PTw5PD06NDQ0NDk2Nzk2NTMx
-Njc3PDxIRUlPUE9OT1dSVFRaW05LVllgWVVWXFlWU1BSWFJQVFFLTFJZVlFOTk5T
-VVROTElJUlZQTU5RUE9LTEtLTExMSUlKSklLS0xJS0tGQkNERkFAPz88Pjo4OTg8
-RVRkbXF0d3h9gYCBgH+FiYiEgoiIhYGHiYeIi42Slpubm5qYl5WVkpOTk46Kgn55
-dXFwc3mEjJSXl5iamI2Jenh2c3BqYmNoZ19aY3WBgIJ/f357fXl2eHd1dHFycHF0
-d3d3eHp8foCDf3+Cf31+f397eHFhU0xMUFFPUU5NUFJUU1RXWVhaYWVlaWptbm1u
-cXFzdXJ4eXd6eXp4d3t8e3p6fHx+fn1/fn+Af4KBiIiIi4qMi42PlJSRkI+MjpGU
-lpWRjpCUmJaWmpSUl5ufm5iYnJ+gn56go6etsLSzt7e3uL25uLq3t7m6u7y6t7i2
-tK+xqaOSgnqCmaqysLCtq6enqKOjo6yxsba1vr++vb+/wcC7vbmzs7O0uri3ur28
-u7i4u7y6urW2tre0trm4uLi4tri2t7m2srOztLSwrqWVeWNaV1ZSU09QUEpLTU1M
-TUxNS0hKS09MTEhRYE1PTklKSk1QTktNTEtLUFBOTU5MTU9PT1FQT09RT09QTU1N
-TU5LTE9LTFBUUE1PT1RRUVNTUlFSUlJRUU5PTU5PTE9PT1JRT1BRUE1QT09RUlFR
-UlNUUE9PUlFRUVJSUlRTVldUVFZTU1NTVFNVU1VWVlhYVVRTVFhYWFtbWFhZVVNX
-V1dXWFVWV1pdW1tbWVlXWVhYVVlaWVZXV1paXVlaW1pbYF1cXV5eYF5cYF9fX19e
-XlpaWVhWVVhaWFhXVFdYWl2TvcrS2d7g4ubm5+noe3x7fH54enl5dnZ5eXp4dnBm
-V05ITE9MQUNJTEdFS05KQ0pAOz1DREZEQ0E+PkA7Pz48Ojo+QEA9Pz8+O0JAQjs7
-Oj5BREhGSEdEQkJEREhHSUpMTU9OVFFPUlJTU1ZTU1ZYWFpYWFhUVFFPU1VXV1RR
-T1JVV1pUVltaXFdVWFRVU1VYVFlSTU5KTU1MTkpJS0hLREVFQj5BPz47PT09Ojk2
-Ojw6NjM1ODg1MTQ1Njg2NzY4ODg3O0JFSEZFSUNCRE5VU1hTU1lSXVldXFdWVllO
-VVdXWVVRU1FJOzY0MDM1Nzc1NDQzNzQ1NTc4ODk7O0BBP0RDQ0Q/SUNGRT9CPUBI
-RklIS05KTU9KS0lNVGFjVVpXU1VTXFNYVV5gXVZVUlVZXVpdXFlXWFdOTVFVUU5S
-VlhZWFZXV1VUV1tXUE5QSkU7NjM0Ozs7Q0NNW11VX19ZXVtUUlZWVVRZWVdaWldb
-V1ZRTVlfVlJPUVZWT1JUS0hJTlJPT1FNR0pTWlZWWFJaVVRXUU1RTE1TTU9OSklL
-Sk9STEpPTk9NS1NPTE9OTlFTUFRUU05EQjw6PT1ARkhCR0NDUE5SUEtPU2Z7f3tx
-bWhgVk9VVlRTUkk9Ojo7PDw9QEBBPz45PkE+PkFAP0JGPT5BS0VDOzg4RUVCQEVB
-REVLRz89Oz48OTQ2ODQ1Njk7Ozo4NjY4Njg7O0FCSFZNSktPVk9XVFtbS09WWF1b
-VllZV1VTVFxXUFRZVU9QUVZTUktLUVhbUkpLTFBTWVJQT05PTEhMT0xHSEpLTktL
-S0pJSkpHQ0E/Q0RAOz9DQD07ODY6PERRYmxyd3Z7fH6Bf36Ag4eJiIiNh4mJhoeK
-iYqNkpCZlpydmpmYmJaSkpKRjYqGfnlzcHJ2gIaOk5idnJyZjn1vbHNycnBqYWFl
-YV1peYOGhH+Cf4B9gHp7d3l2dHJwcXV3e3t9f4CEh4WIhIaEhISCgYB9fHJfUVBP
-UVJPUVJUVVdYW19eXWFlZ2pqbG5wcHNxcHBxdnZ6enh7en5/fnt8e3l5fX+BgX9/
-f4KBgYODh4iIjIyPjo+Tk5SUkpSQkJKTkZKXkpOWlpeXl5eZn5+dm52cnJ6eo6mq
-q6+0tbe1tbu7vbm5ur6+vbq8u7u3t7e3r6eel5ORk56lrrOyrquqqqeko5+gp7G0
-uL+8vcO/wLy8vLi2trGys7azs7a2uLq8vL28urm3trOxsra2uLi5tre4u7m2trS2
-s7O0sbCurqaUdmFZVlRRUlBOT01MTktKSklMS0xMTk9LTWJwS0pLTE1LSkxMTEtM
-S05OTlBPSkxOTU1OTE5PTk5PUE1OT09OTExNTlBRT1FTUk5QUVRVVVNTUk9PTk1P
-TkxQUVFRT1FPT1NTUVRQUk5PTlRTUlFSUVFSU1NTU1NSUVFTVFRVU1VUVFNSVVRS
-UVJVVFRVU1NUVFRWV1dVVVhZVVZZW1hXVFhYV1ZZWFlaXV5dV1ZYWFhaW1tZWVdZ
-WlpaXF5cW1tcX2FcXV1dXF9fYF9fXlxdW1pYVlNVWFhVVlJTUlVXW4i8ytLX3OHj
-5efo6el7fH18fHp5enp6d3d3d3d0bmJXTktNTElGSkdHRUVFRUFDQD9CQkFDSEVA
-O0BFREFDQzw7QUA+PUFCQD9BQD8/QDw9PEBBQkVFQ0BCRkZFRklJSk1QUE9QUEtM
-TU9QUVVTV1lZWVtcW1hbWVZWVVlYUk9SWlhYWlVYWVhZWFhYU1hVVVlVWFJOUUxL
-Tk1PT05QTU5LREJAQEJFREA+PT48Ozs6Ozg7Ozk8NjU2Nzg0MzU3NTc3OTo8Pj1H
-QUNHQT5AR1BSWVhSVlJbWl5fVldUWlhVUVJYWFRSVEc9NjQzMzUzMzQzNjkyMDI0
-NTc5NzY6Ojw8REFBQT9GQEBFSEVDQ0ZIRkdPUEhSVVFOR05XX2NSU1lYU09YU1dZ
-XFpcW1RTUVVhXVhXWFpaWVVQUlFSUVRXWlxWUlRZV1JXW1hRS05OS0A7MzU4Njk+
-RUdOWFheV1ZXVlZZX1VWXGJdW1pgXV5aVVRPUl1YT1JRUVNRUlRLSkxNUlhPT09N
-Sk1PXVpXW1VTU1FUUFFQS09RTU5KS1RXVU5QTlFTTlxTTFFMT01KVllTVVVVUktC
-Pz5APEFCRkNFQkJNTlFPTE5JW2t3eXdyaWBUSk5WVVNRTUQ8Ojk7OTpAQENGOzlA
-Qzw8Pjw9Oj48Q0ZBRUI8OT1EQUZDQEZBRElHSkdFRDo5NDQ1NTQ2NTc3Nzc4ODYz
-Njo9PkFKVkpLSk5VTVRPWVhLTFRbXlZWWltWWFRZXFlSVFpVT05QUVBMTVBVWFZS
-Sk5XUk9XVFVUTk9KSkxOTkhGSU1MSkhLSktJRklGQUI8QT48PT08Ozs6OztATFxp
-dHl9fHyAgX57foeLioaJi4yLiImHiIuKjI6OkZOVmJ2YlJiWlZWVlJGMiYiCeHFw
-c3aBh5OXm5ycmpOIeG5lZ2xwcG9nZm1wdXuAg4aIgn+Ef3x9fYB3eXp2b3B2eHt8
-foOEhIWFhYiOjYuIhYeEhIODfGxcVlJUU1JUVllaWVteXV9iZWZra2tub3Rzc3N2
-c3V3eHl8enh7fHx9fH5/fH19fn6BhIGAhIaGhIWGiYeKioqPjpGSkpOQj5CVlZKS
-kpGNjpOWl5aZmpmbm5qanZudoaWorbOws7a8ubm4u7u7u7q4u7q5u7i2uLW3trCp
-lIODjZeirbCysLCrqqqopqKhoqKosLa9vb27vry9t7W7uLS1s7Gxr6+urrC0vb23
-v7+9vbq2tbO0s7a6uLm4ury8t7e2tLa0s7Oyr62pq6eSeGRaV1RQTk1PT09LS05M
-SUhNTEtKTU9OTElJS0xMS0pKS0xMTElKTEtLSkxMTExLTE9PTk1MSktMTFJPT01R
-T09OTEtLTE5PT09QUVBRUE9QTk5PTk1QT1BTUE9PT1FSUFJRUVBPTk5QT1BSUlJR
-VVNRTU9TVFNTVFRUVlVTVVhZV1RWV1VSUVBTVlRTU1RWWVVVV1VVWVdUVlhbVVRV
-VlVXW1pbWFlZWFlZWlxZV1pWV1hZWllZV1pZWl5dXV5fX1xbXV1eXmFgYGFgXl5e
-Wl5ZWlhXV1dYVlZYWVlbi77K0dnc3+Pk5ujp6X59fX58e3t+enl1dHZ4dHRtYVRP
-TUVHRkdNR0hHSURERkREREFFRkZCREA9PUE9REM9OTtBQkI7Pjw8O0NAPkNBP0BA
-PT1AQUA+Q0NDRkZFRUlJSUtPTk1MS0tOTE5QUlRYWFhaWl5aV1taWFRVWFVTVVZa
-VlRUWF5ZVlZTVldVWldXWlNVWlZYVE5PTk5NT1BQUUlHSUZCQUVGRkNCOz89PTc1
-ODk7ODM1NTgyNjU2NDI0Njg5OTo5Oj1CQEM7QEZFTktPUlBZUlZXWF5SWFhaU1FQ
-TlNZU09PSkU4MzM3MzQ0NDc1NTk3NjU1MzY2Nzo8Pz1AQkVIRUZDRkVCSUBARkVK
-RU5VUlVPS09OVVZTVU9RUVJYVFVPVmBgW1ZbXVtSWmJiW1dXYGJfW1FTU1VTT1RX
-WFNQT1VcVlZeXVJRUVFQRkQ7NzUzODhASUlUVFdXVFJTVllbWlphYFxVW11dWFda
-UVFSW1VWTVBLS1dVUVBPT05TUVBPTVVOUk5VWlFTUFVVUFFNUU9MSktPTUxHT1pd
-TVJUT1RSW1VKTlRNSlBUWVZUU1FUUUhDQT0/PkBFREVBPklLTVFOT0tYX2lxdXFq
-ZVlMTVNYVlZYTkM6ODc4OT1ARkA9PUJBQDw8PD0+Pj49P0RFQj40PEFCRkQ8Oz1G
-REZJSEdBPDYyNTMzNzY1NTI3NjU2NDU5QDtCQUdNRk5NUVBOUlBXUk9QW1teYl5a
-WVhWV1dXUlBUU09KTVRQU1NQVVdTUVFQUVZUUFlSUFFNSEtMSktLSklMUU9LTEZI
-RUZFQENDRkZAQj88QEA3Njc6P0taZW92eHyAgYJ+e3uBhYuMiIeLi4eGiIaGiIuN
-j5CTlZeZmZiampiXlpaUkY2KhoJ8enZ5fYSNk5iZm5uXj4F1c3FvbnFvbmxseYCD
-hYaFhISEf36Cg4KDf3p6enNucnd3eHyAgISEhIKFh4uJjImHhoqIhIWCd2lbWFhX
-WFlYXV1cXGBgZGdta2pqb25vcnVzc3V4enh2eHl4eHl3en5+gICBgYB+fX+Ag4WF
-h4eHhoeIiIuKi4yPkpOPkpOTlJOSlJqXk5KRkZaYmJmYnZucnJ+en6KkrK6usrG0
-t7m9u7q7ube6ubm3uri2tra4tLK0saSFb3WDlaCtsbCurKmpqaikop6go6mwuLi8
-vby9vbe4tbe2tbOwr62tqKSnqauvtbS4uLm6urm2sbK1tra6u7q3tLi4uLW0s7Ky
-srSxr6upqqaSdWBZVVNQT1BOT05PTk1MS01OTExOTUxMSklMTE5KSklISUtJTExM
-Sk1NTktLTU5OTExNTk1MTUxPTU5MT1NRUVJQUVZSUUxMUVJQUFBQUVBSTlFRUFFV
-WlhSUlJSUlBRUlFQUlNQU1FNT1JTUVNSUlJUUlRUVFRUVFNRUFJTVFZWWFVTU1VT
-VlRXVFZUVVVUVFNUVFVUWFhWV1dWVlZXXVdYV1hZW11YVldYWltaWFhXV1tbWVxa
-WVlbXFxcXV1dXFtbW1xcX11cXFxcWllZV1haV1hWWVhXWlhZXGWPvcnQ2N3g4+Tn
-5unpfHt9foGBfnt+e3d5eXd1c21gUUxFSkZHSkhIRENBQUhHQ0ZCRENCRT9CREA+
-PT9AQEJHQTw9PD1CPjw7QEI+QUJDPD09Pz48PT0/QkVCRkVGRUlJSkpNSUZGR0tM
-TU1PT1BTVVlYV1dWWlpaWFVWVVRVVFlcV1dVXFlYV1RXVVFWXFpXUldVV1tWU1VQ
-TU5PTU5RTE1QS0pBQkdIREQ9PkA7OTU6Ojo5ODc3Nzc2NTY1MzQ3Nzs5OTk6PT48
-QUJESklNR0tWUFZVVFZQWVZWVFZWTlFNTlJNTU5JR0A2MjQ0ODcyMzM0NjU0MjAv
-NDM2OT0/QT8/Q0lGR0VFRkJERkVIRkpJTU5NU09MUU5WVE1QTFJQUlRSVFFcW1pW
-VlVTV1JYW2FaV1hcYWJaVVNST05OUVdaVE9QUltXVGBfVlJSUFFNUEA6OjQ2NjpD
-RElRUFdYV1pVXFxdYFtVWlJXWl5bVVdXUFBTTldOTE5SWFhSS0tJTFNOTlVQVFNP
-T01QU09TU09RTE1OSkpJTlZVUFJRVVlQS01OUFdRTk9NUVBJUldUWE5TVlNUS0E6
-OTo8PkRARUlBRkZHT01SS1JXXGZtbWlmWk1KTFJXWltbSzs4MjU7P0FBPkA8Oz4+
-QEE8Oj49RUNERERGQT05QEJGRz02Oz8+PkFGQUZANzQwNDU0MzYzMjU1Njg5ODk7
-PUJBRkhJVlFbUVBXUl1XVlpXWFtnaVpYVVZaVlVTUE9OTEpRVlhXS05UVFBRUExN
-Uk5NUk9LTkdJTkxMTUxJTU9PT05IR0ZHSklHSUxCRURFQUE/Qjs7OTtHV2RvdHV8
-gYKCfnt8fn+GhoaEiYuIiIeGiYiKjI6Ok5WYmpqanp+cm5qYk5OUkY2Ggn55dnp/
-hY6RmJmbnJmJem9seHp5c21tcnR7hIWKi4qHhYB/gH56e315e3t4eXV1eHh7fYGB
-f4KGiIuMi4qMiomLiYmLhoOBcF5ZWFpdXFxcXWBiZGRmaGpsb21vcXJydHV0cnR5
-eXd2dnl5e3x5e3t+f4GCgYCBgYGCgoaFh4iIiYyLioqMkJGOj46NjpGQk5OVlpSW
-nZ6Wlpicm52cnJycn6GlqK+usrS0tri3tre4t7i4tra1tbW2tbS2sbCytbOwpo19
-gomPkZWhqKirq6mppaKhnZ2gq7K4uLm6vL2+u7i1trOwr66vrKimo6SmqauxtLW4
-uLq5tbOysrW3uLm8u7q4tba4t7e2tLSys7K1raqxrqeScV5WVFRSUU9PT05OTU9M
-TUxNT05LS0xLSUxOS0tIR0dKS09PTktOTVBMS01OTk5MS0xUTk1OUE1PTU1QUFFQ
-UFBRT1BQUE9QUFRUT1JTUVRQUlFSUlNWVVVSUFJRTlBRUU9RUVJPTk9PUFBSUE9S
-U1hUU1FUU1JSUVRUVFRUVlhWVFdXWFlWV1ZWVVJSVVVWVlRUU1RWVVZWV1dUVFZY
-WVpWVllYWFlZWFlcXFhXWlpZW1taWVhXWFtcXV1dXVxbXVxcW15eXltbX2FeWFhX
-WVpaWFdXVVNVVlVVXo69ydHY3eDj5efn6ep6fX19fn5/fnl5fHh2d3VwZltPSUhI
-QkJFTUtJRD0+P0ZIREM/PkE+RENBPj4/PT8/Pz8/Qj9APT9CQUNAOz9DQD5FPkA9
-Pzs9QEFDRUZIRkZFR0tLTlBMS0tLS01KSU5NT1JSU1FSV1ZYWlVVVFZVUVNUWVdV
-WVZXWFVWVVVUT1ZWVVRRV1ZaX1ZQVVJRUFNQTVBJSEtHSEVCSEdFQEFAPTxAOT86
-PTk4OTU1ODQ2NTY6OTg1Nzg2OTk/QkFGR0VKSEtIR1FPVltUWFJWVFNTUVNPUk9Q
-VVJOS0RBOzczMTMuNjUzMzI0MzQ3NzY1MjU5PDw6PDo/QkdIPkZHQktDTUxGT0pH
-SUVTU0xOS1NaVVRMVVFPVlFWTlZaVlNOU1VYVllWVlRVVFhYWVdWU01KS1FWWFZT
-UlJVW1JSW1lWVVJPVVFRQTg2Nzk5OUBBQkhNUlVaXGBfYWBkWlRTXFpWV1xbVlZP
-TVRRUVJSTFNYVFNKR0pLUlNOVFdQVVNPTUlQUlJXU1NQS0tJTVRSWFxSUFJTT0hG
-SE9MT1NKTlRMS01OV1RUWFNST1VVRkA7Ojw6PT1BRT9IQ0ZKR09LSk1QW2ZrbGdd
-UEdJTlJbYGNXQjo8Ojg6Ojs8Pz07PT4+PkA/O0NDREBBRkJDQzs8QktGPDs+QUVF
-R0RESUE1Nzk1NjY4NjY3NS8vNDM2NTtCRUZLSktXV2RYVFtVXFhVVFdRV2RhXFVR
-VVRUVlZPUVJQUFJaWlRMSUhIVFRQTVJRR0xMS0pKSktPUVJMSE1OUE9OTEhKR0VF
-SEdHS0hISEE/QEBGPjw8QVFjb3Z7fH19f4B8enx/hIN/gIWHhoeEhYSHiYqMjJCU
-l5eWmJaMm5uYnpqXlZKOi4WCe3ZzdX+LkJSXnJ2Zkoh3a2pucnNtaWpzfISKjI6M
-ioyHh4WHhIOAgICCgH95d3h4en+AgYOCgYSKjYuOi4yMjo6Njo2Niod8aV5aW1pe
-YV5fYmRlaGpsb29ucHFvcXN0dHNxcnR2dXV3dnx7fHp6foF/gIKBfX+BgISDg4WI
-jYyIjJCMjY2RjZKOjo2MjZGPj46RlpeXmJiYnJuanZ2bm52jqaqwsrOysba5t7W3
-ubq6ubi4t7eytLe3s7K0tbWztq6ll5KYlo2De46iqq2traqlo6KgnqGss7i7urq7
-u7y4trGzsbKqqayppaSjoaKnqKiutrq5ubm1sbO1trm5t7y7t7i3tLi4t7S0srOx
-sbCzsLGyrqePcF1XW1VTU1FPUE1NT09NTE1NTEtMSk5MSElJSEtMTExKS01OTk5Q
-Tk1PTE9NT05NTFZLTVNQU1NRT05NT1BOTk5PUE5NT1BQUVBRUlNPUVFRUlNSVVJS
-UVFQTlBPTk5PTU5PT1BSU1NTVFRQUFBPUFBPVlRTUk9VV1VUVVVVVVVUVVZWWVdV
-VlVVU1JSVFVWU1ZUVFVUVFNUVllXVVZZWFdXV1dXWVhZWVlXV1taWltaWFlZWVxb
-W15eXl9gX1xeYV9fXV9dXV1cW1xcW1xcWlxYVlZXWlVTWFtdiL7J0djc4OLl5ufp
-6Xx7enp8f317e3l4d3V0cW5kVkxJSEZBREdJTU5DRUJCR0VCPT49QkJBQEFDQUA9
-PDo8REFAQ0Q9OjxCQEFAP0FBQ0BBQkRCQz8+PD1ARERGRElISUlMTUxNTkxJSkxJ
-S1BNUVNQUVNWVlZUVFRZV1hWV1RVU1VXU1pcWFdSUVFQVFJVVVRWVlldVVBUUVZW
-WFFNUk5FR0JFRUNGQURFQ0Q/QkQ+PT49Pzw6OTQ3Nzg4NzY3NzU1NjY2OT06PENF
-Q0FDRUhHT1BZX1RYUlVQUFdPS09TUlRVVFBOTkU7NzMzMzc2MTIzNjM2ODY1NDU4
-Mjc6Ozg6O0BDS1NFREZDQ0BHUEdPSE9MR1FQTFFMU1NWW1FYU1RWUltRVVVUVFJQ
-U1lVV1RSUVNWVVNSVVdaV1FLT1lWVldZUVFWU1dXVldWUVBTU1JGOzY1NzY5PT1A
-Q0lWXWJaWllfX2hjWVVYWlJWWVhWUVRSUE5LUVRPU1pOUVJJTVFOVFZQV1VWWVVO
-TUxPVFVUT09PTE5NU1pbW1lNTVJMSkxJT1FLUVJPV0xLUU5PT1FVWFZVV1VKPTk5
-PDo7PD1CQURFSk1HTklKTlBWYGVqaWNdVVFZWVxiaF1GPkE6Nzw/PEFDPD1EPzk4
-PD08QD5DPDk9Q1FHPTg9RUZBQj9ERERCQEhIPjU0NTY1MzY5NTYxNjM0OTY4PT0+
-QEVET1VVYlhbW1JZVVRUVE5PVFhZU1RZVldYVVBQUlRTV1VPSk1ISU1UVlBSVFJR
-Tk1OTUhHR01QTUhKUVRMS05ST01HQT9CR0lISUhEQ0REQUA+O0BMW2hzfX18fH19
-enh5foGAfnt9g4WDgoSFhISIio+PkZaVlZialpSYlpeXmJSSko2HhYB4dnR3g4yR
-k5mdnJaOgnZxbmtmZGVnaneCioqLjY2Ni4uJiYeIhoODhYKEf35+enp7foCAgoSF
-iomNi42Ni42Pj4+PjY6Oi4FxYl9gY2NlZGVnZ2ZlamtsbHBycXJyc3J0dHNzdXBy
-dXZ4ent8fnx7gICBg4OEg4SFhYaGhYeIi42Mi4+Qj46QlJKRkI+RkZWUkJCSk5KV
-lpeYmZqZmpygpqmtsrS0tLGytra3trW5u7u4ubW5uLS2uLaxsbO1tLWysKujpqag
-kH97jaOnq6+rqKajn6Cfo6u2uLu8uri8u7a1s7GxsKyqp6Shpaaio6Kip62yuLi3
-s7O1tbWytLi3uby7ubq5uLm2s7O0tLOxs7KwsK+sqaSTd15ZWFdSUVFOT05QUFBM
-TExIS0xISktKTEtMTUtMSkxOT1FQT05MTExIS09OTU1PTUtNT01NT05OTUxLTU5Q
-TU5RUVJPT09SU1FRTk1PU1BPUFFTUE9PUVFOTVFPUFJQTk9SUlNVUFJTT05PUVJQ
-UE1QUVBTUVJTUVRUU1VVVlZZV1dWVlZUVFJUU1JUUlVYU1VVVFRWVFFTVFZVVlhX
-V1dXW1dWVllZWFZWWFhYV1laWltcXFxaVlpfYF9kYmBgYWBfX15dXVpaW1dZWFda
-XFxZV1ZYVllcWl2TvsrS2N3g4+bo6OnqeHh4d3d8fHd5d3Z2dnd0cGFUSkdHRElK
-R0ZETklIR0dJRkFDQkFGREJDQ0Q/Pj9CREVBPT9BPT08OjlAPjw+Ozs7PT5BQUM/
-Pjw9PT0+QkJFREdISEVKSkpKSUlIS01PTU1OT1JOUlRTUlNSVFRYVVRWUVNYWlhQ
-V1dXVE9OT05TVVlaVFlWVl5XU1JPW1pWVVJXUkpMRUhHQ0dERkdGSUREQEA/Pz0+
-PDw6OTc1ODM2NTY0MTM2NjQ0NDc5PkBAPTlCSEpSVFBWT1hVUFRNU0tOT1dXWFRT
-UU9MRTw2NTQyNjI2NjY1NTY3Nzg3NTc2Njk8Ozs8QEFFSURFRjxEQ0NMSExHVlNI
-S05PVEtPVVdaUllRVltUW1dVWl1WVFJOXFRTVFlWVVVTUVBVX1taWE1MVFVTV1dQ
-UFFTWlxeWltUUVBPT0NAOTU1Nzg8OjxCSVVfWVhaWVZfZ2haW1RVVFNWVFRZVllW
-TlBPTlBUU01RUk5NUU1LVVdRVFJVU01PSU9ZV1lYVVVSUFZTVVtTUExGTlBQT05L
-T09QXFdVU0hMUkpQWlxbXVlYUk1GPTk4NjhAPj49PUFESkNPSE5LSVNTWWltb29l
-YF9eXWJjXEtBOzo4Ojk8QD88QTw3NTk9OT88P0M8Oz1ESEZAOz1ARkhGPUBAQkFE
-RkY+NzU2NzU0NjUzMzU3OTcyNTU5PT09RUZSVVRdVFhWT1ZQWVpdVFlaVFRWWFdV
-VFdWVVJTVFFSTUtSU1BNUllZUlFTU1JKTFBOSklGSElMTElJSkdITlFOS0dGR0hH
-R0pIRkhFRUZBPDxESVhmc3l+fH18fX55d3d6gH18e3x+goGCgYODhoeKjpCPkZOW
-l5icm5mXlpaXkpGQjoqCfXh2dnuFjpKYm52elo+EeXBvcWxhXmZueYaMj4+PlZKN
-jIuLioWIh4aEg4OCgX59fn1/f4KFiYqKjY6OkI6QkJCSlJGQkY2LiH1sY2RkZWVm
-ZGRpaW1ub3Jta3BxdHN1dHNxdHd4dXZ1dnt8fHx9fn99goWFhIODh4eHi4uJiIyJ
-ioyMj42PkI6Oj5KPkpWRkpGTkJOSlJWTk5ianJudn6WnrK+ys7SzsLK2t7i3uLm7
-u7i3uLm6uLa0srS2srO1tbKwqaSlnZuUj4+WoaaqrqijoKCkoaKqs7i2t7u8vb26
-ubSysa2sqaikoZmdn6Kjo6etsbK2t7K1trKxsbS1t7S8vbq6u7y7tri6t7Oztrm3
-uLWysbKtrqeUcFpVVFRVUk5OTU9OT0xISkxLS0xKS0pLSkpMTUpLS0xLUlBOTk5N
-TExJSU1QTUtNTU5RTk5LT05SU1VTU1JRTVBRVE9RUVBRU1BPT09RUFJOTlBRT09P
-T1JPUFJQUVNRTk9RUVVVUU9QT09QTU9PU1BPUVNQUFBRUlJTVVNQUlJVVVJVVFBT
-UlNUU1BRUlVWVlZVV1RWVlVUVVZWV1pbV1VVWFlZW1xaWFlaWVdZWVlaWlpXV1tY
-WV5eXF9jX2BgXmFgXl5cXF5eYltcW1dYV1RTU1dYVlRSWZa+y9LZ3eDj5efo6up0
-dXd2dXV2dnV1dHV2dnJqXVBKSUZEREVEQD4+RkVBQ0lDR0tFR0REQkFDQ0NCRD5D
-Qz9CPT0+Ojw7ODk6Pz07OTw9Pz8+Pjs+Qj06OkBDQUNESUZEREVIR0lKSU5RUlJQ
-T1JPUlFSU1FUVlRVVVVUUVJRUVVVV1FUVVFQU1NQUlRTUldRV1VVXVhZVlJVVFVU
-UV1VU1VNSUxKTEZISUJFREZFQD8+QkM9Ojs6Ojc2NTQyNDY0NTgyNzUzNjI2PDw+
-PT9FSU5WTlhUVVVPWU1RVVNZWFlVU1NVUEtJQj05ODYxMjYyNDE1MzU2NTQ4ODo6
-Ojo4Pz08PkBFQ0hKRElGSEtGR0FITk5TUFBRSFJTWFhOV1FOU1FXVlxeXVhTWlRb
-VFBOVltdV1NUU1RXVlhXUE5QVVJWVVFQU1RbVltZV1hVUE5OSj89Ozg1NDU0PEZH
-T2JbXF1WV1xgZlxYU1JSU1JUU1paXlpTTU9OUFdXUFRUVFVQU0xPVlFUXVNQS01O
-TFZbV1pWUVZQV1lOU1JLSU5NUFBNSkpKUlVRUlJRSkdPUVNcW1ZYVlVVUkpAPTk8
-PDw8Pjs/Pz1FQlNNVlNJTExTZm5ucG9ta2Zpa19cTUE8PUA7Nzg+QD47Ozk4Ozk6
-PEBGRD09PkRGST87PUFISEVAPkZBQ0RCQD02NDY3NDg7ODU2ODk4Nzg4Njk6OkJM
-SVBNUllRV1BPUU9XWVxYWFZUVlpXWVtaV1tbWFNRUVJSUVlYT05SVlpWUlNRUElJ
-Tk1JSUpNTU5NSktPUUtPVFFMTE1NSEZHSEZFR0lGQkBAQEdSYG53fHl7fX57e3h5
-enyAfX18fYF/foKDg4SEg4WKkJCWl5icmJucmZmVk5SRkZGOioR8eHp7f4mQk5mc
-n5yblIp/dXRxc2tnanN+ipOTkpKRj4yLjoyMjYyFhYWEhYSDgX5/fX2AgoWKi42P
-j5CQkpOSlJSUk5OTk5CMhHJmYmVla2lpaWltbWpubXBvb3B1dnN0dXh4d3d6enl3
-e3t+g4KBg4GAgYSBg4SDhIKHhoiIiIiKi4uLjoyMjY6QkI6TkpORk5SVlpaVl5aX
-mpqbo6OqrK2ssLC1s7ezs7e4ubi4vby4t7i3t7u4tLC5t7a2sbS3tbCklo2Eh46a
-oKWnp6qrqKSgoqKhp66xuLa3u7u8tra1s7Gyr62qqKeioaCgoaCjqq2usbK0srK0
-tLW4ubW4uLq6ubm0uLm7uLa3t7O0tra2s7W2s7K0sqeUdVxYVlNTU1BQTk5PTkxN
-TUtMSUtKSUpLTk1OTEpJSktLTEpMTExNUk5LT01KTExMTU5QT0tNUFNUVFJUUk5O
-TE5RUVJST05QT09PTlBRT05PT05QTk1QU1FRT1FST1BOT01OUVFSUVFTUlFRUFBQ
-UlNUU1FQU1FSU1RTUlZXVFZUV1ZVVVJUVFNSU1VSVFVUU1NTVVRWV1ZWU1NUVlhX
-V1dZWlpZWFhaV1hYW1dYWVlYWVlYWV5eW1xdXVxgXmBfX2FgYWBeXF1gYV9dXFla
-WltXVVRRVVhhlr/L0tnd4eTl5ufp6Xl2d3ZzdXZ1dHF1cHdxa2BVSkdISktGQ0FE
-QEI+P0JEREhEQktKR0g/QkE/QUNDRkJFQz47Oz1BQT07Ojk5Ozw8Ojo7PT88PD08
-PT49PUFAQEJGQ0RCQUVGSk1OTExOTlBQUFBRU1BRUVJPUVRUU1JVU1BQUVJcVFBV
-U1RRU1NRWFVSVVNYU1NZVFJTT1NSUlJSWlhVV1FPUk9STkxKQkRFRUhBQT5ARD4/
-ODo6OTs8OjkzNDc0NTQ0NjgzNTg8Oj86O0BAQ01GUFNYV0lQTFFYUVNTXV1XUlhY
-VkhDOjQ3NzYvLTAxMzIzMzI0MTI0NTg3NzpAO0BBPzw9RUlGSUdMUUhGP0lJTVtQ
-TFFOU1JWUUlTVFhfVFhUWFlUVFNaWF1UUVFYWVdXUlBUVlRWVFhVVFFVVVNXV1VV
-WltYVVdaWVdRTk1JQTw2NzU5Nzs7Q01KVVdXXFZaW15eXlhQTUxMUFVXVltdWVRN
-TEpQUVhTVl5WVE9RUExRUVFUT0pSUVJOTldVUldWU1BSWE9NUlBKU09MT1BQTU9Q
-VkxKUk5LTE1TW1lWWVVTVlhTTkM9Ojo8Pjk9Oj09QElGUU9VT0ZIRkxhbHRzcnNx
-enVrX1NIQDs9Pjo6Ozk+PTs1Njk6Nzg3RElEQkI/PkJGQ0dFQUBJR0A/QEFFQUJA
-PTc1NDM1ODo4ODk3NzM3Njk5OTs9REdESEdVVElUUE1PTlZcYFhbW1ROWFpbXFhW
-W2VcUU9PUVRUV1JPTVFcX1dUU1BOS0xMS0xNT09QT05OTlJPSUxTVE5MUFFRTkhI
-SEpJSERBQkRDSltpcnd6en1+fnx5e3yAgoF/fH2AfX5+fYGEg4KEiIyOkZaam5ma
-nZ2YlpWTlJKPjYuIgHx6eXyEjpKTmpqcmpqYj4F3dHFxcnBxd4KMkJOVlJKSj4uM
-jIqLiImGhYOEiIWDgYODg4KEhIuNjI2SkJCQlJWVl5aXlZORj5OMfW5lZGZpaGdq
-a21vb3FzcXR3dHRzdXZ3eHd3d3t6eXyAf36Cfn6AgoCAgYGFhYKIiYaHiY6MjomM
-jIyMi46Sko2RkpWTjpGRkpaXlpaZmJiXnqWrr6ywsbGysbOztbSzs7e5urq7vbq3
-t7e3trays7e4urq5trW3sauQeXJ1gpWfqKqsramqp6Ogn6Cosra6uru4uLO0s7i0
-tLGvr6unpqOio6OepKaprK+wsbOwsbO2t7q8ube2vLm3tbO2ubm2trW1sbG0srOy
-sLKzt7eyrKyXdmBWVVRVVFBQUk9RUE1NTkxMTExNS05NTU1MSk1GSUtNTU1MTUxM
-T05MS0pKS0xNTk5NTU9PTk5QUlNRTVBSUVJQUE5QT05NTU1RT05OS05OUFBTV1FO
-T1BSUlJRUkxNTk1PT1FRUFVTUlFRUVFSVFVUUlFWWFZUVVVWVVRWV1pWUlFRUlZX
-VlVUU1VTUVNUV1RTVVdZVFNUV1hXVVdVWFdXVlRXWllYWVlZWlpYWlteXFlaWVxb
-WlxfX15bXV9iYV9dX19dXl5eX1xZWF1aWlZVVlpYWl+bv8vS2d3h4+Xn6Onqc3Fw
-cXJvbm9wcnJvbm1oX1JKQ0VDRUVDRkVFSEdCQ0E+REVDQEZGREQ+QEJDRDxDPz5D
-RkVDP0Q9Ojk3ODs5Ojc9Ozw9PTw+PkRDQkA9PT9CQ0VHQUNAQUNFSktLS0pNTk1N
-UVJRUlJTU1ZVVFVSVVJRTFFOVldWUFJVWFNPT09SU05UUlRPUFRPTE5NUlNQUExU
-WFldUk5SUVNQU1FGSk5LSUJFQUFCPz8/Nz49Ozw8PDo6NjU3Nzs0NjU2OTk4Ojs8
-QTs6Q0JHTlNVSktPU1tVWFRbVE5QVlVOTkc+ODUxLzEvLzIzMzMxNDMxMjQ4Nzg3
-OTo+PTs5PkNGSEZKSEtJRUVES0tNUktQTExUU11XSlBOVWVfX1VXWlBSUVdXWVFX
-VlpXUVVVVFNaVk9RW1tUUk9VVFRZVFFZWVZWU1ZZVFJSUlJIRzw3MDc5ODk+RkZK
-U1JXXlxfWVhWV09JSk1YVVVVVVdYT0xLTFJKUVNVWllQUVFNUU9UVE5VVFJRTE5L
-S1NRVVhWVVZPT1BNUlFQU0xJT1BUVVBNSkxTU01UWFJWVFFYWFdVVVVURz46OTo7
-PD08PD0/R0hTUVBPTE9IS15baHZ3dnp8eW5eU0tAPD0+Ozc5OT1BPzc4Pzk3NztD
-QUZHT0I6OzlDR0JBQUxHQkJBRENFPUA5ODUxNTQ0NjY1NTQ1NzU3Nzo4Oj1BSUNJ
-SE9MTVdXXlpUVVdgW1tcV1hbWVhXVVNUXVxYUVJQT1BWVVBNTV1gW1ZRU1ZOSkpN
-TExSU1RSUk9TUkxJSUlMTE5MTFBKQ0VKSkhEQkFEREdUYm13eHt+fn9/fXp6foOD
-gYF9fn1/fXx9fYCEgYWHiY6RmJucnp+foJyXlpWTj46NioOBend4foaLkJGTmp6e
-nJWOhndzcnJ6fHt6gIuRlJOUkpGSkI+NjIuKiIiGhYOEhIKBg4KEg4SIio2OkJGS
-kZCSlZOXlZWWmJmXk5OLeGlpZ2ZlZmhqcHBucXNvcXV2dHZ3d3Z3eHh7enp7fH+B
-gn99gIKEhYGAg4SHjIiJiYmIiIqNi4yQlZCMi46PjpCRkZGQkZGQk5aYmJeXl5qg
-pausr6+wsrSysrS1t7i1uLe6u7u7t7W3tre3tLSws7e6t7exsbSxp5KAeX2FjI+a
-paqqqaemopqYnKOtuLW4uLa2s7GusLK0srKwq6qopKSlpaOkqKywsq+xsrSvsrG2
-tbW5uba5vLm5uLW2t7i5uLW1tbS0srCxs7O1s7Ows6yWeGNaV1VUU1JSUU9PT09O
-TkxLTU9MS05NT0xLTE1LSktOTE1LTk9NTEpKUE5KSktKTE5OTE5PT1JPTE9QT1BQ
-TU5PTlBOTk1OUk9PTk9NTk5QTlFRT1JQT1BTU1FKT09QUFBPT1NRUVZUU1BTUlJW
-VVNRVVdVVlVVVFZTU1ZWV1ZWVVVZVVRVUVNUVFVVVlVTU1FSU1VWVVZXVlVXVllb
-V1dZWFdaW1haWVhZXV9dWllbWVhYW1laW19eX19dXlxdYF9dXl1fW1tbW1hYWFhY
-WFdXV1lbYJO+ytLZ3eDj5ufp6eppZ2lraWhlaGtoa2lqaGRcT0lFRkhDQ0RHRkhG
-REdFQ0tJPj8+REE+RkBDQj4/PUVFQUBGQUJAOz49OTo6PTw9PD09Ozo+Pz4+QEND
-QT4+QkFAREdDRUM+QkVJSEhITEtKSU5RU1NSUVBPUlNXVFhWUE5QUE5UV1JNT1JU
-UUtKRlBSTFJSTU1NUk9MTEtPUVJRTlRWVlpQUFVMTU9QU0tSUExKQkNHQT49QD49
-Ozk5Ojc4OTYyNjY6ODY1NzY3Njg6ODk8Ojs/RkxMS1VNUFdTXlpYWlVaVVJST0xN
-S0M6MzI1NDMxMDQzNzc4NzczMTI4ODo8Ojk/Pz9EQ0ZAPkdGS0ZER0JKSE9TR0pL
-SVJUY15PWVVZXFRfWl5hU1FQV1JXUldZWltTWVlTVVJUW1daYFxXV1pTT1JOT1ZV
-WVxUU1hUU1JUVkxNQTo6Nzg5ODo9Q0pRWF1gW1xWVFdYVk1NTVRdWFVUVFRVU1NR
-T01OT1ZQU1FOVFFQUFJWUFNVUE1MTVBNT1RTVVhWVFBJTE1OVFBPUUpJSFJVT0ZJ
-Tk1TVVNbWFFVVlJVVlNUV1VNQTs9Ojs6Ozg2PD5ISVFNU1RKTkZLWlVcbnh/fHt1
-Zl1UTEA5Ojo4ODw5PUJGQD8+NzU5PzxBQ0dGQD08QUJCQUBDREJCPkBFQ0JEREI7
-NjY0MzIzNDQ3NTU1NDU2Njk4OkJDRk9LT0tTWVplYGBYV11ZVVhZV1xbWFVST1NX
-U1BRU09TVlVZUk9TVlpYVU1RV1FLSklLSk9TWVVTTVJYT0lHSU1OTEhGS0dHSUhI
-RUVDREVGUVxqcnZ7fHx9gIJ+fn2BiYWDgX5/fXx+gH59f4KChYiLi4+TmZqcnp2c
-nJiSlJGSko+IhXx9fH+EiI2Pk5KYmpmZlY+GfHh0d3yGhXx9j5mXl5eWl5SRj42L
-jIiFiYeFhYaGhoKCg4aGiIuMkJOUlZKRlJGTlpiYl5iamZqUlI5/b2hnZmdpamls
-bm5vdHR1dXR2dXd2eHh7e317e35+f32AgYCCgoKBgoSEiIiLioiKi4yMiYiLjI2P
-kI2MjYyLj5GTkpOSkpOWmpibnZyjpKesq66vra+xr7S2tba4ubW2ubm3trO0srOy
-tLSxs7Wytrm/vbi2tLOrl4+VmZqVkZCgpqekpaSdmpiep66xtbOxsbGvsK6ws7Oy
-sbGtqqijpKajpqWpq66vrq6xsK6vtLawtra4vLq4uLW0tbW0trOxsra0tLSys7W0
-tLKzt7y1sKmXemVaVlNUU1RSUVBPTU1KSUlHSUxKTElKSktLTEtNTEtMSUtPUUtK
-TEtJUE1JSUxMTUpKTVBQTkxMT1BOTE1MUVFRUFBQUU9PUE9NT1FRUFFQTE1PUVFO
-UFBOUVRTUFJPUU9RUlZTUVBRVFNUUlJTUlVUU1RUUlFQUlRVVFVXWVZUWFRVVE9R
-UVNUVVZVV1ZWVVJUVVdXV1pXVVZXWFZUVVZWV1VWV1hZWlldY1taWlhcW1pbXl1e
-XF9gXmBfXV1eYF9eXl1ZWFlZVlhZWFdVVlpYWFtejrzI0djd4OPl5+fp6WBdYF9f
-YGJiYGJfX2FgXVNOR0hJTkhFRUdHQkVFRUlKSERDQkREPzlCQkI/Qj8/QD9CQEBB
-PkE+Oz4+QDw6Ojk5Ozs6PD5BQkQ+PzxCQT08P0FDRkE/QEBCQ0VFREZLS0lNTkxN
-UE5SUFBQUVdXWVVPT1FSUlFXUFBVU1RPTFJSS0tJSUdLSktRTEhKS0lOU1BPVldV
-WFNSUUxOT09UTFVTTU5FR0hGRkJAREM8OTc1NTc3NzlAPDU0MTI1NTU0NTg4Ojw7
-OERFR0xGU09SVlRXUk9bWFlUVVFQTkpIPzk2NDQ2ODg0Mzc3NTc3NTU3Ozc4PDo6
-PUBDQ0NCRkNDRkFERURHRktGSU9ITFBLUFNeXFdiV1RZVFxVXmJbVlRVUFROWF5c
-WFFVYFtUVFZXV1ZYUVlgXFROUFFXWVRYXFZPTlRSVVleUE5IQTs1NTM3NztETVBV
-WVtcWFpVWVdWWVNRUl9fVVZXV1ZaVFdRTFBTWVRSVE9TUU5PTUxSSkxQSU1QUU5K
-U1hSVlJMTUxPT0xUV09QUkRGSktQUExOTlBSWFNTUlpcTktSUlFSVVFEPDg3OTo9
-Oz88PEFCSk9SUEpERU1ST1RndH6CfHVpW1FIPDk5ODk7PDg5QkI9Qz04ODk3P0ND
-REA/OkBDQUlLRD9AR0A/QUZMSUhHRj00MjI4NzMyNTQ0NDU2NjY1Nzg4QERHS01W
-U1dXWGFdW1taYVhZXlpYXFpXVFFSUFxYUlRXUVFTVVpZVVFPUFNQTk5XVE1HSktT
-UU9RVFNOU1VOTE5NT09LSUtJSEZGREZEQEFDQEZTY293eH2DgYKBgn5/goqLhYCB
-f32AgoKDgH6BgIOEh4qNk5WZmp2fn6Ggl5OQkpORjIiDf359gIWKjJOUl5WZnZ2Y
-k42EfHl0d4GHfXmGlpqWlpeXlZSUkI6OioeHioaGhYODgoOCh4qIjI6PlZaal5aW
-lpWWl5iZl5iXlZeWlIl2b2xqa21tb29wcHB0d3Z2dHZ2eHZ2d3d6fYF/eXx/fn6C
-hIODgH2BgoSHiIOGhYaIiouLiouMjo+Njo6PkI+Ojo2TlZOTkpOTmZ6jpayqq6yt
-r66vsbGxsrOzsba4trW3uLe3tLO0tLe1tbK1trO0s7W0sLGxrqOUlaCop6CSjZyl
-pKSkoJqXmJ+orbK0tLGxsK2tq7Gws7Owr6qqqqakpaimqKqqr7GyrrCvsbKysbG3
-uLi3tbi5tLSzs7Szs7OztLW1sbOztrOztre3tba3s6+gfWRYVVRUVFBPT01KS0pK
-S0pLTUxKS0tNTE1LSklJSkxMTU9RTkpKTk9LTE5NS0xNS0xNT09PV1NQT1BQTlJU
-Uk9OTVFTTU1NUVFOS0tOTlFOT05RUFBQT1BQUVNTVlVWUE5QUlBQUE1OUVBSVFJQ
-UlFUVVRTUlJUVVVVVlZUVllRVVRUUlJVV1ZUU1RUUVVXVFNVV1ZUVllXVldXV1ZV
-V1lZWFhaWllYVVZbWlpVV1xeW1lbW1xeW1xeXl5dXV5eXmBgX15dWltZWllUVVda
-VldcWmKKvMjR2N3g4+bn6OnqW15aVlhUVFVZW1paVlNSTklHSEVHR0A+SUhDRkdK
-R0dFQkRAPjw9QDxAREA+QUZBRD48Pjw/PUM7PkE8Nzg4PTs6Ozo7QkA+PTs9QD4/
-PDs8QEBCQkFDRUZFRERGS0lKS0pISklJS09RUVJTVVZWUlFRUE5PVVFQUlRVVkxQ
-UUtMS0lHR0lKS01JREpHSkhNT05ZWltbV1FRS1BTU1VRU01LSUZJSkVIR0FFQ0I8
-PTU2ODs8OkI9MzU5NzY2NDU3ODk5OD88QkhES0RKSkpSU1FQUFlRVVFUUFNRS0RB
-NzE2Njg7OTc1NUM1NjY2NDU3NzY5Oj8+PUBAQUBGR0ZGQEdIQ0dDRkdDTEhKSUdW
-TExNTU5LTUpPTU1OTlBRTE9OUE9LTExMUFBOTVBMSUpNUVFRTExOUE5SUE9OTU9P
-UE9QUVBPTUxOUlBQUFBQU1FPT1FSUlBOUU9OUFJPUVZSUFVSUlJUU1dUVlRVVVhV
-VlNUV1VVVVVXWVdWVlZYUlVcXVxfXVxcXF1eXl5hYF9iX19jYmVfW1tdXWBeW1pa
-XFxcW1taWVhYV1dfa569ydDZ3eHj5ebn6OlJSEpSTktPRUdLR0VFRkE/QEVJRUND
-QkFFQj9BQ0FCQz9APUE/QD08Ozg5PTs4NzY3ODg4Nzk5Pj08OjtAPDs6PTw9Ojg2
-NTU3Ojo4OzY4OTc2OTw6QD1FQD08PD06O0VBQj8+PTs7Oj1AOz09PD47Ojs6Ozo7
-Ozk5OT05Ozk5Ozo7Ozg7ODk2Ojc4Ojs5OTg7PUA9Ozk6Ojk7QEE/Pz08Oj8+UEhA
-RUlCSEtERUZER0dITEpGSkZIUVBTUlZVVFJKSEJBPj07PTg5OTUzNjEwNDQ0NTM0
-NzY1NTU0MzY3Pjk5Qz89QD0+ODg2NTQzNDMyMjI2NjM4OTYzNzU2OTs5ODQ2ODk2
-Ojg6Pz87Oj87PT9FRkJGQ0FKR0tIRk9HTFRKUE9QW1RSTE9TUlNPTlFRWlpXUlRY
-TVJVW1pcXltXV01QU1NTUVNPTlJYWFhYWVZgXl5kW1ZWU1hSWlBURkZBQTs9ODc3
-MzczNjQzNzk9SU9UVE9OTk5YWVRWU1ZTU1hRVlpXXFNRT01MT1FYWVNYT0xRWVJS
-VlNSU0tQU1BTVlZZV1JRTEpWVFNUUlVaVFlWTk9PTFFLUVhUUVFQUVJTRTk2NDs5
-OTo3PkE8QENFQ0JKRERHRk5JS01GRURKUklMTEtTUVFMUVBIPjs8PTw+QUBAQEdE
-QT07OzxMTU1PUFFNRDw4NTo8NTU0MzQ3NjQ1NDY2NDQ3ODo5Oz9KS0xQVltcVVRR
-WFZWXFxTU1FQUVRaYFlXWVlaVVRcXFZXWFhWVFFLT1FUVVJQUU9MUlBMUVFSUk9Q
-VFZUVVxtgI+Vl5eWlZCOj5KVl5qcn6KipKWioqCjpKSinp2bmZydnp6clo6HhoGA
-f4CBg4mOkJebmpOLiYeIiIeGipGZnJ6enJudnqCjpqerq6upp6qppKCdoKCdmJKN
-jZGQj5GRjYaCg4KDgYB/fHt8foGMlZaZmZ+hpKWmqKShmpSKgH+BgX+KlpmUh3t/
-i56srKuwra6xsK6jmI+MjIuLiomIiIJ/hY2Tk5aWlpaXlpaSj5CQj46Ni4aBgoZ+
-c256hYmDdnB2foR/gn1+g4mRkpeeoaeoqKiqrK2qqaalpKOfoKCenpyalpaUlpeW
-lpWXlpWVlpiboKGgn6CfoJ2enpuampiWlZGPi4qNlp6ho6apqq2uq6iopaWlpaeo
-qauqrKyur6+trK2usbSysK+ytLGwsbCxsbCxsa+vsa+vr6ysrK6wsrGwrbCzsbGt
-sa6xsbKwr6+vs7S1uLe1sq+vrbGzr7O0srWxrbGxsbCuraupqaqsr6qOZVlVVldU
-UVJSUlBOU1BQUFBOT01NTE1LT01NT05LTkxMS05PUE5MTE1QTk5MTk9OT05MTEtO
-T0tMS0lMSkxPTU9PT05QU1FQUlJST0xOTk5OUU9QTk1PT09QUVFQT1BPUFFPTlNR
-VlRTUFFQU1RTU1NUVVNYVldUVVRUVFZVVFNVV1tbV1ZUVFZXVlpeVlpaXF1cXGFf
-XF1cXmBeYF9fYmFgYGJhXF1bW15eXFxdW1paXVpZWlhYWltkm77I0Njd4OPk5+jp
-6klLTk5ISUpPQ0JHSUhJQUVCRUZHRkZFRz4+QUFAOz1AQDo4OkJDPD06ODo5Ojo1
-ODs4OTo5OkE+PT48Ozw8PUA+Pjw9QDw5NTc4ODs7OTg1ODo4PDk7Ozs9Pj44OTw/
-Oz1BQD89Ozw6Ozo7PUE+PT89Ozo7Pjg7PDo4Ojg2OTc0ODo4OTo9PDk3Oz08Ojo5
-Ozk8Ozk6Oz44OTk+Pz07PTw9QEFDPz5ESUNCR0VITEVFSUhMTkhISUpYU1FTTVNT
-WVZOS0pFPD89Njc2ODU0NDQzNTQ2MTU0MjM1Nzc6ODg8ODk9Oz09PDg4OTY0NTMy
-MzM0NDU3OTY3NTc2OTs5OTc4ODY6Oz5DQTxAQDo8PT1BQUdMQkJDREZISUVES0pK
-UElPUVJUUVZPTFBMVFFWVlRcWF1aVVZOTFFYWlteXFhZVVdYVVRVWVdOT1VVVFxg
-WF9iZWNaV1RVWFNUT1lNUEhHQUM+PDc0MzM0NTs5NztNTUtQU09OSlFUT1NSUFRU
-VFNTXVdUVlBUWFZRT01TU09LV1JRVU9PUE9PS05WWlNYWFZYV0xPT1FYUEpRWF5Y
-UlpTTE9PTk9OUFBPVVFOTkxGOzk6Nzg3Nzg8PTtAQUVFREdCRElKUFBKTEFCQ0NO
-TUxPSU5NUEtSTk1FPDs6Oj08OkJGRUNFOjpCRElNTlBRUUxGPjs4Nzo3NTM2MzIy
-MjEzNTQ0Njg7Ozk6PUpHR1FaXFlUVFFSTlJXVk9QVlVbWltgWFpZWVZUU1ZcWlRV
-VlpYUk9PVVRVVlFPTVJRTUtOUlBPUFFQU1JTWWZ9jpeZmpiUkpKPj5OZnJygpKSf
-oqKfnp2joqGgnJ2gnJubnp+bkouHg4eAf39/ho2VlJaXloyGg4iIhYWJj5Wam5qd
-mp2foKOlpaipqKmpqamjoJ+fm5+elImTlZSWkpGPiIaEg4SEfXp4eXx+ho2Vl5iZ
-n6GlpKSnp6OemZGKhYaIhoyan5+VjY2Unqqsra6trK6tqqKTjImFiIiHh4aFg4GF
-jY+OkpaZlpWYl5WTlJOTjYV3cHp8eWtnbnl5cmhma3V6gIKGjI+TmpycnKKorKys
-rKysra6sqaWjo6KkoqGfnZyZl5eXmJmZm5iamJmYmp+joqKjoKKgoqKhnZucmZiU
-jouJiY2VnaClp6mrqa2ppqejo6Oio6Wkqquoqq6urrKysK2tsrGxrbSysbCusbSx
-sLGvrqyusK+tr7KusLGzs7aysrKrrq2tq6uvsrOxsrW0sbS1uLOxsK+usbCzsbGy
-s7O0sq+trrKvrauprLGyrZZsW1lXVlVUUlFPUVJTUVBQT01QT01OT09PUFBPT0xL
-TU1PTExNTUxNT09OUFBPT09NT05NTk5NTUxLTk5PTktMTVJNTU1OTE9PTE1OTk1S
-T05RT1FQTkxNUE1RUFBST05SUU9PUFFTU1NRUlJSUlNWV1VTU1RUUlNWVlJUU1dX
-WFVZXVhYWFhXUldYVlxdWlxYWltaWltcXV1cXl1gYWFgYGBfY2FgX19cXF1bV1ZX
-VldYWlpZWVhaWWCSvcnR193g4+Xn5+nqTUpJSk1IRklCPkNHREVGRUJCRUNER0ZC
-P0NGSEE/PjpAQz8/Pjw/PTs4OTo+Ojk5Ojo6O0E/Pj09Pjw4Njw8Ozk+QTxBQT47
-Nzc4Ojk6Nzo5Nzk2PDk6Ojg4ODk6Ojs8PUBBQUA9PTk8PkA9QDs9P0A/OTs9PDo9
-PDs5Ojk4ODs4Ozo5Ozk4Ozs7PDw8PDs6PDw+PTk6Nzo8OjpAOTg6Pjs6Ozw+P0FB
-RT5AQ0ZLQ0BISElOSUtJS1pWT1RPVVNZXVtaUk9IQT09Ozo4ODo5NjMyMTI0NDIy
-NTEzMzY3Nz04ODw2Ozw9PDk4Nzc1NDIzMzM1NDYyNDI1Ojs5ODo7Nzg5Ozk7ODs8
-Ozk/PTs7PEA8Q05FQkBFQkZIQEVLRUVESlRPVFhPUU1JTkpRT1FVUllZWllZXFBN
-S1FWWFdbWltYW1lXVFZWVE9SU1FUWldRU19jXl1YWFpYVFVSV1RRSkpDQT07NzYz
-NjM5NzU4N0JMR0hTUk5VVlBPUVFQVFhUVFVYWFRQU1BUW1ZXWVFST0lQUVJVVEpS
-VU5RUFJaVk9OT05YUkxQTlNVTVVYU1dXVFRPTFFRU1RQU0xOT05SUktGPjo1ODY2
-ODo6OUNCQkVETURESExKUUlTSUZIQ09LSlROUk9OTVBJS0pEPjc7QDw/PkA9PDs4
-OUJITk9RU05MT0c9ODUzMzQ1NTEzNDM1NDM0NDM1OT08ODo9R0ZJS0tXVlNXU1BN
-UVpZT09UVlhZWFlYWVRSWFtZVVZTVllbVVRRTVNVU1RRTk1STk9NSEtQUFNPTlBS
-UFFVZXyKkpuXlpeVlZGQk5aYnaGkqKSlnp2ho56koZ+emZuenJuenZmUj4uFhoaC
-gICBiI2QlpmWkIiFhYiLjIqNk5Sam5ycoKKipqimp6uuraympKOioZuenpaOkpWX
-lZORkY+OiYmIiYR/e3t9f4KHj5ObnqCio6WmpaWnoqKelI+LiYiOjY6ZmZSKi5Oh
-qKurqq+zrq2ooZKKhoiHh4mJiIGDgYaOkZKVlpaYmpmVk5WXlpCGf4SIi4Z0bXJ9
-gX1uaW94g4iKkJWYnJ+ipqWnqaqsq66urrCvraqqp6Wmo6Shnp2em5mZmJmVmZaZ
-mpuUlp2cnZ+ipqWjo6OipaSioJ2bmJSPiYiHi5WboqWnqautraulo6SipaSjo6ip
-qKqssLGxsrS1srGysbKssa+wrqyusbGysa2srK+wrq2trq2usbCxtLWxrbGurq2u
-srCxsLCwsrm2tLGwsK+usK+ysK+srK2ur66usbGvrq6tq6mssLS1oHhfXFhUVVZS
-T1BQT1BQTk5RUFBQS0xNTk5OTk1NTUxJSU1NSkhLTU1MTUlPT01OS0pMS05PTUxK
-UFFSTU5QTk1NT09QUE9PUE9SUFBTUlFRUU9OT1BRT1BQUVBMTE9QUFFTTk9SVVJU
-UlBPUFJSUlNSVFVTVFZUU1FTVVVUUlRYWFdYWVdXVldXWVtZV1dZWVpYW15cYFtZ
-WlxcWlxeXV5gXV1eYF5gYF1bXV9cVlhcWVdXWF1aWVlaY5G7yNHY2+Hk5Ofo6OpO
-TVNPVVFISUA8QUdIRUVFQkNFR0VEREFCRUA/QT47PD48PTw8Ozo7PTw3Njs7PTw7
-PDw9PDw5Oj47OTg2OTs6Ojo8PDxCPzo4Njk8Ozo4ODs9Ozk4PDo8Ojo8OT07Oz5B
-Pz09PTs8PEA9QDw5PTk6Pj06Oz1BPjw6PDs8OTk5ODk4Ojw4Pzw3QDg5Ozk5ODY2
-NTk6Nzk4N0M6PTw/PTo5Nzk8PD0/PT49QEJDP0NGQEFBRlBNTU5JVFZMU1BTWlhd
-Xl1YVk9PR0FCPEA3PD44NzQyMjY0NjU2NzQzNzQ2Nzo7OTg3OkFDOT05NzgyMjMy
-NDg0NDMzNDI1ODU0Nzk4Ozk6Ojs3Ojo9OkA9Ozw4OTpDSkZHRUZGRkhGSkxJSkRH
-Uk1NTk1PSElMSlBPUFJRWFZXWF5hWFZNT1NZW1dXXFxcXGBcWllWT05UUlJSUk9X
-X2NgW1RYV1RdXFNZV1lPSkdFPjs2NTQxMzU2NzU1OUJISEtWVltXTlBSUFVTUVJQ
-Vk5MU05PVVFRWlRZVVBVTEhPUlRQTEtUVVBWUktVUE1SUE9VUFBPTFFRU1BOUllT
-UFVQT1FQV1JPT01TU1dXUEtDPzo5OTo3Nz1BSkI8QkFJRkRMSUlRR05RSEtFSk5M
-VE5TT1BRTktMS0NAQENDOTo7PUA7OTo7QUlQUE9STUlOSkE3NTEzMjE1OTc0NDQ1
-NjQzNTU7Pjw7Pj1EQUdKUVhXWVlUVVBSVVFQTk9RVFJYV1VWVFVdYFtTVFdXWFVX
-WVRSVlVVUE1PTlNNUFBSUlRUUVJSUVJPUVFheIuSmZqZlZeXlJWTlZebn5+ipaWi
-oKOhnJyem5uZlpeXnJ6bm5eTi4mJiIiCgoSIjY+TlpeUj4uKgYeGiY+Vk5aam5yZ
-n6GjqKmsraysrKikoqakoqCdnJuZmJeVkpORkJCQi4mJhYGCfnx/hIyRlZ2jpqel
-o6WlpKemop2emZGMiIuLhoeIhX99hZago56kq66sp6mhlYuJiYqKiIeGg4eIj4+R
-lpaWmZmXlJaRjoqHgn2DkpiTg3Z1f4eBe3d7f4SMkZWbnqGlp6alpaepq66trKyu
-rq+tqKepqaimo6Gfop2Ymp6cmp2dop2am5qWmZyeoKKlp6ilqaOhn6CinJyVkI+L
-i4eMlZ2kpqarrq2sqaWioKGjpaeopqenqKirq62xs7S2srGysrOxra6ur6+ys7Ky
-rrCurKupqq6urKqrrKyvq6utra2ssLKysK6xr7GvsLSyrq+vsK+wr7Kxr6+uraqs
-rq2ssrOvrK+sqauvsrChe2NaWFxZUVFQU1BQTk5LTU5PUE9MTEtLTU5PT09NS0hL
-S05OS0tMSktMSkpMTE9OTU5OTU9NTVJSVE9OTE5NTE9PTU9OUlNTUVBTUlFSUE1P
-UVNTUlNTUFBRTFJPTlBRUFBRUVFTVVVST09SUVFQUFJSU1RUVVVTUFJUVFRUUlVY
-VFRUV1dVWFhZXFxbWltbWVtbXV1cXlpcWltdXWFgX19cXl1cWlpcX1xbXF1cW1dU
-WFlbW1VbVllji7nI0djc4OTk6Ojp6kxNTkxQT09HRUhERkZESkdJQ0NHRkNAQEFD
-RkRAPjo+QDxEQTs6PTg4Oz48PT5APTo6Pj1AOj08ODg6ODg2OD05OTg/Ozo6ODk7
-OTc7Ojs7PT05OjpAOjs6OTw+OTs8PTo8PT87PUFAOztAQ0RAPTg4PDo6Oz89PD06
-Ozg4OT46Ojw8Ozs7Ojo6PTw7Ojk5ODg7Ojs6Njg5PDw7OTs8Ojo7Oz0+Ojs6OTs/
-Q0ZGRkM/QENHTlBKUk1KT01QTlFbVl9eXV1aXlZQS0pGQT49Pjk2NzY1NDc2NDg4
-NTc1NDM2MjM3Ojo9P0M6Pz47MzM0MzEyNDMyMzMzNjMzNTU0NTc4NjY0NjY7OTo5
-OTk7PDo9PENKRUNDS0pDREFKRkZSTkhTTk9KS1JMS1BMTkxQVFJbVlhWW11VU1NS
-VFpfWFpiXVZXW15dXFVRTE9PUE9SUFhhW1pYU1NVVl1eXVdWWFFLSkdBQDs7ODQ0
-NzU1ODc2OUhIRFBaV1FVVlhUU1JPVVVWUE5RUE5TVVRWWE9PT05RTUxMU09LRUVM
-U1RST01TVVRTUlNVUE9OTFJTUFlWVllVWFlQUFBSVUtMT09VVlxWTUQ+OT45NTY7
-REJBQzk9QElKQ0dKSVNNTVFFSEZJTklLSk5WU1FRU01LRUdJQj05QEE/Pzw7ODs9
-R1FZVFFKR0pLQjkzMzI0MzQ3NzUxNTc1MjY3Ozw+Oj0/QkdFRUZTW09UWFZUTlZc
-XFNRTk9RWF1XVFlZXWBhV1RaWVZUVltaWltaX1tRV1dXTk9UT09RU1JWV1JRU09N
-UlpzgpCTmpeXmJmamZiYl52foaChoaOjpaOgnpyam5iZmJaWnJ6cmZOQjouJhYSD
-h4aLjpGUmJiNi4mHiYiLkZOTlJiYm56bnKOlqa+wsrGspqWko6SnpKKem5ualpaZ
-mZaQjo2Oj4mIhIOAgIGEio+VnJ+jpKispqOkpaalpKCfmZGLh4aLiIB6bWp1go6N
-iZGfrKumo52UiYuKiIqHgYCBiIyQkJGTlJiXlpeVkYp9dnJucH+NjYZ3cnV5fnx7
-fYaIjJOaoKKnpqiop6akp6aprKysrK+rrqqpqquoqaimoqKkoJ2doaCcnqCdm5eZ
-nKOdoZqcoKKho6irq6ano6GgnJeTjY6OjZGYn6Goqq6sraynoZ+dnZqiqKemqKel
-pqmtrK6wsrGztbKzsKysr7KvsLGvrbCzsLOvrq2qraqsqainpqmsrKysrqywrrCw
-r6+usLCysrOvsa+xsrO0tLKwra6tsK2tr6+ura+wr6qop6yur6J8X1pXVlZSU09R
-T1FPTUxMTU9OTU9MSk5PTU5PUFJPT1BPTE5NTUtNS0xTT0tNTlFNTk9PS01OUFFP
-UU5OTUtNTUxQT0tOT0xMTVBTT1BQUE9PT09RUVJRUE9QU1FQTk9PT1JTUlNTUFBR
-UlFRVFlSVFZTUVFSU1VSU1RXVVZYU1NVU1RYVldZWVhbXFpYWllbWVlcXFtaXV9d
-XV1cW19gX19fXVtiYWBdXFlZV1tbWFdSVFVXVlZXWmCLucfQ193f5OXm5+npUE1P
-T0xGSkZIRkNDRkRCQ0RHREVEQ0JERUNESEVEQkBAQEFBQkJBPjw4PD4+PUA/PDg+
-Qj47Ozw9OTQ2Ojo7Ojk4Ojs6Njo4PTw8Ozg8QD88PDo9Ojo6OTs8PkA7PTw8QDw8
-Pj89QEA+QDs+QD8+Ozo7PUE7Oz4+PTs7Pzw6PDo6OTs7Ozk6OTxFQjw7Pjs7OTs4
-Ojs3Ojk4Ojo7PD49Ojk9QEA/QEI9PUFCRUdDRERCRENJUE5NTEtKSU1MTFZUVV9e
-YVxgXFxQUE1HQT87PDk3NjM1NTQ1MzM1MzEzMzQ0Njc7Ozg6Pjo/Ojg2NDUzMzIz
-MzEzNTQ3NjY4NDI2ODg3OTk2Ozs6Ojo8Oj8+PD5AQkRCRkZISENDQ0pKSFVSS01M
-T0xQUk1MTVBST1RWVFhVVVNRVFRSVVtWWFxXV15dVVRYWlpaVVNOUFNNTk9TW2Vc
-VVVRUl1XXFlgXVJLSkdGSUZGPjo2NzUyNDQwNDU2Oj9AQlJXT1VVVmBZV1FRVFNS
-VVFNU1FZXFBUVUlMTEtSUUtRVE1FRUdOTExLS1RbVllYUlNQTVdQT1ZWV1RQW11V
-V1tRUVBSUElMTE9ZYWBTR0E/QDg5PDxKRTg8PEFCQUlGR1BHSUlJTkVGQkdLSExO
-TlFPWFFXTkRIS0o9OTlCPz9APDs5PURMUFpUTUlHSks7Ozc4NjI0NDM0NTMyMjY7
-Ojg6PT4+Qj5DSE5SU11ZTVhZVVNSWVtbVE1RVFpaW1ZRVFZbYF1ZVVhXVVdbW1lW
-TVdcVFZUU1BOWFdVT1FRT1NZUlJST05TXG2FkZaXmZuWl5qZmZmbnZ6cnZ+go6Om
-o56fnZubm5iYmJWboZ6Zlo6LiIWFg4SCh46PkJSUlI+EhYaJh4eNk5GRlJaZl5ib
-nKGnq6qwra6npKWopKKhoqSem5uam5iVlZKQjo+NioSGgoGAgoSJkpWan6OkpKim
-p6alpaOko6CalYyMjYuJg3pwaW1/hH96eImfpKmloJaPiYiKiIR/gIGMko+PkZKN
-j5GTko6Cd3JwZGVxfH14cXFydnp6fIOIjZWWn6Khoaalpqeqqamnqqytqayyrq2r
-qaeoqausqqempaSin6KkpaGhnZ2ZmZuen5+hn6Gho6WrpqWmqqmioqKbm5eRjYyT
-mJyfo6WnqKussaqioqGfoKOjo6WnpKWlpqWorK2trbSyrq+rqa2wr7KwsK+srqyt
-rauprayrq6inpKOkp6eoq6urraytrq6zsbCvsrWys7OysbCytLW0sbKvrKyurq+x
-sa+tq6+vrqmoo66uooBgWFlZVldVUU9PT05LTEtLTExKTUxMTk5PUFBQT01OUE1Q
-S09PT0tNT01NUE1OUVBOU01MTU1MTU9OTk5HR0xOTUpMTlFPUU5PTE5OTUxQTk1Q
-UVBQUlJQT05OTU1RUVNSU1FQT1BRUFVWUlJTUVNTV1tTUlRTVFZVVlVXVFRTVFRU
-VVZZWl1cWlhaWVpZWVlbWlpdW1lbW11eX2BcW11fXV5dXV9eXF1cXFtdW1lYWlhW
-V1dUU1ZfaI66yNHY3eDj5ebn6epVU05NSVBLTUtLSEZCRT5AREVIQUJCQ0NCQ0FB
-Q0JDQkRBQ0VBPTw7Ozw/PkE+PUFDQEA+Ozw+PT06PD09OTo5NjU3OT06Ozk4Ojw7
-Ojo6PDk9Pjw8Oj4+PDs5PTs8Ojs7PDxBQEE9Ojs/QTs/PEFDPTo9Pjw7Oz0+PTo6
-Oz07Oz05OTs6Ojw9PD07OTg8OTY6Ojo3ODo3PDk5PDs/PTw8Oz1CQERDQj1ARUVH
-RUJBRkxGQURMSUpOSVBMTU9LVlNQV1RdV1ldW1RWVVNJSEI+Ojk6OTUzMjQ2NDU7
-NTU0NTY0Ojo4Oj89Ojk5OTY1NjY1MzMyMzY8NTU1NTQ1NDY2Ojg2OTg3Ozg7ODs9
-Ojw7QEJDRkA/QUVDQEBFTE1OUktOT0tOTE1TTlBPUFRUVVhSVU9OUE5OUFpXXV1U
-VlpYWFRTWFtdW1lWW1RSVE9LTlBTXWBbXFJSWlZTU11cU05JRUpNSUZDOzw3NzM0
-Mjc2Njg2PERESVhVUlNVUldVTkdPUE1RUlBTU1NbU09QTEdLTVBWT0xPTUtNRUVG
-SU5TUV5bU09OTVFRVFZOTlZPUFZWWVlTV1ROU1FSVE1MUFddWFFJRT89PDo2N0BD
-RkVBRUU+Q0VFTEhHSEVFRkRIREpJUE5MVVJYU1lVUEhLRkBBPD5EQUY7NzQ6RllV
-WFRPSkhISUU6OTY1NDU2NTQ1NDY2NTY1ODU3Njs+P0RCSExNV1NPVlFUU1VZXltW
-Uk5UVFVVUUtQWF1cW1hUU1NYWVpZWFJWXl5XVVJQTlNYVVNSUVRPUVVVUlFRUVZf
-boORlJaYnJiZmJiYmpufnZyam56hpqakoqGfm5eWmJmXlpyenJ6ck42KhYaFg4SI
-jI6KkpGRi4WBg4OFiIySkZKPkZGSlZmeo6WoqK2tqqqnpqilop6goaGgnZyYmZmY
-mJKRj4uHhIOCgIOEhYqTl52go6Slp6ano6KkmqSkpaebj4yIiYaFg316eoSKiIB7
-f5KkrKigloiIhoKGgn18g4yMjZGPiYqNj46Lg3BpcG9fZHN7d3NzdXl6foGHj5CS
-nJ+hoqGgp6irq6uorKqssLKur66tq6utrKuqqausqqiopaKhoKGloaGcnJugnp+h
-oaKjo6OnqKmsp6SjoqGenJqYlpGMipCWnZ2fo6eqrLGvqaShoaKkoqKioaOjpaOi
-p6ipra2wsbCwsK6tsa+tra+tq6qrqKurrKimpqSmqKempqenq6yrq62ura+urrCy
-srGxs7K0s7OvsrGwsbKxsK+trKytrrO2ta6ur6+sqammrbCoimdcV1dXWlVVU1FQ
-UFFOTUpKS09KTk1OTE5MTU1NTE1NS05OUVBPUE9OTU1NUE9QTk5OTE1PTExPT05N
-T1BOTE9QT01NTkxPTVFQT05RT05RT09ST1BRUE1OUVJTUFBUVVJSU09OT1JQUVRS
-Tk5QUlRVVlZTVVVTUlNTVVdYVVVSVVhUVFdYWVdYV1dZW11aWFhZV1lZXVteYF9e
-X19bXl5dXl5dXF5hX1xdW1tZW1lcWlpYVlRZV1doirvK0djd4eTl5+nq6klHSUxN
-SERHSUdIREM/REREQ0NGQkRGRkRCQT1BQkFAQUM9P0E+QUA7Ozo6PUA+Pj5EQD9C
-Qzk3NztCPTo5Ozw9Nzk5OTw7OTo6Njg6PTs7PT06Ojs6Ojs8Ozs7Qkg/P0FDPUA/
-PDs8OTc7PT5AQD49PEJBQDw8QD8/PDo7Oz05OEI5OTg6OTs7Ojs5ODk5Ojc3Ozk4
-PTw3Ojo4OT4+Oz5CQD5BPj5CQUBCQkFCQkRFQkdFQ0lFTFJKUlFRVFJVUU1RVVtX
-WFlaVVZXV1VYUEpJQD05NjUyMTQzMzQ0MTU5OTU6OTg3OTg4Ozc5NzU2NTQyMzU1
-NDg7NjU2ODk6PTk5OTc1Njg9PDg1ODtBQD49PEBERUVBRkA/PkVLSk1QTEtMTE9K
-Sk9PUExMUU9XUU1UUlFQU1NPVVVYWlpcXFlQT1BVWVdaWlhaWVBTUk1OTFBWW2Bf
-V1ZdVFBVWVRQTkhHS09LQEE+PTg0MjQ3NzY6NzY4PkRFT1lRT1NNT1VMSEdMUU5S
-UUtTU1VTV09PT0lNT1FQTU1PT0xOSUdQUlxXWlxUTFBPTlNSVE9NVVdRUE9QVVJN
-T1JYV1NbXFVUUVZTTE5LQTs8OTg4PEY/RkNER0RHQ0FHR0VMRklGR0lETk1LU09V
-VlZTVFVRREpIQD48Ojk6Ozo6OTpBVFNWVVhTR0VKRjw8NDQ0MzM1NDIxNTYzMTY1
-MzU5ODo9QEBFRUZLTk5WT09OVmBeW09QUFNSWVdMUVVbXVlbW1ZMU1xaWlhXVVZc
-XFlTUlJUU09VU09RUU5SVVNNTFBUVVprgpKZmZaZm5qbmpmcnp+gn5ydm52dnqOk
-pKOfmpmYlpmXnJudnZqVlI6Kh4WEg4mMjZCTlJGMhoOBhoiKjY2Pk5OQkZGSlZyi
-p6usq6ytrKampKaloqChnpqZmJybmpiWk5KPjomJhoWDg4SLjZSan6Wmpaalpqeo
-pqalp6apqKGWjYiGhoWGgoeNlJmblIqIkqGpqKGUjouJhYGAfYCIjYqMiYmMjoqJ
-ioV3aW90b2lsdHNyc3h6gYWMkJCUmZueoqOfoKOkqKytrKyrq6qrrq+uraurra2s
-q6uqq6qop6qopKKioqOenZydnZ+eoqeppqalpKWoqammpaKhnpyampaOjIqMjpeb
-nZ2jqKqur6+qo6ClpqCgm5+joKGjpKKkpqmsrK6ur7Cvs7Ovrqytraqpqamlpqmo
-paipp6ioqqqoqamusKytra2xrK6wsq+wsLGxsrCwr7K2s7Kys7Gvrayqrq2vtbOx
-srKwsKusra+xsaaLaVxaV1NSU1RSUk9QUE9RUUtKTU5NS0xLS0lMT01SUFFST01N
-T05MTFFNTk9QUE9RT01MTU5RUE5OTEpNT1JQUVJUT01NTU9NTVBPT1FRTExPTk5Q
-UFlRUVVSUFNRUVBSUVBSUFBPUVBOT1JPTVFUVVdUVFNSUlFSUVFSVVhZVltYV1VX
-VlZUVVdWWFpbXFtcV1pbXVxYWl9gYF9hYF5fXF1dX2BeXmZgWVpdW1hYWFdbXFxa
-WVhXVmOXvcrS2d3h4+bo6OjqSUdES0hGSUlLTUhGSElGR0pEQkZGRUJGREJEQTw/
-Q0E8Ojs9RkRCPDg4Oz87PD49Ojs7OUE+PD49OTs/Ojk7Ozo8Ozw8OTg4Nzk6Ojk6
-OTg6OTc8OTs4ODo4PTpBT0U/QEBCQkA/PD4+PDw+PEBDQD9BQUc/PD9DQkA/PDo9
-PTw5ODo6OTg6OTo4Ojs/OTw8ODg5ODc4Ozk7PTs6PT88QUNBPz8+P0RCQ0RCP0FC
-QkhEQkRFTE9MUk9SUk9XUlRQTk9NUFNZWFVSUVRYYmVcVk9HRDs4NDY1NTM4NzU1
-NDY7PTo0NjU6OTc5ODc2NDI0MjU1NDIzNDU2MzU2Njg5OTc3Njk4ODk6NjU+Oz0+
-PTw7Oz5GST1BQkBCRkxISEtIS0ZNUkpJTE1PSk5OT1dWTlFSV1JUUlFVVFdaW2Fj
-XFNNTVFSUFRXVlZVTEpNTEhKTFFYWVxWU1JSUVZbVVdPTkhKUE5GQT07NzU1MzE1
-NzEyNzYzPEdLVFVNUlJLTk1JS0lPUlBQS1FXVlBWV01RTk5QUFBVVU1RWFdUTU9S
-VVZTU1dSTU9OUlFPUVJRVlJMVFFPUktJUVNWV1VbVlJUWVNMTU5DPDc5OTs8R0FD
-RUJDP0NFREZGSVdNT0pLR0NKS0pRTVRRUlNSUExISUhCPzo3NTU5OTo5PUZMT1RY
-WFRNSUZFPzw3NjQyMzQ2NDU0MzIzNDY2NTo8Oj1BQ0NCQ0lKUllRUEdQW2FdUk5R
-VVRWVFNVWllYU1dZU1JYVldYWVpWV1ZXVk5RV1tVUlhaUk9RVVJPT05QTlBUWWmA
-kJeamJebl5qZm5yfn6Gin5mbmpeZnqOjoqKinpucm5qZnJ6gnZuTkIyHhYWGio2M
-k5OTlI2HhYaJiYyNjY2QkZCOjJCRlp2iqausqquqqKenp6eko6Ken5meoJ2cmpWV
-k5CPjoqJiYOFh4yQlJmcoKKnpaSjpainp6eqq6uppJyWjIeHhYaGg4eRm52ai4eP
-nqqnoZSMiYR/fn1/goyNjo+PiIuLiIqMgW9rcXFscHR1dXR3en6HjJOVmJuboKGl
-oaCjpaWqp6mtrautq6+vrK2srq2urK2qqKeop6Wnqq2npKOjpKGgo6GfoKGjqKel
-oqOnqaqmpqWkqKOfnJuZl5CMiYyRlpyfoqWmqKyurKimpKOgnZ2ZnKCho6Wmqauq
-qquvrKqsr62tq6yrr7Cwr6qoqKempaanqKqpq6mrrKyvrKysraysra6wsbSysLOx
-r7GvsLGwsraztLOxsLCtr6+tq6+wsLCtsKyvrayrq6+wpo5tYFtXVlVTUFJSTktL
-Tk5NTE5PTkpNSkpMS01MTUxLTUtLS1BNTU9NTFFRUk5QUE9PT1BNUE1NTU5QT05L
-T1BOUFBQTUxMTU5OTk9QUVNSUVNRT1FNVE9PUFBPUVBQUE9QUVNSUU5RUFNOUFFU
-VFNVU1JOT1BQU1NRVFVXV1dUUVNUVlZWV1dZWVVYXFlWWFZaWVtcW11eXmBgXl5d
-X15dW11eW11gX1xfW1xcXFxbWllYWVhWWFhYYp++ytPZ3eHj5efp6epLRUpMTE1O
-SU1MSUhIR0NFRkZBQkNDRkZHR0hDREFAPj1AQDtBQUA+PTw9QT86Pj5BPj9DPjs7
-Oz5APDg3NDc5PDo5ODg5OTc2ODo6Ozs7OTo7Oz88PT05OT04Ojo5Ojo9Sl5AOz1B
-Qz9AQDs9QD89PEBBQT08PkA9Pjw8PEA+PDo5Pjw5Ojo5OTw+QUA7PDk8OTtAOj1C
-Pz48Pj89Pj5BQz5AQUA/PkJBRUVCRExJR0VDREVKTE9SUVFVT1BNU1FRVFVWU1JV
-VVJVVFlcWVtVVVJPTEE6OTo0NDMzNDI4NTc3MzM1ODo7Pzk5ODUyNDU2NjU0NTQ1
-NjY1Njg5Ojo5Nzg4ODg3OTk6ODw6Oj07PT47PT5CPT89QEFDSEpJSERGRkhQSEpJ
-UFJNU1RSW1RUVVJbUk9RUVdWU1hWWVtaVVJNUVZSVFhYWFNOS01PT05LUFVUVFNS
-UVFQUlVXWVRSTE5NS0ZDOzs4NzU1NzU2ODYzNjg6QElJUlVUVk1GSlJPS09UUE5V
-V1VcU09VT0xXVlBQTE5TTk5XVVpWUFVWUVNQUFhUVlpNTlJNWFVNVE9QUFdZUElO
-VVZaVVNVUVRaVU9RUkc8Ozo7PT1FRUFEQkRFSEpDRUtJU1JPS0dMREtRUVdLUk9P
-VVJTSktNSklHOzo6Nzo4NTg/Q01UVlVXVk9FQUBAPDc1NTM1NTQ2NTEzNDY3ODY2
-NDo7QERARUJDTU1VWVdWUFJZYmBWUFJTUVBQT1JTT1BVWFRRVFVWWFdVU1VTVFRR
-V1hXV1lUV1VWVFFMSUpOVFFRU1heaHyMlpuenJuampqan6KdoqGcmpeYmZedoqel
-paakoJyanJ2dpKWinJiVjouGhYOHj5GQkJGSjIiGhYSHi42Oj5CRj42RjYqPlp6m
-ra+zr6unqKmmpqalo5+eoKCdnZ2ampeUkpCKiYeGh4eJi5KYmp2go6Klpaaoqaip
-qaqpqKekoZyRjYmEg4N+enuJjYmCenyLnKOflYyHgoF9eHuDjpGOkpWRjYmIh4l4
-bW10cG1ydnh6fYCDhYqMkJWanZ6goJ+hoKOio6ampqirq66xsbGxrqqrrK2sqaik
-p6impaepqKakpqOio6KgoaCioaOjoaWjqKeppqWopqSko6KdmpiWmJCMjZKYm5+j
-qKmsq66up6KioaCbm5mZnqCkpaWnqKiqraqsqa6vrqyrqKitrKurp6anpKWmpqys
-ra2vq6yurK6sqqqoqKmsrq2vsLG1srK1sq+vsrKvtLS0tbOyrq2usq+srq6xsa+u
-rrGurKuvsbCpkWpgW1lWVFJRT01LTk9OTExMTU5NTEhHSEtNTUxNT1BMTUxLSU5Q
-T09QTU5PUU9OUE5QT05MTlBNTk5NTE1OT01OTE5OTE9QUE1OTExOVFJXV1ZTVE9Q
-UVBNUE9NT1FSUVFQTk5QUVJTU09MUFFTUE5UVVVRUVNSUVFQUVNUVVNSU1RTU1ZW
-VVZYV1hXV1heWVlaXFteYF5dX11cXF1gX15eXV5eW1xkY1lbWl9cX1xaWVZWWlpW
-VlpmnL7K0tre4uPm5+jo6UpMSUxKTU5HSEpGQ0pFR0ZHR0VCPkFFRkJBREdFRENB
-Pj5DQ0NDQT9APT5BQ0JCQjxAPz49Pz08Pjw6Nzg4Nzg4OTQ3OTo5Ozs6PT89PTs6
-Ojs5Ojw8PDk8O0A+QDw8PD1EW0I/PT5APDw8QD48PEE8Pz8/PDs/PT08PT07Pj89
-Ozo3Ojo6OTs8PT5BQj8+PT0+PDtAQEA9P0A/Oj1AQj5DQEBBRUNBQ0JCQ0ZHTk1L
-RENKRkhMT1ZNUlVNSUJKUE1NTFBNTU5TUVRTVFlYWlFSTlVYUUc8OjQzMjQ3Nzg3
-ODY0NDI2PDk7O0A7NzY1MjQyMzU2NjQ0Nzc6ODc5OTk4OTc5Ojc1NDM1Nzs9OkI/
-PkU/QUZGQj8/QEdFR0pIRElDR09OTk9TWE9TV1BVT1FTUFVQTU1VYlhXVlZTVFtb
-XFBPWF1WW1pVU1FUVVNTU1FVW1ZVWlJOTVBTVlVWUVlPTklKTEc/QTk4Nzk6NDEx
-MDQ2ODY8SEdKVlhRS0xGSlZRUU5SUU1UVlhXT0hQUE5STk9UUU5TTE5WUlBQUlZW
-VldOWF1YV1FOT1FQVUxMVk9SVVBJSUdQVVVUTlNaVVhZT09RTEBBOzpCQEJBPUJA
-QkJGTUJCSEJPT0pPRUpKU1pMTUhKUE5RT09NTkpGT05GPDs4PDY1NDxCUFRXVlNS
-SkVFRD89ODU0ODg2MzU0MC81NzM0NDMzODo5Ojw+P0NISUxMTlBOVVhdW1dSU1VY
-VFBQVVNPU1NUTlFWVVxcWFVTUk9RUldbVVZaVExOVFpYSEtMTVFSVFFUVlhoe4mP
-mZ6dmZqcm5ycn6OlpJ2XlJKVmJ2gpKSlpqKfnJmcnZ+joqKfm5aTkIuEg4mNkZCR
-k4+JhoGChYWLiY6Qk5OQj46Mi4yTm6Orrq+urKuopqSjpaOjoaOin52dnZ6ZmZeU
-kI2LiIWFiImLj5SZnaGhoqepqKqiq6qoqaqpqaeonZWNiYaDgX5+eXd7eXVwcHmI
-mJuUioaDgHp4f4uPkZGTlJGKfnh6dGZmcHJwcnJ1eH6CgoqNkpOQlZedn5+gnZ2d
-n6Omo6Skpqisr7Cwr7GxsrKurqqnpqmqqqunqqmqqqampKSmo6GlpaKjoqSipqqp
-qaelpKuop6ilnZibmJeVkZGTlJecoKSmqaiprK2noqCdnp6dnJ2doZ+go6Wnp6it
-rK2trqyrq6mprayppqmpqKalpKioqqurra6srq6traqqqaelp6irrbCvr7K1trOz
-r6+xr7CytLe1trOwr66trKutrq+vrrCur6+vr6+xsauSbWBbVlNRUlNPUFJQT05O
-TE1MTkxLSklMSkxOTkpJSkxPUU1PTU1PUFBPTU5OUE9QT05OTlJNS01NTFJPTE1O
-TU5KS01MTkxOT1FOTUtPUlNUT1BST1BQT09OTE9PTlBPVFRPUE5QUVFPTlBQUE9N
-TlBUVFNSUlRUU1NVU1VSVlVTUVJUUlVTVVdZWVZVWVhaWl1dWl1cWlxaXF5fYF9g
-YF9dX11dW1haWltXW1laWVhXWllaWlZYW1+WvsvT2t7i4+bn6OrqTUlDSEpPTkVJ
-Rz1CQ0dGR0dDR0dDP0A/QkBCPENESEREQ0VEP0JAPDw8Ojo+Pj8+PkE+PDo8Pj4+
-PT06PD46PTk7NzY4Ojs6ODo7OTs9Nzg6Ojo4Ojw+Ozw7Ojo6OTo8PkA+PDw8Pz08
-PTtAPj8+Pj89PUA+Ojk8PD9AQT08Ojw8Ojk3Nzw/Ozo7PT8/Pjw8QUM8Qj86Ozw+
-PT08QD9BQ0FDPTw/REBCQkNHSkZKT1BMSElKUE5MUU9MTkpISEtLT0xFR0lIS1BL
-T05QTlFUTUpJU1BSTEZAQDo8NzY0ODQ2NTIzNDc9PD08Nzs5NzUzMjMxNDQ0ODg0
-NTY3NzQ0NjY5NzY2ODU3Nzg4Ozs5Pz47PUE+QEFBQUBASEtHRkVES01PUktSTlNY
-VVVPTlBMTU1LUE9NT1JcU1VXWVhUVltaUlFaXFtRU1RVVVVUUlJVWFNUWFhZVE1S
-UFVeW1pSUUxOSUtGTERHPTk1NDQ1MjMzMTU4NDQ5QEFMXVVNTkhKVFRRVFJQT0tP
-UVRQSUlRT01RVFZZUVNVUVZPSk9PVVpUUVFSX11OUlJPV1NSTUdSUE1RTkdQUEdN
-SkxTUlxRVFRNTVBKREU/P0M9P0A8QEBDRUNPRERIRk1NRk1FTEhSWk5KRkJFQ0xQ
-Vk5STElKTEU+OzpBNzQ2O0RKU1pYU05STUhFP0A4MzY1NjU2NTM0NzY0NTU0Njc2
-OjxAPz4+RUpJT0lLTE5YW1pVU1NWXFtWT1BYV1lVT05SVltaWlddWFRUTk1RWV1U
-VFdXVFRWW1BMTlNRVVZUUVJVWWR4hoySlZqam5ycn6Gjp6imoJyamJSUl52hoqOi
-pKGfm5abnqKnoZ+ZlpWTioeHiYyTkZGRkImGhoWGh4iMjpGUk5KMiYeIi5Capauw
-ra+vra2opKGkpaSlo6KhoJ6dm5eXlJSSj42LjIiHhouSlZqdoqepqaqpq6mpqaqo
-pKSqpaWglY2HhYGAfH18d3Z1cW5wdYCPlpWUj4J9eXd+i5OSlY+OioBydG9eVWNx
-c3d2d318gYWJjJCPmJaWmJuhoaCgoaGkqKmoqaqsra6zr7Gwr7SztLOxr6uqrK6u
-qqirqqampKSmqaegnp6fn6GjpKampqimpqmnpqWmp6ShnZqXlJGRjY+Um5+kqais
-q66tqqOeoKGhoaCgnpydoKChoKSrrqupqquqp6mpqKmrpaSlqKWkp6imqqqqrK6w
-sa+uq66rp6Wkp6ipqKyvsK6vsrSysrOysrSztLOxs7S2sa6vr6ysq6mtq6uurbCv
-rayurrGwq5RuX11ZVlNTUVBTVFBNUExMTU5LS01NTEtNTUxNTk5NS01QTk1NTU9N
-T1BQTFBRT05NUE9MTE9PTE1MT1BQT05PT1BNT05OUE9PT1JQUFFSUlFUU1JRUFBR
-UVNST05MTU5RTVBOUE9SUlJTUU9QVFRRUFBSUVBQUFJVV1VQUlNUU1JRUFRTVVZW
-WFpZV1dWWFhXW1xcXl1cW1xaXV5gYF9hXmRoZl5YWFpbWlpaW1dWV1ZXWFlaWldg
-Y5W+ytPa3uLk5ufo6upOSURHRlNPRUdHRUFGRENFQ0VDQkRCQ0NDRURKRURFQ0VG
-QkJBQkc+PDs/Pzs7PUE9Ozs8PDs/QT08Ojs6Ozk7PDc7Ojo5Ojg6Ojc8OTs6Oz06
-Ojw8Ojo5PDw8Ozo4PD4/PUBAPT9APj4/Pjw8QEFCPUA8PD06Oj08QUI+PTs7PTw8
-ODo6Pj4+Pj8/Pjk+Oj0+Nz8/Pj04PDxAPT89Pz49QUE9OzxDRUBDRElIRkZHTUpJ
-S0lNU1NNSUtQTUxMSkhLSkdGP0JHTklKTFFNUE9HSEtNSkxOTk9LQT05OTc2ODU1
-Nzc6Pz08OTg4Ozo3NjQzNDI0Njc6OTk1NjY4NjU1ODg4NjUzMzU4PDs+QUJCPTxA
-P0I/PD1FSURIRkRMQ0hMS1BPRkxMTk5HS0tMUE1OTElTUktNUFVPUU9TW1hVWlpQ
-UVZdXllWVFVXVlNQUFNUUE9UV1dTT0xPVlpZWlRSUFBRVkhMR0I7Ojk2MzI2OTk2
-MjIzOTo6PEJSXFFJS0xNVFJXWFFUTU5RVFFMSU1UUVVbU1hTTlFPTE5WUE5RVlpT
-VlJXXVZMT1BUV01LSEZMUU1QTEtMSEhJSlFUVlVNTExNVFBLQjw5Ozw/Qj9CQUFD
-RExHRUxGRUVASUZJSU5ZUExHQkRBR0tRS0dKSUlLSEI8Ojs4NjVASkxTVlROTlBR
-SkJBPj02OTg1NDU2NDY3ODQ3NzY2Nzs7QEJEQUNMS0xTS1JSUFZcXFVSUVVXWVVS
-T1ZYWFlRTlBVXVxeW1tZWVZRU1VUWVxTVltbT1FQT1FRU1VYUExOVFJXYHKEjZKU
-l5qanJ6cpKemqamln5yWlpSVmJ6epKCioJ6cl5ueoqKcmZmWlpWOi4mLj5KQkY6M
-jIeHhoiGi4+QkZSSkI2MjImJkZ2lqK+xsKypqKelo6KkoaWjop6enZyemZWSkI+M
-ioeIhoWGipGam5+ipaeorKmpqamnpZB2jKKppqOZkIeDgHx9e3t4e318eHd9h5SP
-kZ2UhXt2d4CLkZOTjYh/cm1wZFldZW1wdHd6fYSGiYqMjY+QlZeZnKChqKWmo6Kl
-qayuqauur7Gwrq+usbK0tLSwrq6vraypq6mopqWkpKinpaWhnqCgoKGjpKOjpaWn
-p6ilp6qppqSgnZeWk4uIjZaanqWqqqyur66oop2foJ6fnp+gnaCfoaSkpqmqpaSl
-qKuoqKanpaShpKWkpKSnp6mtq6qsqq6uraqtraqmpKOkqq6vsLKysLK0trS0urW3
-tbO1sq+ytK+wsa+usKyrqqiqrKusqq2urKyqqq6rmHFgXFpUVlNTUU9QT05OTE5M
-S0xQUU5NTU1NUE9OTk1NS01PUE1MS01PT05NT1FQTkxPTk1NT09PTk1LTlJQUE5N
-T1JPTUxPUFBOUlJQUFFQT1FQUlNSTU5TVFVRUlBOTU9RT09UVlFQTk9SU05PVFNQ
-UlFPUFNTUlNSUlRQUVFSVFZUU1RUVlZXV1lZV1VVV1lXWFdbXV1eXl5cXV9fX1xf
-YV9bXFxbWl1cW1pXV1pZWFhYWFhaWF1omr7K0tnd4uTm5+jp6U9KSEZESEpGREFF
-SERFSkVGR0NBQUE/Q0I9PUNEQjxGREFCRERBQ0I/RERAPD5BPjg9OTs+Ojk9Ojk5
-ODo5PDw9PDo4NTY2ODk4OjczOzw5Nzo+Ojg4ODo9PDo7QEI9Pj08PD07Oz4/QD89
-QD4+OzxAPD0/PT4/Ojs9PD06OTs7Pj5BQz9APkFEQkJAPkA9PkI/PD08Oz06Pj5C
-QkA+QEZDQj9BQENGR0hHT09LSkRISUpMSUhOUFBISUxNU0pKR0NBQkVBOT1EQ0VI
-TUdQUE5NQkRCS1BQS01KQj05OTk4NTQ4ODU3OTc4Nzc1NTc4NjQ0NDU1NDc0MzIz
-MzM1NDQ1Njc1OTk4ODg5O0FDPz47OkBBQkRHREZNR0lMRkRFR05MUlNMTE5QT0hI
-SkpRTlNPTFBPTU1QVVJRTFFZVlVZXVRXWFxeXlpUWV5VUlZVV1ZUUFVXV1FNUEpS
-VldVVl5WV1FPTElBQTs9PTo2OTU0NTc0NTg0Njo8P0NXVEtKTUpRWVdbU01TTUxP
-SUpQUFFTUFpbVVROTU1KTl1RSlNVV09LUlFUVFFJUFVTUUxOS0tTUVNOSEhNS01Q
-S09QUU5ISlJQUlNKQDpCPj89PkFEQEdCR0lKTkZESUVMRURHSlNNTU9MS0dFR05I
-QkZKQ0ZJQj46NjM0NkBLUlRUUUhITU9JRUM9ODc0MzE0MzU3MzMzOjQ1MzU1Njo8
-OjtASE9HRlBSVFBPVllbVFNTVlVZWlROU1RXWVRRUVRXV1pYU1JRT01PTVBUVVNU
-VVhOS0tMTlBQTlFPTE5SUlVfcYSNkpOWmpuanJ6jpaelpqein5uZk5CRlJmgpaOg
-n52bnJ+hnZqZmpiUlJCLiouTl5ORj4uJhoKDgoaMj46QkpCKioqJiIeKlqCpra6y
-rqippaWioKGkoqKgn5+hnZqYlZORjoyIh4iGhYaIkJmenp+kpKemqaysrKWVfW6C
-lKeop56Wj4WAfnt6d3d7gIOBf3+HjYqOmZqNfXN6hYuNj5CMg3lvcnZqZGRscXB2
-eXyChouLjo2Pk5OZmZyen6GhpaWlqKenqaurp6uurrCsra+xsbCvr6yur66qqaqs
-raqopKampqWmpKCfnZ+goaKhoaKkpampqKanqaSen5ybmJWNjYeKk5yio6Wpqamt
-raqgnp6hoaCfnp6foJ6go6Wnqqmmp6yqpqmoqquqpaSopKanpqepqqipqamrrq2q
-qqusq6ilp6musLGxsbKztLS1s7W2s7Szsa+wsrKwsLCwsLGvr62tqqqoqa6trqur
-qqqrs6+VcWBeWlhTU1JSUlBRUFBOTU5MTk5OTU9NTlBPTUxNTUxQT01OUE5NTk9N
-UVBNTU1PTk5OUE5NTUxPTUtLTk9PT09MTUtMTlFRUFJRU1FRT1JNUFJRUlFOUU9Q
-UVNTU09QU05QU1NSUFJTUVBTUU9PU1JTVFVQTE9VU1NSUlVUUVJWWFVaWFZUUVVU
-WFdYWFlaWlpZWFdXWllaX19dXmBgY2BcXF1aXlxcXVxZV1lcWFdWVldWVVdYXG2g
-vsvS2N3h4+bo6OnqT1FPTEU/REQ/QTo/R0JFSEREREdEQUFDRUJDQEBCQkNDQEBA
-Q0E9QT1AQD9BREA/QT1APj48Pjw9Pjw7Ojo7Ozg3OTc5Nzg3Nzc4OTk4ODo6OTo5
-ODg6PDo7QkBAPj0+Pj08PD08PT8+PT1APj09PT0/Pz4+Pj48PkA/QDo8PT8+Pj1E
-Q0I+Pz9BPz5APD47ODtBQT49Oj88Pj09QUA+R0RCQkRFRUlISUlSUk9QR0dHRk9L
-R0hHS0VGS0xKRUZISEI9PD9APDw/Oz9BQklGSEVERkVIT1NKUFNPRDs4NjYxMzU2
-NTc2ODk5NjczNTU1NDMzNDQ3NDQ1NDEzNDQ0NjgzNTY2Nzo5OTs6P0I9QT48Oz5F
-R0xNTE5HTExHSEdJTElMTElQSU9USUZHSFFNVlJNT01ST05TVFJSVlVaW1tdWllW
-V1VYWlNXV1daW1lcX1hSVlZVUUxQTk1TWVhZXV1XUlBMSENDQz5APTc4ODc5Njk3
-Ojg1Nzg8QUhTUElKS0tWWVZPSk5SS0xOTFFZUVJWVFdUU1VPR0tOV11VVVtQV1RR
-UlNYU0hPVFBRUlFVUFdYTkpITE9RTldVUFNRVFBQUlJTU1BDOjw7OT1AQkNAQ0RI
-T0pJSEdOS05IQUVGTklISEpRSUhHTUdERk1FQEVBOjY4NzU4PENRW1VQTEpJSUxF
-Qjw3NzIwMTMyNTQ1NDI0NDM6NTc6ODg4PD9FSkdJUFBUUFZZWFdTUk5RWmFgV1FQ
-UVRXXFtWU1RRVldWUVBQUlNSVVJRUlBTUU5KTVFOS0lPVE9KTlVZWGJwgY2Ok5eY
-m5+foKOoq6qsqaainpWRjIqOlJmanqSknp2dnJyeoJyYmpeRjY2Lj5SVk5GPjIqF
-hIGBiIuMjo2OjoyLgoSFhoqTnqetsq6sqaqqp6SmpKKhoZ6foJ+cmJSWlJKRjoqI
-h4iHjI+VmZyfoqSkpqanqa2qppaMkZqkpayro52XjYiAenh1enx9fXx3eXh4dYWX
-mJyQfICIi4eFh4R9dHR3dWxpb3N2dHh9gYKJiYyOkZGTl5qZnJyboqilp6eqrKmr
-p6msrKyusa+vr7Gxr6+vrLCvrKypqKmrqaWmqquopaWmpKSenqKjoaOio6Wpqaqp
-qaqooqCenZ2Xko2Lio6Vm56jpKipqqysqqOdn5ubmpqbnZydn6KeoqKnpqenqaWm
-pquqrq2rqKiiqKinqampqauqqKqsqqmpra2tq6ipq66zsLOwsbWys7OztLS3s7Gx
-rq+xsK6vr66urLCurqqqrKqop62srKysqa+yrJZwX1lZW1ZUU1FQUFBRUlRRTE1R
-TkxOTkxOTUpMTU1PT05NTkpOTExNT1FMT1FPT09LTU5NT0xNUE9OSkhLSkpMTk1K
-TE1OTlNWUlFRUVFQUFRRUFBRT1FTUU5PTk1OTktQVVVRTlBQUlJQUFFQUlJSUlNU
-U1BPT1JTVVRSVFZXVVVXU1FRUlRTVFhWWFdZWVpZWltbXFxdXFpbXV5dXl9hYl9d
-X11bWllbXV1ZWFdYWFpWVVZYWVhfaJa9ydHX3ODj5ubo6elLTlFLSUhHRUdBPUFD
-QURISEdFRkJESkhIRD1BQEFAPUBBQEBBPj5APD5CPj4+PTo8PDw8PDk7Ojs+OTo9
-Ozs5ODg4ODg3Nzg6PDo4OTY2PTo6PDo6Ojo5Ozw5PD0/PTpGPj47PT84PDw9QD4/
-PkBGQj4/Pz07PT06Ojw8Oj5EPT5ERkFAQEE+PTs7Oj88O0A9Pzs8Q0JAPkNBPUFB
-QkJDQ0ZEQkVGSUxKS1JQUFFNTklFTkVDR0pKRkVGSkQ9QEJDRD1AQj88PEI+Ozo8
-QEE8QEVHRkZHSkVLTVJOSj03NDc3NjQzMzU8ODc2NTM2NTY2MzU4NDMwMjY2MzEy
-NTQzMzY0NDU0NTc6OTs4PT5BPz0+QENHTUtKTEpJSUtST01SS0tKTFdLTlFMTUdN
-T1BRUE9RTVhTVFRMTlJWWFVYW2BYVlJRU1NUUlVaWltaW1tgWldZV1RUTk9RUlRc
-VVNWWlJQVU1PQ0NDR0M/OTU1ODY2OTc3ODQ4ODg9P0ZOS0lPTk9VVk1KTVVOTUtN
-UVlVTlJQUFNUV1hNSkxLXFpUV1RTVlJTVVBSUEtRT05TT09UTlFOSEZGSVFXVVlS
-UVhSV1NVVlBRUUtAPDg3PkBCRj9BRkZKQ0ZFRVFOT0pCS0pQSEdFRlBMTkhQSUhF
-R0dFRUE9Ozk1NTk9Rk5cVFFPTkpIQ0NFQjw7NDQ1Sjk0NDQyMzM1MTM0NjY1NTY9
-PEJKSk5PUFRNVVdYWVRSV1ZaXVxVVFZVUlhhWVBZWFRaU05NUVdWVVJPUVVXUlBS
-S0tSVVNJSkpPUE1RV11ZXGt+iY6QlZicoJ2goqasq6ysoaKdl5CMi4+Qk5idoaGd
-m5uen6OlopyZkpCNi46RkJCPkpKOiIKFgoGDj5CPiYuLjIuMiYaHipagqq2wraup
-p6inpqajnp6gnqKfnJqWl5WUlJKNiYmLiomOlZibnJ+ioqSlp6iqqaiop6Gkpqmt
-qqqloZuSiYB8eXd4eXp7eW1hYmRqgIaKnp6Jg4aHg4B7eXBtdHdxbm9ydXZ1eoCE
-iIuKjo+OkJKSl5ydnJ6hoqWlq6mpqa2sqq2tr7GztbWysq2yrqqrq66tsK2tq6yq
-qKmnqKino6CjoqCdnaOipKWlp6Snqamnp6SgnZyZmJWRjY+Rk5mdn6WnqKmnqqyn
-n52enZyYnJyem5ycmZ6ho6Okpqippaioq6mqraiop6aopaWoqamqqqurqqqpqqyr
-rq+trK2vsLCtr7KvrrGwsbKusbaysK+tr7Kur66tra2vra+tr6+tr6+uqqutra2y
-srCsmXZhW1hWUlNTUVNTT09NUFFRTk5NTE9MTktMS0pMTk1MTUxLTE9QTk9PTk9O
-T01NTlBOTk1MS05PUFBPUUtKS01QTE9PUFFPT09SUVJST09QT1BPUk9OUE9PT09S
-T1FRTk5RUE9PT1FSUVJTUVFSUlRUVlRTVFBNUFNTVFNVVFRUVlRUU1NWVFVVVFZV
-VFdWV1pYV1hcXl5cWVpYV1heYGBfXl1fXl9gXFpbW15bWVdXWFdWV1hYVFZfkrvK
-0djd4eXl5ufq6kpJSUdCR0xFQ0VHREVDQkRESEdBQUBKUEREQ0JAQkE8PDxBREFB
-Pj1ARD9CRUFAPD05ODk5OztAPTs9PT0+PTs7NzY4Ojo5PT08PD05Ojk6PDs7Oz0+
-PDs6Oj4/QUBDQkFBPTw9QUE8PTw+QEFAQEBCQ09LPT09Ozo7Oz1APDw+PEE+PTtA
-Pz8+Pz4/QUE9OT9APTtAO0FERERCRUNDRUlJRERERUVKS0hGSkdKT0xRSkhKRkNC
-QkhCRkVMQ0E/Pz06Ozs/Qj05OTo5OTo8ODg7P0FEREI+QUpNUE9WR0A8NzY1MzQy
-NDY3Njg4ODg3NzIzMzM2NTU1NDU4NjU0NDU3ODc0Mjk8Ojk7PD48P0I+P0VFREVM
-SUZFRENGRFJPT09IS0xPU1BPS0pMT0xRT1JWVFFRXVpUVE5MTlZZVldWYF9XVFZQ
-UVFSWVhWW2JeXVxYWFpcU1RNS09TW2FVTU9WU01XVFhKRENKQz86NTY0NDQ1NDw2
-Njc4OD9BQUtOTUpQUExUVlRUTE9XTklOUVZPSlRTTlFVWFhOVFJOU05MV1RWWFFV
-VlNVUU9UVE1QT01LS1JQSUtLSlNSVFZOVlRTU09QS0tSTUQ+Pzo7O0FFQEJLREpH
-RERET0lOU0hKSUxGQ0pHTE1IR0hER0FESExLR0A5NjQ3OD9JSlZaUlRRT0xMSkhC
-Pzk5NTZPST0zMTM1NTQ1NDY1NDU2ODg6PEBCTFFQTkhPVVtYUE9QVltcWFZWWFRS
-UVZQUldWXFVSUlJVV1RRUVBaVFJUTUtMSE5PUFFRTlBRT1FXV1dhcXyFj5OVlpud
-pKCjqaSjqKyoo5+ak4+Li46Ul5qbnZ6cnKKlpqSinZmXlJCNjpGRkJSRjoyJgYGC
-g4SNkZWQjoyKjYyLioiQmKKqra6tq6upqKWlpaKhoJ6jop+cl5eWlZSSkYyOjYqI
-ipGXm5yenqClp6qrqqqpqqyusbGwrq+tq6mjnJKQkYmEfnx5d3V3dWtkXmd3e3yT
-no+FhIF7bmtlXGFtcXBucHJ0dHl8goeIjI6PkI2OjZSWmZmRk6Cio6eoqamrrq2s
-rbGusbK1srKxrrGvrbCvsrGuraipraqrqaippKapp6GenJ2coKSkpKKio6elpqWk
-o6OfnZqWlpSNi4yPlp2hpaapq62tqqabmJuZm56cnJ6gnp6eoKKko5+kqKeopqup
-qaqrqqeoqamopqqsq6qmq62qq6urrrCtra2rqqyura2vsLG0tLCtsa6ys66wsLGt
-ra2ur7Cxra+np7CvrqysrK+usK+xr6+vsa6adGFbWVdWU1RUU1NTTU1QUlNTUU9P
-TU1NTUpMS0lOTU9LTEtMS01MTU5RT1BRT01MUE5PTk5PT1BQUFBMT05NTU5OTUtN
-T1BRTU1NT1BSTk5QUlBPUFBQT05PUE5RU1NVU1BOT1BQUVBQT05PT1BQUVBRVFJT
-U1JQUlJRUlFTU1JSU1hbV1NUV1lXVVdXV1hWV1paXFxbWVtaWFhaWl1fYF5fXl1f
-XltbXFpaWVpZWVtZV1ZXWVdbWmOTvMrR19zg4+Xn6OnoS0dGQ0lBRkM9Pj1HSkdF
-QUVIREFAQEJHQ0ZBQ0BBREBBOUFBPj5AO0JCQkFCPz47Pj4+OTk9PDs5Ojo6O0E/
-QT07PTo8Ozk8PTo6Ozk5Ojs6ODY5PDw7PDo6P0BBPz5APkE/QD48PUFAQT49Pj8+
-Pz0+Qz0+Pj46Ozk7PDw+PT47Ojw6PUA8Pjw9QD5CQT49PUBAQUE/TUNDRUZEQ0lI
-TU9NS0ZGSEhGRkhKRkhLTlJQTE9JRT89QERGREI/QUA8ODg6ODg9Pjk5ODc3NjY2
-Njc3ODg6Oj4+QEVKS1BNR0Q8Pzg1NDM1NjY5NzU3Njg2NTMyMzMyMzE3NjUzNDM1
-Mjc1Njg3Nzk6Ojc5ODg7PDs8RERGREtHQUNBQkdJTU1NSUdMSFNWT1VMSEtKT1VX
-XFZWVFJYWFdYT01OVlhbWlVbWVRUVlNSU1ZUWFZaYF5YVldXWVtYVk9PT01RWVJL
-TlFRTVNVWVFKSUxCQDw5ODY1NDI0Njg4QDY5Nzo7PkdUS0pQSk9dWlJOSlBTSkhK
-UlBQU1RNTlhVV1VSVVJOT0xQVVBYV1BTUU9RTU9YUE5NSFBOTlpTUU5NVFhTUVBO
-T01WVExMUlNUS0FAPT1GQUFCQU1GS0ZIRUVNSktPS0xGSUdFTUZGSUxHRUZIQ0dP
-UElIRjo0NTY5RFROU1ZMT1FMUlBSTEZDPDg6NTk7OzYxMzI1NzM1ODU2Mzg4Ozo6
-Q0JLS05RTFJRVVRQU1BTVFVTVFNWWVVSWFJRV1ZQTlZXVFRVVFNPVlNOUU9KT05K
-Sk9UVlRQUVFRUVdYWF1peYKLk5eam5yhoqWlp6usqammoJyWko6QkZGVmpmdnZ2d
-mp2gpaOhnpqXlJCOkI+QkpCQi4yFgYGDhouOj5GTjo2MkJCNjJGZoaarrK2tqKmm
-paalpaCenp6foJ6blZOUlJKOjI6MiomKj5idnqGfoaisq6ysr6yuq62vqqqrrKun
-qqidmZWSjoqLhXx5e3p4cm5la3NvcY6ei4KDfG5nY1lWX21xdHd0c3JzeX+Ch4qM
-kI+QlZGSkpSWmpqZpKOko6Woq6iprrOwsa+vrK+zs7KxsrCvr66xsK6wrquvrqqo
-p6SmpaWmoJ6enp+hoaKjoqOipKelpKWkpaCWlpWUkouKipKXm52ipqmpsLCspJ6b
-mpqZmZmZmp2ioKCioqKjpKSnpaippaqsqailq66vramqrKmqq6urqaqqq6+vrq2v
-rKaoqa2wrKuvsrSvrrCytLWzsbCxsa6ura+vsLKurZWUrK2rq66tra+xr6+wrK+y
-rpxyYFhZV1VVUU1QUVNPT0xPTU5NSUtMTk5LS0xOTk5NS0pJS0tOTE1NTUxMT1BQ
-T1FPUFBNTU9QVFFPS0tOTVFPT1NQUFBTUk9ST09OTVFRTk5PT09QUE9QT09OTk5O
-UFBPTlBQU1RUU1NQT1JPUVFPT1FUVVBSUlFSVFNRT1NTVVRVVFdZV1VTVldWVlhW
-V1hYW1paW1tcXF1aWl5cXl1gYGBhXV9fXF1gYVxcXVtZWVhXV1RVWF5lapq9ydLX
-3eHj5efo6ulGSURGSEJAQ0U8QURGQ0NAQ0ZGTElIREJFQkdHRURGQ0FEQEFAPz9A
-QT5CPkBFPj07Pz05Oz08QT49PTw9P0E8OTg4OTg7PT5CREFAPDo2ODc6Ozc6Ozo3
-OTo6PT49QEE9QEBCQkA8O0JAPT9BQTw9QkA/PT08QD05PDxASEI+QD09PT4+QEJG
-QUJBP0BBQEI+PkA+QUJAQUVEQUNDSk1UVUxHRklLSEtHSU1NSExKS05LT0lGQj0/
-PEBBQTo7QDo4ODY5Nzc2OTc2NTUzNTY0NDY1NjQ1Nzw6PUFMS0xPTUI+NzQ1NjQz
-Njg2ODY4NzY3NjY2MjQ0NjU3NTY3ODY0NDo6ODk3Nzc6OTg7Pj0+O0E8PEBASEZE
-Q0RHRUNGSktJSE1NTVBNU01LTktTV1JZV1VYUlNRVFZOSExRU1ZbWVtZV1NUT1BQ
-U1NSVFddWlRUVldZXFdXV01LS09SUVBRU1VXVVNWUk5ITUdFQzs6NTc2NTIzNDY2
-NTM3Njg7QVJTS0tNSlFbVExISk1OR0pXVFJPUlZUW1hPT09RUU1MUE5LTUxWUlBP
-S01OUFRPSk9KTVFQWF1UU1ZYW1RKUE9ISlJTVVVZWFhNRkE+PEVAQkFARkNHSlBQ
-RkhNSU9JSURHSEJHRklFRkJDQENARExPTEdGOzYzODtBU1VTVE5LSUtMU1RLRkQ8
-OTY0MjM1NDQyMzM0ODY2MzU3Nzg4OTs/REtKUE5PUFJaU1VTTlVWV1RQUVNWWFtc
-WVRTT01OVFRTVFNRTUxMTFFVVFFUU0xMTk5TUU9PT1FSUlRWWWV3g4uSlp2bnpyg
-pKWnqKqurqqoopuWkY+Ok5OVlpuenZiXmqCioqKgmpiVkI+SkpKTkpKNiYaFgoSF
-iJCVk46OkZKPjIyPkZqhpqyvrKurp6alpKOgoZ+hoZ+gn52ampaTkZGSj4uMjo6Q
-l56hoKKmqampqqqsrKqtrq2srKetr6usqaScl5SPjYyLh4WCfnt1cGVpcGdpfouH
-gnxza2xoWVhkcHV2c3V1c3N2e4OGioyPj5CVlZORmZqcoKGipKKjp6enqqyvsbCv
-rq6prbO3uLm3s7WxsbGvr6+rrK2qqamlp6empaKhnp+hn6Ghn6Ojo6Wpq6mmpael
-oZqUlZSRjoyLj5abn6Srq6yssa+knpqbm52Zl5aZm52fnqGgoKOmpaWlpqimqaun
-qKisrq2urKmpqquqrKutrqyvrqyura6rqKeoqqyrrq2vr6+wsrW2tbKzsrOxr62t
-rrGxs7GxrqutsK2tq6yurq+xsbCuq7Cvn3ZiXlpZV1ZUVVNQUFFRT0tLTExJS05M
-TU1NTlFQTEhJTU5NTUxOT09NUE5NT05OT1BNTUtMTE1OTUxKTE5NTlJOTVBQT1FQ
-TlFST0xNUU9QUE1RUU9QUVBOUVFRUE9QUlJRUFRWU1RVVFFQUVFQT1FOT1BSU1NR
-UVNTVVJTUlJTVVRXVlhZVlRSU1ZWVldXWVtaWllZXF1dXFxcXl5gY2BgYWJgX19e
-XGFfXVtdXFxbWllaWFpaWVttorzJ0dje4OPl5ujp6UFBSEw9OT1DPENKSkJAQ0NH
-R0dKSUNBQ0ZERERGREM9P0I/P0JAQEJCQ0NEQT5AQEA+RDw+PT89QEBAOzw8Ojo+
-Ozs5OTs8PDs+QD88OTs3Mzk3OTs7OTY2Njs5PT1APz4/P0BBQT87OTo/Pzs8Oz0/
-Pz09QEI+PEBAPUBAQEA+QUI7QUFBPkVBQkRFQUE+P0FCPz8/PERDRUVBREdMUFJR
-TEdLTk9LSktNUFBOTUlOSUNMR0ZEQUFERUNAPz0+Ojg4Nzg3NTY3MzQzMzMzNjM0
-NTg0Njc3ODg4QEZHS0tKRkE4MzMzMzM3OTg7Nzc3MjIyMTMzMzQ1MjYyMjI2N0ZB
-ODk6ODc5Ojk4PD5GREU7Ojc8Pj1ERkhJREVHRERDSUlJUElMSUtVUklQUldVUVBR
-VFVOUE1UXFJQTlBVTlVcYFteWVdUU1ZTU1RTV1lYU1JZW1tZU1NTU1FRVVBRUVJX
-U1NWW1hSTkdIR0RAOTc4Njc2MzU0NTIxMTQ0NDY7RU1MSUtISE9TTEdJS01NT1FM
-TVJTW1lWWVNPT1JVUElPUUtMSVJTTkpNRklOTU9NR1BQTUtMV1JQWVVQUExNT0hJ
-UlJZW1xcWlFOQ0A7P0BCRD1CREVMUVZNS1FMUElGR0hKQUJFSkdGRkVEQD1FRk1N
-SEU/ODo5QUZOV1dTUUpOT1NUTEZHRz05ODk2MzY4ODY1NzQ2NjU0NTU1OTw8PUJF
-UExQTVBUUFdVVVFSV1ZWVlJSWVdaWFdUTkxNUFFQUVJTU1BNSkxOUVNWV1pWUE5P
-T1JUVVJTVlVSUFVXYnKCjZWZnJyenJ+jpaWipqyrq6upoJqTjIqKi5GYmpydmpqX
-nKChoJ2WlJOTkpSUk5KSlZKJhYGDhYeLkJSVkYuLjYqIh4iQmaKrr62srqqoq6qm
-o6CdnpucnaCin5uamZeTkI+OjI6QkZSan6Cjpaaoqqiqqqytrqysqq2oq6ysqqKq
-qaOdmZOQjYqJh4aBfnZ2bG9yY2R1f4CBd293fHRkZnF7fnp6c3V2dnl9h4iJio2Q
-kpKUkpKWmJyfoKGnqKanp6aorK+xr7Cvra6ur7OzsrCvt7Wvr6yrq6urqqmoqKqq
-qKWloqOgn5+goaOioaOkoKaqqaunpJ+enJyZl5KOjI6UmJmgp6qqrrCvraegmZya
-nJeVmZycnpueoJ6gn6Gko6elpaiqraqqqayqqayqqqqqqaerqKuoqKmsr66trqSn
-o6esrrCtra2vra+xsrGysrSztbCvrrKvr62xtLSzr66vsbOvr6qvrrCvsK+uta6e
-dmFdWldRT09PUVBQUVBMSUxOTUxJSkxNTU1LTUxMUExNTE1MSk1QTUxOTE5NTk9N
-TUtMTExNTU1PT1FQUE9PUk9TTU1NTU9QUk5RTktLTU9OTlFTUVNRUVFQT1BPUlNP
-U1NUUlBPU1JVVFBQT1JTU1BRUVNTUFFQT1JTU1NSUVNSUlJTVFVVVFVTU1VVVVdZ
-Wl1bXFpbXV5dXl9bXmBbXWFfYF5cXV5gXV9hX1xcXF1aW1hXVldYW26ousnS2N3g
-4+bm5+nqTUlARkQ3OT5ESUtLRkBEQkNDSEhGQ0JBRUNBQkNCQ0BFQ0VEQ0JDRENC
-QkJDQz8/QURBQjxAQDw8QkFCP0A8QTs+Pj09Ozs7Pj9APT45ODo7OTw4Ojo5Nzg6
-OTo7Pj49Pjw/P0JAQkNAPEFAPD48PD8/QkA9QD0/P0E+QENDRUNCQD49PUBDRERC
-Q0NDRkNBQ0NER0RIRkdKTEpITFBOUFBMRkdOT01NTE1OTlBUUExJRkVIR0ZHREND
-RDw7Ozk7PDs4ODg4Nz4zNDY1NjUxNTY3OTQzNTQ1Njw9QEJDSEhFRUQ7MzQyMzU2
-Njc3NzYzNTIzMzUzMzI0PDY2NTU2Njk8Ojg2ODo4Nz47Oj1BPzs6Oz1EQUA8QUE/
-QERDQERGRkZKR0lMSk9IQ0hQV1RMTk9QU1FPTltdU1JQUVRTUlhfXF5bXVZYXVtV
-VFNXWFdNT1VZWlxYU1RSUlBVUVJTVVRRVldbW1RLRkhGRUM7OzU3NDI0NDM2NTU2
-MTM1Nzc7RU1NRURNSk9TSklLUE5MTFFPVVtZXltaWVFOU1NYV1FRUUtOUFZUTUtI
-SEpKTFNPS0xISklKVVZUVUxOTkpLSkdOT1FXWFlVUEZAQz48Pz9EQT5DRUhITFFH
-TEhKQ0VGSUtBQkdFQkRFR0VCPkFFSkpERUM7OTtCSk9ZXldOSVhSVVBNRkdFPzg3
-NTQzNDE0NTY0ODU4ODU1NTg6QkNCREdOTE5PUlVVXlhUUE9XVVZRTVBUVlVUV1JR
-T1FUUlJRTlVaVVBPTE1KUFZXVVRRTk1PU1FOUFZSUlBTVVhgcYOPk5ebnJ2foaKl
-qaqoqq2srKumnZSPiImHi46UmJydnJmboZ+gnpiUk5STlZaUkY+TjYmFgIOHh4yR
-k5KRkY2Jh4WEh4+dpKqqrq+wra2qp6iopaSdnZ6dnJubl5eWkpKOkI6NiY+Wlpud
-oKSpq62rrKmoqaesrqunqampqqqon6empqCdl5KQj4mJhH99eXt1cnJhZGt4foB5
-eYF/d254gIR/eXNzdHd5e4CDiY6Pj5STk5WXmZeVmpmgqqKmpqmpqaqurq6urrCv
-rq6tr66usrSztLSxrqqorKyprKmoqa2opaKfn6GenZ6ipKOkpaanqKmloqSkpKCi
-oZqVk5GOjJKanqKlp6qrrq6ppqCcm5yam5qYnp2bnJ2gopyeo6Ogo6WoqairrbCv
-rqyrr62uraysrK6rqqmorKuuqq6uqqmmq66vr62usLCtra+ysLGvtLGxt7Sysq+w
-s7O0r7CwrKuvr7Cws7Gyrauwsa2wr6B6YVxbWlZVVVNQUlBQT01NSkxLS0pJS05S
-UE9OSExNUE1OTE9LTE5OTU1NTE5QTk9QU05RTkxMTUxOT05PTU9RUE5OUE5QUVBQ
-T01OTUxNTUtMS05OU1JRU1FRUE9QUVBPUVNTU1RRTVBQUFFPT09QU1VRUlZUVVJP
-UE9SUlRSUlRTU1ZWVVVWVVVUUlNWV1hZW1tdXl1dXF5dW1tdXWBeXmBiZGBfX19c
-XV9eXltaWl1aWVdaVldaZ5+5yNHZ3eDj5ufn6OlMQzk9Pzw6OTtAQkVGR0FFQEM/
-QUVERUNERUZDP0BBP0FAQUBERkNBQUFDQ0ZGRUVEQEFFQT5BQ0FAQUJEQD87OTpA
-Pjs7PDs9Oz47OkI9Ojo9Ojs5Ojg7Ozo8PTw9PTw8PUBCP0A9PkJCQEBAP0I+Oz9C
-Qj9BQD9APz0/P0JDQkBBQT48PkJGR0NIRkJEREVER0VIS0tLTU5NSU5PT05JUUxM
-S05STVJPTFBNT1RTU0tOTUtMS0xHQkFAPj87PD85OTs5OTw8OzY0MzU1MzIxMjUy
-NTY0MzIxNTY2OzxBQ0ZGSEg7ODc2NjQ0NDQ1NTU1MTExNDU0NDY2NjY5NzU2NDg2
-Nzc0NzY3Ojs7Ojg4ODg+Q0NEQzw7QEFAP0NERElHSU1LSUhMTENESEpRT0hMTVBQ
-UVJLVFlQT1BUUlNVV15bYF1YVFVbXVJRVVBTVVJRVFVUWVtXVldTUk5OVE9NVFFV
-U1ZVU0lIRUhIRD07Ozk6NzQyMzM2NDMzMzg1Nzw/SE5JRUdGS1NSTEhRTlZRUVld
-XldZYFdZWVRcXVddVEpSUU9RU1hQRUlJSUtIT1RJTk1NTU1TWFZSUE1OWFVQTk1R
-UlNXV1ROSU1KQTs8OT8+PURITUlOTENKRkxIR0lHR0RKQkNCQ0ZLSkhBSEZKRUVA
-Qjg3Oj9JSVBWUEtJTU9QTUlHRUNBPTY0MzU3NjM2Nzg3Njc0Nzc2Nzs4OTs8RkxK
-S0xRU1ZfWFdVU1ZVVlVRUE9NT1FRUU9QUlRWV1JRVllYUlBST1BWV1dUUUxPU1RS
-UVBPUVVWUVBUWV1sfYmSlp6doKChoaaorqytsLKvraifmJSNiYeIj5WYmp+gmpqf
-oKCem5eVlpaUk5CPkY2MhoOBgISLioyNj4+RjIeKh4WIkJ6nqaytsLCwr6umoKSk
-o5+ampubmZiWlJaUk5SPjoyMjpKYm5+ioqOorayvq6qrqKyrqKmpqaetq6urq6uo
-pqOclpOPiYF/fnh0dXd0cWZma3N5d3Bwd3d2eoGJiYV8dnRxdXd/gYSKj5GRko+Q
-lJeampuaoqKiqKuoqquurbCwsbKysrGxr6ussLCytLSxsK6vrayrq6iqqaqop6mm
-pqWjoZ6dm56dn6ChoqWmqKijpaWloqGal5WUj4+OlJyfoaaprK+trKaempqZmJiY
-mZmdoJ6dnZ6foaalo6Gio6Slp6qtrqqsrK2tr6upqqqpqKiqrKyrrK2xsLGtqKep
-rK6ysrCxrquwrrCvsLGws7S2tLO2s7W1t7KwsK+ur6+wr7CtrKutrq+ura+upYBi
-WlhXUlJPT1BQUVBNT01NTE5RUE1MUE5NTU9RT0xKSk1NT01MTk1OTk5QT0pNTUxS
-U1BOTU1KS0tQTk5OTU1QT1BPUFJRT05QUVJQUlBRT05PUE9QUVFQUVNSUFRWU1FP
-T1VUUVFRT05PUFFOTE5TUVJSU1NRUlBQU1BTUlNUUlNUU1ZWV1dWVFNUVVdZWVtb
-W1tbX15eXFxeXVxeX2JhY2FkYmNkYF9fXl1bXVlaYVpaWVtZWVlmm7nJ0dnd4OTl
-6Ojp6ktBOz05Njw9PTc+R0VHPkM9PkVGRURGRUNDREdDQD4+RUFDRkRFQ0NBQURC
-QkREQkRDQkNCQkBCRERBQT8/REJAP0JCQT9BQD5APT5APTw8OTs6Ozk7Pzo7PkBA
-Pz1APz49Pj88QUBCRkZDRUVEQj9AQ0RCQ0NBP0JCP0BAQUNEQ0tDQD5CRERHS0ZG
-RERGSElISktPUlNQTEtKUVNSUE1LTVFPTlJPVE5JT05KT05SSkpMSUhJTEpGRD9F
-Qj4/P0BBPzs5PDo/OjczNDU3ODY4MzU1MzY2NDgyNDQ4OTg7QERGSENAOzY1MjE0
-NDM1MzM2NTY1NTIxMzQ3Njg5ODQ0Mjc1NTY1NjY8PT8/Ojk4OjxAREZJQkA/QkA+
-QEBFT0xITU1ORkhNSERKS1JKS01PTU9QUU1OUkxPUVhVUFRbXVlYWllYXF1cWVpS
-VFRXU1FRU1FSV1xdWVJQVFBRUU9QSk5QUlBSSU9LTExHPUA5ODY0NjUxMTMzNTY0
-MzU1NTpAR05OSUdIT1JSTUtPV1xUX1tXVlVaWllXV1hgWVZVTVFTU1RSUVNLSkxJ
-TVBSVElKUUlKUFFXW1ZRU0tTWU9QUU9PUVRPUE5OS0pBOz86OUBGSUVJSEtORUVC
-R0pCQkFDQ0lCQT9BQUlISkJDP0JFRENDOzY5PEdOTE1JS0RIUExFR0hEQD1AOzc3
-NTQ5NzM1Njk3MjQ3ODc4Nzk6OjtESkhIRU1QWF5UUFJTWFRVWVdQSUxOUE9QVFBP
-UlZWVFdbXFhVVVZQVVpWU1VOTlBVV1NPT09XWFdQUVNZYmx6go+Vm5+foKOmpaao
-rKyyraysrKacmJSLhYeJkJOWnZ2gnp2gnpuamJWUk5KNkI6OjIuEfnyChImLjouO
-kI+Nh4WHhYiOmqarr66vr6ysrKmnqKalop2cm5uWlpeZlpaUko2LiYqPlZuenp6i
-pqqprKmrq6urrq2sq7Csp6yqq6KpqKemop+ZkIl/dXFsaWZnb29qZmdqaGdkYGBo
-cXqBi4yMhn59end7foGDh4qPkZKTkI+TlZibnJygpampqKirqamrsLS0srGzsrOx
-sLCwsrCwsba2r6yqrLGwraurq6qppaampaKjoKOenqCgoqSkpaWlpqWmpaWmo52c
-l5KOjI2Vmp+ipqutrq2po52ZmZmcmZqXmZmanaKfmpygpKOgoKClp6emp6eprq+s
-raqoqKqrqrCrra2trKqrqqysraqopqmrrKyxr6+ztrCvsK+wsLCwsrO2srO1t7Ow
-r7O0tbW4tbWwrKqorK+vr66trq6kgGRaWVlUUk9PTk5UUU9NT01ISUpRUUxNTlFO
-T09MTExOS01OTkxNUFBOS0xMSkpOT09RUlFQT05MTFFPTVBMTU5OTk5MUFNSVFJQ
-UVJSUU9TUVFRUFFRT1BSUVRSUVVTUlJQT1NSUVRSUE1NT09QUE5PT05PT1JRUFFU
-UVNZVFJSUFJUVlRVUlVWVlZYWlpZWVtbXFlbXV1gYWJgXmJgYWJhYmJhYWFjYF1e
-XlpaWVhbW1hZW1taWGibuMnS2d3g4+bo6OjpR0E6ODg5QTw9Q0JMSkVFPkE+Q0VG
-QURDRUJDRUM+QERERUBBQkVFREJEQkFCQ0VCRUVGQj1FSEBAQ0JCREJFRENBQEBE
-QUA+O0A+Pzw9QD48OkBBPEA/PUA9QD08PEE+PT0+QT4/QkRDSEZHRENBQkJDQkZG
-RUNBQkRDQ0hJSUlJSEpHR0ZGRUdOT0xLSktRTU5PTlJPVFJLSkhNT1NQTE1NTUlJ
-TU1STkdJSklGR1BMSUhJSk9MSkdBQkNDQD4+RENCQj05OTk6OTk1NDU1NjQ0NDQ2
-NTc0MzQ0Mjc5OTs+Pj9GSUI5NDc0NTQ0MzI1NjUzNDI0NDMyNDI1NzY4Njc1Nzg2
-MzU1OTo5Oz08Pjo9REJFR0Q9Ojo9PT0+OjxHR0dJS01HSU9QVlBLS0hTWlVTTU5N
-SUhISk5QV1FOVF1hWFtdWVZZWV1WVVBOUlNPUVVVVFdbXl9bT01QVVJPT1JTTk1N
-TVBSV1JWTEk/PTk3Nzc4NjM1PTgzNTQ3NDQzNzg6SlFISElNT1xWTlFTW1VRVE5O
-UlFaV1BWVlldUltUUFFRUlRRW1ZMUU9LU1NLS0xITU5TVVFVWFFRWE9RVFBRUk9R
-UVVVVFJNSkM/RD09QERNREpCR1BHSUNGQUJDPT88Qz4/P0FDR0JDPUBAPUNFQjo5
-Nzo5Q1JSUktKQ0RNSklIRkFCQT43MzY3NjU3NzU3MzQ2MTY1MzM0Nzk6PENJR0FA
-SElUVlVWT01UV1VVXFJOT1dVUVFST09RWFhUUlhYU1FTVVlaVVJRUU5QTk9RTk1O
-WVlVVE9OVFphbXh/i5Scn5+foZ+hoaaoq6qsra2sqqOelpOMhYaJjZGTnJ2bm56d
-mpmalpOSk5KPjo2JhIN/fYCFjI6OjI2TkI6Hh4WGho+co6qtrqussa+vraioqKeh
-paGcmJaYl5iWkpGQi4yIi46TnaChpKGipKqqqayorKuur66ura6vrqyuqKeoqqmm
-opuOeWlpbW1uaWVub2toY15cX2Jna3SFiouNioqGg4GAfX59goeLipGTk5SVlpWW
-mpiboKGjpaSnqainqa6wsLWusraxsrO3s7Cvr7Czs7Gsq6yvsq2rq62sraqrqail
-oqKgnp6cn6CfoaWlpaepqKenpqSloZ2WjoyLkZianaCnqqunqqejnJuam52enJ6e
-nJydnZ2gpJ+ipaWkpKinqairrauprq6sq6moq6qoqKqqrq+rrK6uramqrKypq6+w
-sa6tr7Curq+vra+sra6vsrO3t7O0sLGtsbGxsbKztK+vraqrrK6usa+0taiDZVtY
-WlVUU05PT01PT05NTU5JS09OSUtJSU1LTktJS0pLTEtMTU5OTk1LTFBQUFFPTlFR
-T1BOTE1KTE1MTE5PTk9QTE9OTk9RUlFSTU5PVFNQTk5QT09RUU9QU1VRUVFSUVFR
-T1BSUFFRUlBSUVJUTk5PUFBRVFBRU1FRVFVWVFFRU1NUU1RUWFtbWFlYWVtZW1tb
-XlpbW15hYmFhYmFfYWFgYF9jXlxfX2BeW11dWFlZVlpcWlpcbqG6ytLZ3eDj5efo
-6upGRUI9OztDR0ZER0hLSElDP0FDQ0VGRkREQkNEREM/Q0VHRkI/Q0RHQ0NHSEVB
-QURBQ0NEQEBDRkRCQkNBQUBAQUREQ0JAPDo6QEFDQjw9QD0+PkBCPz89Pj8+PUJA
-QUE/PD1AQ0VCQ0NERkVFQ0NFREVJSUdGRkZHRUhPT1FQTk9NUlBOTElITVBRT1JQ
-VFZTUlFRUVBTUExGSExNTUxLS0hISEVHSk9MSEhKSkhMT0tOSktMTk9OTkhIRUhJ
-RkVGRz89Pjo4Ozw5Njk3NzU3ODQyNTYzMDI1NTQzNTg3NzhAQD1CQDs3MzQ0MjQ0
-MzMzNDIwMjAxMzIzNjo3NTI2NTc1NTc2NjY5PDo8PDw9Ojs9RUZFQz1APTtCPkFE
-Q0ZDR0tITElPUldbWUxJSkxRU1NNU1RMSEVITEpTU1FQV1xWWFRXUk9RVldVUVBL
-TU9SVlZRTldfXl1SSU5TUFBTU1FTVFVRVVFWUk9LSEA/ODs9Nzc3NzU8RDU0Njg3
-NjU5ODtAR0pHR1BNU11ZWFZWVUxRT0hPUU1UU1RcVlhVU1tYUVRTU1hXUldVVk9O
-VElKT0pPUU5RUEtNVFBOT1FUUExTV1VXU1RVUkpGRURIQEBDRk1HSUdQTENJSEQ9
-Qj47Oz07ODk9RkNFOjpAQj9AQUNDPDs7Oz1GTVBOT0xISE1MSkZBRkRAOzg3NzM3
-NDI0NTIyMzM0NDMzNzU5ODw8Q0JCQERISE9WWVROUlZYV1ZZVFJPVlNQVFROT1ZV
-UUpOVllYVVRXWlhZWFVPUllcU1FTUkpNTVVSTk5RWFxlcoCLkZuenZ2cnZ6iqKSj
-qKyqrK2qqaObmI2GhYaKj5OYmp2bnp6empmYlZaUlJGNjI6JgX54fIKHjpCQjI2N
-ko2JhYKGkZukqq+uraytsLCtqqyoo5+fm56bmZial5OQkY+NjI6Rk5WdnqGioaSk
-qKmpq6mrq6yusKyqra6urKypqKKkpaSflYNwZWVmZW51bm1wbWpfVFhdX2Zvfpad
-kIeKj4qKhYKGhYiGiI2NkJSUlJOVl5mbmp2ipKKgpKqrq6iprKuvtLGysLSusbGx
-sbGxsrSzrautrrKwrquura2sqq2tqKOhnZ2hoaCen6CjpKKlpqioqaekpKCbmJaQ
-iYqQmZqdoaeop6qoqKGcmpubl5qanZ6cnZ+dnqCipaSipKakqKioqKmpq6qpp6ip
-qaimqqiqq6yvrqqrrqqrr66pqKmsrLCxsK2qrK2tsK+srqyrra2xsrO2tLOztLCx
-rrGys7Cws7GtraysraywsbCyrYtpXVhVVFVUTU5PTlBMTUxJTU1MTU1RTExKTk9M
-Tk5PTEpMTExNTk9PTlBOTk9QTkxNTEtMT01OUFBOTE1PTk9NTU9PTlBPT1BRUVFR
-Tk1NUlBQUlJTT1JQUlNSUlJTU1FRUFBSUlBSTlFRUFJSUk9SUE9RVFRTVFJSVFVS
-UlJUUlNVVVJUVFdYWFtZWVdXWFhZWFtaWlxeXl5hYmBhYmJhYV9jY2BfX15eXVxb
-W11dWltYWFlXWl1lm7vJ0djd4OPm5+jp6U1OQ0JFPkJISk1ISkZARUVGSkdFSEVF
-RkNDRkRESERCQUREQ0JCP0VKSUdGR0RGREZCQ0M/QUJCQ0NCREE/QkJBQ0RDQkM/
-QEBBQT5BQ0JCQ0RCQUJBQD9CQ0JBP0BDQUFAQEFFR0RBRERESEVDQ0dJTExKR0dK
-TE1OUE9SVFdUU1BRU1ZRUFJPU1BRUk9SUFFTTlFOUFJPSEpJSktLSUdKSEhFRURH
-TUpGRk1HSkpKSU9PTk1OT01JSEZKTkpIREZFQ0JCPzw8QUA+PD09Pjs6NTU3NjQ1
-NjQzMzM0NDQzNDg6PTo5Nzk3MzQzNTI1NTYzMjMwMS8yNDU5OTc5NTU3Njg4ODk3
-NjU8QD08PUA8ODlDQ0BFP0RCQD4+QUVISUNHTEhHSFFQT1RTSkhITE1OT01YV01M
-RktQUVZUVFNaYFtcWlRUUk9NT1ZXU1BQUFRUUktNWWBdV1ZUTlNSVFdWVFJUV1hY
-SUpOV1NHQ0A+PUA4OTc2NzkyMzEzOD04NTQ3OTg9S1FJTFBRWVtSTkxRU01OS1BU
-T1VdVFpbVVxWV1lVUFVZWFhTWFdVVVJVUUxOUUxMUFBMSklQUE5ST1JVTU5XV1JR
-T1JRUE1JQkRCPUZBSUZISEdERkhMR0FCPzw6Ozs6PDpBPz49QEA8O0BAQEVCQDo8
-RkdLRUxRTUZKTEhIRktNS0M8Ozk1Nzo3ODQ1NzczNDIyNTY3Njc3Oz9DQT5ETU1K
-UVdWVlNXWltYUE5RTk5PVFhYVFJUVFFNTVNYW1NVXV1bXFtYUVFRXmNZU1NSSE1Q
-T09SUFRXXF5odIGOlpmdnpucoaSqsaqpra+wr62oqaCako2Ig4OOlJOYnJuanJ6d
-mZiXlZWTkI+NjIuGf3l5foaKi4yRk5GNjYqJhIiVoKWprK6vsK6vsaysrKehnJ2h
-n5qZlpeYmpSQjpCOkZSTk5ygoKKjoaarrayurK2trq2uq6yur66vq6moqqihoZiK
-eWxjX11dY2pya25uZl1YVFdXXGR5lqaUeX6Ih4mHiomKh4mKkJGTlZWVlZOYm56e
-oaClpqamqKqsqqmssLKzs7W2sLCsra6vr7GzsbCtrbCwsK6trayvrqyuraqopaKg
-nqCfo6WjpaOkpaSkpKWmoaCjoKCblJCLi5OZnJyhpqipqKampJ6cnZydnqGgmpuZ
-m5ydn6CeoKGhpKWmpaamp6msq6mopqempqirqaevra2sra2sqqytrqylo6mura2r
-rayurqmrrq+urqyvrq2ysrWzsrO1srK1tLSxrq+uraqtrausq62ysrGkh2RcWVZU
-UlFTUVBQTlBPUFNRUU9PT05QTEtKTU9RUk1NTE5QUExOTU9NTUtMUExJSklLTk1P
-Tk5PUFFQUFFPUU9QUk9LTE9OTk9QT1BPS0xMS09QUVJSVFNRUVFRUVFUUlFRT1FR
-U1RSUVJRUlBRUlFOUU9QUlFSVFNSVVVUUlJTU1JTU1dVWFdZWFhXVVdZWlhaWl1d
-XV5eXV5eXWJgYmJjY2JjYV1eXl9cWVxeXFlaXFhZWV1fXGOPucnS2N3g4uXm6err
-UUlMSkZAQUdHTUtERElKRUdHRElHQ0NGRkVCRkVISEhERUVGSkVGQEJCQ0VDRkVF
-Q0ZFQD1BQkFCQkJBRENCQUJEQ0NDQkRHRUJEQ0FAQUdFRkdDQUNCQkNEQ0VGREdI
-RkREQkRJR0hHSEhJSklJT1JQU05OS0lPU1RUUU9TU1ZXUFFTU1RSVVRTUUtPTlBR
-VFhUT05MTUlIRUZLTEpJSEdERUlMTUdKSklNSkpNTk9MUVBSVVpdUE9MSkpKR0hH
-RUhFQkJAQUBBQj9CQkA+Ozs8OTk5ODc7NDI0NDIyNDM1NjY1NTU9PDczMTMxMTQz
-NDczMjEzMjU1NjM2NjU4Njg4ODk1NTY2ODg4OTc9Pj08Oz49P0JBQ0JFPz5GRERI
-RUpHSElJUlFGS0pHSEVKSk9TTlRWUE9KTFRQUlJWV1ddWFpVT1JTWFNNUFBTVlNS
-VVVUUlRZZmFYU1RSVldbWlZWVkxRU1RKSExWWE9IQ0A6PDc4NTYzMjQxMzM4ODc4
-MzQ3NTZDVVFLVlFPVktLT0xSUU9PU1pWUlxcVFhUVF1WVlZPU1NUWllVV1dWUlJW
-VFBST0tNTEdLTUpQT09PSktPTlFUUE1MT1VVUUxEQkZHSkNISUVFQUdEQ0FCQEE5
-Oz07Nzk7Ojo7PT4+PTk8PEBBRURCPDtPTUpHSEtJSlJNSUxOUkxLREE9ODc2NTY2
-NDQ1ODY3NzczMzQ1Nzg4PUZBPD9FRktXVFFTTVZYWldST1BTVFFUVFZTWFZXVFVS
-U1lXVl5dVVlbU1RRS05TWFdUVVJQTE5LSUxPU1ZZV15qeomVm6CdnqKgoKaqq6uu
-sLGyrq2ppJyVj4qFhIaKjpSbnaCjoZ6cm5qYlpOPjY2Ji4N+eXp8g4mMi42Nj5GN
-iYiJjZOepKqtsbCtsbGwraemo5+foqSgnpybk5GQkI+Pj46QkJGZnZ+io6Slpqqu
-rq+oqKuvr7Gxrqypq6ysqqqmo6GYkIh9eW9hYmdkYGJlaW1jX1JNUV1dYW6Hl45x
-cYGHhoeIiIuOjoyQlJKWmJmXlpiam6KioaKjp6ioqaysqa2ur7Gvr7CwsLCxr66t
-rq+zsa2tr7Ktra2qq6utq6uop6OkoaGfnqCfpKOhoaKjqKeopqSgnJucmpiTkI6Q
-lJqfoqGipairq6uimpuen56enp6em5ydmpybn6CdnZ6lp6eko6OqrqqopqilpKOl
-paeoq66wra6xr62vrrGvqqelpqusrbCts7Svra6sraqsqayur7Cvr7GztrWwsbCx
-sa+ysK2tra2sra6urrGys6qJZV1dWldUU1JPT05PUlRMTE5QUE9MTU9NUE9SUVBR
-T01NTFBQS0pLS0xKS01NSkpNS0pNUE1QT09QT05NUFJRU1NQT05OUFBQT1FTUFJR
-T05WUU5QUWFcU1FQUlBSUlNTUlFTVFJSVFRPTU5RUlFSUFFRUlJUUFNTU1JQU1NW
-VFdRT1JQUFJVVFJSVFZWWFhaWVlZWVpcWlhYW19iYmFgYGBiYWBjZWJhXltdX11b
-W1lbWVhZXV5eY5C8ytHY3OHk5efo6OlZUVFIRUlWT01KUEZGR0dHREZJSkdGRkVF
-REhGR0ZISkhFQURERkJBREVFQ0ZHRUZEREJEPz5BREZIRkFCREZCQ0dEQUZFRUdJ
-RURGREdFR0dKSUlFRURDQ0ZISEhKSlBLSklFSElPTE1NTk9OTk5TVFNTUU9PUE1V
-WFRSUE5OUU9PT1FUVFJQUVFRTE9NTU1SVk9PTkpKR0VFSEtJS05EQ0VERUhPTElK
-S1BOR0xNTk9TVFRWU05NTExLS0tMTUpMUEhHRkRFQD5BQEBBQkE+Ozw6Ozg6NTIy
-NDQ1NTAzMy4wMTY3Nzk5OTczMTc1NDg0MzM0NTY2MjMzMTYyNDY1ODc1NDI1NDQ2
-ODk6OD88PUA+PkNCQkFAPkZCQUhGRk1HRUZJSUhOTkxQTk1NS09OU1JLUE9SVE5L
-TlBVUVFUVlhUWVdTUlRXV1JKTlJYWFZWVFNSVFZfXldQUVNTVFpZV1dWUlNYU01O
-Tk9SS0NGRT1CPDs5ODc4MjQzNTU3NjI1NDM0NzxQXFRTUklNY1NQUlBYVVRWTVVW
-V1xTT1JWVFZTUlJLUFJUVlNTWVFSUlNXUU5LSkhOTUxMTFBVTUxLR0lOTlFOSEtO
-TVBKRkA/REpTSUtHR0c/R0E+Oz06OTg6PDw5ODg5OzpAPD0+PTtCQEZGQTxBQ1JL
-R0tIRUhMTktJT1VPS0dCQDo3NDI1Nzc3NzU0NjMyMzQ0NTc3NDU6QEBAPkJBRE9P
-TklLU1VVUFBVWFRVWF1XVVVXWFJRU1NRU1hcWFRNU1VXWlJPUFJYVlZXVFdVT01M
-TlFTWVpZW2N0hpSYmKGgo6Omqampqauvsa6vrKajnpmSiYOAgIWPlJebnJqeoKCb
-mZiUko+MiYaCf3p5fIOEiIqJiYmJjY2IhISIlKGqrbKwsK2vsbCuqaekoJ6hoqKe
-nJqWk5CPjY2Mi4yUlZqZoKCjpaiorqqpqKmrra2vsbCtq6mrra2sq6iln5SLhoWD
-fHNtbGheWVpfaWlhVElLTldcZXKFhGlhdIaJiouRkZGSkZGWmJqZmpqZmZ2goaKo
-p6WnpqerqqyssLOur7KysbOysrCwsLGtsLGxsa6xsq6vq6inp6mppqKgn6ChoaCf
-oaKhoaKjo6OopqenpKKdmZeXl4+OjI2Slpugo6Wpqaqmp6Kbl5eamaGgn5+enJuc
-np2bm52bm6GhnqKipqSqqqunpqempqanqKmpq62urK6ysK+vr66rpqanq6+wsLCx
-sbCusKytq6qoq62vr7CtrK2ytLW2s7GxsK6trq+vrq2urrGurrGtpYVlYV1ZWFVR
-UVBNT1BQT1BMS05OUFBSTU1NT09PTk5PUE9NTk1MSU5MTE1PS0xMSUtOTk5RT09R
-U05RUFFQT1BQUFJRUFBOTk9LT1FOUVRRUU9RTVBTXlVPT1BSUVNTVVVST1NVVFRT
-U1ZTTk1QUVJTUlNTUE9SU1JTUVZWV1hZWFhVVVZSVFNUVlVVWFhZWVlZWlpdWVtb
-XF5dYF5iYmFjY2NiYWBgX19cX2FdXF9cWltZWFZaXl1rkrvJ0dne4OLl5+jp6k9Q
-Sk5KTUtMSExKSElESExJQ0ZERERIREVFQkdCQUJESEZDQ0NDQz9CQ0JCQUZEQ0VD
-RkREQ0VHRkpGRUNEREVBRUFBQUZJSElHR0VDREhHR0xMSUpIR0lIS0xMT09PTlJQ
-T0xMUE5UVVJSUVJUU1RQUFFPUFBPT1BSTk1NTU1OTUxQUlJSTU5MTUxNT0xNTE5O
-SktLS01GREVGSkxQT0hEQkZHSU1LSkpGR0tLT1BNTE1LTVBRUE9NTk5SUE9QT1NV
-S0ZISUVBQUNGQEZHQUBAOzw9ODk5Nzk3NTU4MzExMzUzODo4Njo7OTU3NDQ3Nzc1
-NzQ4NTQ0NDM4MzE0NjUzNzMyMTY4ODk6Ojk3Pzs5Pj89QUNCQkZCQEBCR0pNS0pK
-RUhGRElLTExMT05PV1FXXFBMSk9YUEtJTVZUVVNWVVBYWVFTV1dYWExJS09PUFNT
-VldUVFdXWFJSUVFWVldXVlhXT1BRTkpISUdISElIS0k/PTs7NzU0MDQ0MzQyMzMy
-MzU1N0FWXFNOSkdUW1JRU1VZU0xLUlZeWVJKU1NNUlhSUUxNTlFOV1RXWFFRTk9Q
-S0dNS01PSkxRUE9QT1FQT09PSkZGRk1RT0xKREFDRE9FR0ZBQkBBQURCPz45Nzk1
-Ojg3Ojo3Njg6PDk6OkM/REM6Oz9DTk5LSUlLT05KRURLTlBMREA7Ozg2NjY0MzY1
-NTc0NjY4ODo2NTU6OztDREVDSENDTE9RT01QS1RWUVRYVVlaXFxcV1hWUU5RUlFQ
-UU1MTk9QUVVPUlNRU1FOT09RUlVSTU9TVlRWV1daXmp8i5een6GipKSnqqmorayt
-sK2op6OkoJiQiIJ9goWPmJycm5yen6CcmZSTjoyJhYF8eHh6e4GFiImKioqMiIWG
-hoeXpKmusbCwsLCwraupo6KfnZ2enJyZl5WTj46OjYqKjI+TlJufoqWmqKunqKqo
-qaysrKuurKqrq6qqqamrqaWdjYeIiIV9dXh1a2NhXlxkc3FZS0ZLU1tfa3V0YV9y
-hoyNjo+UlpaXmZmcmpmTlJecnp+co6anq6iqq6utrK6xtbGzs7SytLGxsbGzsrGw
-qq2tsLCvrq2prKWnqKyrqKGenqSkn6CjpKOloqGjpaGjpaakop+blpWUkYyLjJGY
-oqSnqKyrqaqpoZyenaCeoaCfnpuam5ucn6Khn52fn5+gpaSioKWpqaanqKinpqmp
-qaqtrK6tsLCzsq+uq6utraiqrK2wr66tsK+qqa2tq6yurq+wr66vr7CxsrGxsbCu
-rq+wr7Cwr7Cyr62vsrOsiWZbW1pbVlNQUFFRUE9QTktKS0tNTk1MS0pMTU1NSkxN
-TU9MTE5OTk9OTUxLTU5LTlFOTlBNTU1NTFBPUE9QUFBPT1NRT1BNT09OTE5QT09M
-T09OTVFRTlBUU1BTUlJSUVBPUVBPUFJUVVFPUlNRTk9QUVNUUU9RVFRWUlRTU1NU
-UlVUV1dUU1dXWFpZXVVXWVlWWFtbW1xfX11eXl5hY2JgYmBfYl9fYWBeX1xdXV1a
-VlZbWVpZW2eSvMnR2N3g4+Xn6erqT09PUFFSUVFSUUxGTUdER0pNRkZERUVER0VD
-P0JGSUZGRkVJSEpGQkFER0VFREVGRUZGQ0RGRklHRkVEQ0RDQEBARkNERklJR0ZH
-RUZKS0pKR0tIR0tMSklJS1BQUFJXWFZUVE9NT1FTT09QUVFRUVBRUE9OTlJNT01L
-TU1MTE1OTlBPTlBOTEtLSElNSUxISElGTUhHS0dDRUhLTkxOSkdHQkVHSEhGR0ZI
-S01PTkhJS1BQUlNQUlNSUVlVU09PVFBMSUdKRkhERUdGR0pIRkNAQT89PTo8Ojg2
-NTU1NDMzNDQ2NzY3ODo4OTY3Nzc3ODY1NTMzNTQ0NjI0NDI2NTM1NDU6OTU2OTs5
-OTg5Ozs+PT0/REVCQ0BDRkdHQ0hJREdCREdKTUxQT0hLTE5UUVBTTk5IS1JRU1FU
-WVZZV1NSUllWV1JcXFtUUE5KR0ZIUFVSU1JQVlhXUFBRUFRVVlJWW1lOSkpOTU9M
-TEtLRklGSUFDOTk4Ojk4NjIzMjE0NzU2NjUzOkldV1FRS05cWlNYVFNQSk1OTlRQ
-TFFRUVJSXFdNS0xRVVFZXVdfWVNUUU9KS0xOTkxLR0tLTFNOTFJRT1BKR01QTFBO
-UFBJRUZES0dCPz0/Q0M7Pj8+PTk5ODk0NzU2NjU3OTg7OTg5PT89Ozk7REZLUVNP
-TUlKUUpHSU9MUUpEQkBAOjg1MzU2NTU2MzU3Nzk1NjUzNTU3Oj9FRkpNSE9SVFZN
-TVFSVlBSVVZYV1pZVFZVU1ZaVlhZU1NQUFVQUEtLT1FWUktLT1dQUlJPTk1OT1JU
-V1hTU1RdZHaGlaGkpKWlp6apqKiorK2srKunoaCclpGOiYJ8foaQmp2eoZyZnZ6c
-mZWSi4WEhIB9e3p/g4mMi4uKiomIhYKGipelrLCysrO3srKuqaejoZ2dnJybnJiW
-kpKRkY2JiomNi5OUl6KkpKWpqamnqaqpq6utr66ytK2xr6upqamopZ6QhIeCf3Zz
-eXZoZGZfXF9rc2VTVVhYWV5ncXBmbXuHjo+UkZKUl5eYmZuenZiXmp2doqGjpqmq
-qqytrauusrOysra3tbS0sbK1srS0sq6trquvr6+srKysr66tq6qoqKGkpKGin6Ci
-o6OfoaOlpKOlp6WioJ6YmJGRkIuPlJmcn6Omp6ytq6qlnpudoKGenp+enZ2foKGg
-oaChoqChn5+jo6OlqKalp6aop6eoqqyrrq+vramrr62xsa+tq66qqausrrOvtLKu
-q6ywra+tra2tsLCxr62tra+vsbKxr6+xs7KvsrGvrausq66wtayIZlpaVlVWXFxT
-T0xNTlFSTEpLTk5OT05KSElNT0xMTEtMTk9NT1BPS01QUFBNT01NT1BPT1BNS05Q
-S0pMT1FQUE9OTk5LTUxPUU5MTU9SUU9QUFBMTExNTk9SUE9RT09TUlVSU1JUVVRR
-UlJQU1NSUlFQUVFQUFBQUlRSUlVTUlFTUVFTVlZXWllXVlZbWlpbWVlZW1xcXV9e
-XV9gXmBkY2FhYmFfX19fYl9eWltgYF9dW19gXFpeaJa8ydHa3eHk5+jo6epUU1BM
-SkVKTFBUUU9HRUtHSEhHRkNDQ0dFR0RCREBAQURDQkREQUVIQ0JDRkZDQUFEREhI
-R0ZHRkZFRUdHRUA9QERDRERIRkdGQ0ZJQ0VISkhFRUlNSlBNSk5OTExMUFVZWFNT
-T1BRUFFOTlNUUVBOT1BQU0tLTktJS0lJTEpLUE9KS0pNT05LRkNFRkhDRkdHR0ND
-R0hHRERIR0tLTVBMS0pITEhISEVHRkVNTEpLTVFSU1FWVlRRWVdSVllZVlVRWFRM
-S05MSEdITEpKTk5IR0VHSEA/Pz4/PDg1NjQzNDQ1NTU4Njk7PDo6OjY4Njg5MzM0
-Nzc3MzU2NzQ2NzY2NTc4ODo2Nzo5OTs9Ojk9PD08P0FJRT9BQkdDQklFREJCR0BG
-RUhMRkhMR0tMTVRNS0pMT0pLT01STEtUV1xaWlhVWVNSVFpdXVdNUE5PSktRVFBO
-TU9VWVlPTVBQUlZUU1FWUk5KS0xISVNSTEZFSENEOz47OTk6ODU0NDM0NzQ1MjUz
-MTI2PE5TUExOT1ldVFVUS05NTUtKSk1NUFJNU1dZXVJNUFFVVFZZVFZdUkxMSk1L
-SkxRSEZLSktPS05MT05SU09NUFFRUlFSUkxHRkBKR0FAQUA+Qjo6ODc5PTw3NzU3
-NzY1NDU2Nzg6OTk8PDo7OEJOTEhLVlFMS0pJSkpLTFRMSUVIQz46NzczMzc2NDY4
-NjM0NDQ3ODM2Njc4P0VDQ0tMVFBPT0pOU1VZUlFPTk9WWVtWUVFNVF1aWFVQUFNa
-U1RSS1FRUFVaUlJaV1JUVVVST1BNT1NYWFdRUVVebXuOmaCipKSlpKmoqKmsq6qt
-q6mlo6Gal5OMhYB7foaRl5udmpmcn56dmJKRjIaFf3t5en+Dh4qLi4uLi4aGhYaO
-mqevs6+0srOzsrCvq6Sjn52enZqVlJmYk5KPjo6LiouRkZWZoKelp6epqaipq6us
-qq2tsK2vsa+srauuraqpopGDgoJ9cWtwcmptZ15cZHR3aWBgam9lY2txc3eAh4uO
-[base64-encoded binary payload omitted]
-T09SUVBNUFFQUk1NTU1OTU1OUFBSUVNUUVVeXVpSS0lJRkpRVVFLTFBUVVZbV1pT
-UlFMTVdTWVRTS0pJSkhAPj8/PT85ODIzNTYzNTQyMzU0MjM1MjExNDQzNDQ2PEtW
-V1hXVFJSUk1OTU5RVVNSWFlUUVFOTVFTT05QVFNUU1RWV1VZVldXWVxdWldZWVVX
-VllURUI+PTo9Njg0MzI0OkRKT09TS1BHOjtNRDo6PkFCST83QUE7QD48Ozs6Ozs5
-Ojg6PDgyNzY0MDI1NTMzMTQ1MzMwMDIyMDI2NDg3Ojs9Q0VERktLTFBUT1RXV1pX
-WFVYWVlaU1ZZXF1UU05LSUlJSUlFQ0lLUE9NT01HTVFPSkhISEhKUFdeZWlobG1w
-dHl5fH1+g4ODh4mNjIyPjZKSkZGRj5CRjoyMi4yFh4N/eXJlVkhBPT5ARFFkcYOS
-n6atra6vr7K0tK6uq6uopaWnoKGcm5WOj42JjY+PkI6Sk4+Sl5qanKKjpKGio6qs
-q62srrCysbS4uLi2uLe5uLS0tbOyr6uknZaYmZ2hqrCsrLKtqqqpqaalqKipqaen
-oqWnpKaloaOko6KipqiorKmrrKqtsLCwtbe2trS2tLa3trG0t7m5t7i3tbO0tbm7
-t7m5ubq7urm1srS2ure2urm4tLW4t7awrquurKurqqmop6alpaiqpaGem52fmpye
-oaSjpamtra+tq6qnnpSVl5WUk5qYm52goaGhoKGioJ+goJ+foaOkpaappqempqal
-pqirqqemp6mpqKmnpqKmpaWlpqejoqSor7W5uLmzsLGzsrW3s7S0tbW2trSzsbCz
-tLOxtLWyrK2zura4t7S0s7G1trKohWdgW1lWVFNQT1FPTkxLTExOTVBOUE9OTk5O
-TU1NTktJSkpLS0xPT05QTU1SU05PTlBQTE1NTVFUUFBPUlJVUk5NTk1PUVFPUVFQ
-UFFPT1JTUVNUU1BOT1JQUk1NTE9QUlNOUFBSU1FOUE5NTExNUE5RT0xOT1BQUlJT
-UU9SU1BTUVBQUlRUUVFUVFRUUlJUU1NWVlZWVmBcWltZW1xaW11fYWFfYmBeXWBg
-Xl5bWVdZWltaWVxZYW2bu8rS2d3g5Obo6OnqWFRQSUlKRz9CRkNDQURISkFFQkRH
-SEJGREhEQUFFRDw9REZBQkRDQkFDPj08Qz07PD05OTY2ODw3OD45PDs8PTk7PDg7
-PTo4PDw9ODk6Oj09PT49Oz5APz9APj09P0A/QEBAQEJGR0ZISUpKSk1NUVBRU1FR
-U1JRVFJTVFRWVFZWV1VRVFVWW1lXV1VVVk5MUk5JSEdHSk9PUFNPS1BPT01IT09T
-UEtLRkZHQkRERkdHRkdJS0pGSUhOTEVFQ0dJSU5OR0dDR0dGRUNGRkNGQkJCQEVE
-QkVHRkhERkpKVE9KRkNKS1JWVFNTV1lXWFxVUklJSENAOjk4NzY3Ojg0MzQyMjIz
-MTU2Mzc0MzMzNjk3Ojc3NjQzNDQ5Ozs8O0A+QEI7Ojw9Pj9APkZIQ0RGQkA6OkBC
-RUZBREJERkRDREZLS01MRUhHSEtRUlNOT1BQT05PTk9UUlBPTEhMTFBVV1RVUE9S
-Vl5dVk5KSEpKTVRUTktMTE1RUFJRTU5UV1RPVFFVUU9NSklGR0hHRD48QT85Ozg3
-NTM4ODY3NjM2NTUwMDAwMDIzNjc4QExSUVFTVFNTUU5OT1FPT1RZXVtYWVJNTEtP
-UlFUUVJQUlJXWlldWlhbVVZcWVlWVVJXV01ERT1APDo4NzUzMzQ7RlFOSE9OUUU4
-NUJCOzc8PTxDQD1JTkI+QDxBOzk4ODU3OTo3Nzg2NTM1MzI1NTc4NTU3NjMzNDQ0
-NjY2NzY4OD5ISEhMSU1PUFFPUlVWWFlXWVxZVlFSVFNTV1hUTUtIR0VGR0ZJS1FR
-TUhLSUlPUk5LSk1LRkpOVV5gZGltbnBydXd6foGFg4KFhomKjpGPj5CRkZGQkJGT
-kI2KiIeGhoN9dWlaS0JBQ0JEUWJvgZOhp66xsLCurq+trKupqainpaOinJmXlJGP
-iomMiouMkpOTko+Ump2ampyen6OnqKqqq62srrS1tre3t7e3ubq3s7a3t7Kys6+r
-paSmrrGwrrCzsa+rqqqnqKerqqmoqKimo6ilp6qko6CjoKWoqKanq6qsrq6xtLCy
-tbS3trW2ubq4t7a4ubm6vLq2tbm6ubW3urq4ubm5ubaytre5ubu6uru5tri3uLOx
-rayoq6yurqmpp6ilqKajn56cnpycnaCho6SjqKqqrK+wqqCYlZibmpWSlpmanp2d
-nZ6foaCfnp6goZ6foKCipqaloqWlpaekqKmlp6mpq6ysq6aoqKelp6mlo6Skn6ev
-sbK0urm3t7a0traytbW5tre3t7a1tra3trS1t7eytrS0trO0tbGztLa6vKuAZWBe
-V1ZVV1NQUFBSUU5NTU1OTU1PTkxLT05OTExMTU1PTE5PTU5NTk9OUVJRUVJRUk1Q
-UlJTTVJQTU9NTVFRTk5OUU5NTU1PU1BTVFBMT1JSVVRTUk9PT1FPT1BOTk5OT1BP
-UVFRUFFOUFBPTUpNUFFQUE1OTU1PUU5OUVFSU1RRVFRST05RUlJTVFZXVlZWVFpY
-WFhWVVhZXF9dXFxdX1xgYF5eYGFbWl5eXV1ZWVZZWlhcXVtdY5K9yNLZ3uHj5ujp
-6upUTUpMTEZIRkZFSkpHQkRDPUJBQ0JGRkFAPkBAREJCPT1CQ0hIQUJFSEZAOzw9
-PEFBPDs4PDs6OEBCQEA8Ojw6Ozw7OTo8PTs6Ojo5OTo2Ojo4OTo8Pj08PD0+Qz8+
-QD0+PD1DQUNERUVFSEhJSkxMTk9TUU9RTlJQU1RRUVRXWVtcUlJSU1hXUlZWWVlV
-T05RUUxLS0hKTFBOT0pITUxOTkhLT1BPSUtNS0tGR0hESEhJSUpRUkpLS0tNSElK
-SklITEtLSkZFRUNCRENGREdHQ0VDQUNDR0dFR0NHSkdOUEpJSE5MT1VTUUlPV1dV
-VVVSTk1KR0A6Njc1NTU0NzYyMjEyNDQ1OTU1ODIzMzI1NTU1NjUyNTg1Njg5Ojo7
-PDw9Pzo7Ozo9PEFDRURCSUxOS0dHQT5AQT1BQUVHRkNCRkpHR0dHR0dJS09STk5S
-UlFTUFFQT1RaWE9MTkxOUFRRT05NUFJXW11SUU5KTU5LVFhUS0tOS0xMTFBNTVRZ
-WlBQTVFUUkZFSEtMSkdGRD9EP0I9ODc1NTc1MzM3NjU1NDUzMzExMjI1NjU7QVBV
-UFJWVVhUUlBQVFZYXFhaXVdVVlNPTlFTUFRcW1tZVVVZWVxZU1FSVVhbWlhTUFNW
-UlBLP0I9NTc5MzY2NjY/UE5FQ0hOSD05OD0+Ozw/Ozw7O0hZRDw4ODs5Ozk5PT4/
-OjU2NzQ0MjgzNDMyMzYyMTMzOzYzMjUzNjo5Ozk7PkFITExITFFPU1NWVlRTVFlY
-WVJSU1BQVVpZVE5MUFJGSkdGS1JXS05PTUlIRk1PTkpHS0tMSU5TWV1iaGtucnN4
-eXl6enx+hIiNjo6RkY+PjY+TkZGQlJWYkY+LioqGgHt4bmJWTEVCQ0RQY3OClKKp
-qa2wrK2vrq2rrKqqqqSkoZ+cmZWSjYqLjYyKiY2QkY+OjpCXmJmZnqKmpqmqqqqr
-r6+wtbS2tra4t7i4ubm1sbi0srO0t7WvrrG2ubK0s7a4ta+qpqiqq6yrrqusq6im
-qampqaeqqKWmpaaqq66uqqyvtLOzsa+1tLe3ubi6uLW3trm2ub64uru8ubS2t726
-uri3tra3trG6vLi3uLe4uLW0t7q6trOzsK+ys6+xra2qqqippqOgoJ+dmp6gnZ+j
-pqamp6esr66nn5aSlZebmpWWlpmZnJ2em5+foKCgn6GhoaWioKKmpKGkpaampaep
-qamqqaepqaappaewqaKhpaiopqiioqqurq2wsrm2trO0tLi6t7i8ube4ubu5t7a4
-trq5t7m3tbW0t7aysa+usrW4pX9lYFxYWVZUU1NPT1BUUlBQT09QTVFRT09NUE9Q
-T09PTkxLTU9OTk5NTk9PTk5OT1BTUVFRUFFQT1BRTk1PUFJTUVNOTlBRUk5QT1RS
-U1RPUE9QUlRTVFBOTE1NTlBQT1BRUU5QTUxMT05PT0xMUE1MT1BSTlBTUE9TUE1R
-VFNSUlZUV1NVVVJQU1dXV1hYV1VUVVlZV1dXVlhbW1tdX15eX19iX15eXFxaV1pb
-WlpaWVhZWltZXV9kk7/K0tjd4ePl5+jp6kxOR0dGRkVGSklFRUpISEdEQ0ZEQUVG
-QUBCQUJERkdFRT5AREVGQ0FDQkE8PDk+Qj9DQDs7Ozk5Oz09PD0+PTo4Ojc3Ozw7
-OTk3Ojo7PDk7Ozw6ODg8PD8+PUA9PT1BQEBCQ0JEQ0FDR0dJS0hHR0lKTU9PUE1O
-TlBQU1FXVVVbWFdYVVNRVFVSU1ZWV1VSUFFQS01HR0ZKUE1KSUdKT1FPS0pOT1FL
-R0dKTUZDR0JFRUpQTVFSSkpMTExKTU5KSkVISklIRUZGQUBERUlHSUdGRkNAQkRK
-TUxJS0xJRktNRkVCR0tMVFFORklOVFNPT1FPS1ZMQz05NDUzMDI1NzY2MTExMTIz
-NDQ1NDMzMjEzNjUzNTc4PDc2NTg2Nzk/Pjw7Nzk3Njc5PUBBQkJHQ0hIQkFCP0FA
-PkNBQUNFQkNERUJDREZFSEtJTE9QT1RTTVBPUE1PT1VZVFFPT09RVE9MSkxMT1dY
-V1JST01TUEtNUVVNSUpMS01NTE5MTk9VT05KUFBMRURDR0xOT01NQkQ/PTs8Ozw8
-Ojk1NDIzMzQzMzI5Li8xMjUyNDo8RVJRT1lcWVhXV1VXWl9bVlJYXFhTVVZTUU9N
-U1liZV5XVlZYWllRUFBPVVZXV1FRVVpaTkxKQD43Nzg4OTY1N0ROTEVGSUtGOzQ3
-ODs6O0JDQDg4SFxGPDo6Oz08Oz06PDo3NTEyMzIyNzg3NDMyMzExNTU0MzU3MjQ2
-ODY0NTU7P0NITktQUVJVVVpbVFVUVVZVU1NSUlRZXVpRUFFRT0lNSkdTVlBJT09L
-T0tNTE5LSkdFRUlKTlRXXmRjamxvdHd4d3l9f4KDhomJjYyNjo6PkpGSkZCQkZGP
-j4yKgoN/e3pzaF5WTkZCRUtec4yVoamqrq+zsq6trbGtq6epqKWioJqWk4+OjY2M
-i4uJjo+Pjo6Vl5WYl5mcoKOlqKurrKyssLW0t7O0trm2uLi7uba2s7S2tre7u7q1
-tLO4u7WytLGurqypqquwrq6wtK+rq6qpqaeqqqimp6mop6irr6+wra2utLW2ubi2
-tri4t7q8t7m5uLS4t7a2u7m2uby7urS2t7a1trq7u7e5uri3tbS1tbG3ubq7tre1
-tLO1tbCura+srq2rpqKfnJydnZ+foaKnqKapra6traeek5GTk5WYl5iXmJiZm56g
-nZyfoKOnpqOioqKlqKSkpaalpqWmqamqqKinpqqrqaipq6mnqKamp6mopqWnqamv
-r7O1srO3uLq0t7q5tbe1uby6ubm3tra5vL22trm5s7S1trW1sbCzt7Wle2JdXFpV
-VFVWVFVRUVJTUk1QTU1QUk5NUE5OTE1OUE1OT09PTlFOUFFPTkxOUExOUE9NTk9Q
-UU9PT1FSUFFRUVBPTU5OTk9QUlFRT1BTUU5QUVBQTlBSVFNOUlNTUFBQTk9RVFBP
-TkxOT05QUEtKTk1OUFBPUFNTUVBRUVRTVVNSUlJUUlFQU1ZVVVdTUlVWWVhWVFRU
-WFdXWVxcW1lcXl1dXl5dYGBfX15cWlhVU1lWV1pdXFlXW2OPvcnS2t7h5efo6err
-REZCR0dNRUZKS0hKTEhKQERFRUNDQUJAO0FCQUFARERDPTxCQkJCSEVEQD48PTo5
-OklTTEQ2NTY3OTs7Ozw4ODo8PTk6Oj8+PTs4OTs9PTs7Ozg2Ojs/PkBAPkBBQkZB
-PkNCQURFQkRHTklHSElJSElKUFBXUU5RU1BQTk5TV1laV1dSVVZVVE5PU1FVV1VS
-TkxNTktKSUtMT01KS0tPUlVKSktMTk5NS0tMTEVKTklKSkxKS0tISUtLS0pLS0pO
-SkpMSkhDQkZKQkVGR0FFTEdDREJEREVJR0dGSE5JSU1IT0pJS0tTTk1LSUtMU1dP
-S0xIS0xIQDs4NDIyNDY0NDM1NTYwMTIzNTg3Nzo3NTQ3NTc1OTo2Nzg2NTc4OTs9
-Pz8+Ozo6Ojo7QENCQ0lLSUpCQT1FQj9BRUVHR0REQURER0tMSkdJT1RQTElKVFFM
-Sk9QUU1PVFNRUlRTUk9RTkpHR05NT1JSTkxNTlFOSktRTEdCRkpNUFdUU1FRTU1J
-TE5LSU5HSEdJTk5NUE9DRD86PDY8OT08NjMzMTExMzMyMzM1NTM0MDA2NDM3RlBS
-V1pXUlVWU1RVV1dPVFZdXFNXWldPS0xPVV1mZV1aVlZbV1ZTU1NTVlZXWVRTV1dN
-T0tDQkA/PTk3NDQ2QE1KR0dHTUo5OTo5OTpBREVEPzxOY0o9RD07OTY5Ojc2NjQ0
-NDMxNTY5OzU1NTIzMzY1NTU0MjI3NjMyMzQ0Njo/QEhMTVNTUFRSV1ZVVlRUVlJS
-U1heW1peWE9PUU9NSUpLTlJOSE1NTk1NS0lKTUtISEhHRklQU1heYmdtcHBxdXl4
-en5/foKIiYuNi46PjouNj4+SlpOOko+PiYeKhYF8eXJrZFhRSUhGSV53iJmfrrSw
-r7Cvrq+wsrKvqKeopJ+fnZeRjY6Mjo6MiouOj46PkZGTlZeZmJ6jq6qpqKmnqa2u
-rLK0tLG4uLq8u7i1t7u5uru6ur26urq6uri3uLOxsrOwrK2vsbCvtLGyrq6ura2u
-q6ytpKKnqKqpp6mrrLKysq60tbm5t7i2t7m7vL67t7e3sLW3ure2trq5ury6u7i3
-t7i5u7m6urm8uLe4trm0tbW4t7a4trWzsrG0tLCtrKmrq6imop6anJ2en6Gipaal
-paqsrquqpJySjpKSlJOUlJaXmpiYl5ebnZygpaqpp6WmoqOlo6OkpaKipKWmp6mm
-pKSlqaqrqqepqaWfnqWlpqiop6WorbO1t7W1trWysrW1t7m7ubi5ubu7urW1s7W1
-sbG0t7q2tLK0trK0s7S6sZpzYFlaWVRSU1ZUUU9PTlBPTU5OUE5PTk1OTk1OTU1M
-TU1NT09MTUtPTU1TUE9RUE5OTk1OUVJPT1FRUFBRUE5NT09RT05NT09OUFFSUFFT
-UU5OTk9SVlJRUE5QUVJSUFFRU1BSUVJQUVBPTk5OUk9MTk5PTlJQT1FRUVJST1JR
-UFFRU1FSUFFSUlJUVFFPT1BVVVdaV1hYWFpbW1xcW11eW1xcXFxdXV1eXV1bW1dW
-V1hYWVdVWlxdZIm7ytPZ3eLk5unp6uxPS0FKVE9MSExMTE1JSEJAP0BEQj07P0BB
-PUJBQEJCQUQ/PDpBQT5CPDs7OkBAPzo7PDk5PDs2NjY5OTo7Ozg3Ojs/PTo4Ozo6
-Ojc5Ojk4Ozw7OTk7Oz1CQkFCQUFGRURDQ0BFSkVFRUNFRElHSEpKSUxPUE5MT1BQ
-UlJPTk5RU1VRUVNRVVJRT01QUVRXW1lQTEpMTU9PTFBRT05NTVJRUlNPUE1PUE9N
-SEtNR0dKSUdGT01JTEhER0lPTU1PSUlMTU1KRUI+RUNESUVERUJIRkJDRUlERUpF
-SEhGS0pISUZITEtPTldUTk9NUlVYVVNOUEtEREZAODY2MzU1MzIxNzo2NTczMTQw
-MzY4NDQ2MjI0NTg3NzU4OTY5ODk7PDs6PT0+PDw7PkA+QkFERkJHSURBRUVFQENH
-SElMTUdDRkJFSEZIS0tRUVBJR0pPTktNUVRTTUxPUk9RUlBSUlNRS0dHSkxOUlJO
-UFBKS09PUVZSSUZITlJTV1RRUVJOTk1LS0xISEhKS0ZHSEtKTUlFPjs3Ojo7Ozw6
-ODQ0NjcxNTQ0MzY2NzMyMzEwLjQ6RlJUV1tYVFNQUk5PUlRZWFpWVldcWVJNTlJW
-W2FkYVxZV1ZWV1FVVlJPVFldXFdWVE9LTkZERT05NTU3ODpCUU1KR0ZJRkI5Njk6
-PD9CPklIPk1eQz9FQTs7Nzk4NTM2MTMyMDI1OFiEXzYzNTQ1NjU0NDU0MzY4NjIx
-MzQ2PDs8QkVJUVJPUk9SVlVVVFZRT1dVWFdYW1pYU0xOUUtNTExNSk1ISEtMSUtH
-SEpHR0tIRkNER09VWlxjZmlrb3R2eHx9g4iHhIiLiomLjoyNjpCOjpCSk4+OjoqH
-iYaDgX13cWVeWlVPSEZKXXaJlaKkrbCzsq2tsbSwr6inpKKgnZ2bmZWQj5GOj5CM
-i5COj4+MkJOWm5mboKSlpaWlp6eoqqqrsLK0tbe8vrzAurq3ube5uby7u727ubi4
-s7W1r7Gxs7Cuq6yusK+xsbK1srKysrCurKqtrqupqaeorLG0s7GwsbS6t7a1tba5
-u7q7vLq3tre4t7u7ubq2uri6urq3tLa5u7a4t7i7u7i2tLO1tLOztLi4t7a5t7Kx
-sbSzsa6rqqeqqqalpKGgn6CfoqKjpqepqaytrKWjmpOSkZKRlpaXmJaXlZWXl5md
-oaCgoaKioqGgoKKfoKOhnKGlo6GkoaaqqKarq6ioqqumpKWoqqajpaempKevsLS3
-t7W4t7W1tLW0tLe4ubW2t728u7m6tbO3urWzubmzsrSysrKxtriymnRhXVlXVFNT
-UlJSUVBQTlBNTUxMTUxMTUxNT09NTE5MTU1MTVBOTk9MTk5NTExOTU5PT09OTExO
-TU9PUlBOTk1OU1BUU1BNUE9PUVJOTU9QTlFOT09PUE9UVFVQUFFPUVBTUk9QUk5K
-TU5NT1JQTkxKTExOTE9NUE9PT1BRTk5NT1VRUFBRU1FSVFRUWVRSU1VXVlhfXVlY
-WFxbX2BbYl1fXVtcXV1eXVxbW1xcW1laWFlXWFlcXF5mibrJ0Njd4OTl5+jp6VBD
-TEtPUklFSUpHTEhBRkdER0pHREFAQkFCQj89PT4/PUFARjk7Pz1BPT07PT9APT87
-Ozo4OTo5OTg2ODk7Oz8+PD1APDs6ODo4Njo4OTo8Ojk4PT1BP0BBP0FAQ0RDQkRC
-Q0VERUJCQ0RCR0dHSEpKTk9QUU1ISlFOTEtLTE5TVFVSUVVXU09MUVFRVFdYVE9O
-Sk1QTlBPUVVRTUxNUE9SUk1PVVNPTExPS0ZCRUlJS0hPUU1SUElKSktNS01JR0hG
-SUpEREJDREJGR0VHQ0RDQ0ZISEZESklKS0ZGSEhMSElIRlFSVllWUk1VWFdZWFdU
-TElGQ0U+NzY3NjczMTE0NTQyMzM1MTEyMjQ1NDM0Njc4OzY1Njg7Ojs5Nzk8PT07
-PD07Ozo/QkBDRERBPUBEQj8+QEVDR0lOS0hMS0VHSEdHSEhJTE9MTE5JTE9OT0tP
-UlBQUVRSTU9UU1FWWlNRS0tKUVRTUk9RU0xKU1NPVFZTTEhKU1RTUVNWV1dQS0lM
-TUZHSk9OSEVKRkRLQj03O0I9Ozk4Nzg5NDY5NjMuMjU2NTQzMjM1MTMxMjQ9S1NU
-WVZRT05NS09PVFNVVFRSU1hdWFJSVFddXltaV1pXVFZUVlhYVVFOV1ldWltaUkpT
-SEJBPTs1MzU3N0FVVVBMS0hIQTg6PD07Oj4+REA+U1xFPDxDQ0E9PDo5NjQ3OTY0
-NDRJgIhbNzUyMzU0MzEyNDQxNTQ1Nzc1NTk6Ojg9PkJJTEtKTU9SVFJRU1laUVBR
-VlVUWVpWTktJSUhJS0xPV1JIR0ZHS0xGRUZNSkZEQ0RHTVNaXGFla3JydHV1e36E
-hoeHiouNjI6PkZKQjY+Sk5SUkpKLioqHhoaDfHZvZ19ZUk5JR0xddouXn6SlqKqv
-sLSys7GpoaGloqKdmpqXlZSSjo6MioiJkpGPkJCPk5aYmZuboKKko6anpaWnqa2x
-srOxtbi4ury7urq6t7e7uLm6u7m5t7W2tbKvr66xtLi1tLKzsrGvr7CxtK+zr7Cv
-rq2trampqqyvsrOysrOxtri4uLe5tre2t7u8uLi4tba0uLm8uLW2uL6+u7y3tbm6
-ubm3uLi3t7m1s7S0tLS1tbW2t7SztLKxsrKysK6op6mnp6Sko52goaKkpKaoqKuw
-r6qqm5+YlpSRj5CTlpeamJmYl5eXmpydn56coJ+goqShnp+hoKGinqKhoaOjpaen
-p6urrKqrrKqop6qopqalpqWipaqwt7m3uLi4tbOwsrS1tbe3ubW1tri4vLi1uLi4
-s7K0uLa1trWxs7C0t6+UbmFbWlhVVFFPUFBOUExOT0xLS0tNTE5NSk9OTUxNT0tM
-S01LTU9QT09NTE9QTEtMT05OTk1NSUxNTk9QT01RTk5RUlJRT0xOTlBTUk1NTk9P
-UFFQTU9OT05PUFFOTk9QUU9UUk5PTlNPTU1QT05NTExOT0xNTlJPT09QUlVSUFNT
-U1RUUlFRU1dWVFRUV1VVVlpZW1RWWVxZWllaXFxaXV9fXl5fXl1cXFtbXV1aWFtX
-WVpaWltcXmKGtsfQ19zg4uTm6OjpVlJKTUtKSUZGR0VGSEBERkpJRUVERUQ/PERG
-RkdDP0NDPDw/Qjw6Pj0/Q0E9OThAQzw8Ozk4Pj08Pjw6Nzs8PTs6OTZAPDo7OTs6
-OTo4Ozo5O0FAQj4/PkJBPUBAQUNAQUNBQUJGRkNCRkZFR0ZIRk5QTk1NTktNUU1K
-SUxOUVRYWVhVUlNQUVFPUVBXVVdSTE9NTUxOTVBPVFFQUEhMTlBUUE9TU1RPUEtF
-SEdHS0pIRkpNTU5MSExLTE5MTktKTElLSUJEQkJEQ0ZKREdDR0FDRUNDQ0RMS0ZL
-RUVGREtLS01IUVdXXFRWUVVcX15eX15WUE9FQT04NDIxMzIyMjI2NTQyMjMzNDIz
-Mjc2NDQ0OTo4OTw5QTo5ODk5ODg4Ozo6Ojw7QDo7QURCQENDQ0JAQ0FESERARkpJ
-Q0ZHREZGREVIRkdLTktRVE5MT09MR0xNSEtRWlVNUVRUWFZXU1NSUFBUUk5OSlBN
-TU1TU0tIU1RMSk1RVFNPTFBbWVNOT0xKRUdHS01IRkdKRkhGQT1BQD08Ojk5Oz03
-NzIyNTExMzI0NDU2MjQxMDAzNTg8SlRSVVVSTFBTTU5RV1dWWVRUWV1dV1VYV1pe
-WllcWlZUU1VaV1ZZVlBSVVhcXVlOSE5JQEA6OzY0NTU3RFVQT1FRTExJODY3PDk6
-Ojw9PENTUD0+Oj5BPjo5ODg5OTs2NzIyMjVGRDYxMTU0MjAxLzMzNDIxNDY2Njc3
-NzY7PD5ARktLS0tQVFVWVVVZXVtVUVJVVVVZWlZRTEtQSkZESFBVUEhFQUpSTElL
-S01JRElISUhRV1ldZWlscHNzeXt+fn2Cg4WGiIqMjpGSkI+OkpKRj46MjI2LioaE
-goN9d3BpY1lVUElHR1Vvi5aepamqqa2wsK2qqaalo6emoJ6bmJeUkY2LjIyKiouS
-kJKRkY+SkpSUlZmfoaOmqaqpqKuwrbCysrO2tLS2vLq9vLq7vbq7ur+6ubezs7a1
-sLOxsrKytbOztrOzsLCxrrCurrW1traxs7Cuq62usK+vsrOysbK0tri5urm2tbe4
-urq3trm4tLW0sbi5tbi3t7q5uru5urm2uLa4tbW2t7e5t7e1tri5ubm0tLa2s7Gu
-sLCtrq2qqKimpaGkpaKgoqOkpqeqra+sqqefnJGSk5OSkpSWmJeZmpqampiZm5aZ
-nZ+gpaKgnp2fnp2gpKaloqOkpKalpqanqaanp6eoqauqqKqoqaenq6iorrOztLa4
-t7S0srW1s7W3tru5uLe1trOzt7O1uLaxr7O1tLS0s7SxtbW3roxpXlxZVlZTUFBQ
-TU9OUVJPUU5NS01NTEpLTE5MTE1NTUxMT09NS09QUFBNTVBNTk5OTExOTk5NTFBR
-TlFRUlFPU1BPTk5PUE1QUlFQVE5MTU5QUVBPUU5NTk1PUU9PT1BQT09OUFBQUk9Q
-TU5NT0xMT05OT1FUT09PU1JSVFFUVVRSVFVSUFNVV1hXVFRYVlVVV1dYWVtbW1pa
-WllcXFxeXF1cWl1dW1laW1xcXVtaWFtXWFpbWVplao+2yNDY29/i5ebm6ehRUkhM
-Sk1ERkZER0dISUpLSklEQUZCRUZBPT4/QUZEQj8/PTg3QD8+QkFBQ0A9RD8/QDs6
-OTg6Ojk6ODc3PDw9PDo7QTk4Ozo6OTs5Oj08PD49QUJBQkBBQUA/QEBAQEBCQkBA
-PkRHRkZHSEhHS0tJS05JTFBNT1JOTE5QTU9TU1lZWVdUU1BOT05RUk9WVU5OUFJT
-U1BOTlBVVFJPTkxRTlJQVlVSUk9OUE1OTU5NS0dLR0pLTU1JS0xPU0xOTkpMSkhG
-RERCQ0NCRENFRkNERUVDQUFCQUlIRUtGSVBLTVBQUUlNU1FVUU9QVlxZWVZXVlRO
-TEhEQD42MjM0MzUzNTQ2Njk4NTQ1NjU2NzU3NDM4NTc4Ojs5ODo5Nzk8QTw7PDs+
-Pj0/QEFHRUVHREREQkBCRUVERUVDRERHSUpHS0pKSERIRktPUVBUTlFQTEtJR0hM
-Tk5VUk1RUVZTTlNSUVRUUlRVT0tJS0xPR0lPSkdJTElOUk9OT1JKS1FPT1BOT09I
-S0hJTkxKSU1FR0pGREJAQDo8Ozk5OjkzNjk0MzExMzA1NTIwMzM2MzY2NTQ9SlVY
-VVNSUVVRT1JSVlRcW1hdXltWUlNUVFteYV5aVlZXVlhZWVpaVVRWXF9bV1FLSUhF
-RT49ODU0MzdAT1FPUFJQTkg7NTc2ODo8Ozw/R1BFOjo8Ozo6Ozs5OTg3OjYyNDU0
-NDMzMTU1MzIzMzA0Mjk4MjIwMjU0ODg2Nzc8Pj1DSEZMUVNVVVZaWFdZV1JRU1ZW
-W1xZWFRMTktHRUZGTU9KRUZHTFFPTUtLSUZJT05KTFBVV1xhZWtyc3V1d3l7foKB
-goSHi5CPj5GSkIaKkZORlI+OjoyIhYKDf3p4bWdiX1lUTEtIVGyGlaChpKupra+y
-tKupqKWlqKSgnpublpGOj4yLiomIjpKSlJONjoyNj5CRlpmeoaakqqenrrKvrq+w
-sbKwtLO1try9u72+vsLBvbm2uLS5ubW1sLS0trSztbi3t7GztLCxrK2vs7a1tbOy
-sbCvr66wsLOxsLOwt7S1uLu8uLm6vbq5uri7urW0srS1tLO3vru4ubi4vLi4ubu9
-u7e5t7O2uri4u7e9t7a5vLm3tLe2tK6ur6+sra+sq6ijoqOkoqCfoqSnqKqsrLCw
-qqSYj5KWlJKPkpOXmZeVl5mXlZWYnJuanJ+hoqKgnJ+foJ+hoaGioaSmpqWmpamn
-p6ajo6KorKqqqKaoqamloqaws7K2tre4tLWxsrW2ur+9vLu7uraztri3trKzs7Gz
-tra4ubWztLO3urmphGZcWVpVVFRTUVFSUE9PTk9PTktNTU9OSUtLS05OT01NT1BQ
-UU5QTlBQU1RQT1FQTU1MTVBNS0xMT1BUUlNQUFBRUlFOTlFRUE9QU1JTUUxOT09R
-UlJQUlNSUE9RUlBSUVBQTU1MTVBQTk5PUU9NUFBPUE5OTlJSUlBUUk5PUFdVVVZS
-VFdXV1BSVFNUUlRaWlhXVVZYWVpbWldaW2NcXV5fXlxcXFxcW15dXF9fXlxZWFVW
-VldcW2Joi7nH0Njc4OPl5ufp6UdGSklESkpMTUhFRUZNRUVIR0pDQUFGRURAQ0JC
-QUQ/PkBCP0RCPzo/Qj5DQUFCQj08Ozs6PTs3Njc2Ojw/Qjw9PTg7Pjw7OTo5OTw7
-Oj4/PT08P0NFQkFCQkhEQUBBQEJEQ0NER0hJSEdHSUpKS0xJSEtOTE5QUE9TTU1N
-U1ZXWFhXVldVUU9OT09TVVlOT1NTVlVWVVRSUFRUUlBLSkxPUk9QV1FQUVNWSk1M
-SUlISUtGSUlKT1FNTE5TTUdISE5MSUlERERDQUFBREFEQ0NIRkVCRkdHREZFSEhH
-TkhLUExNTE5RTFFTVVBSV1VVUU5PS0xLSERBQT04NzMxNTUyNzo8Ojc3PDs5ODY3
-Njc2NTY0MzQ5Ojg2Ozs5ODc/PTs6Oj08PjxCRUdFQ0dFQkJCQEBESUdFSkhEQ0hM
-SkpNTE5PTUtISlFWWFJQVExOTE1STUtJTFFOTE9TVVdSU1FRVVlYVFNPT09LTExR
-UEtPTEtQT1BTTk5UWVNMTk9PTE9RU09LTE1NS0xIR0NER0dERkI+PTs+Oz46ODc1
-NTQyNDIxNTMyMjI1NzU2NDQyMzU+TFhYV1lcXl9ZVVNTVFtgYF5dWVVQUFFSVVtd
-XVhaW1pYVFNUWltVWFleXlxYU01LSEJFPz83NTU2OD9KSkxMTUxSTUI5Njg4OTk7
-PUNHQz09PDg5Ojk5Njg5NjQ1ODY1NjI0NTI0NjUxMTIyNDMzMjg2NDIyMjQ1ODg1
-Nzg+QUJESEtLTlFRVFZTUVdWUVBUV1dUVlpYVFBLS0ZDSUtKTEpLR0dKTEtLS0xN
-R0pKTEtPU1laXF5mbnF0dHd5fHt+foKDh4iNj5GPkpGQkI2QkY+Rj46NioiIhYB+
-eXZtZmNcVlFNSkpXa4KUoKWpq7CwsLGxrqmqp6ako52dnJeUjouHiouIh4aMkJCP
-jIaGioyPkJKWmJ2gpKmlpqmtrbGvra+ysLG0trWztru8vb69vb+8u7a1tLi4u7e0
-t7m5vbi0t7OxsLGvr62ur7K0tbW0s7O2trO1s7W0srS1tLG1uLm2ubm5vLq4s7i3
-tre7t7i3t7S3tri8ubq4ubq7vLy7v726vLm5uLi5ubu2sre7uby6ura0urizs7Kx
-sK+ssaytq6ejoqOlp6Okpqeqqqqtr6+so5SQkJKVkpOVlJaZmZqdnJyamZ2eoJ6d
-n6CgoqOgn56gn56en6GgoKGgo6Oio6aopKarr62nq6mppaamo6Sjo6uwtLa1tra1
-tbO2uLu+vLm4vLq4s7KztbSxsrOzs7O3uLy5tbO1uLa2s6d/Y2BeXVhXVVJST1BQ
-Tk1NTk5RUExMTU1MSkxOTE5OUVFNTlBPTk1PTk5QUExPUVFSUExMTU5QUFNPUU9Q
-UE5TU1JPUVJQTk9QUVBPT1FRUFFPT05OTU9PT01QUU5OUVFQUFBQTk9OTE1LTlFP
-Tk9OT0pLT1JPT1BRUFBQVE9PU1ZTU1VUV1ZVUk9XVlNRUlRYWFhXWVhYWVZXWlte
-YWFfX11dYV5fXFtbXl9bW1pbXFpXWllaWFhXXmSPucjR2d3h5Ofn6OrqRU5IR0xN
-SUlDRkJDRkFDPkJGSENAREJFP0I+P0A8PUVFS0hGQ0E8PUI/QD5AQ0FDQDw9Oz09
-PDs6Oj06PD5DPzs7Pjw5PD89PDw6Pj08PUBAQzw6PT9DRUFDRURDQT9BRkdERUFE
-Q0dGRkhLS0tIRkRES09SUlFNTE5NUVNPUlNVVlJUVFJSTk5MTk9TU1RSVlRTVFNY
-U1RSU1RQUExJUU1OUE9QUVBTVFJMSExKSkhGSkdJR0lMTUtMTE9IRktGSUtLS0VG
-SEdGQkRHRUlHSE5JRUJDSUVCREdLR0ZFREZPTVFPTVRPT1JQUE5aVlVYXFRRUEtH
-RUM+OjY2MjIzNzM1Nzc2NDM1Njk6NzU0Njg4Nzc0ODc3NjY4Ozo4OTg5QT08PT0+
-QEBAQUVCQUBFRT9CQ0JEQ0JDSU5KR0JHSkhIS01NTUxNT1VYUlBVU0xLSk5PS0lJ
-Tk9MTExRUlVWWFNYW1hSUlBST01MSktKTE1MTVRRT1RXVVdaVUxMTU1NTVFTUEpK
-SkpMSkxGQEVDRT9EQkJEPkI+Ojg1Njg3NDIyMzM0MzUzNDU1NDc1NDM1NDtCT1pa
-X1pYXFtZU1JQVltaWVxYV1BOTlBYW1lYW1tdXFpTUlZUVlldXFlbWVdXVE5LQ0Y+
-Ojo1NDpAQUhKTUxNTk1KSDszNTU0Nzo/RUQ+RUVFQUA9Pzw7OTg6Nzk3NzY0NzQy
-NjY1NDIxMzMyNDMyNDM2NDM0ODc2Njg3OTs6PEdFSU1QVFBSVVRRUlJWWFZWUlNX
-V1dTT0tNSEdNTU1QTU1JSUlKTUxJS0hHQkVKTE9UW2BiZWlucnV3fX5/gIF/gIOG
-iouPkZKNjo+Pjo+Rjo6Pj42NjImFg357dGtiYV1VUEpIT2NzgZCdp6qtra+ur6yq
-qKepp6ainJqbl5GPjouHhYaEioyLhYeMioSIio2KkJaam52io5+ipaWprbCxsrOw
-srS0t7i2uLa8vcG9vby7uLW1s7Kzs7G1vb68ube0sq2ur6ypq6+vrK6xsbO0t7e1
-tLKys7GysrO1tbm7ubu/wby7ury5vbi4urzAu7aztLa2u7y6uLq5ury7wMG/vrm/
-vry7uLq8u7W0ure3tra3trq4s7O1tbW1sauusKeppaSeoqKmqaalp6msrrCuqaKc
-k5CSkJKUkpKQlJeeoqGipaOjo6WmpaWlpqmqq6urpp+io6SipKSmop+goaKjp6aq
-q6usq6imp6anqqikoaWlrLK0s7W1tra2tbO7ubq+ubi4uLSzsq+1s7W1trS3tbe3
-uLq7urm4tbSvm3loYVpXVlRRUFBMTU5OT05OTE1NSkxNTVBQTk1OTk9OTEtMTEtM
-T05PTEtOTk9PT05NT1BNTVFPUFBOUE1NTU1NTVBRUU5OUFJUUFJRT0xNUVBQS0xO
-T09NTFBSUVBRU1FSU1JQUlFQTE9OT1FPTk5PVFBMTk5QT09SUlBSUFRTU1JQVFJW
-VFRUVFBUVFVXV1lZV1lXWFZYW1pbXl5eX2BiYWNhYF9dYGRmXVpXW11dWFZYV1dW
-VlldYo24yNHZ3uLk5ejp6ulJTU5ITUtFSUdHR0xHR0ZIRUZFSkM/RERGQ0NCQkJB
-QkFERkU/QkNDPjo/QkE9PjxCQTs5Qj08PD49Ozo3Oj06Oz9APD47PDw8PEA9QD0/
-PD9DQTs9P0BCQkFCREJBQ0NER0dGRkVGSEhMS0pKSkZEQ0RITk1PTUtKTU9RUFBS
-T1NQT1FRUVZPTE5NUVRYWVhXVFRQUVRRUU9PVVRPTU1QTUxNT1BQTlFQUE1PS0pI
-SEtQS0tIR0lMSk1NSkpHSEhGR0hKSkZMSUpISEpGREhFSUtCQ0FERkZHREdISUtM
-SEtKTk5ITExOTUtRUVhXU1VYVE9MR0dJRkE7ODUzNDQyNDM0NDMzMzUzNDM2Nzgy
-NTc4ODk3OTg1Njk9PDo3ODo7Oj09Ozw7PUA8Ozw9Oz4/QkNBQkRBRERGSkpIRURI
-RkRGRkhHTlBOSk5OT1BRUk1KTUlJTk5QTk5OUE9QUlJSVlZZWFhWU1FQUE1JRUZL
-TU5RVFdSVllUV1lVS0tOTk9QUE9NSkpITExJSEZJS0dHREFAQz05QUI+ODs3NTIw
-MjMzMzQyNTM0NjU3NDY1NTM2NjhFU1ZXV1ZYW1VTU1JVVlhWWVlYU1JRVFlaU1NZ
-XFlXUVRZVVVYWVxfXFlbXFxZUU5LTUE/OTs5OEFESE5RU09RU1FSRTo2NTY1NTc9
-QUFJTEJAQj47Ozo6PDs2NDY0NDgyNDYyNjY1NTI0NTUzNzU2NjY2Nzk3Nzs3ODk7
-OTs7PkJERk5QTFJXV1VUUlVXWFZWWFZWV1VNSkhGSElKS05MSExLTE5MSkhHR0dH
-RkVITVVYXmRnbHF1dXV5e31/gYSFhIOHiY2Mj5KOjpCQkY+Qjo+Oj4+Ni4WAfXZx
-aGFdWFdQTElNXW+CkJqjrrCyrrKrqKaop6SfoaKfnJuTkI6Jh4WGhIOCg4SBhYeG
-goWLh4WGjJaXlpudnpucnqGmsLOysLOztLaztLe7vry9vbq7ubm1sbK0r7a4t7i5
-vLu6trS0srGuqqqnq66usa+vsLO0srK2sKqusbOwtLK1tLq8uru9vb28vr7Au7q6
-u7y7uba1uLa5u7y7vr26uL2+u7u8vb29vLa5ubm2uLe2t7e4uLa3urm2t7e2t7Cw
-sq+xrKuno6OipaOnqayrqqqsra6noJaJi5CQkZOTlJSRk5aYnKCfoaCgo6irrKup
-q7Cvs7SzsaunqKWqqaanpaSko6SmpaytraqlpquorK+mpqako6mytLa1srW1trq5
-u7i4rbG7ubi0uLm4u7q3tLS1tLK0tre3ubW3uLe5tK+UdGNcWlhVVVFRUkxOTE1R
-T1BNTUxLTEtNTkxMTE1NTk1NSkxJSUxOT05MTk9VUk5NTE9RUE1NTk1OTU1NTEtK
-S05OT09OTk9PUVFOUVJQUlBOUVBRT1JPT09RUlJRUFRUUFBNTlBQUk9LTlBPT09P
-UFNSVVJNTU5NTlFPTVBRUFFQT09RU1RTVlhWV1VVV1ZTV1pXWFlaWltcWlxdX19h
-YmNiZ2hqZWRjY2FcXl1YWFhcXFlUVllYV1hhlLTI0dnd4eTm5ujq6khNS0xLQ0dM
-R0JGSEJCSUlAQUNEQ0RFQ0NDQkVCRERAQ0RDRT89Pjw/PT4+Qj87PUA+Ojs8PT5B
-Pjs9Pjs8OTc/Ozw8PT9BOzs/QTo8PUFAPTs+Pj8/QUBCQUJCRENGRENDREdFREVE
-R0tKRkdEQUNER0dKTktKSEhLS0xRUFFOUFJQUFJQTk5OUExQUlZaWVdVVFRRUVFO
-T05UVVJQUFFSUlJPVFVQT05RU1JPSklMSVBNTEtITE1KUE5PTkpMSUhISFBRTExM
-S0dDQkFERkFHR0FDRUVER0lHSEtISElITEtNTUdJSUtUVVlSVVVVVVRRT09SUE9K
-REI3MzEvNDIxMzM1NTQ0NTQzNDc4PDg4NzY3Nzc4Njo7PDo3OTY3Nzk5OTw8Ozw8
-PTs9Pz5APj9CQ0I/PT5BQ0RDPkNDRUtIR0NBQURDSk9QTkpLS05VUE1RTEpLVFRT
-VFVPTEtMT1FUV1ZRVlhWUUtZT0lGRktMT1BVV1JPT1FUWlpRTk5NT09OU1FNS0VH
-SUhGSktNSk1DQT07QD1ERUI4Njk0ODYxMjIzMjUzNDIzMDIxMjU1ODY4NDZCTlRX
-WFlXVFVSU1JTUVBRVFRVVlRVW1xYUlVcWVJSU1dXVVVWWFdWWllZXllSS0hMSD46
-Ojg4Q0hQUVVQTlBWWVhPRDs1NDU3Njg+REpQSkRAPDo4OTc3OzU3OTY4NTY1Njc0
-NDU2MzQzMjMwMTU1NTU2NDU1ODU2Ojk2NjxBPkRITU9UVVVWWVhaVlRTVlxWUlNU
-U01MTEtLS0lOTktMTk1OTElJSUhIR0lFRElKUlhdYWVtb3Bzd3h8fX+Ag4SGhIiJ
-i4yQkpCQkI+Rj5KOjY+OjoyJhoKBd3BoYV5ZV1JKSElWaH6Ql6Opq7Gzraywq6ql
-oZ6ZmZuempSSi4iFhoKAe3x7en6Dgn1/e4CGfHp9g4h+foqUi4SKkpmfqK2ur7S1
-tLO0t7m8vL64vsG8ura1tLGytri3urm5t7e2s7O0tLKuq6ursLK0srO1s7SytLSx
-s7GxtbW2tbS2tru9ube3ubm4ury8uLW3ur66ube3tri5uby+ure0u7u6u769vLu4
-trm5u7q5tba0trm2srO4uLm4trSysLOysK6sq6mtqaajp6anq6mpp6irqKKZkIuM
-jY+TkpOUkpSSlJWVmJiZmpqeoKOnqKerra+urKyspaepqqytq6qsrK+srKyvqair
-rKupqqmrqKelqKWnq665trKxtLSzt7m4uLOtq7S3tbW1uby8u7mzs7W2tLGytLWw
-sLa3u7i0q5BuX1pZVlNRUE9RVE9QT05QT01NTk1MTFBOTExPUExNS01OTE1MS05Q
-UFBPUVBPUFBQT05OTlFPT1BPUE5OTk1PTU9OUE1OUFFPT1FPUFFTUlJTUlBRUU9O
-UFNRVVJPTk9RUk9TVFJSUE9OTVFPUE9NTEtPT09QTU9QT05OTVBSUFJRUFJSUlVV
-VlRSVlVVVVlVVldXWFhbWFpeXV5eYGRlZmhpa2prZ2RnZ2FfXFZXWlxbV1dXVlda
-WGGRssfS2t7i4+fn6OrrSElKSUVDREhIRkhIR0hJR0lCQ0VEQkNDRUM/QkREQkNA
-Q0Q/QEFBQUFDQkA/PTw+PTs8PTo9QD1APTw/QDo3ODw+Ojo5Oz05Ojo7PD08P0A+
-PT0/P0FAQEJCQ0NBQkJIRUZHRklJSUtJS0pGQ0NDRkNFSElLS0xNS01MT1BWVVNP
-Tk1PT09SUExLSk1RU1lcWlVXV1RTT1BQUVJVVVRRUFFTV1RRVVJPT1JSTk1OTlBN
-T05MTElHSUpQU1FSTUtJR0xKSk9NU1FKSURFQ0NGQ0ZIRUVFRURDSkpJUExQUkVG
-S0pORklISlJPWV5VWVVXVFZSUFFVUklDQDk0MzI2MTQyNDg0MjM2ODY3Njc5TDY1
-Nzc6NTU3PD05NTg7Ojk6Ojg4Nzw7OTY5OTw9Pj09Pz5DQURAQjo6QUM+Q0FESUZA
-Pj5EREtNT1VTTEhKTE9MT01KTEdNU1JVWFBMS0pLTlFSU09OUVFQTE5JR01PT0pL
-UlhZT0tMUFFXWlVTUVNQT09UU01NSk1MSkxMSEtHR0I/PUE+P0FDPzs4NjY3MzUx
-MDE1NDI5MzQyMjMwNjMzNDY0NTpDT1VaY15WVVNUVVRPUFJSU1VXWFVcXFZTU1da
-UlBOU1hWT1JaWFRTVlhaW1ZNRUVGQjs1ODhDTVBPUE5MT1FWU01IQDk5NDY0OUBF
-S0tMRURCPTw5Ojs3ODY3Ojg0MzI1MzQyMTM0MzQ1NDc1MTMzNzY1Njk6NjY0NDg5
-PkBBP0RITlFTVFJSWV5bVlJTVlVTUlRXUVFRTFBLTE5OTEpMUFFMT0hHRkJGSktJ
-SE1VWlxgZmpwcG91dXl9fX+EhYWGhoqLjI6SlJCNkpOWlJCOjYqLiYaFhH53bmdh
-XltVUk5ISVFfeYyWnKaoqa2wtK6rq6ilop+fnJmYlJCMhoSBe3d3dnZ5en58cm9w
-gIV+dHqHgnV6iIyAeIKNjYuOnKess7axsbO0t7u5tb29urq7t7azsrKztba3uLu8
-t7e2trm2tLSwsa+wsbSxsbSxs7Szt7a2tLKxsrS2tbS4t7m5uba1tLe4vbq3u7y8
-vb29u7mzs7m5urzAvbu3uLm8vL6/vLy2tbq9vbi5ure4ubW0tbe4uLi1tLOysbKv
-rKyoo6Sjp6OmpqilpKaoraqloJaNio2NjI6RlZORkZCRkpSWl5qbnJqZnaKkpKSm
-p6mpqKehpqiqrK+uqrGxsa6usLKwraiurrKwsKurqKSlqKarr7OztbS3v7u5uLi3
-trSzs7KxsbW1tra6urm6urazsrKztbS1ubWzs66khWlgX15YWFNQT09ST05NTEtO
-TU1PUE9NT01MSUxLTk9PTkxOS0xMTU9OTk1OTlBPUVFQUU5QU1NPUFBRU1BPT1BN
-T1NRUVJTUE1QUVNPT1FRVFJRUVJQUVFQUFFQUVFQTk9RUVBNTlBTU1FQTVBSUktP
-T1BQU09NTVFPUVFSUU9PUVJRUlNTUVVXV1lYV1hYVVZVVVhYWltdYWFgYGRkZWds
-a3FycXN1cm1ua2RfXFpbWllbWFlVWFdZYIS1x9HZ3eHk5efo6+pGR0pKRUJGRkRI
-TUlPS0ZDRUE9QUM+P0FCQ0BFQ0VBPj8+Pj1BREFEQUNGQDw7QD06OTw6OT09QTo6
-Ojs5Oz4+PDk4OTw9Ojs6Ojg5Ozo9PDw/QD9AQENAQUBCRUNCRUZFRkVKSkpMTU1K
-SUdFRURGR0ZJSklLTU1NTU9PUE9SUktMUE9PUE9RTE5OUVFSVVhYVk9RU1JSUlJT
-UlNTVlZTWFZYWE9SU1BPT1JRTVBMTU5MTkpMTExPTk9SUFFOTE1ISEtMU05PTUhH
-Q0VHREdEQkNERkZERERKS0pLSk5TR0VGR0hHSU9OUlBUW1VeV1dVVVNQSkpMSENA
-OzUyMjIxNzMwNDE1NzY0NTU0NzU3NDA0Njc2ODo7PTo4Nzg7PDs7NzQ0Ojo1OT0/
-Pj9CQkA7P0FCP0A+PDpBQkJCQkREQD86PUNCRkpISlBOSE1OTExLUU5JSU1MUVVV
-U05MTk5LT1JSTU5OTk9LSURFTlVXUlBQVVZRUFFSUE9RT1FSVFdUUlBUUk5HSElN
-TEdJTUM+PkBAPT1AQT4+Pj88Ozk1ODk3MzQ1NDYyMjMzNDcyMzExMzk3NztHUVhc
-W1ZTV1VWVlBSUlVYWFdYWllZV1NYWlxaUlNTWFROS09RUFNSVVZZVlJKREM+OTc2
-N0FHSEpMUE5SVVNZVU1IQkA6NjlAREdKSUZISEFCQTk4OTk6Njc1MzE1NjM2NDIy
-MjAuNTMxMzQzMjg3NTc0NjM0NTY0Njk7P0RGRk1NUVJQUFJXWVhXWFRTVlxZWlpU
-UlBQT0pHTk9KSUhKSUpJSUlGREdNU1pPT1JYXl9jaWxvcnV5en2ChIOIiIeIi4yL
-ipGSlJKSkZCSk46MjIiFhYSCe3ZuZmNdWFNSTUpNUl9vgYuQl6Ckra2vrampqqil
-pKWempWTkY+HhYF7dHRzb29wcnJ0fI2PiYF9io2Ih5CZmIqIlZSHenmFlaGlpqip
-qrG5tLa4vLm8urm2tLSys7W2tbi7u7i6ubq6uLW0sK+tsLS0s7Kws7Gws7S1tbiz
-sLKzsrO0tbi0s7O5tLy7uri6vL29ura4vbq4t7e0ubm5vLi8vLm5ubm3tLq5ubm3
-t7q8uLm4ubm4ubm5ubm2tLa1s7Gxsa6tqqimo6WoqqikoKanq6yrqKCcloyIjI+P
-j46PkZSUlZeUk5aWmJmbmZqcm52coaSmpKepp6SoqKisra2srK2xtbGxr7CurK6u
-sKywr66uq6mmoqevtLa2tre4t7q3t7W2trSzsbG1srW5u7y+u7i3tba2s7W6tre5
-tK+vrZ58ZV9cVVNUVFJPTk1NS0tNTE5RTk1PUE5OTExMSkpJS01MTk5OTU9NTU5S
-UUxQUlFPT1FOUVBPUE9OUU9PUVBQUFBRUE9SUlBRT09SUlFOTk9PUlNSUFFRUFFQ
-UVFSUE9OT1BQT05OTlFSUE9OUVBRT05PT1FST1FNTE5SUlBQUFNSUlRVV1VTVllX
-VFdaVVdXVldZWVheYWJgYmZnaG9ydHh6fX17fH17d3Vxa2RiXllaWVlUVFVbWFVd
-ibPG0dje4eTn6Ojq6kVLU0pMRERHTElGSEZKSkpFREZCQkFAP0NHRkJDR0hDQTw+
-Ozs9QEFGQUA/Ojw8ODg4Nzs7PDw7Pzw8Ozk6Ozw6PD9APDs9Ozo7Ojo6PT07Oz48
-P0BART9CQkNBQUFBREVERklISUhKSUlKSkdHRkNGR0lMSUpLT1BQTUtUU1BOT1BO
-UFJTU1NOTkpLVFJWVlVWVVNUU1NQT01NUVFWV1NbWFRWVFVTUFBOUU9OUU1MTUxM
-TU5MT1FNTFJTVVJPU1BQUk9NTk5OSEVFREZDR0ZFRkE/QUNFQ0NGR0pIRU5MSExN
-UUZHTkxUU1VUUlxeWldWVVhQSUVDREROOTI0MDEwMTUyMi4yMjAxMjQyMDM2Nzc2
-NTc3Nzc5PTs3Ojs9QD45Njg6Ozo8OTk3QEM/QDw7Q0I9Ojk8P0FCPT4/RklEQkRA
-QkRDRERKT05KSk9LSkpMTExOU1RQVVRSUU9RUE9QVVhVVVJSU01MR0VMVVpYVlZU
-UlFQUlBQT0xMTU9TVlhUT05WU0dFR0lIRkVLREJAPj0/QEJFQ0A7Ojc2ODk3NDgz
-NTUzMzMzMzQxMzI0NTI0NTQ1OUBJUlVUU1RUWFlYUE5SVVdXU1dXVlVYW11bXV5Z
-V1tZVlJLTlJPUVNTWFlYVUxIQzs6NTc4PExHRlJUUlJSVFdXUk9NSUI/QUpKSkhM
-R0RDQUVDQTs6OTc4NjYzMzMyMzM0NDIyMi4vMzQ0MTM0NjQ1NDQzODQ4NzU2Nz1C
-RUlITk1TUE9SU1ZWVlhWVFNWW2BcWVZSUlFMSEZGRkhHRklFSEpMSUlJTExYWE9R
-WFpfZWhpb3BydXZ7foODgoWGiIiKjI+MjZCRk5GQjo2NjY6MiYaEgn95dHBoYV1Z
-VE9LSU1XZ2xtbXOBjpujpamlqKirq6iipZ+blZGRjo2FfXlzcWxmZmxzeISPlI+H
-goWLh4aUoqSdmZ2ln4p7fIuVlo6Lkpmip6+0tLm4ubq7u7m7uLe3t7O2tba5ubm6
-uraxrqywrqursLGysbOzsbO1tra0tra2tre0tbW2tbS1tru8vLm5ury+urq5u7y8
-wrm5vLq8vbm6uLi8ube3ure8ubi3uLi7uri3ubq4ubi7ubu7ubi4t7WvsLCtrKun
-o6OhpqanoqOmp6mqrKqopZaLhIWLjY+VlJaXmpeYmZiYl5mcm5mZm5eXmZmeoKOk
-p6aoqqmqqqmpp6urrLG0srGwsa6trq2uraytsa+trKylqK+3ubq6u7e5vLi6uLi4
-uLWysLSxrrW6ubW2tre2t7u4tLi9uLW0s7SzoHtjW1lYVlFRVFFNTk1FS01LTExO
-TU5MSlFNTU5LTEtNTE5LTU5NTUxNS05PT09OTU9OTU1MTU5NTVBLTU9RUFBTTU5M
-TU5PTk9STlBQUFJTUVFPU1JTU1FRT1BNTU1QT1FRUFBNTk5SUVBMUVBPUE9QUlFQ
-U1RRUE5PUE9RUFJWVVZVVFNVVFlZVlRXWVdZWVpcXVxeZGRlaWdtcnR1eH2AgoWK
-h4eFh4eDfnp0b2ZhXFtaWlZUVVhbWl6StMbR2d7h5OXn6OrrRUVMTkNHS01GTUlE
-Q0U/Q0ZFREVFREhBQUZIQkNDQ0BBQDs9PD5AQkI/P0BAOzw+OjY1Njg3PD09Ojk2
-NTo8O0A8PUBBPz08Ozo5Ojw8PDs7PT5ARkFAQEFBQUA/QkRDREVDRkdLS0tKR0lI
-RkVERERJR0pJS0xOT05PTExNTE5OT1BQUE5PU09OU1JTVVZaWllUVFRVV1JQTk9S
-VFVYVlpZV1VRT1FWVVFQTUxUU05PTU5LS01MT09NTk9PUUtOTk5TTkpMT1BJSUVD
-SURDRURGQ0BDQ0RFRUhJTk5OTUtIUU1MSklSTlNWTVBSV15XUVJWWVFIRkNEREE6
-NjMzNDUyMzM1NzUyMzI1NDI3NjQ3NzMzNDc2NTY6ODU4Nzo7PDc6Ozs9QD06ODg7
-PD5AQD9APTw6OTw8PUI9OztFSEFBQkNEQUJDRUtNTUlJTFBSUEtLTExTVVJQS0hI
-TE9OTE5QU1ZVVVNVUkpJRkZOV1laVlBPTE5QT1NTTEhKT1JZU0xNTldUTkhHSklK
-SElGSENCPj1BQUdFQTo7ODg2OTY0MjUyMjM3NjMzMTQ1NDQ0LzEyNDY2OD5MU1VV
-V1ZXWV1WUE5RV1RUUlRSU1ZZVlpaXVxcWFZVV01NU1dTVVhaWFZTSkVBOjk3NzQ6
-SUhIUlRPTU1UU1dTT1BTUEpLTE1FSExISERFQ0I7OTs8PTk5OTg0MjQ1ODQ1NDQy
-NTUyMy8wMjMyMjY1NjQ0NTM3ODo7PEBERUhMT1VWVFNWVVlZWV9dWFhbXFtTT01J
-Tk9JR0dITExNTEhISUtKTExMTVVUSE9aXGBlanFyc3V3enx8f4GCh4iKi4yOjIyP
-kZCPkI6Ojo2Oi4qLhoaCf353cWdiXFpTTU5TVWFscmFXYnmKlpWVl56nqqmpo5+g
-nJeRjoqGiIJ8dG1ubGluc3x+iZOQgHZ+ioZ6fI6hpJqeqKijkIqRm56Uf3mDkJWX
-oKWpqa64t7e4t7a2tLWzs7W5trq7ube1tLCwrq2vrquurrK0tba1tbe3tLW2tLi4
-ubKztbW1s7Sztrq6vbu+v725t7m5vsC9vry4ubu7vLy6t7W5uLvAury7uri1tLa2
-t7e4ubu6uLq5ubi3t7a2s7OvsbCsqqunpaenqaSipKOnr62tq6SdlIeBhImMjpOZ
-nJ6go6KioJ2eoJ+gn52dnZ+dnZ2gpqmqp6ioqqisq6eqqKalqKyurqysqKqmpq2u
-sK+wr62qq6yutLi4vLi5u7i4ubq6uLm2tLW1tLSztLSxtrS1tru6u7u6ubmzsLO3
-tK+Yc2FYWVlWU1BNT01LTU1MS05LTk1LS0xKTk9RT05MS0tMTUxMTE1OSU1OTU9P
-T0xNTktKTE1MT1BKT05OUlFPT1BOUFNSUlNRTk9RUVBTU1VTVFZRUVBPUU9MTUxO
-UVJTUVFSUlNQUk1NTFBRUVJSUE9SUU5NT1JWVFRTU1BUVVVSUlRWVlVZWFdaVldY
-XF1bXl5lZ2hpa21zdnl8gIOEhoaJiY2Sk5KOjomGf3x3bWReW1hZWltbWFtdY5K3
-xtHZ3uHj5ujp6+tHR0RGUEpHRUlCS0VCQUFCRkNISUVIR0JGP0A+O0BFRD09QUFC
-QkI9PTk9QD88PTs7OTs9QUE9QD47Ojk9OTs6OD49PDs8Ojw+Oj08PTw8Pjs9PUBC
-RUBBP0BAQkNDRkVESEZISUpMSUlJSEdFRERHR0dISkpNTExKSkxLS05OTlBTU1FP
-TlBUUVJTUVVVWV1cW1VRU1RXWFBOTlJRUVRVV1ZVVVBPU1JWVU1MTE9ST1RPTU1O
-TElNUU1STk1OSEhHSlNRTFBMTUhHSURFRkNFQ0dHRURFRkdGTEtPUEtLSUdLS0pN
-SlBMTFVUWFdVVldVVVdUVE5MSUZDQTw3NTQyMDU1MzQ0NTQ2NDIzMzQ2MzU1MTUz
-MjI1Njg4NzU5PDk3Njc3QENAOjg5PD1DQD49QUI+PUA8Ozs5OT0/Q0dIRUZFQUVG
-RktISkpGRUZJTUtKSU1MT1dPS0lLRklJTk9OUE5KSUxQU1ZTTUlHSlBSVlVRUU5L
-S05PUFBNSUxNVFdTUE9RUFVNTVBRSkpOSkdNTEdHQ0A/R0VBQEE5NjUzOjY3NDIw
-NDQ3NTIyMzEzNTU0MjY2MzQyNjxLVVpYWVVWWlZQTE1YUlNUV1laV1lXVFdZW1ta
-WFZXVVFWVlVUWlpaVlFJSEZCOTY0ND5OT01LUU1QUE5SUFBMTVBRTExHSkhDRUhB
-R0U+PTxAPz48Ojk1Njg5ODk2NDUzNjY1MjI0MDYxMzU0NzgzMTM3NzY4Nzk7Oz9D
-Q0lNT1RUVFZXV1haXl5ZWFtaWltVT0hHS0xMSk5QUU5NTElLTEtJSkpLWE9JT1Va
-YWVrbnJ0eHh7fH1/goOIiIaKi4qMjpGOkJGRjY6Qjo+OiYiEhYV+e3VuZmRbVlNQ
-UFdoenZnWVhmgI+KgXh6i56moJubnqCdk4uHg35/gXtvaXBycHN0dX+FhIBzbXiC
-d2x2i5eZmZ6kpZ+Tk5+kpJiHgo6Tj46XmJKOnKetrLKwrq+ytLSytbe5ubm6uLa1
-sbC0sbGtqq21tbW1uLa3tbKzs7O4t7e4tLK0tbKwsrO2tra8u7y7trW3uby7u72/
-ubm9vL68vLy7vLy+v72+vbm2uLa1uLS1tbW2uLi4ur67t7i1ubW1uLSzsKyur6yo
-qKenpqWkp6mqq6unnZWMgn6HjI6PkpeboKGkpKmkoaCgo6Slo6OjpqOloaGlpaWl
-qKamqqumpaWorKusqqqpp6elpqirq66ura2srqmqqaywubq5trW2s7a2uLe1uLi0
-tLa1s7S0tLe1tra3ur2+vr65tbGrrbG0rpZzYlxVU1JST09QTExOT09OTFBOS05N
-SkpOTk5NT09LTUxMTExLSk1LTU9QTUxOTUpKTktLTU9NTk1MTlBPTk5QUE5NTlFP
-UE9PT09PT1BST05SVFFSUVJQUE9PT1BRU1NTUU9QUVBQUE5MTk5QUFJRT09RUE9O
-UlRSUlNTVFRRUlZVVlZWV1haWV1eYGJgY2dqaW1wdHd4fICDhoiIjpCRkZCPkZSX
-k5OOjYiCe3JrY11ZWFhXWltXVlhklLrI0tje4eTm6Onq60dGS0dMSkdOTEZQUkxD
-QEJAQkVERUhHQ0hDQ0Q8PUQ/QkBCPkE9QUA9Pz08Pz08ODg8OTk7PD08PD08Ozs6
-ODs6QT9AOjs+Pj4/Pj49Pj1CPz48QENEQDk9P0BBQ0dKSEdFRUVHR0ZFSEdIS0ZF
-RUNFR0hISU5JSElJSEdJTE5OUlFSTk1RUVJRT1JSVlVWW1hWVE9QUlRWUk5PT1FQ
-UVJRU1FUUk9UUlVQTE5KSkxNUU9OUU5PT1FNTVBOSkxLRkRKTVBRUlJPTEpIQ0dK
-SExKS05HRkVFRUdKSEhOSEVMRElMSktLTEhITlBXWVJTU1RUV1NUU1BPSUZGPDg0
-MzAzMjM0Njc4MzIyNTUyMjczMzU2NDk4MzM0Njk3OTg5OzY3Ojg5PT47Oz8+QENC
-Pjw/QD9BPj5BQkNAPkFDSEdGSUtMSEVESU5QSUVDQ0ZJSEhITU1RUkxKSkhEQ0ZM
-TU5PUVBJSU5MU1JQSUdPU1ZXV1JOTkpNTk1MUU9LR0tRVlZYU1VQUlJPT0tMS05T
-TlRQTklIREZIRUA7OTg3NzU3OTc1NjQyMTQ3ODQxNTc3NTIyMzMzMTMzNj9LVVlY
-UlNUVFFNT1hPU1ZbWlxeXFZWU1ZXVFpYV1lZV1BRUVNXWFdXVU1JQDs4NjU2P1BV
-VFFQT01JSk9NTkxLUFRSS0ZIRz1ASUdIQjtBREQ+Ojc2NzQ1NzYzNDU3NDY2NTEz
-MTAxNTQyNTY2Mzc2NDc2ODU2Ojw5P0JCR1BQUVNWVVhbWVhaWltcXV5bWllMSk9P
-UU1NSk5LT1JMUE5PTEhNUFNLSExPVVxkZ2VpbnN3eXh7eoCAg4WHioiJiImMjI6R
-kJGSkJGQjY2KiIWDgX97dGpmXlhSUk9SW3OAfGZdZnmGiX9nYWt9h4uIhIiRl5OM
-hIGCfnh1dHFucXJwcG95foJ8dGdufX1ubHyJjYySnqCVi4eUoKimmpGRnJmMjZiV
-h4KSmpuboaOnqaqrq7K0uLu8vLy8u7i0s7Gyraypra6ur7W0sre4t7O1t7a3t7i4
-t7W2trKxsbOxtre3trW0tre6u7q6ubq9vsC/vb69vr28urq3u7y5uLq5tbS1t7a4
-uLO0s7a0uLi6ubi4ubm5trWxr66qpqOlpaiqqaiqq6mqpaKbkoeAgIWGjZCPkpmc
-nZ6doKOko6Skp6mrra2qqaesqKmpqaiqqqenqKqpqaepqamqq6mrqamoqaysrKqt
-rKqrrK2sq7C3trK0tbGytbW1tbe6u7q7u7m5uby5t7e0tri5ubq5uLa0srO0sa+p
-lnFeWVdUVlNPT1BQTk5OUFFPUE1MT01MTEtKTU5OTUxOTU9OTE5OTE9NTk5OTU1N
-TE5QUE9OUlNTUVBPTU5OSU5PU1JPTk9PUFFRUVFPUVJRUVFPUFFRU1FRUk9RUFJU
-V1FOUVRRUVNRUlBPTE1NTU9PT1NSUVFTUFFRVVJTVldTUVVYWVhaXWBjZmhoa3N0
-dnZ2e4GCiImIi4uTk5SVl5iZmZmXmZiWlY+Lh4R/eW5pYFtYW1xXV1VXXGGTvcrT
-2d3h5OXo6OvrSklOSEpISUdHTUlNUU1FQUVFREBCTFlOQUNBRUJBPkJEPT1BQURA
-Pzs7Ojs8Pzw6Oj45Nz48Ojk9OjhBPDg3Ojw8PDk9Pjs7PDpAQEI+PD1AOz1CQ0RE
-Q0FBREFEREZHSEhIRURERUNFSE5OSkhJRkZDRUZKS0lLTUxKSkpMTU5PUlNRUFFQ
-UlFPUFFTVVZcXlpTUE9OUFJST05PUk5OUE5RUlJTT1JOTk1MTUpJS0xOUE1OT1FN
-TFBPT05NT01LT01NT1NUUk9LSEdHRkdHTE1MS0dFQ0JFRUhMSUpKRkpHRUZFS05N
-TEhMUllXT1VTU1BUUE1KT05IR0RBOTUyMjA0NDU0MzIzNDU0NTQ1NTQzNDU0NjEy
-NzY4Nzk4ODo5OTo5Ozo5PD45Pj1APj49Oz09PD46Ojw9QkQ+QUdHSkRCQkZIRkZE
-Rk1IRUZISEdGRkhLTElKSUlMTkpHRkZOT05QT0pITlBNS0lJS09UV1xZWFNRTktK
-SExQUkxGRktQUE1TVU1KTE1JSktLTFNSVk9QTUpJRUpFPzs5Nzo4ODo7Ozc2Njc0
-NTdBNDQ2Nzg3NzQ0MzUvMDQ0NjtJU1NUUlJNUFBOT1FSWFpbYWNiXFhWV1VRVFNT
-V1NVTU5OVFdYVVdWVEtBOTcwMjc+UFRST09VT05QUlNRUk9QT1RNS0pKSkRKSUdC
-QUJGQ0A9Ojg3Nzg2NzUzNDQ2NzYyNDIxNDg2NDI0NTQzNjY2NjY2NDc8PT9APT9J
-S1JTUVJSVFVWVVVYXFxdW1hXUk5NUVFWS0lMTk1OT1FPUE5KSk9MTEhISk5WWV9j
-amlxdHd1eHt8gIaEhImJi4iJi4qNkZGQkpGQjY2LjoiJhoSCf3pwamNeW1hTUU9c
-cn15bXF+jIyCbF1ic357cGJgbn2EgIF9enNzbm1vcm5tampue4SFe3hya3R/eG50
-ho+Ii5WdmYd2f5GdpJ+SkpefnJOaopmNjpeak4aJlJ6gnZ6hrrS2uLm/vcC9ubSw
-sbO0squsr7KzsbKzsra1tbe0tra1ury4tri2s7GxsbCztLW0uLm6u7y6vru8u72/
-vr2+vL/Avr+7t7WztLe1tri4trOyuLu8tLO0t7W4ubi8vru5ureztbSxrqmlpKOi
-paerqKWmqquroJWOg3d7hYmLj46Kj5aXmJeZnaCio6enp6mmq6msq62oqqqnqKqt
-raymqKmop6esqKmrqaiqqauppqqtsK+tra2vr6utsre2sa+yt7e1s7a5t7m/vsG9
-uru5ubm4tbK0tba5ure3trGwsbKxr6eQbGBZWFVRUVJQUFFSUU5OT1BOTE1MTk9Q
-TUtMT09OTVBOTUtNTElMTE9LTExOTEtOT01NTU1OUVNRUE9OTVBPTExOTk9OT09Q
-UVJQTk9RU1NQT1FSUk5PUVJQUVBRUVFQUE5RUk9OT09QUE5MSktMTlJRUlRTUVRU
-VlZTVVNVW1hZXV1fYWRoamxwdHV5e4CAgIWJh4qNjo6RlJSWmZmXmJqdnp2bm5iW
-j4mGgXp1cGhjXFlbWFhWVVdZY4+9ytPZ3uHj5ejo6upFT0xRSURKRkRGRUZJRkhE
-REtEQkBLWE5KRUFEQUNAP0lGPj1AQkE8O0BBOjs9Ozw6Ojs8Ozw8ODk5OTo6PDo7
-Oj47O0JBPDs8PEA+PkJDQEA/QENCQURFQUNFSURDRkhHRkZGRUNERkdLTE5KSElJ
-SElGS0dLSUhIS01MSktNUFFRVVRPTkxOUlFQUlJWVl5fWlRRUFFOU1RSUVBSUlJQ
-UFNWU1dRTU9NTU5OUEtJSE9RTlBQTk1NVFNOVFJST0xMTkxRVFFPTUtISUZDSEpM
-TUlJRkFBQEZFQ0NDR0lDRkVJS0hNT09RS01YWlhTV1RQTVRSUktPTklEQj01NDU1
-NDY1NTMyMzEzMTU1NTc2NjU2NTUzNjg1NjQ3NTs9Ozk3ODc4Pjg6PDw8Ojo5OT48
-Pj09Pjo5P0NDRUBEQkRHSEJBREZFRkVDRkxKSUpKSUpKR0hKSEhIS09LSUlLTlFQ
-T1RSTExMSkxIS0xOUFJdW1dSUlNSTUxRVFVTTUdITU5PS1JVTktKSkxNT1BPU1RU
-T1BOSklIRkE+PTw5Nzk3OTg6Nzc4NDIzNjcxNDQ0NjIwMjQwMDU2NTIzNj1ITlBR
-U1BQVlRSUFRUWVxeYVxbWVxbWVBPUVNVVFBQUlJRVVZUV1pWT0M5MzM1NT1NVFBH
-U11RUVNYU1JQUlVTUE5MUFBTTFFMRUpDRkQ+PTs6Ozk6OTg5NDMyMTM5ODc0MzIy
-MjIzNTQ2MzUzNjg0NTg6OTo6PTw9QkdKUE9NTk5NT1NTUVFWW1tZVltXUVFOUFRL
-SkxJRkpJTE1QUlBKS0tHSUdJUFNaXmNmbHJ0d3t7fn5/gISEhYeJjIuLi4yMjZCP
-j42PkIyNioqIhIB9e3FrYl1ZVlBOUlxrbmpsfJCal4hvaHB8fHJdU1VibmdcYGlp
-ZGJhY2dtcGZobnyMkouBe3Z5goN5eomYl5Sdop6RfHOEk5eYjYOPmqGclqChnJee
-qKWajpCXmpKIi5Wiqqqtsba4uru7uLW0tLOxraqsrrK2tLCxtLSytbi4srS2uLS2
-ubm3t7W1tra2uLi4uLm5ubq4urm5ury4tLi8vb++ubm6ubm5t7a4uLi2tri5uLW3
-tbK0tba2t7a2trW1t7Gxraiop6emp6WoqKinpqitqqmdlIuBeH2GiI6MjoqPkJWX
-mpuZm5yfo6Ojo6empKerp6enqKmpqKqsrausra6wrKusqampqKitq66pqqqvsLCy
-sK6qp66utLS2t7m6ubm4trm6uLq9vbu5ubm1trO2tLa0tbi4t7W1srGwsrKxp4xp
-XFlYV1RQUVBPUE9PUE1NTU1OTUtOTk5MS01MTU1NTE5MTlBPUU5MS0xOT05RUU1M
-TlBLTE1QUU5PT09SUk9MTEtNTk1QT1FTVFNSVVNUUVBNTU9NTU5SUFJST09RUFFS
-UlJUUU9LUE5PUE9QTk9PUlJSUFJVVFVUVlRXWFpbX2NiaG5yc3R4fH+AgoaHiI6P
-j4+Pk5GTlJabnZuZnZqbnJ2XmJWUlZOMiIR+eHJrZF9bW1VWWFZWV1Zgk77K0tjd
-4eTm6Onp6kVKTklGREZJTU9KQkBAR0hBQUhHQDs7RUE9PkI/QT5BRERAPD09Oz47
-PDo5Pz47Ojs8Ozw8PTo5OT1CQD85Ojo6Ozs5QD9BPTs6PkE/QD0+QEJDRUVERkhI
-REZJRUdHSUdIRUdKSkZJSEhKS0lHSkpJR0RJR0hKRkZMTk5LS1JPUVRUU05KTVBT
-UlBPUFRUVlpWU09KTU5TVlBPTVBZVFZVVFdXVFBMTU1NTU5SS0lISVBTU09MUU9Q
-T05VVVFPTE5OTlNRT01KTEtKS0VMTk1PSElGRUM/REJCQUFDRkNHSEhMSUtRTlNP
-TVJVVFJYWU9LT1RRTkpKSUVDPzw3NjU2SjU0MTMyNDMwMzQ2NDY3Njg4Njg6OTg2
-Njk8Ozs7PDg1ODs7Ojg7PD06Ozw4Nzg5PD89PT0+PT06PURHRUZIQ0VFREdHR0tO
-SUhHR0pNS0xOUExPTU9RUU9KR01OUVBQT0xLT0xPTEtOUVRSVFhXVVRQUFBSUlJU
-U1FNTExOT05KTlVPS0dJUFNTUFBTUlhPT0xQUk9KQD49PkE6Ozg7NzU2NzQ0NjQz
-MjQ1NTQzMTc1NjgyMjAuMDQzND1IT05QUFJWV1dWVVldX2FcV1paXV5ZUU5QVFdW
-VFBOUVBUV1hXWFVQRjw1Njk0NURTU0tUX09LT1RTUlRWV1VUVVVSS01PTkpIREFH
-RUFDPjk+PDk2NDg6NS8yNDMxLzE0MjEtMDAwMi4xMjUyMzU5ODk4Njs9PDxDSEpM
-TE1QT1NRUlFOTU5VV1tYWVlUUEpKT09OTEdITVBVUlBST0lMUElKS05PVVtfY2Zs
-bXFzeHt8fX1+hIWIiIiOjY6PjYyPjpCOkJWQkIyKioeHgn17cWxlXFlVUU9SXF9e
-XWyFlpeRgHV4hImCZlRTXmpoWU5QW19cYWZkZWpoa3N+ipGRiId9c3mEgHyFlpqc
-nKSpn49+gJCYlY19foqWmZOQmpOMkaKpo5mTmqKkl4aFkZqZkZSeqbK1uru4u7q4
-tLSzrauttLW2uLOxsrS1tbe3tLa2tLa2t7i5uLW6uLa1t7W3ub28vru8u7q9vr7A
-v767urm8vbe3ubq4uby4uLm6vLa1tbe2s7C0t7a2tra1srGzsbGzrqimpKipqKuq
-qaytqayrn5iOg313foaKjY+Sko+UlZaXmpmamp2eoKOlpaWlp6emp6ipraqoqqys
-q6yqq62ura2rrKqpp6qurq2wr66ytba1sa6tra2xs7a2ube3tbK2tri3uLu5ubm5
-ubm5tbS1t7q5uLW2s7S1sbCytK+mjWlcW1lXU09QUU5NSk1MTU5LTEtMS0pMTEtQ
-UU9NSktJSk5PT1BQUk5NTk5RUFBNTExNTU5NTU5OT01OTVFRVFRPT0xOU1NQUFFR
-U1JQT0xNTVJTUE9OTk9QT1FSUlRRUVNRUFJTVVNSUk9OUlFPUE9PUlBUVVRSVVdZ
-WlxhYGRpbHF2fH+AgYaIio+MkZOTlpuYlpaUmJubm5qcm5ydnZuamJaVlpKOjIyG
-gX51bmhiX11ZVVNVV1ZbXWadv8nT2d3i5OXo6OnqUFBLS09JRUVDRkREQj5ARkJA
-Qj48QUQ9PT0+Pj8/QT09Qj49PURCPT09PDs7PT9EQjxCPDo6Ojw6Pj4+OTw9Pjw6
-PDw7PDs6Oz8+Pzs/QT5AQERGRkVERkZHRUZISEdHSEhIRkZJRkNGR0hKUEtJSkVG
-R0hLSklISklMUFFPT09QUVRUUlFSUVBQTU1LUFFTWFhUUE1PUVVTUVFPTlFTWVhV
-VlRYVFBUVFFSUFBOSkxMT1JSTU5RTUxYVFBSUVJNTVBQVFNRUk5NTEpOS01MTE5G
-S0hGSEJER0NCQkVMR0VHRExPS0xKUVJOU1hbVFJTUE5OUU9OSkdERUVBPjk0MTIy
-OTYzMjIyMzY1ODk1NTI1NTYyNDU3Nzk3Njk6Ozk5Njc2Nzs5ODg4Nzk6Ojo6OTk9
-Pj05Ojs8Ozo9P0NDR01FRUNFQ0hESEdKSEVHR0hJSk9WUk5QUlRRUk5IR0tNUVFO
-TlJQTk9MT0pLT09SVFVYV1JRUU5NTU5OT01LSk9QU0xOUU9IS0tPUUxNTlRWV09Z
-UE5VVE9HRUJCPDo8Nzc4NjY4NDQ3NDU1NTUzNTU4NzQ1NjQ1MzMyMTAzNz1KUlFR
-VldZXFtVVVddYl9cVl1bW1hTUVRWV1lWUVBPUFVXV1xaVlJIPTQ0MjY7RFNTUlRX
-UEhITk5SVVZWVlNSTk5MS05QTktEREFAPTxCQDo7Nzo4NzYzNDAyNDEzMzQ3MjIw
-NTUyMDA1NTU2NjQ4Nzk4OTlBPENJSUxOVVRQT05PT0xMTlBVWFpcWlRSS0hJUEtK
-R0lPU05MTk5MSEpMTEpMUFNWXmRmaG5wcnJ2eHyAf3+AhIaKjIqPj4yMjJCRj5CR
-kZGOioeEg4WBgHZwaWReXVhSUVRcWVFQXXaMkoZ0dYKPkIVtYWhwc2RPTVZdXmRr
-bGpvbWtxeIOLjIJ+f3JreH94d4mZmZOfqaihjYiTmp+YhXuFipWNgIaNgnqGmJeT
-iImeqKqflJCYnJWJhI6ep6iorrCvsLS0s7Ktq62vtbWwsK+usbC0tbKxsbWztbm6
-u7i0t7a3t7W4urm6urq9v729uri5vb29vLO1uLe3uLq8vLy6ubi3s7WzsK6wsbO1
-sLO1tbW0sbCtrK6usLGvqqqpp6errK6srK+traefk42FfXyCiI2PkZeWlpWTmJeY
-mZubmqOioaKkp6ipp6WlqauurKqrrK6tq6yqq62tq6uqqqqoqKqurqysrq2xs7S2
-tK2rr7K2trS2t7aztbe2t7aysrW5urq4t7q5t7a2uLm2tLS3tLWysrKxsaaObl1a
-WldXU1FPTUxNT1BOS05LTExMTk5NTE9NS0tMTUtLSk1NUFJQTkxLT1BOTE1MTU9R
-Tk9QTlFQTk9OUFFRTktNUVJRUVFPTk1LTk9NTk9OUU5OUVBQT1FPT1FRUFFTUE5P
-U1NSUlJQUFNRUVRRUVJTVFRWVVlcXmFlaGpuc3V9gH6Ch4qNjpKTlpSYlpibnJyY
-mJienpyZmpicnpaZlZGRlZaQjIiEf356d3NraGNgWllXVlVSVFpeZpq+ytLZ3eHk
-5efo6upKSElLS0hDQUFLRkI/QUZKSEVIQT09PEE/Pjo9Pj8/PDxAQT1BQkE9Oj0+
-PEA9PEA+Qz48Pj09QT89QD0+Ozo9Pz48Ojc6Ojs7PjxAPj5AQEA/QEVJR0VJTUlI
-RkZHRERDR0hHRUNDQUVISUxMR0hGSEdGSUtKTEtJSkpLTE9QTlRTVFRST1JRUlBQ
-TlBNT09TWVZTT1BQU1dXU1FPVFRTVFNUU1NUUlJUUlVST1JMUE9RUlBQT1BRT1RW
-U1RRT01NUVBWUk9PTlBPTVBLR0dJUUtLTUpJSEVHQ0NDSU9GQkhHSklJSkxPUVNY
-WlxYUFFSV1hVVE9MSEVDQ0M/NzM2NDE0NTU2NzY2NTU2NjczNDU2Njc4NDM1Nzg2
-Nzg5Njg3NTc4Ojo8Nzo4ODo6OTk5PDw9PTw/QT8+PD5AQ0NDRUFCREVPSUlIR0hK
-SkdJSkhKTVJUUkxOUVFRUU1OUk9OTk1PUlJPUE5LSElPUU9UV1RaV1VTUUxPTkxO
-T0xMVlFOTVVYVVBNT1NQSkpKTlVOTFZQS1BTUEZEQ0I9Pjs8OTk8ODo0MjIzNDU5
-NzUyMjM2NDQ0NDQ3NDIxMS8wNUBHUFNXW1pcX1lXV1pcXl1ZVFZUU1NTU1hcW1pT
-VVlYVlZZV1ZTUUo+NzQ1NzxIVVVSUFNQSEdKSEtRT09VVVJQS0dHS09QVVBFQEE8
-PD09PTs5OjkyNDYzMzY0MzU1Mjc0NjY1NTQxMzIzNTY3MjM2ODk4OTpBQkZMS0tN
-UFBUVFJOUFNSVFZZWVxZVlRNSEdISEpKSlFRTUlMTE5NUU5KSEpPU1hbYWdqa3F1
-eX19fYKDgICDg4SFiY2NjYyMkpSOkJKRk5CNiIaGhYJ7eXBnYF9ZVVFVXF1SSlFh
-cHd3amVxhI2KfGpqdoF8ZlRUYGNjam91fHl1c3R6gYF1aG5uY2Jwb2hzhoyFh5ai
-pJqNkJ+np5uLhIyTlYV5goZ3dISSlol+gZelopaMk6OnoZCEi5WVio6ZoqGfp6yu
-q6Wloqaqqquqr7KwrrCzsbCusbCztri5ura5xL25t7e6urq8vL+8vLq5ubm6wMG7
-uLi5t7WztrW0ube4tra0rrCurausr6+vsrO1sLCvra6rqaurra6qo6ioqq2pqayt
-qqmrqJyRjYV/foaMj5KRkJSYlpSWmZeXmpyeoqSjpKWopqiop6mqqqiqrK+srq6w
-r66tqquprKutqqmoq6iprausq7CzsLGwra2vs7Sztra2uLe7u765trO0tbi2u7q3
-uLm5uLe2tra2tbW1tLWxsrOyqIxpXFpWVVVRUVFRUU9PT1BPTk1ISUlJTEpLTUxL
-SU1OTktKTE1PT05PUVBPTk9NUFBQUlNQTU1MTVBSUVFPUFFUUE5RUFBOT05OTU9N
-UE9PTk9PTk5QUVBRUFFUUlBPUFNYWFVUVFBSUlJUU1JSVVRWWlpcX2JgY2hqb3N1
-eH5/g4WMi4+QkpGTk5mampqbmpqdm5qam5qbmpaVl5SRj5COi4mIiYWEfXV1c3Bt
-amdkX1lXVFZZWFZWWlxqnrzK09ne4ePn5+jq6kRFR01NTUVBREZFP0BDQUdGREFE
-QTs9QEM+PkFCQDw7QUA+Pzk4Pz0/OTxDPjw6Ozk8Pz08PT0+PUI+OTk7PDk8Pjs8
-Pz08PDs5PDo8PD5CQ0RHRkNJSEVJRUhGRkVHSEZFR0VHRkNCREpKSUlHSEpMTElH
-S0lLTEtKR0dKSUxOUFFQU1JSU1RTVFJTUExLTVFXWVVUVlxXW1pVUVBRUlZTUFJT
-VFhZU1NTV1RTVU1RVFNTTk9NTFNRVldTVE9PTEpLT1NTUFFQU1NNTEpGSUhOUExN
-SkhIRENAQ0VGT0pFRUVHR0JHS0tQS1FXVVdUVVNVU1FQTUtMTEdGREM8OTM2NjY2
-OTQ5Nzc1MzM1NTQzOTc3NjQ1NjY6NjM1ODk4Ojk2Nzg3ODg2Nzk3OTc3OD9BPjk7
-OjtBQUE/QT1DREVFQ0ZFRkdGRkdKSEdFR0hLTE1PTk9VVU1KTFBQTU5UUlJNTU9O
-UVFRTUlLUFFTUk9SUFFUVFVSUFFQT1VKTE5TU0xJUVlWU05NT05MTUxKU1JOVEhG
-SFJQSEdGRD09Pjs4Nzk6OTc0NDYzNDU0MzMyMzQyMzMzMzIxMjMwMjM1NkBJUVZa
-WlpfYlpZWFtcXFtcWVVSUFRVVllaWV1bWldSVlVYWFRTUEQ5Nzg7QEZQUlpQUFFL
-TE5OTlJPTk9QT09KRERLUVJPT0hDRkA7PTw6Ojo7OjQ1MzY1NTM0MTEyNDMyMzEx
-NjUxMjMyNTY5NDQ2NTY6PTxCREtNSUpLT1NWVVNUUVFXW1tbXF5YTklFRUpITEtK
-TFBOSUtMT1FQSkhGSFBWW11hamxwdHZ7fX6BhIKBg4SDgoqKjY+PjI6PkZOUk5WP
-j42KiIaBfnl3bmdgWVlWVVloYlBNVWZzcWZWVGp9g39vZnB+gnVgWmRtaGZtdHt8
-d3Nwdn1+c2JjbWddZW1kZXuIg3iCkpWUhoaWpa+xopeUnaGXgX+MioKFkpiWiYCF
-k5uVgHqDnailmJCVnpOCgY6YkYeMm6GVi4eRn6CfmJuipqekoKirrautrKqvsbK2
-uLm6ubi4ubm6uby8vLq7uLi3tri2s7m4vLm3srO2tbKysbOxrq+tra2urKmqrqyq
-rKysqqyqrKmoqairqqemo6SmpqmpqamrrqmlmpGOh4KCiY6MjZGTmJmdmpmamZuc
-n6Omp6epqKemqKyurKqnq62urq+vra6wsLS1sbKwsK+usrKxrKqssKytsLKurqyq
-rK+1tLa4tba2uLu6ury3tbS3u725t7m6uLm5vLy4t7Sytba0uLm5ta6liWldV1VW
-VlFRVVFPT09OTkpOTEtNTUtMTk1KSk5OTExMTExJTU9PTk5PTE1NTk9SVVBNUFBR
-TU1NTUxNTk5LTlBSUE9QTk1OT1JUUE9OS01NT05NUU9PT1BQUVRXU09SU1RVVFJS
-VFRUVFZTVldaWVpfX2FmaW1vc3V5foOGiYqMkZOXmJiZnJeZmpuZmpyZlZaam5ia
-l5aSjY6Kj42KiIeFgn17eXVxb21qaGBiX1xaWVdYV1daWFpdYWugvsnS2d3g4+bo
-6enrSExLSkdFRERFRkRFRUZFPEJBQUNCRUdBQEdEQUJCPT0/Pz5BPEBBQT07PD45
-Ozw6PDw8PTw9PDo9Ojo7QD5APDs9Pjo7OTs8Oz4/Oz0+QkNER0NCRUhHSUZIRENF
-SEdHSEVERUdFRURFR0hGRkVJSEpPSklJSEZGR0hIS0xMT1BSVE9TV1hYV1RUVlZV
-Tk1OUFRYWVVUWVZZWlZQUFZZVlNRUVRUWFVRUVFTVE9QT09QUFFPUVFOUlRUVlFQ
-T09RS0tMT1NQUlBSVk5MS0pJSU5NSUtIRkhFRUNDRUJHSkRFR0NEQENESUtMTlRU
-V1FOT1JRUlFMUUpJSkhEQjw2MzE1NjIzNTkzMzMyNTYzNDY3OTo4NTc2OTo6Njg2
-Nzc4NDQ2Ojs5OTk3PT46OTo/PT05Ojo7PUBGREZEQ0REQ0JDSUlKSEVFS0lHRkZI
-RUFGRUhPVFdRTUxMS0xPUFZUUkxLUFJPTk5STE5PUlBNS05PVFNTUU9PTlJQVktN
-TUxLUExKUFJOSkxNUVNMS0tIU1FLSEdKS09NTkpJQUM/QEA6Ojc1NDMzNDQxMzMz
-MjI0Mjs2MjAwMTIzMjQzMzM1N0NIVFpcW1pdXFlXW1hYWVtaWFRST1RZVlhdXVxa
-V1NQUVNUVVhSTz84NTZASFJVW09PUEtMVlNRT0xGS1FSUk5KTFBZUUxTTEpGRUU9
-Pj46Ozc4NjM0NDQ0NDEzMzAyNTQ0NTQ7PjQzMjE0NTQzNTU0Njg4OT08QkZCSExS
-VlpbWldWVlNYXFtaV1FMSURESk9OSkVJSktHRU1QT0pLS0ZHS1NYX2RrbnFydnx8
-fYB+gIKDg4SGiIiJjI6QjoyQk5OSj46MjIeJhYR/d3RvZ2BcWldVXGlmWV1tdHJt
-WE9Ybnp3YVhcbXp8alpdam1pbXh5eHR2bnCBhoFsYmxtYVpna2NvgYZ5dIaTlIZ1
-d4+jq6eZlp2lpZyLjJSSi5Gcop2UjpSgn493cnyQn5qRkJigmpCOmJ2QfX+OmZGA
-foeZnpaLipSZmI2PmZyalZiZnqCipqyrrrKxtLS0tbS4t7i5uLa5uLWtsLC0sbG1
-t7a5uLKwsLCwsKuqq6qqqqiqp6mnp6elpqelpaaoqKenrKWmqKain56fqairrKyv
-p6Gak46JiIiMkZSSl5WYmpmXnZyeoKCgpqqpqqmsrauqqqqurK6zsa2tsrSysa6y
-sbGws7K0sa+usrOyr7Ctr6+wrK2rq6qrrLO2t7e1trm4urq/vL28ure5urW0vLq4
-uLi4uLi2rqyvs7e3tLe2s6OFZlxaVFZUVFFPT09MTEtKT01NUVBNTk5OTExNTktM
-Tk5PTUxLS0xOUE5MTk1NS0pNTk1MT1BQTU1MSk1NTE5NTlBRUE5PT05OTU1QTU5M
-S05NTk9PUVJWVVNQTUxNUlFSUVFSUlNUV1dXVldZXV9iZWdpcXN2eXyBhISJkJOS
-k5aZmpqbnJucmpuampmYlpWWmpaSkpKSlI+MiYiFhIB8enl4c3BvbWhmYGFiYV5e
-XGJfWVpbWV5dXVpcZpe+y9LY3eHj5ejo6upMS0xIRkZBPkNLR0VGR0JMRj5ARUJH
-RUJAQDs+PkFGQz07OkBCPj09PEI8OT08Ojw+Ozw6OTo8Pj08Ozo5PDs7Ojs8Pj89
-Oz0+PTw9PDw9QURER0VFRURFRkZAQD5DRUhIREZHSEhGRkVGRUNBR0dIR0dHSUdI
-SEhMTElLTUtRT09QUFNXVU9SVFVUVlJRUVFRVlpbWFRSVFVWU09NU1lUUU1NVlVV
-VE9RT05OTEtNSU9NT1BRVFRTVFFTUE5PTE1NT1NRVVFPUlJYUUxNS05OTU1KS0tH
-S0dFQEBFRENJTU1LQkBAQkRDS1FSVVBQTk5TTlFRVFdTS0tJRkI9OjQ0NDQ0NDQ4
-MjI2NDY1MzQ0NDQzNTY2NTY3Nzk3Mzc6OTs7OTg9QDk3OD09Pj88Pj07Ojk6QEND
-QUBDQktHRERBP0VKSEVKRkhHSUpMTUtHSEVAQUdOU05MTEhGRU9WV1ZUTk9RUk9M
-T09RUk9TVFVOTE1PTlJSU1JSUlFQTktNTlJTU0tPT0xJTFRUVE5MUUxLSUpMTEpH
-TExJRkdCPT5COzs8Ozo2NDQzMjIzMzIwNDQzMjIyMDM0MjAzNzQzNzc6P0dPWVRX
-VFZYV1VVVllXW1pZWlhUUU5PUlhdXFtXU1BPUFJVVVVVSTg3OUFMWFVZUUxMSEpT
-U1BPTU5PVFZSTkxQS0tIQUZJRkRFREA9Ojo5ODcyNDUzMjQyMzU1MzQvNTM1NjU2
-NTQ0MjIxNTU0NDM0Njo7Ojw7Q0FFSlBUW19dXFRPTlJYVlZTU1FOSEVITEtISk1K
-RUJCQ0lJSEpIRUhMUVdfZWxwcXJ1eXt7e35/gYOEhoaGiIeHjJCRj4+UkY6PjYyK
-ioqEg4B6cmtkXlpXVFRdZ2FicYOGfW1XUmF0d21WTFFfbmZVVV5oZmtwdXZubGhp
-gJSVinNwenBiaXFvcIWPinuDkJWUhHN3ipWYj4ePnKeom4+Rl5CHkJ+joZiUn6qo
-l4N/hpWck4J+jZucl5SgqJyIg5GblYeCj5+im42Jk5majISOkY2EgIqQi4qUoKen
-o6ersrOxsbG0ubi0tbW4trSzs7Cvr66ws7W1sq+tsbCrqKinqKempKWmpqalpaWl
-o6OdnJ+gpaWko6Ginp2fnqGipaamqqiknpeTkYqKi5CVl5mZl5ufoJydnp+jqKmn
-qamnqKuvs7Wvrq+wqa+wr7K1trWzsrCxtLKvrbGzs66wsK6qra6wr62sqqinqKqx
-tbq3uLm5urm4ubu/urm2tLe5ury4urq4uba2t7SzsbKytLOws7OuoH9kXVhZUlBP
-T1FQTkxLTUxNTFJSUk9OTk5MTE1NTU1KTk9PTktPTU1OTU5QTVJPTUxLTk9PUVFL
-TEtMTU1QTk5OT1JPUE9OUlJPT1FRT05QT05MTlFRVFVRTU5OTk9NTlBRUVJUVFda
-W19gZGNnam51d3t8g4WChYmLjo+WmZmWmZ2dnpqbnZuYmZmamZmUlZSTj4uJioiD
-g4N8fXp2dHFvampoZmVoZ2NhX19gXl1dX11bV1ZYV1dYWVxqoL7L09nd4OPl5+nq
-6kxKR0hJQkFIRENGRUY+Q0NDQz9CRERBQT4/QT09Oz0+Pjk3OkBCOjs5ODpAPz08
-Pjo8Ojk8Ozs9Pj86OTY5Ojk4OEBAPz08PTs7QD88PD5BPkJFSEdDRkhHSEVERkZG
-RkhHSEdDRUNEQ0RHRkpISEZER0xKTElLSUpLR0lKSUxMSk9SUlFNTlFRVVNRUFFU
-VVFUVVhYVVVYV1hSUlNRV1pXUlFSVVFST01QTE5LTktITU9QUlJXWVZTTlFPTlBR
-Tk1OUlNWU1JUVVlVT0tJTU5NTUlMTUlIRkNEQ0hKSEpKSkpERUdKSUpLVFVZUlJS
-VFpVV1RUVlVQTEpIRkNAOjc1NDU0ODU0NTY0NTY2NDQxNTY1NzU3ODg3OTY4NTk6
-Ozk4ODo7OTk9PDw/Ozs/PT1BOzxBREBARkZIRkhGQkE9REVFQUJEQ0lNUVBPUk1M
-SkFFSExRUVNOSUZJS1FVV1VWVlFVVFRRTk5TU1VVV0xKTlBPUE9TVFJSUU9MTE5Q
-UE9QUE5PTEdFS1RVUlFTTk1PUEhITEtNTUxFRUBAPTw+OTw8Nzg2NTY0NDUyMzg2
-MjEyMjAvMjI0MzQ1Njk8OTk6P0RRVlNUWFlZVlJPUFVYVlRZWlZTT05SVVpbWlpZ
-WFNPT1JSUFZNPTU2QkpRUVlWUVROTVBUV1VOTVRZWVhST1NOSkhISU1HRkNCQjw5
-ODU3NjY2ODQxLi0yMzQyLTE1NTIzMzUzMTU1NTg3NTE1NjY3ODg4OT1DSUhPUlVb
-XVVXWFRYV1dWW1lWVFBIR0pKTktMT0tJSkVFSEpKR0VDSFFUWWFlZmtxc3Z1dnt7
-fH5+goOFhoaHiImOkZKQj5CPjY6Pi4uGhYeEgH5zamdgWVVTVFdXWWl8iYuBbWFv
-e394YU5LVmVlWktQX2FlanN0cG5qaHSLmZODd36EdW17eHR+kZiNhJKhpJl9c3yO
-l5KBeIWbpp+QhoyLgHmDk5uViouYqKiZjpCZpKGPfnuFkpeMjJ6op5mUnaKek4uU
-oqilmpObpamdjY6XkoJ8gIiDeoKXpKGZn6uxr6mlqKyvsK6xs7OysLGwsa+vqKuu
-sK2rqamqqqelo6OioaGhoaCfnp2bmZyenZiYnpydm5uZm5uanJucnZyepKano5+Z
-k4+MioiLkpSbnp6coKekqKeqo6Snqqytr6qtrK+vsrKxsa+vra+xtLaxtbKxsLGu
-rq6xsrGysLCvr66vr6urqaqqqaamq66yvb63vcG+urq6vrq6tq+0t7W3t7e3vLu5
-t7W1tre0srGyrqyrrKqdfGJYV1VVUVBOUE9PTE5PTk5PUU5MTUtNTkxNTkxNTk9O
-Tk5SUU1LTU5QUVBST01OS0xMTk5MS0xLS0tOTU9PT0tNT09QTk5PT09OTk5PUFBQ
-TE5PU1JSU1RSUFJPTlBRU1RWVVhaW2FkZ21uc3F3eoCDhYiKjY2OkJWVmJmZm5qd
-oZ+dmpqYmZWXlJGOkpOQioqGhYGDfHZ1b2xvcm1naWhmY2JiZWNjY2VgX1tgX1pZ
-V1paWVdWWllbW3mjvsrQ2dzh4ubn6OnpTklGREZCQkJCR0Q4PENAQEZEP0E+PT08
-P0hEQDo6Pz09Ozo+PT86OTY5Pzs8Ojk8PDs6PDo5Oz08OTk8Pzw6OTg6PD0/Pz48
-PDo7QD4/OTtCSEVDR0VGSEhGQUZERkZFR0lHR0ZFQ0VEQkRER0VFSUpIR0hJR0dJ
-SktKSVFKSEtMTE9RVFBQUlJTVFFQUFJUUVNcWlxYU1VUVVVTVVNYWVtSSUtRUlBQ
-T1BQUlJQT0pOUlBRVldSUVRNTVBRUVFQTU5SUFJPTFFTV1VQTUhOUVBUUVBSS0dG
-REhESEhJSUhKTUxNTEhLTUtSUVZWUlJVXFdZWFtZUlBMTEdGRj08MzMzNTQ9NDU0
-NzQ0NjMwNDQyMzc0NzU2ODk5OTg6Nzw5NjU6Nzo7Pzw7Ozs8PEBAPTxARENAQTw8
-QEZHRkZHQkVDQ0REQz4/RUxPUU5PTE5NTk1OT1RXWFdLSExMTVJYWltXU01QUVFL
-SlBbXFVPV1RQUE9RUVBNTFBWUFBRUVFOSk1OTlBLQ0NITlBPU1RPSUpPTUlKSktJ
-R0RHQUE/QEA7PDs4ODY0NTMxMzQ2MzM0MzEyMjUwMTE0NDc4Ozo7OTo3OkNKUFRb
-W1lTU1NSVFRXV1ZbWldTUlNRVVZYWV9cV1JOT1BOUEpBPT1BSE5KVlRSVlNPTU9W
-U05RV19kYVZTUlBOUE1QTUZGR0RDQz46Ojk1MzQ1NTUzMjIyMDA1MzEyNTQyMzY0
-MTE2NjY0NDQ1NzY0NDY3PUBHS01QVldYV1NRVVpaWVlaWlVXU0tNSEhGR0hPTUxK
-R0pJR0xPRkJGUFdeYWRqbnF1eHZ5fHl7f4GBgoWJi4iMiIuOjo+Pj4uOjY6RjYqH
-goSEfXJpZF5cV1JWU0tQb32Kin9wbn6KinxoWFReZF9RSFJgYWVxd3Z6eHJ3gI2Y
-jn+Bh4FxdH13cH2OjoeJnqenm4eGkJeYi3x2gpOYi3h3hIV1bn2OkYd3eI6ho5SM
-j5umpJWMho+Yl4yElKSnnpmfpKGUhombpqSblJqnrKmdl5iXjoB/iIZ/gI6VkYaQ
-oqyooJmlrKilo6aqq62tq62uq6mlpKinpaGlo6OfnpycnZ2am52dmJudm5eXl5aT
-l5WYmZmdlpeWl5eWlZmam52dpKGcl5KOjouHhYqQkpacnp+hpKepq6ysraqsrbCw
-rrCwq7Czs7G0s7CvrrKxs7CwsrGws7OxtLGtrrCysa2vraqsrKuqqK2uqaeprbG8
-uLq9v7u5ubu9vLq4tLa3tLSzs7a4u7i1tLa1tLe1tLKzs62tqpt7YFtZVlhXUk5M
-TU1OT05PUlFPTkpITlBNT1BQT01NTk5PTlBPUFFQUFFSU1FRT0xMTk5LTUxPUExN
-TlJPUlFRUVJRUFBUT05PTlFTUlFQTk1OUU9QT09QUlRVU1VWVlZZWlhdX2ZpbnBy
-dn2BgICEgoSLkZCQlZKWmJmbnJqZm5ydm5qal5WQk5GRi4iIioWBfnp0dHVzb2x0
-a2lna2ZiYGBjYWVmZGBhYV5eXl9cW1tbWVhYVlVVV1ZacY67yNDX3eDj5Ofo6elS
-UUpKRENDR0NAPzk5PT0+QkxIQ0JCRURCRkM+PDxBPjw9QUE9QTw9Pjw+Pjs3ODw6
-PTw8ODo7PDw8Pz08Pj05NTo7PT4+Pzo6Ozs9QD4/PTxDR0hGR0NDREFEQEBCRkRF
-RkZHRUVGRklGSEdFR0hGR0ZIRkVJSEdJSkpGSEpKS0pKS05QUFFQT1ZZVFVRUFFQ
-UFdaWVVQU1FPU1RUVFhdXVdSUFBPTlFOUU5PVldXVFVWU1ZTU1VVVkxMT09QT0tL
-SU1QVVRPUVJVV1RPTU1SUFJST1BKSUtFSkZGSURGRUtSUktJSEhISE1RV1ZPUFBa
-WlZXWllPSU5MSUhIQzo3MzMzMzQzNjc4NTU3ODYzOTc1ODo7NjM0Nzg1NjU2NzY2
-NDU6PDw6OTc3ODg5Ojo8PEBFRkJBPT08Pz9CR0NEQ0REP0BEQURFR0tJTE1LSk5Q
-TUZHTVRbXFBMTU1SU1lXVlVXU1FQT09PVlteV09VV1RRUlNPTkpKTlRYWVZTVFBP
-TkxLTktIREdKT05QVE9JSUtLTkxISkZHRUhDPjw/Pjo5ODo2Nzc2NjQ0NDQ1NTEw
-MjM0MzQ2ODc7Ojk6NjczNjM2OEJLU1hcWVRUV1VSUllaXVtgXVlVWFVSUVNXW1hX
-UU1NT09PT0Q7ODxETE9TT05VWVFNTlRQTlNeXFpaXFdTUU9QT09PTERFRz06Ojs9
-ODU3MTMzNDU0NTg4ODYxMTUzMjAxNjg1MjEyMzMzMjI1NTM3OTw6QUZSU05RVVlY
-VlNYWVlYWFxeXlhRS0lIREZHSkpLTkxMTUpKTE5LR0dNV11hZmpuc3V4enl6fn5/
-gYCEhoeIiIeIiYyLjI6Qjo2RjYuNi4mFg4B7dGxkXldVUlNQTE1ecn19c2t0hYyK
-e2xiZW9uWktLWWZud4F/foJ7dXyFhoh5c31/b2RyeWxneoN7cYWcpqGYk5ijpp6K
-eneIlpJ+cXuHhXVxgpCPgnR2jJqaiYCImqqnmY6UnaWimIqSnqCVj5SXlot7e42f
-oZaGiJqlp6Gdo6agkImQk42KiouCd3ySo6GQi5ajn5mao6WioKOnpqWkp6Ohnpye
-n6GenJaXlpeXlZWUlpKTkpSVkpOUkZKRj4+SlZaXmJWSl5SUlZWVmJufmpeSjIqJ
-i4mLio6UlpSVnZ2gpaenramrq62vrrC1trCuq7K1uLi3tbWztri4tLWzsrOysrS1
-srCwsLKysrCvr6+wrauqqaeqqKu0uba7u7q7wMK+vLm7u7u7tLS1t7a3t7a3uLi1
-s7G2t7q2s7KzsrCvn3hgW1dWU1JTUE9PT1JTUU9PTU5NS01NTUtRT0xMT1BMTVBO
-TVBRTU1TT09QUFBQTk5PSk1OUU9PUE5QUFBOTlJTU1BQT09OUE1PTlBQT1BSUVRT
-UlBRUVFTUlVXWlxdXmBjZmhsc3R5fX6BiouMjo6Ni4+UmJ6cnJuZl5qcmpibm5ma
-mJaUkI+MioeGgX55eHFwbm1qaGZmZmVmZWVlZ2BfX2JfYGVjYF5hYFpaXFpdW1tb
-WVtXWVhVVldplb7J0djc4OPl6Onp6kpKR05IST1EQUBBREE8OUNCPkNHRT9CQ0ZD
-Pz9BPkJCRENBPT47O0BBQz09Pzg6Oz48Oz1APTw8PkE+Oj09PTs7PDw8PD47OzxB
-QUE9PkA9QkNERkhFQ0JCSEhEQkNKSkhISEZFRkZHSkhISkhGQ0RFREdHRkhGREZJ
-SklHSUpJS0dHSkxOT09SUlZRUU9MT09PVFVRUUpOTlFVVVZWV1paWlVPU1FRUFJT
-UlFZWltWUlRWVlVXW1dTUExNTU1QTEhHSU5PVVJPTFFXVE9MTExKS1BNT0pISUhK
-SENISkdISU9RTk1KSUpESEhQWVldWFVaVlhZVFBNT0tIR0U/OzM1MTQ1NDAyMjY2
-NjQyMjI1Njc3ODg3Ozk1NDQ0Nzg4OjY0ODo5Ojo7Ozk3PDo8OTo7Pj08PDs7Ozo8
-QUNDQT9AQkQ/QkNGQ0RDRENFSEdJSktNR0dKTlRXTk5JTlJWUlJWVlhVT05PTEpL
-TVJTU1ZWVFJPT05OT0hNVVdXV1JTVFBNTUxMTEhIR0pPTkxNUExRTk5MS0hKRkhF
-RUBARUI+Ozk5OTk7OTs3NDM2NDY1NjYxMzU2ODo5Nzg2NTQ0NTU0MzE0OEFMUVZW
-U1RYW1dXW1xeW15gXFtYVlVRUVBTU1FNTlBSVlNQRzw3PERHR1BOTlZUUUxLTUtN
-T1BPVFtVUExQUE9KTVBQR0ZDQUA5Oj48Ozw3MzEzMzIxMzIzNDEyMjAwNDY1NTMy
-MzMyMjU2NjM1MDY5Oj0+RU9STUxNUlVWVlRUVVVZW11cVVJQTkxBQUZFRUdJSE5R
-TUtOTEtFSU1UW15gZ2xxcXJ2eX2Af4CDgYKFh4mKi4uLjoqMjo2PiYyNiYiHhYOB
-f3p0bGReVlNWWFBKUmNvb2ZYWm+DiYNzZml0e3VfT1ZlbXR+e3d/d3J1fYaJe2hs
-eXRhY3NxXmFzdWdmgJSckYyRnqisppqRjJeclX51hI6JenqPmJaFen+QmZOGeYCV
-oJ+RipKhqqabkpignIl+g5GTh3l6iJmek4J+jJmgmJOeq6mclZ2lo5eMhXtxd4qa
-m4yBiZWZk5WenZGQlpmYmp2enpiVk5STlZWTj4+Qjo2LhoOGhoaFh4qIiYiIioiK
-jIyMkI+QkI6Nj5STkZGWmZiamJKNjI2PjY2OkZeUlJeZmpqcnaCkpKWmrLGvrLGy
-srC1ubu6uLW1tLS3u728uLOzsrO1s7W4ubq2tLOzs7W1sbCsrqysq6moqrK0sLS6
-u7y6u7i6tri3t7Wwr7S2t7W5trW5u7i2tbS0tbaysLKztKuVb15ZVVRTUU9RUVBP
-TE5QT05OTUxJTExNTk1OTVBOT1BPTU1MT1BPTU5OTk1QTVhPSk5OUFBQT1RMTk5N
-TVFQTk5OUFBNT09OUlBKUE9TUVFRUFNSU1RRUVJXWVxfYWVpbnJ0d36BgoWHiouN
-kZSTkZWUlZucn6GhnZqalpqamZeWlZSUj4uKhYSCendzcW9tamlmZmRjYGFjZmJi
-Y2JiZWNiYWBdYWNjYGFhX11bWFpaW15cV1dYWVxZV2CUvsrS2N3h4+Xn6OrqSklF
-RkhEPDg4NzhAQD89P0dEQD1AQUJDQz4/QDxGRUVDPjo+QT87PT0+P0NAPTk2Nzs9
-PEBBPT5CQkJCQj08Ozc9PDw8PDo+PTw9QkRDQ0NBQUVISElEQEBFREVLREVHSUVI
-RkZFR0VDRkhHRkZIRkdHREZFRkdHSEhKSkdISUtJSUpJS0tMUFBQUE9OTU9OTk5T
-VFRPTE1SUFZXWFtiWFlbV1FQU1JSUVRWVlZWVFJQU1ZZWFNSUE1OTVFPTU9NS0pJ
-TU5UVlFMTVRXVFBKSkVIUlBSUUhKS05MREhMR0JBRkxJTEtISkZGSlBhXlpVT1dW
-VlNXU1BOSkZFQTs5NTI0NDU1NTMzNDIzMzM2NTYzNTc1Njc5ODk7NjE3ODU1NTo5
-OTo4Nzo+P0A6Ojo6Oz0/Pj0+PTo5PEJDRUNAQEJBQUA+QkFDRUVFRUNDRkZKTE1N
-S0tNUUxHR0VHUFFTUFZWVFRRUVRNSkxMTlBOUVFQUE1OT0pISUpPU1NYWVNTUU1O
-UVBMSUlMTk9KREdNT1BPS0lIRkdJR0dMSUhDQj49OTc8PDw5OTY2MzQzMjAzNjc1
-NDg4Njk2MzI0MjU2NDM0NTU2OEVOUlJTVFlaWVlbWVlcX15hW1ZXVlRVVVJPT1BQ
-U1dSVFRNQjpBREZHTldYWVBTTVBNS0pSV1JRTU1KSUlNTlZQTk5NTkRFRkE7ODo8
-NjU6NDY4NDMzMDQ1NjIyMTAyNDUyMjU0NTk3MjU1NTU3NjY3OD9FSE5RUU9PUVVU
-UlZZVVZXWFdWTlVNS0pGQkNHSEdFRExJSEpHREVLT1ddYWZobGxwdXd4fX+ChYaC
-hIWIiIiIi46NjI2Nj46NjYyLjouGg4B8d3JqYVpXUlNVU1ZebHJqV0xUant/cV9f
-anp/eGBXYGhueXlxdG9paXF4hIFuZXB2a11mdGtaZnZxYWyGkJGGeYOXpaSknJmh
-paOZhYGPmo+CiJuknZOMkJyimId1fo+ZmYiAiZujoZKToaegjX2Hl6CYjYaPnp6a
-hYCLmJyTh42fppyZnKeqnpOHe3R6i5mXi3p9jpCKipWbjIiLjIyPkI6RjouKiYiI
-hIODhYeHg4CAfH6AgYV8fH99fn5/goOChIaGhoWDg4WFhoiMkJKUkpWSkZOSjI2P
-ioqOkpCTlZOUl5WYmpuepqemqa6vrq2wsra3ubm6sra1srO2uri2tbW5uLq3tbW3
-tra0tLO1trKwsrOtq6yqqaqprbO2s7e1uLq3ubm3tLi6t7Gtr7C0srS0tbm8tra3
-tLO1t7ayr6+xq5FtXFdUVFVUUE5OT05KSUxMSkpOS05MTEtJSUtQUFFOTk9OUE9R
-T05MTk1NS0xNTk1PT1BPT1BQUExKS09SUVFPTU9PTlRSTUtQT1VQTVFQUFBRU1RU
-V1VVVlphZmlucnV7f4KGiImLj5CRkpGXl5eamZqYmZ2fop+bnpmWlpSRj46PiYV/
-fXt8eHNub21pZGVlYWFkY2RhYGJjY19eXmBkaGRhYWJgX2BgX2FhX11bWlxcXFpY
-WFlbWFpfYIq7ytLY3eDj5ufo6upISEhISUI3NTExMzhBQ0RCR0JEQT89QkZDP0BD
-P0JHRT9CPUA+PT05Njg6Ojw8Ojc2Ojg8PT4/QT4+QD8+PUJDPTw6O0A/Pjw6OT9A
-Q0RBQkFERUlISEdEREZKRkNBQ0RER0ZGREREQkdFQ0VHSEpKSUZJSUdHSEhGRklK
-R0hJS01OS0tISUpOUVBNTU1NTEpNT1BSVFNQUFFRUlRTUlNRV1dST1JSUVNST1JN
-UFBOUE9UV1VYU01KSk9QUlNRUVFQTUpOUFZWUE5PUldXVU1IR0tSUVJSSUdLSUlJ
-R0hER0dDSEVGSUdLSUlMTltbWVZSVVZWUVRVUEhHRkVEPzw5NTMzMjI1NDU0NTI1
-NDQ0NTY3Njc1NDRGNzUxMzU3ODg3NDc5PDc2ODs5Ozg2Nzk4OTs+QD85Nzw8QURF
-RUNDQkVGRUhFREVFRURGQUNBREdNSk5MS0tNS0dIR0hLTlFMTFFTUU5SVFBTUVBP
-TlBPTk1OTUxJSkdKTk9PVFhZVFJOUVNTU1NPTE9NSkpJR0pPT09OSUVDRkdKRkhH
-SUZBQD49PEE/Pjs4ODQ2NjU0NTY1OTg5Njc3MzQwLzIxMjM1NDUzNjY2QUlNUFFU
-WltaWlhZWFtdXmFhXFhUUVFUVVRUVVtYVFVWVU5BOEJJRkRPXFdUUFJUV1RRUVZc
-VlJOUE5KSk5QUldOS0tGQkJBPjs6NTUzNDU4OTUzMTU0MDE0NDQzNTIyMzM3NDIw
-MjUzMjI1Njc3NTQ4PkRKTlBRUlFQTlBHUVhZVlZZWFFKR0hMS0tKRkdLR0VNSEdH
-REJBRk1TV1liZmlqbnJ1d3d4fYCDhYWJh4mHh4iLjZCSj46Qj5GRj4+Nh4SAfHp3
-c2leWldVVlldbnt8d2ZRTldrc2xbT1FhcnVqXF5jam9xamxxX1xoc4SNhW9wfnlk
-XWx1aFxrenFmdo2XjXxyf5SdnZSPmKawqZmHipagkYKLmaOfk4ySoaumk4qFlZuU
-hHeClaCZjIiZqqmZipCeo6KZkZSiq6OVjJWgo5yKh5ahnJOYpqmhlZCFeoOUnpuN
-fXyLkIuDiI2FgoWIh4WChIOAf4B+fHp2dHd6d3Z1dXVzc3R1bm9xcXByc3R1dXd6
-eHp8e31+e3uAgoaLjY2Nj4uQkZGNiYeHiYqNjJCTk5aVmJydnJydoKKjo6isrK6x
-sri4uLW0s7S4tri0traztLW2t7m0s7W3ubi3t7Ozs7OytrOwrKuqqaerr7KxtLS2
-t7m7uLe1u7m2tLOwrbC1tLa3sra4t7i4tra3s7Sysq6mjmpeW1VUU09MTExMTk5M
-TE1QTU5NTEpKSklNTExMTk9NTVBNTUxPTE5LTU1OUE9RUFBQUFBQTU1OTk9QUFBP
-Sk1PTE1NTlJRUVBTU1FTUFFRUFNUVVhZWl5jZ2tudXt8gIOCi4qJjI6SkZaZlpWb
-mpqZnJiYlZqamZeWlI+Nj4qHh4B9eXlxdG9uamplY2RjYmNmYGFjY2RiX19hYmNf
-YFxfYFpeX1xdXVxfYGBdXV5eWllXWVpbV1dYWl5iirzJ09rd4ePm6Ojp60lFR0ZF
-PDU0Nzg3PkI+Q0M7PEJDQT4+QT1CPUFEQ0E+PD06OT8/Qzo7PDw7O0BEPzw4Ojo7
-PUE+Ozw8Ojg6PTtBPkE/QD4/PD46PkFEQURERUlHQ0lKSk1GREhGRUNBRUZFSEhH
-Q0VHRkZBREZJSEdHSUhIR0dHREZFRkdHRUpKS0xNSkhLSUpNTEtOTUpIR0pNUVhU
-VlFQUVJTU1ZWTk5NT1FNTk5LTU5PUVBSUE1NUVBSV1VQTVFQUFBRU1NSUlFRTEtP
-UVZWWFBQVlZVT0dGR05RUU9LTUpFRUNFSEZGRURHRENHS01MTVFPVFhZW1dTVFNR
-VlFOSk1JRkRDPjs1NTMxMzE1ODAxMzM0NTc3NzU3NjY1NjQ0MTMzNTc5Njg4ODY2
-NDU3ODg7PDs2Nzg7PT89Ozs5Ojk1Oz09P0FDREVES0ZCRUdGRURKSkZGR0lIR0hJ
-RkZHS0lKSk5QVU9PUVBRTk1PU1RVVldXVVJRTk9OUE9NSkhPUFJUUlBNTkxRVlZV
-VVFLTEZHSUlOU09PT01GREVFR0pJRkdDRkNFQ0FAQEM8OjY1NjM1NTY2NDk5NTMz
-NjMzNDUzMS8vNDQ1NDQ3NzY3QEZOUlVZW1hXVlRVXl5cXlpXVVVUU1hTVFRZYWFW
-VFZSU0Y+R1BIRU9fVlNNS1BWU1RQUk5VU1FQTEdHSkhOU0tISEg/QUE9Ozo9ODg2
-NzY3NDQ0NDY2MzQ3ODUyMjQzNjQwMC4xMTEzMDU4OjU2ODs6QEhLTVBRUE9SS01Q
-VFhYW11aVk5DREZDRUVGR01KSUxJSEhHREJESlJZXGFjZ2ttcHN3eXd5gH+AgoSK
-iomGiIeKiYuQjJCQk5GLi4uIhISBgHtwZF5aVVRTWGV8j5CGb1xcaG5uYFBLTlxr
-ZlZSXWRsdGxrcWZaX2Z1jJWKe4CHgWxte31rZnh9c3GFlpuSfHaIlZmSgnmGm6mn
-kYCJlZOHeIKUmJOHhpOiqaWYjpGeopmGfYiWmZaFgZKjqZmLi5ejo5qRlaWtqJ+Z
-n6qwqp2Tm5+Xh4eXnZeRkIl/gZSem4+EgYyRjoaAfnZ0dnx5dXNycHFwcHFvcG1t
-b21raWpoZmhqZ2ZjY2RjZWhjZWdra2tta25xdnJ0dnl8f3+AgYGFiIWEhIWHhoaJ
-jIqKjZGSkJKWlZaUlpqZnJ6eoaKjqKyvrrS0s7Kztru3tLSxsbK0srGxtLW0uLu6
-ure5uLa1ubS1uLWxrKuoqa2xsbKysrO2uri3tbK0tLW1sLOusLGwsbe1sry5t7e2
-tLOytrevrqSKaFxXV1ZXUlNPSk1QTE1LTk5NSk1LSEpMTUlLS1BPUFBQTkxMUE9P
-T1BPUFNRUlBQT1FQT05MTU5RTlBPTE9SUU9MS0xPTlFRUVJUVFJRUVJYVVhaX2Jo
-am90eHp8gH+EioqNkI6SkpWXmpiXl5aampiZnJWUkpiUjouKh4mGg312dnNub3Br
-aGdkYmBfX19eX2JhYGFiYWNgX2JgYF1dXV5gXllcXV5dWl1dXVxeXVpaWVtZVldX
-WFxaX2SJusnS2t7h5OXm5+rrR0dJSUhHPTg0Njk9PkdGRUI+QT5APT05Pz09Pj4/
-Pj47PEFAQkFAPkE9Pj06Pj8/PTw8PDw7PDo5ODc5Nzg7Pjo7Pj48Oj8/QUJBQ0VC
-QkJFRkZFR1BSR0NDQ0lHR0lGRUlISEtIRUZJRkdDRUZHSEdISEdJR0ZFREZISEhH
-RkhJR0pKSEtLSk1OT01NTUtMT1NXWFNRTkxOT1BSUlJRUE5PTUxMTU1PUVBQUlRU
-T1BOTE5SUFFSUlFNT05TU1BQTlJQTlFTV1ZTT0tSU1BPSkdGTU5LTE5MSkZKSEhK
-R0lNSklOSktMTlFMUE1MTVBTVFpVUU5RTU5NSkhHQ0A8PDc1NTY1NTIzMzE0NjY2
-NjY4PTk2MzQzMDEyNDU1ODo1NTQ1ODc4Nzc4OTk9OTc3Nzg9Pj49PD4+PT49PkJA
-QD4/QENGQkE7Q0lJR0hJSEdJSEZDRUVCSERESEdJS01MS0tOVVVPT1JSU1VbW1pY
-VVhVUlBLTVBOTVFXUU1LTUtNTVFUVVNUUEhFSExKTE9TUk5NT01JTFBMS0pHQ0JB
-Qj5CSkRDOzo7Nzc7OTc1NjY3NDkzMTI1NTEzOTQyMzAxNDU1Nzo0NDU5QUhPUFNY
-WVJSUFRZWltaWVhXVVRYWVhXV11eWlRRUlhWTUJKU0tHTltXVVRTVllVUU1KT1hQ
-SUxKSkZJR0pQT0pHR0dDQEA7Ozs6Nzg0NjU1NDQ0NjMyMTE0MzAzNDIxLzQxMTc3
-NjQ4MjMzODg6OzxARUhLUFJOT01NUFBUVlhfYmBbTkhFSEdFSUVHTU1NT0lJTEhI
-RUJKUVZaXmBna21wb3J2eHt8fYCEhoeJh4yKjI2Nj42Oj5OOjI6LjIqChIB9dGtj
-XlZUT09cbYOQkYd2b3mAfnVgUFJYYmZYUFZfaHV1c3l9bWpzcnuOkYN+h4h5bnOE
-g3Bten1zd4+eo5eJiZScmY16c4OXn5h+dIWSjXlsdYqUiXd2h5ukoZCOmKaroJOQ
-k56fl4mAjZubjn57iZqZj4aLnKennpeeqayooZ6ipp+LhZOdlYuPioCClJqYkYGC
-h4iNg3p5dmttbWtmZGJmZWRjYmBhYGRhX1xcXV1eWlpbWlpWWF1bW11eXV1eXmBf
-XmRnZmhpbnJydHZ5e3p+eXmAgoGAgoeIiY6MjYuJj46Pk5aWlpmXmJucnqKjqayt
-sLCurq+wtLGvrquura2usK+wsK6xs7aytbS1tbe4tLS5srCsqaerrLGvsq+0sbO1
-s66sqKmutrm3s6imq7Cysq2qsbm2tbW0srG0s6+onoRlVlZWVFJQUU1PTExOTk9M
-T0tJS01KR0hLTEtLTE1NTExKS01PTk9RTU5LTk9PTk9NT1BOTE9PT01LTExOTU5N
-T05OT1BPT1BTUlVUVFJUWFxdYWRqbnN1enuAhomIi5KOjZCTlZWVmZqYm5eTkpSU
-kpOTko2LjIyGhYN+e3d1cW1sa2hkZWZjYmFiYWFiYV9dXWFgYGFiYV9fYGFgX15e
-Xl9eX15gXVxdW1tcW1pcWlpbWVhXWFdVWV5cYY26ydPY3eHj5ujo6+pPTkpNS0VA
-PTg1Oj5EQUNGQkFAQkhJQUBEQTs5PT09QkREQUBAQkBAPT05PT08PTw9QT46PDs5
-PD09PTo6Ojs8Pjw6Ozo9Pj8+QT1BQUFCPkBCQkVGRkZGQ0JJRkRFR0hHRkhHSEhF
-RkZIRUZGSEpHR0RERkdHRkNDREdIRkRGR0VHRkdISUxLTk1NTk9OUExRVFBQUExJ
-SU5MTlFSUlBVVFNTUE1OT1RVUVFPTlBSUVJOTk5LT05PUE9RTlZQT09PTExMUFVc
-XVRVTlFQUFJNTEpNTktLSkpKSlBSTE1KSk5OUVBNTU1OU05QUlBaUlNRUlRSUlFT
-U0tJR0NCQD86NzQzMzs5NTM1MzE1NDQzMjUyMDQzNDQ2NDc0NTg1Nzc0NzY1Njc5
-QDo2Ojk3Nzk9Ojw6ODk5QUA+Q0FBQkA+QDxAPkJERUNCREZGRUVERkZFQEJIS0pI
-SUZIRkZKTUpJSEtRU1FTU1ZYVVlZWFtXWFNRTU1OTUxPVFtRTEtJSUhKTk9QUFRS
-U01LTlBQU1hVVFVTUU9QVE9MTEdCREVERENBQkA+PkA9Ojw7ODY1NDMxMzU0MzQ1
-ODM0NDIyNDQ4NzczMTUzNDU3PUROVFZYU1FZVVdYWllXU1VXXFxcXV5eXl5cVVNR
-UFRUTlJSSElIU1NWV1RQT01NS01PW1ZTT09NTExQSEhMS0lGPjo9QD89ODk9Nzg0
-ODQyMzQwLzQyNDY3OzU0MzE1NTIvMDQyNTczMjc3OTc5OjtAR01NTExOS05OUFVU
-VFdbXFtTTElLTEhLSkhLT1FRSkhPTklGSktRU1heX2ZpbXBwcXJ2e3+AgoWFhIeK
-jIyQkZKRk46Njo6OjIuMi4WFgX12bGBaWVJLSVBofo2LfHV4h5KOfmdeYGVrY1FN
-W21+f3R4fnJqdXhrd4qJdniEhXJodIN9b299fnN3j6CglYuSnqWgjnx6ipGZiHN1
-ho+JcWt+jJCGdXGBkJmUgYOTpKWglJGap6qilIuSmZeNfn2ImJ+WgoGRnp+YkZOd
-qaainKOurJ6UmKOil4x/cXaCjouFeHV/hoB2cXFxamRkX1tcXFxdXFtbXVtbXF1e
-WlZXVlZYV1ZWVVZVVFRWW1pWWFdbW19cWlthXmFhZWhpaG1ub21ucHR4eXp7fICF
-hYaGiIiIjI6KjJWVlJWWl5mUnaCipaWnq62sqqasrKmrqaioq6usrKupqqysrK2t
-rrKzsrOzs7O0r66noaOprqunpq63ubSroJeUm6WtsrGqo6GnqqunpqWmrrKysrS0
-ra+1sqyffV9WVFNRUVJPT01KTk1NSklLTU1KSUhLS0tMTUxMTU5MTUxNUExOUVFQ
-T09OT1FPTVFOT09PTk1PUlBMT09OT05NTU9RUFBSUlFRVFNUVlpfY2Zqc3d5gIKC
-iYuKj5CSlZaXl5mYlJiamJSWk5GSk5OQjYqKhoaEfnl1eHZ0b2hoZWNjYWNgX2Fh
-YGFeYF5gXV9fXV1eXV5gYV5eXl5eYV5aXFtdW1xdXV1bW1lZWVdYWlpbW1xdWllZ
-W11fibzJ0tje4eXm6enp6k1LSklMTURFQUJCR0dDOzw+QUZHSEZHPUA8OTc2PD0/
-QUA+Pz5BQEJBPj87PDw9PEBBPj09ODg5PD4+OkA/QDw7Pzw8QEBDQkFBQUJDQj8/
-P0BFREVGRkVIQ0ZGQ0dIR0dISEdHRkVFRkZJSkdGSkZDRURDRkJGRURKSEdFRUdG
-RUhGSUlJSkpJTExLSk5MSlBST05QUVBOTlNPUVJPTVNZWVtZUFBRUFRSUU9OT0xN
-T1BUVE9SVFBRUVNRUk9OTlJQTk1UWVtcV1JPT1NPVFBMTktOTE5NSU5NUVdQTElK
-TExJS0pOS0tPTFBUU1RVVU9VWFZST1dVTUlHR0dDPjc3NTQyODUyMzIzNTM1NTU0
-NDQ3ODkzNjU1MzcyOTw3Nzc1NDQ0NDg9OTw6ODc5QEA+QDs8Ozs+QjpARkFAPT9C
-REFBQUNBQD5BR0RCQkRDQUFCRklLUEpFRUVGSE9PS01KTVFQUFBQT1NUV1RVWFRQ
-TUxGSk9NT0xMUE9MSU1PTExPTlBTVFNSVVRUVFJVVFJPUlNQVlZXUk9QSUVDSUVE
-QUNCPz0+Pj4/PTg5Ni8xNTQyNTQzMzU4ODU2Nzc3LzExLzU1MzQ0MzY+PkVSW1hS
-TVRaWVdXVVJTVFpbXlxcXFtbXFxXVlVSU1ZRT05ISUhOUVtaT0tPTExJTE1TUFJK
-SEdJTExDQ0ZJSktEQURCOz07PT84NTYzMzIzMjQ3MzYzNDY0NzYyNDIzMzIwMzU2
-NTs2Nzc4Njc4NzpBR0tNTkxOTFBRU1ZXV1VXU0tMT0xKSEdHR0hPVE5JTFVRR0BH
-S1FVWmFoamtub3BwdXV9gIODhoaHi4qJjI2NjpCQlpiUkYyKjIqJh4N+enZqYVtV
-UEpGTWZ4enJnbXuJkJCBc210fHloWF1ne4F0dHp0ZGl1cGV1fHBhbXl1YVxveW5j
-Z3BtY22Jl5eJiJmqrKaWioyTlpKAcXqLk4VxcoiXloh3dYORl415d4mcn5yPi5yl
-qaSXkJSjp52Pi5WmqJ+Mg42YmpKEh5Seo5yVnamtpp6fqKihkX1pb36IiHVraW92
-cGlnamxjYF5bXVxYWlpYWFZXWVdXWVhcWFJTV1hZWFZSVVVWWVhUVVVYWFlYWV5Z
-WFNVW19dXV9cYGFfYGdmZWdsbG1ydnl5e36AgIOIioqKjpGUkpSUk5aVmZudnaGo
-qaelp6qpp6qpqqysrayrrKinqKemqamoqa2ssLGtr7CvraempKmnn5WYo7G1saWc
-mZqip6KenqKiqKqpoJqcp7CvqqKkrLOwsK2uqpp1X1hUVVNSUVBRT05QUVJMTElK
-TElNTEtMSUlLTFFPTE5NT0xOT1FQUE9OT01NTk9QT1FQT0tMTU1PUE5NTlFPUFBO
-UE9OUFNVVlRWV15kZmpvcnZ8gISHiY2OlJmYmZqam5mZl5mamZuVkpCQkJCOjYuH
-hIOAfXp3dXFtbWhlYmNgXl9gX1tcXmBfYGBeWlxeXmFfXl1dYWViYWJgXl9gYFxc
-XFlcW15cXVpbV1hYWVlYWFpaVltcW1hXVl2HvMnS2N3g5Ofo6OnqSEhEREZFQERK
-SURCPDk/QEA9PUFBQkE+OTs6ODc4PD8+Pjw7Ozo9PUA6Ozw6PEA9PT5BQEA9Ojg5
-Ozs/QT0+Pjw5Ojo6Oj09PT8/QEBCPj0+QD9IRkZEREVGRkdIRURHR0hFQ0RIR0hH
-REVHSUZIRkNCRkhFR0ZGSElGRkVDR0VFRUdHSklISEZHSkxMSkxNTE1MTlBPUVBT
-UlBUUE1QU1RTUFFNTE5OUlJRUEtPUExOUlNUUlNRUFNUU1FLT1JQUU9PUVNYVlZU
-T05PUVJSUE5OS05QTE5NT1FPUk9LSUdKSklLS0xMTFJOT1JLUFFTVldaVldWWVVQ
-TUlIRUE7Ojg0MzU2NDAzNjUyNDU3NjQxMzY1ODQwMDA1NzY5Pjs5ODc4Nzc5ODc1
-PDk7Ojk7PD08Ojo8PT5CQDxCRkE/PT5DQ0JIQkVEQUJDQUBBQ0FEQkdLTE1QUEtI
-SElISUpLSkxSU1JQVFBOTlJUVFBST09NTUhLTk9OUlNRUU1KTlBNT09NUVNXWFZZ
-VlJUVFFQT09OUU5OUFVWUU9LSERHSkhEQEBFQD08OTo5ODk2ODQzMTEzMjg1NDk5
-NzQ0MzM2NS4xMjQyMTEzNTM3QkxUUlRVVVVXV1hSU1dcXmFjX1hUWVZVWFhaVlVU
-VVRUUExKSFJVWVJQTlJWVVBNTU5MR0dHRURDQUNDQktKTUlFQUBBPDk6Ozs5NTU3
-MjI5NjYyMzMwNDY1MzY2NDQ1NDIzNzw9PD48Nzo4NTU4O0BES01MSktPUlJTVlpc
-WFNQS0xSTkdFRERHS0xQTUlOTUxJRkNMT1ZcYWVpam1ucXV4enyBhIGFiYiJio2M
-jo+Pi4yPkY+Ni4uNioiFgHp8d2pkXllRTEpabXVuW1RhdoSMhXZxdYGEgW1hYWd0
-cWdzcV1XZWxiZ3l6Zl1teW5bWGxzZFBdaWFcbIOMg3Z6kqOqpJmWmJ6fmYyEjJeZ
-jX+Dj5qcjYSDjZqai3l2hJWYjX+CkqClnpKMmKWso5mVn6yspJaRkZiekIGEkJ2e
-koSLnKqpm5qorKqYgG5vfYR/bmRia2lnYVxeXlxaW1hZVlhWVVhVVVJTVlZSU1VU
-VlVUVFVVVFVSV1ZVWFRSUVNUV1ZYWVhXWlpYWFlZWl1aWVpbXV5eXmBfYWRrbW9w
-c3Z5fH+AhYaEhYuRkpCPk5WZmpmampyhoqWkqaqmqaaprK6sra6uraysraqrrKqs
-qaasrq6rr66tpqWnraaTj5Wiq6afl5ykrLGwppCGjp+qsqqWhY+ps7CgipCnsqyp
-p6mml3FdWVZRUlJST05OT1FRT05KSEtNS0xPTU5NSk1MTU1NTEpNTU1PTU5OTU5P
-TEtLTk9PTlBQUU5LS0xMT0pLTlJOT1JOTU1SU1VYXmFjZ25zdnyBhoaIjpKTkpST
-lZqenZydm52al5aSkY+PjZCNh4KCgoB8eHRxbGpqaGJfYGBgX11dXVteYF5eW19h
-X1xeXF1eYGBeX2FlYmJiZGJeYF9dXV1gXF1gXllYXF5cW1tdXF5aVVhZVFZXWFpV
-XIq5x9LZ3eHj5ejo6utKTUpGREBDR0NGSERHPkFDQj0/PTo4PkE9Ozw+PT48QUBB
-QUE9O0NDPjg6Ozs8OT48O0A9OT07Ojw4PTw9PT0+PDw7PUA+PkJBQj87PUBAQj1C
-Q0RDREZFQ0NDRURDRUdKRkRCRkZIRkdGR0VGREhKRkNERUZKSElKS0dHRUZDRUNF
-REVIRkxFRUxLSktJSUxSUUpMTkpMUFBNTk1MS05QUFJLSUlJTEtSVFNTTU5OTEtN
-TE5SU1JMUE1PT1BRU1RUUlBPUVRXV1dRTk5SU1JQT1BQT1BRUFBNUU9RT0tNS0tJ
-Sk1MS1FPUlJRU1NRUVRVUlVSU1FTUlFPTkpKRUM9PTk3NzM1NjM0NTQ0NDM2NTQ1
-NDEyMzUzNDUzNTo2ODc6PTU6Ozs3NDg3OTs6Ojk6PDw8Ozw7OTo9QUBEQ0A9PD9D
-R0ZGR0tDQkBDQUJCQkFESlBQUVRUTktIS0hJSUpNUFFUVVJQSktTUldVUUxLSktQ
-Tk1RUE9PU1FQU01MTExMTk9QTVBNUFRVUVFUU1BNT01PT1NQS01QT0xGRklISUVD
-QkVBPj89NjU1Nzo6NjMyMjMxNjs3PD1IUD0xMzQwNTYuMjQyMTIzNzhAQktOUlVS
-U1RTVVRWW15fX19cVlZYVlRTV1ZTVFJSVFVTTkxLUk9MS01MUU5STlBMTktJSEdF
-RkZBR0ZBRUM/QUFAQEU8Nzo5Nzc0Njc3MzM3Nzc2NDMzMzUzMjY2NDUzNT9AQUBA
-PTs8ODg5OTo5P0hMTUtKS09VVFRXWVdUVFJNTE1MSUNFR01QTE5IR0tJSUZDQ0pP
-V1xgZGZsb3B0dnp8fIGEhYiJiYuKi4uLjI2Pj4+QkI+Njo6Khn+AfnpzaV9WUU5P
-Vm13dGNTWGN2enhoXWRxf4F1ZF5damlgZ25jU1lnY1ptf3ljY3N7a1pbbnBgUl1s
-YFprgYl5bHSKmp6XjZCXo6aai4mRm5uQgIOSm52Oi5KdpqSVhIGLl5eLd3mGlJyV
-hIGQoKylm5Sdqq6qnpmfqKefioGNmZyShIOTo6GUjJmnpJaAdHN9h4BxYF5kZWNd
-WlpZV1ZXV1ZWVlNTU1NRUVNTVFVRUVFTVFVVVFJTWFdVVFNSVFRUVVdWVlZVVVhY
-VFRVVVRXV1NVV1ZYV1dZXF5aXGFiYGZnbG5xc3d7e36Gg4eLjY2MkJKRk5STlpib
-n6iio6OjoqKmqaaqq66tr6+trrCwsK+vq6utrq6traiinqSoo5iXpKehk4mPm6y0
-trWnlo2Tn6uyqZiLjqCqrJ6JkKayp5WgqaWQblxVVVRRUlJTUU9OTU5NS0tISktO
-T05PTU9OTE5PTU5NTU9MS0tPTlFOTU9TUlBNT1BQUE5RVFBQUFFPT05PUFhUVVZV
-UVJYWmBkbHJ3e3+ChomMj4+RlJiYlZmam5mcmJeTmJWRjoyLjYuLg31/fXp2cnBs
-aWdjYWJiYV9gX15eYF9eXFtcW15bY11dX15eYF5dXl1bXV9cXl5eYF9gZWJcYGBe
-XFxfW1tZW11bWVlYW1pXWVZVVFVUUlZchbnH0djd4OPm6Onp6kpLUEtIRT9DRUlJ
-RENCQUBBOTo/Rz1BQ0VCQkI8Ojs2Ojk5PDxBQkE9Ojs+PTpAQDw4Oj48Pz47Oj0+
-QDw4OT0+QD0+PD9BQkJAPT49QUJCQEBCRUhDQkVER0NEQUFAQkJEQ0JFRUFFREZH
-RUlIR0tKRUJGR0dISEhISEhISEVGREVEQkVESEhJS01KR0VHSktNTE1LTk5LTE1L
-S0pJTVBTTk1OUFBNS05WVVNSTU9OS0pHTE5QUk9PT0tNTFVaVlRUUFNUVVRVWVJP
-UVNSUlRSUVNWVlJOUFBUU1JRSk5OS0tJSU1NU1BPU1FTV1lYTk9OT1FVUVFTU0pO
-TkhFQUc2Nzg1NDEzMzM1NjY1NjQ0Njc0NDU0Njc0MzU2Nzc1NTI3ODk4Nzk3NzY3
-Nzo4NDk5PTs/PT08PD1APj49PUA+PT4/QURFR0hEQ0hGRUhDQ0ZLUE9NTE9MS0VI
-SUhJSUxNTktNSkpLTVFPUFJPSUZLS09NUFBOTUpNUVJST09KSElMTE5OTU5MUFVY
-UlZST1BQUFBQUFBLSExPT0lHSUxOREJEQj87OTk6NjU2NzYyNTY1OTU0Ly8vLUlD
-SjYzMjY2NjYzMjYzMTQ0NTc2PkZMUFFLUFJUUldbYF1hYVpVVlxZVFlbWFhVVFVU
-Uk5ISElSUlBMTE1OTEhHTkxNTE5IQkRIR0VFRkdFQkM/Pj5BPjo8Pjo2NDQ0NTQ0
-NjY3ODY3NDY1NDY2NDM0NDY2Ozo+Pjw3Nzc6OTk2OD5FRUhKRUhJSVFVW1hZVFZV
-U01KUUtFRURGSUxOSUZISUlGREZESE9YXmNmaGtrbnF3foF/gYGEhoeJioqLi4yK
-jY+SkpGSk5CPj4qCgYB9dWtlXVZSUlZofoR6YFZeb3p8bFdSWmx2cGFcXmVqZWNz
-bl1YYmhiZXuLhXV3g31qVmJ0cV9cbHNlX3WKinZtfpKblYl5e42enpKFgo+YlIZ4
-fpKZlYqFkaGsqpyTkJeenI57eoyWnJWAdoOYpaOSipSjqaWbmaCtsKealJmjpqGO
-iZKgnoyBiZqakIN8foiKh3ZoYGRkXllYWVdVVFVSU1RUVFBTVFJRUlJTUVFSUlRV
-UlJRT1NTTlJSUlJQVlhUUlRVU1JTVVZUVVRWVFNUVlVWWFhaV1daWVhWV1haXF9k
-Zmlqa2xwc3d+f4GEhYmLjY6MjJGSlJaVmpydnp6fn5+ipaqoqaupqq2sra2tsbOs
-qaerq6yooZuWmp+eoauvq5uGgIiarLKwqqKbn6OjpaahoJ6hoKCfnZ6ep6eej5qq
-pohlVlJOT1FRU1JQT1JPTE1KS0pMUE5OTVBPTE1QTk9OUFFPTlJNTU5PT01MTVBR
-UVBNUUxNT1JUVFFRUVFRUVJUVFNTWFlZWmBnbXF4f4GFhoqPkJGUlZaYlpeVl5ua
-ko+Qko6RkIyJhoSCgH93dHFva2dkY2JkX1peX15fXV5eWlxcXV1cWltZWFlaWltc
-Xl9gX2BfXVxaW11fXF1dX11eX2BeXV9eXmFhXl5cW1tXWFdWW1pVU1NUVFNTVFaA
-t8fQ193h4+bo6OnqSkxKSkpERUFFREJBRUBAPENGRkJEQkNCQUA8OT09PkM8Pj48
-PztAQj87Pj07ODw+Pj87PTw/PTw/Pjs+PDs4PT08PkBAQUE+Q0FAPDw6PT49QUNC
-RUZFQ0NDQUI/QEBBQUJBQEBDQkZIRUVGRkpKS0hFQ0RFRUZFQ0ZFRkhFREdGR0lI
-RkpISEhMWUtKRUlGR0tMSk5QTVBQUU5NS0pMS01OUVJUTU9NTVBTT09PUE5LSUlL
-T1BUU09QS0lOT1NSUlRSVFRUVVdVVVRTUlRVWFZTVVhZUk9UVFdWUlBJSkpLUk9N
-T1BVUEpQUlFVWl5bVlFPTlNTUFBTTExNRURARDs5Nzk3NDM1NDQyNDQ1Nzg7NzMz
-NDc3NTY2Njo3Njg3Njc5MzU7OTo3NTU3Nzo8Pjw8Ojw8Pj48PkE9PDo9QEBAPT1D
-QEJERUdDR0xHREJDRkdMTExMTExMTk5PTExJSk5OTUtKTEtKSlBQT05NSEpNT09T
-UU9LS01TVU5KTE1OT0tLTVBRUVJRVlRQUE9RU1NRTU9RTkpLT1BRT0pMTEpIREZJ
-Rz89PDk5NTc3Nzg8ODg2NjMyMTM0NTMxMTY0NDQyNjAyMTAyNDM1NjY5PEZTSUxR
-VVdUVVddXl5cWFlZXFlUWFxdXl5bWVdVUkpJSlBQVVJLSEtNSkdNTEtJSklMSktH
-RURERUVEQj88Oj1AOzo8PzU0NDQ1NDc2NTI3NzU4NDc2NTQzNTI2NTY0NTc5OTc5
-ODk3NDQ6P0VHSUhIR0lLT1ZdXVhVUlJOSUtLSEhJSEhLR0ZJREVHSEZGREhKTlZd
-YGVnam9zdnl5fH2BgoOHhIqIhomLjIyNkI2MjJCOkJKJioeCg4J6cGRaVFRUYHGH
-iXtrZ3F9gXheUFNcbGxiWV1odnhrdXxwZG52cmVuho2DeoGNhXNianp2ZGR0d2tw
-hZaTfnuIl5mTf3F2iJWQf3JyhYyHdGl0h4+LfnmJnqalm5WYo6emm4eNnKGilYF4
-gpWdm4p/iJqim46NnKurpp+Zoqyup5uXnqagk4WFkZWKfnyBi46GdmtkY2VdV1ZW
-VFVWUVJUUVJRUFRSV09PVFBQUVJUUFBPTlBQT1JTT1FQUFRVUlFQT1BUUFJSUlRU
-WGFXVFhVV1dVVlVXWFlXV1ZTVVVUWFtfX2FgZGZqbG9xdnd6fICChYiIioyNjZKS
-lpmZmZycmZ2eoKKlpKeoqa2xsK2rq6OfnaOmpqCWk5OXl5inta+qlYiIkZ+iop2X
-l6CqpqWaj5Sgqammmo6Woqqmm5eUnaWeiGRYWFlXU09SUlFPUE5NTUlKTkpMTU1O
-TU5PT1FQT01MTE9PTk1NT05PUU9QTk1PTUxJTkxPUE9RUFBMUFRQUVFUVllaX2Nm
-a3N4fYKGiouPlJSUlpeal5qZmZeXlpSQkIyNjYmHhX56eXl2cG1qaGVjY19fW19f
-Wl5eXVxZW2BhXVlaWFhYVldWVFdaW11dWlxeXl1aWl1bXF5dXFpbXFxcXF1bXF9d
-X2BfXlxfYlxYWFhYWFdWV1VUU1NXX4i1xtDY3ODj5ufo6epMS0dEQ0RGR0NDQz5C
-QUNDR0dHRUREQTw/PT9BQD08RUNDQT0+QTo7OjtBQjs+Pzs/Pjs7Pjs5PT88PTo8
-PEA8PTw/QT08QDw+QT09QkA+Pz9BQUFFREVFRUNAQkRCQEBDRD9AQ0NCQkFERENF
-SUlISkdFRUVCQ0dDRkdFR0VJSUZISEhLTEpKSUxPS0pKSUZJSkxNT1BRUk9PTktL
-SklLTlNTUU9MTU1MTlFSUUxNTUlNUE9RUVJQTk5NSkxLUVBNVFVRU1VWUlNUVFlY
-W1taV1VXVldYVFpXVVVQTkpMTEpOUFBQUFNTTk5MS0tRXFlWT09RVVJSVFRSS01K
-RUNBOjU2Nzk2NDU3NzY4NzU1NjYzNjY2NjYzNjg1NTg2NjQ0NTY3NDU4OTo6ODY5
-Oz08PT08Ojk6Ojg9QUBAPz1BQkE9QT09Q0ZEQ0ZISkhBQkNGTU5NUE5LSklMTk1N
-S0xLS01RUE9RUlRTU1JQUEtKSkxMS1RVVEtJT1FUTkxKSU1KS01OUlJUV1dUUVBO
-UE9PS01MS09VTlJRU09KTU5MSUxLSUlIRUJBQDw6OTk6Nzc2MjQ1OTQ1Njk4NDEx
-LzEwMTM1NjI0OTUzNjc3OzY4PEpMTk9WWltVV1tfX15hWllXWFVZWmFiYmNiWlRS
-S0pGS1BSU01ISUpMSEpJSEZLSU5MUVBIQ0BERkNAPD4+QTw4NDM0Ojk1NTE0NzY4
-OTY2MjY1NTYzMjMyNDU1NTY1NDU1OTg7Ozk5OjxAQ0dJSkxIR0xVWV5fW1RRT05T
-SUZITEtHRkZGSEpLSUlGR0I+RUxPVFpkZGhsb3Nydnp5en1/g4aIiIeGiIyMio6P
-joyOkY+OjY+Jh4SCf3htY1tVU1Rjd4WGd3F5iJCMeGNcW2l0amBhaHeCdXaCfW1u
-e35vYnKHh313hZCGcmdygHhlaXl6bXWMmZWHiZOfnJZ+c32MlY52a3GCioBuZXSD
-ioJyb32SnpqNjJSjqaiakpelrq6fjYOLnaCciYCKm6GYh4CMoKOgmZidra+so6Gl
-rq6ejYmQkol3dHqGjYZ2aGNlZGBWVVZTVFJSVFJPUVVUUFJVUFBSUlJRUVFPTk5P
-UFFRUFFPT09SUlJPTlBTUlJVUlRVU1VUVFRWVlVVUlJSU1NVUlRVVFVWVVRSVVlX
-Vl1eX2JkZmpuc3R1dnl+gYGCg4aIjIyOkY+RlpiamJ+fn56fnqClp6eqqaGfoaCh
-oqKclY2NkI+KlqezsaWdl5qfoZ6TiYSJkJOZmJCGhpafop2Pg4mbqKSSipugm5F+
-YlhVVVVSTk1LTk9QTk1OUU1MTUlPTEhITExLTU1RT09NT05OTU1NT09PUFBRT09R
-T01NTUxNUVFSUlJSUVFTVlhdX2FpcHZ5foKIi42Sk5SXmJiWlpiZmZaUlZSTko+O
-i4iFgHx7dHJubWtnZGNiX15cW1tcX1tbW1tbWllbWVlaV1RTVVVVVldYWlhYXVpa
-XV5fX19fXltbXVtaWllZW1xbYV5eX2BgXFtaW1taW1paWlpaWldUV1dYWllfhbbH
-0Nnd4eLl6Onq6klMT01DQ0FBR0RCPz5AQkZFR0RDQkI8PDo/Pz8+Pz89P0A/PkA+
-Pz4+OTtAPz49Oj9DPTs9Ojg6PEE+QD5AREA8PT89Pjs7Pj8/PT1CRUQ+P0JBRERG
-RENDQkJBQ0BBQkVDQ0NDREdFQUNER0ZJSUlJSklJRUJDRUlGRkVFR0lKR0ZIS0tM
-R0lKSkxNUk1LSkpMTk1NTk9OS0pLSEpJSElMU09NTElHSkxOTkpMSkxMSkxQTkxL
-S01NTUtLTUxRU1BTUk9RUVdaWFhWW1hdXVVTUlhSUVNTVlNPUk5PTEdHR0tMSU1N
-S09MTUxNUFNWVU5LT1BWVlRSUE1KRUVDQ0A6ODU1NzY1NTIzNzU1ODg1NTU3NzEz
-NTYzMTM0NTk3NDQ1Njk3ODY5RDw5Nz89OTo7PjkxNjc5Njs7PEJDQj48P0BAOz5A
-Q0VGREVHRkNEREhNTU9SU05KS0lKS0tLT1NPTVJSU1JUV1hYVU9MS0pKSkpLUVNU
-TUxQVVVTTkxMTk1OTlBMUFZWU1JSUVBPUFFNSkxNUU9OVVVSTEdHT09OSU1JTUlG
-REFEPzo5Nzc1NTYzNDM6ODY0MjM2NjY3NTE2NDQ1MzU9NzY2Nzs2MzQ0PUlMUVha
-WVVXWVtdXVtbXlpXV1leY2ZlZWJcWVNOS0dNUV1YTklHSkxHR0dJTkpHS01QTUhF
-RkpMQkFAPkA/Ozc4NjI4ODY1NjQ2MjMwMjQ0Nj04NDQ0NDY4Njc1NDU5NTM3OkA9
-Ozg3NjpARElJSU9RVVZYWltbWVFPUFJGR01PS0VGSU9QT0pHSUtGQkJGTVBVXGFk
-aW1tbHB1d3h8fX9/gIODh4mMi42OkI6RkY+Oj42Li4mIh4N7dG5kXFdUUl9wdXJu
-dYWQlo5+bnB3fHdpY2VufnpteH5uZ3R/eGJXan99aWuAioNtZ3Z/dmJse3ZqcYua
-j4WPnqakm4uDj5eajXVseYiJfWlpe4+LfWxoeo6TjX12hZigmoqKlqSqqqGSj5mm
-raOUipCfopiJgImboJeKhpOkqaqgn6musKaWlJmaj3pwcn6Hg3JkX19cWVZTVVVQ
-T1FQT1JOUVBRUFJSU1NRUVBPT1BPT09PTk9QUU9PUlNRUVJPUFJRU1JTU1FTUVBP
-UVRVVFNST1NRU1VUU1FRU1RUU1JUVlpZWVtaXF1gYGNoZ2lrbnN2e3t+gISCgYSH
-iYmNkpSTmJyampecnJ6enZ6cm6Opra2lm5iRjpCRkJGbo6ainpuepKWckYN/goGC
-hYqJhoiJjpOXlI6Ii5SbnY+FlJ+XjHpcVFVWVFJUT05NS01NTExQTE1MSkZGRklL
-Tk5NT05PT01OTk5NTEpMTk9QTk9QUExUUExOUVFPUFBSUVFZV1tdYGZscnZ8f4OG
-iY6RkZKWl5aZlpiamZiZlY6PjIyLioaBfnx6dXNtaGRjYmBdXlxeXltZWVlZW1tZ
-WFZbV1dVVFNWVVNTVlhXV1pYWlpaXmBaXFxbW19cXl9bXFxbW1tcXVtdWlpbXF5e
-XlxaWVpbW1hZWlpWVlRYVlJaVFd/s8jS2N7h4+bn6OrqSkdHRUZFQ0NFP0FBPj9E
-RUdEQz5EQkZFQ0E+Pj0+Qz8+PkBAPj9CRD06Nzg/Ozg9OTw8PT9APTs5QEJAQD0/
-Pjo8PD1BQT48QEFAOz0+P0BBQEBARUdDRUNEREQ+QEJDRUZHRkVFRkZEQ0hERUdJ
-SUtKSkpKRkdERUVIRUZFSEZFRklKSklGSEhISkpLS0ZHRUZJS05RTk9OS0pISkpJ
-TEtOTklHSUZJS09NSkdKS0xMSkxJR0pNUVBOS0xKR0xUUVBRVlJQVVdWV1ZbWFhZ
-VVRRUVVOUVJXWU9OT05MREVIS0xKTEhJTktKTU1QUFJSUlNSVlZXUlBJSk5JRUJC
-Pjs4NzQzNjczNDAyMjU2NzIzMzUzNjQyMjQxMjM2NTU0Njc4NzU4ODY6Qjs5Ozs8
-Ozs8OTk2Nzk7Pjw+PTw8PD4/Pj48Pj9AQUNAP0BESUlISUpMTFRXUU5KRkZIR0dO
-UlJOS0tQV1VVVVhWUlJOSE1MSktOT0xJSk9SUlVWVVNQTE5QU1hYV1hRUFBRUU9P
-UU5MT05SUVJYVldPSUVLTk5MTkxNSUhMRD49Ojc6ODc5NjMyNTI2NDIxMDI1NTQ3
-Mi8yMjY4NTMyMzY2Qzs0Mzg8Q0pRWFpXUlBUV1pbWFxgX1pbXV9gZGNnZV9cUk1P
-TUxOWFdQTUlITk5HRExOS0hISktHSkpMS01HREM8Pj0/OTc5Ojk3NDI2MzQyMjMx
-MTQ0NTU2NjQzMzY0ODEzNjc1MzU5Oz02NDY7QEJDRUhLTlJWXFtVWVlbVFJTUUpM
-T05MTUhLTlFPS0pLTU1NRkdIT1VYX2Vpam1ycnN2enyBg4GFg4OHiIqLkJCPjo6P
-jo+NjYqLiYeGf3xyaF5ZVlNUWWViWFxvg4+SiHZyeYeJe2llYnB5amR2a1tjdH1u
-WVNicWtdYnl9b15bb3lsWWNzbmNqhI2Ae4ugqqeckJWgpaOThXyIlZGFdHWGjpCD
-cHGBk5iHdW96i5KKfXuKmqGimZGZo6muqZmUnKqsn4+Ejp2kmYR8hZyjoJKUoa2s
-opWTnqCYgnBqcnl0ZV9YV1ZUU1NTU1RTUVJPUFJPUVBQUFBRT1RTU09OUFBSTk1N
-UFBNUFBQTk5PUFNQUE5RUk9RU1NTUVFPUVJTUVFPUFFRUlVUU1ZSUVNTVFZVVVNR
-VFVYVllaXGBjY2JmbG5vcnd5enx8fH6Di4iKjI+QkI+Qk5eampiWl5acn6auqqeh
-nJmcnJeVl5+cmJCPkJaXlZCHhYSFhIWJiIWFh4iLjI+OkI6NkJSVjoiMlZKNe15W
-VVZYVVBPTk9PTEtNT09OTEtKSU1MTUxOTU1MTk5OTk9QUU9KTE1QTE5PUU9RVFJW
-VFBRU1NRU1JVVFhdYWlwc3d7goaKjJGTk5OTlZeWlpmcmJeYlJKSkI2IgoGBfXdz
-cGxqaGZgYltcXVxbWltaWllWVVZXWVpaWVZXWFNVVlRYUVNUV1hYWFpcXltcXGBd
-Wl5dWltZWVlaWllaW1tcXFteXFxdXF1bWllcW1paV1ZYVVRUUldZVlRQV4Cyx9LZ
-3OHj5efo6elISkpER0dHSEZGSEQ/PkRGR0JCQ0BCREVCQUA/Qjo5PENCQT4+PDxA
-QDo8Ojo6Pj08QD0+Oj9APDs/Pzw8Pjs+Pzw+PT5BPjw9QD89PD49QkE/QEdGREJF
-RUNCQ0NDQENGRkhLR0dFR0tIRUNCQ0hMS0pJSElIR0dHRURHRUhFR0dHRUdHRkVF
-RUVIR0dGRkVHRkZFSExOUE9LSUpLTEpMSUlKR0dFRktOTkxISktJSUdHSUhJR0dR
-T0tMTExLS1BPTk9RVVdSUlFSU1RTUlNTU1FRWVFVWFlaU09SUU9IRkhJTExPUE1P
-Tk1MS1NPUVNUWldaVlZUTktLR0RBQkE9OjY1NTYzNDE1ODk2MzMzMy80MjY0NTQ1
-MTQ0MTQ0NjU3Njc0ODg2NTQ4OTs4Nzo9Ojo6PDk4Nzc8PT1AQD49PT5DRD5AQURD
-Q0BAQkZLSktLUlJMT1BNT0lJTUpLR09UU1BLS09TVVhVVVNPTk1RUUtNTk9QTk1P
-UFJTVVZZWlRQUFNVV1dZV1FMT1BRT05MTU1QU1FSW15YVk5KSEtNTUtMTFBNR0Y9
-Pz1APT07PTo2NTc1NjQzMzIzLzQ0MzIyMzM0NjUyNDAuLzU6NjMxMzY5RUtTVVVO
-T1BTVVZZXWNkXmFiYGFiYGRkYV1TTUxKTk9RVk5LSUVKTEZGRklCQ0ZLTEdNTkxM
-T0xFPjw8Qj05Ojk4NzkzMDIzMTIzNDM2NzMzNjQ2NTY2NjY1Njc1NDQzMTU8PTg2
-OT0+SEdISEtQU1RUVVZWV1ZUUlNNS0tRUlBOTktLUFBJSEtPTUVAQkdQV11eYmRs
-cHR1dnl8f4CDgYWEhoqKio+QjoyNjpGNjo+NjIuKiIWDfHVpYFhWUlpgZ1tPUWV5
-goFxanJ+h4JzZ1xhbWpibHFhXG13b15RWGxyYVdldXZlVVZpdGJVYW1nYHB/hXRs
-f5WfnI+JkqKpqJuNipaenY9+hpOanIl6gY+bmo13cXuJj4p1bYCPmpWNgombo6ei
-l5ajrK+omJCZp6qfioKLm6SZhoWToKKWio6bnpeEc2xtbmddVlVWUlJRUk9QUVJT
-VVNQTE1OT05NUU9UUVFRUFBOT01OUFNRU1RSUE9OTk1PUFBPUk1QUlJUVFNRUU5P
-UlRSTk1PT1BSVFRWVlRTU1NUU1RTU1VTVFVSU1dZW1tcW15iZ2tsbW5wcnh4ent/
-gYKGhoiJjoyOlJWUl5SWlpeboqCjoaCho6SgmpeWn5qUkIyOioiKioiFhoaGg4OH
-hoSFhoiKiYqLiImNj42OjY2KiI18YVRSUlFPT1JSUlBOSkpPT0xLSUpMSE1KTU1O
-TUpNSktQUE1OUlFQTU1NUFBSUE9STlBPTVFPUVJUV1hgaWtvdHt/g4aLjpGTlZOV
-lZaXm5qYnJuXlpORjYyNhIB8dnNybmpmYWBfXF9gX1xcWVhYV1hXWVdTVFRTUlNT
-U1ZVVVZWVFNSVVhZV1paYlteX11gX15aWlxdWllZWFlXV1hZWVpYWlxbW1tbWlpZ
-WVpaWFdXWFdaWFdSVFNYWlZbg7DF0djd4OPl6Ojq6ktMSUpLSUhHQkZJSEVFQUNE
-QUJFQT5DQEhFR0FAQD4/QEFAPT45OTw9QTw7ODg4Oj07RDxDQkI+QD89Qjw6Ozk6
-PTk8Pzs7PDxBQkJAPT89P0FBQUNEQkRFRkdHRUBAQUBDSExMSUlHRkhHSEZGR0pL
-S0VFR0VHR0pLSUVHS0tKS0dHR0VGRUZFQ0dHRkRFR0NDRkVFSUtNTEhHSEhIRkhJ
-R0hIR0ZHS0hITEhISklHRUdHSUpKSExMRUlLUFRRVFVNTk9TVFJTU1JWUFNSUVJQ
-UlBUVVVZV1lQUFRTUExLTEhOTU5ST09PTE1OUVFUWlteWlRZW1ZRS0xJRj9BQTo2
-NTc4NDU3NjQzNDo3MzIzNzU2MjU1NDczMzMyNDU3NzM0MzU3OTs4Nzk4OTk4PDw3
-ODg5OTk8Ojs7P0E9Ojw+QERGRkJBQ0RDQkRGSEtNT01SUk9NTElHRkdOS0xMTlBN
-Tk1NTlRVVU1LSUlJS05PTk9KTE5PUFJUVFNTWlhYVlJRUFVXXFtXU01LUE5NT1BS
-V1dTVVNYXVhTT05IS01NSkhISktFRkFBQj4+PkA7PDg4Nzk1NzY2NzY2NTExNjgz
-MjAvMzM0ODIwLjEyMjQ1NTY5QUxNTU5PT1RWVFVZXmJjZGVkXVteYV5dW1dOTkxQ
-TktMTUtGRkhERUdKSkNIR0dGRUpLTktLSUE8Oz9EPzo4NzM0NTc2NDYyMjMxMjE0
-MjQzNTg7NzQzNDU6NDY2My83NTk3OTY1NUVIRUhIS0xQVFVYVVVUUlFOU1BMSlFQ
-SkpPUExTVUtFSklJRUFDSU9UXWJkZmtrcHN1dXl9foGBg4SMiomKi4yOkI+PkI6R
-jpCMiomHg3x4dGxjW1JRW2djUk5ZbHl6bVdTYnl+eW1kXWluZGlyZVhneHZnXV1n
-fHxqXml7dmNWXm5wXFVmbWFccIB7Z2V6kJWNf3uKm6Wdk42QnaGdjoGHmJ6ZjYSK
-mqOhkoJ+iI+ShXZ0g5CWkoB0eo+do5yTkJ2ssaealJ6vsKOVj5KgopqGgY+ZmIh+
-hJGYj31waGlqY1pYVFFQUVFSUFBRUE9PVFBOUVBQUFBSTUxPTk9PTk5PTk9SUE5R
-VFJPTVJOT0xPT0xOUE9QT1FRUlNPTktNUVFPT1FRT0xPT1FQUlJTUlNUU1FTUlJQ
-U1RSVFVYWFdaWVpcYWRjZWdvbGxtcHJ2fICAgoaFhoiLjY+Qj5KWmpqcmZucnJ6d
-m5iXk5KVk5GQko6LioyIioiHhoSDg4ODhIWIiomLiI2MjIqLjI2LjYqChHlgU1NP
-TktPT1FPTU1OUU5NSUdMTUxQUEtMTFBNS01OT1BOT09RUk9OTk9QUlFTUU9PTU5N
-T1BSVlpeYmhveHt/hImNkJSSlZaXl5SSlJWZmpqbmpKQjoqFg4F9d3RybWhmYl5f
-W1tdXVxaXFhaWFZZWFRVU1BQUVVXWFdXVlZXV1dYWFtbWlpYWFlaXV1dXl5cX2Bc
-W1hXV1ZYWVhbXFxaWVxbXV1aW1hbXV1aW1pbW1xXVllXVVFRUlRVVV2DsMfQ2N3g
-4+bn5+nqRkpNTE5IRklNQEVNR0dDQEE+QUFAPT09QUJGPj1CQz9BPD0+QEI+Pj09
-Qjs8PDs9OTk5Ozw5PkI9PDw9Ozk3OTg5ODw7O0FEQkJFQkFCQDw7QkFDQkJBQkNF
-SERDREdFRUhGR0lKR0dGSEZFQUNFSkpHRklEREVISktISEdKS01MSUhDREVGQkVF
-REdHR0ZJRkVCP0RIRkdJRkZERkpFRUdJS0lIR0dJSEVCSUtLS0lFQkZISUdISk9J
-SElOUFVWV1JRTlFXU1RVUlFSUU5WUFBUUlBUVVtUUlJPUVJWUk1NTU5SVFNTUlNM
-T05SUVRbXF5XU1dbWFFLRUpMQD1FPzk0NDYzMDI0NTY2NDM3Njc5NTEyNDQyMDIy
-Njg1NTY2NjQ1NTg4ODg7ODk3Njc7Ozc5Ojc3OTs8QT1AQD8/Pj5DP0JCPj0/P0E/
-Q0ZLS0lGSE9QVFVSSkVDQkhOUVBPTlFTTEtNTlRRTkxNTUxLSUxMT0tPUVNPUVVU
-VFFWVk5QT01OV1dTWVhUTUtLTlJTVFVZV09PUlJXWlZRTUhPUE5GRUdISkdGRUQ9
-OTk7Pj47Ozk4OTU0NDMzNDMyNjUyMC8vMDE0NTU2MzAyLjg0MzEzNTU6P0lNUE9P
-VVhVVVpgYmFiYmRdV1lZWlxeWE9LSVFMS0hFQURJSEdKTE9KSUhKSUhGSEtMR0M9
-Ozw8OztAOjg2NTU3NTE0MzQ1OEIzMzY2NDM2Mzk4NjY0NjY0NDUzNz01Nzg1Njk6
-PkJFRkhJT01NVFZbV1VTTUxQUk9NUFNMSlNTTlFOSklITUdCP0ZMT1hbXWNnaGpv
-dnl4eXt+fH+DhImNjIuHi4+Qjo2PkIyNjYuKiISEgXdsZV9XUVRkc21gXmZ2e29a
-TFBgbG5pal9fbW9odXtoZW2BeWdkaXyMinp2f4d+ZV9qd3FaV2puYGN2h3pmbISQ
-k4ZwcYOVmI5+eoiZnZSDe4SUm5KFgImapaCUiouUmpuUg32Kl5qQfnR4jZuckYOF
-lKarpJmXoK2yr6CXnqapn42FkJaRg3R0hYuBc21oaWZdVlRVU1BQUFFVUVNQUFFR
-U1NRTk9NT0pLUFBRVFNQUFFRU1FSUU5STk5QUVBQUU9NUE5OT1BPUE9RUUtMTU9O
-T1BPT1BPT1BRT01SUlNPUlRUVVJSU1BQU1NUVFRXWFdUVVdZWl5fY2RlZWZpbXBx
-dHZ5eXyAgoKFh4qLjZCTlZaWmJeZl5WVlpWTkpWVk5GPkY+Ni4+KiYmIhoeGhISD
-h4mLi4eIiYqLi4eJiouLhnhwc15WUlFPUVFRT05NT01JSE1NTUtMT09PTUxQUFBP
-TVBST01QUk5OTk1OT1FRU1FPUFJQU1dTVFVbYWlvdnt9hYiMjZKVlpiZmpmXl5eX
-lJWVlJGRjIuHg317d3NxamVhYF1cXl5aXltZV1hWVVZYVlNUVlNQUVJUVlVTU1ZT
-U1hVWFdXWVlZW1pZWFlcXFxeX19cYF5cWVhVVVhZV1dZV1dbXFpZWlxbWVhcXVpb
-XVxdW11aWl5cWVhVUlNVXIexxtDY3eHj5efo6OpMSUlLRktLSk1OS01HREVAPUFF
-RUQ+Ojw7Oz8/QUFAPDo8QD9DQ0M8QkBEQDw8OzlBPTw6QDs7Pjw9PDg5Oz09OzY6
-OTk6Q0A/PkFCQkRDQURBRURDRENDRENEREdDRUhIR0VFR0ZGRUlGRERDRUhISUxJ
-RkZER0dISkpIR0dJR0hGRUdHRkVCQkNCQUVHSENAQUBDQUhLSElHR0lFRkdDQ0VG
-R0ZJRkZISEZHR0lISEdCQkdJR0lKSklIR0pRTVNTUVFRUFRUVlhUU09NTU1SU1dV
-T09TWVVOT0tMTlNVUFBRUFZVU1BPUU1RUVFUWFxZVlRUUlFRT0hGRUdFPTw7OTg1
-NDMzMzE0NTM0NTU0NTc5NDc0NTMyNTg0MjU1ODU0NDU2NDc4Njc2Ozg3ODs6Ojo8
-OjY7PDxARURBQj5CSEJBQD49OztAPT1BRkpIRUVFREdNTk1LRkFDTE1OTktJUFJM
-SEdITU9TUVFPTkdKS1BPT1RVU09OUlNTTVBRTktOUlRRUVRXXFZOSElKT1JVU1JR
-T1BSUlZVUFFRUk9PTEdGREZFQUFDPj1HOz49PDs4ODc3Njg2NTM1MjAwMTI1NDQ0
-Mi8wMTMxMzE1NDIxLzE0NTQ7QEhNUVBUV1dbW2BjYVxgXVpZWFVaXGFdVlFPU1BP
-TEhETExMREZLTUlIRUhLS0hGSExHSkA9QkM+PDk5NzY4MzMzNTU1MzQ0MTc2NTY2
-NTMxNjQzMzY2ODY0MzMzNDQzNDY5NzY5QUZGSE1MSlFTVFZaVlRQTVNTUlNTUU5J
-SktMTlBPTlFPTkQ7QktVVVlfZGlnbXF1dnd6f35+g4SFiIyOioqNjY2RkJOPj5CO
-jIeIhIV/d25kW1hSV2t1b250foaBbVVPVmpsZmhraXJ7dHF8dWt6f4R5cWtyhpKN
-hYmTloh1cX6GeWZodnVjaYCOgG96jpiVgHB1iJeThXNwgZKUiHNzgY6Si3dwfpWf
-l5CJjZejopuOjJWio5mHeoCTnZ2Oe3qLoaObi4udrbOrn5qkqbCmlY+VnJaAcG52
-fXRnZmViXllVVFdZV1FPUFJTUlJPTU5NT09PUVBRU1BPTlNQT09QTU5RUlBQTkxR
-Tk1PT1NST0xNTU5QTk1QUE9PUVJNTU1PT1BNT1RPT1BQUlNQUU9NTU5QVVJSU1NU
-U1FQUFJTVVZWU1RXWVxbXl9eX2JmZ2lsbG1ydHh9gX6AhYqJjZCRkZWXk5WUk5WS
-kpaWlJOVk5OSkpKUj4+Oj4uOkIuMiYuHh4iJjYuJjImJiYmHiIZ7amZrXlVRUlJQ
-Tk9OTExMS0xLSk5PSk5OTVBPUExOS0tMS05NTk5QUE9PT09OUE9TU1JRUlNVV1pd
-YGhweHt/hYmNjI6Ul5mXnZqZmJWWmJSUk46Ni4eFgXt3dHJtZ2dmYFtbWltbW1lY
-VlhWVVRWVVJSU1RTUVFSUVBQT1JRUlNUUlZWV1pZWVpbXV5eXFxeYF9cX15cW1hY
-WFhZVVZZWFdZWltbW11dXV1bW1tYWVdaW1tbXFxZX19dV1ZVWVpairPFzdbc4OPm
-5ujp6khMT0lNSklSUFBMRkNERUREQEI+QT88PEE+QkFAQUI9QEA9Q0BGREI/Oz1B
-QT08PT08Pj09PDw6PDo4Oj07P0JBPDg/QD87QUFCQkFCQ0NDQUJBRD9CQ0RCQUFB
-SEtJSUdJSUhER0dJRkhHRkhFSEtJR0lJSkdFREhKSUlGR0hIR0dEQ0VFRUVFRD89
-QUVCQUBARUdERUpKSklISUdGRUNDRUpHQkNEQ0dJS0hHRkZJRUFDR0pPSEVHSkpM
-S01MTE1OTlBRWFZVWVdWU1RWUlBSUFBPTU5TVFJSUk5OUVVSVVNRVVlZUlFMTU9T
-VFNSWl5eWFVTT1BOTktGQkJBQDw4ODMyMjQ2NDQ3NTg4OTQ1NjI1NDM3NjQ0NDY0
-NTk7NzM0Njc2NTc5Ozo7ODo3ODg7PTw9OT5BQT49Qz47PkJBPz1CREQ8PT9BQT1C
-RkRGRUVGR0tMS0REQ0hHSk5JRkRKTUxIR0RGS1JZWFZOTUtRT0xMTlFUTk1NTkxN
-TU5RTE5PT1BRVFZXU0xKSE1SVVVOS0xOUVFRVltUUlNTUk9JRUZEREI+QDw6PkI7
-PDw8Ojk6Nzc0NjQ2MzE0MzI0MzU0ODUzNDQ0MTAwMUM4MjI0NDI0MzU4P0ZJTlRZ
-WllaXV5aW1tYWlpaVlVfYmRcWFdXU1JMSkpQUE5GRUpOS0RFREtTSENCSUlIQTtB
-Qz87Ozw8PDU1MjQ2NjU0NDY5NjY0MS8vLzEzNDI0MzQ1MzMyMjQ1NjMxNTU2Nzg6
-PUFITk1NTlJSVFdZWFdSUlRZUU9RTk1KR0dITE9OTkxKREZFTVNXWl5kbGtwdXZ4
-en1+gICFg4KChYqNj5CQj5CSk5SRj4yKh4iDgn51bGJbVVFba3BrdIKPj4ZwYGFo
-cG9scnFwfX1ydnhpcIWLgXJwbHiOkYGAjZqYiXl5iIx+a3N+emxxjZKHdoeZnZWC
-eIGRmpKAbnKEkpGDdXCCkJOEbWh8kZmSgX2ImKKhloySoqiooI+IjJeioZB9fpGf
-n5OAfo6gqZ+Qkp6qq6eblJmemoZ0bnV1bV9dW1xbV1dUU1ZXU05OTk9QUE9OTlBO
-Tk9QUVBRTU5NUFFPUVJVUU9TVFRPUFJQUlJTU1JPTk9VTU1PTU5RUE5OUVBMS05Q
-UFBQT05PUE9QUU9NT05TUVFRVFNTUVJRUE5QUFJUVVBTUlNVVlRVWVpbXGFkZ2lo
-a21wcnd3e3p9hoSHh4qOkpCRjpKSkZKQk5STk5WXmJWXlpeUlJGPj46Pj46PioqJ
-iYuLjZCNjYuNi4WAeW5fYWxbUVJRUlFRUE5MTUtPUVJRTk9KS0xKTVBQTk5NTE5N
-S09QUFBPTk9OTU1OT1FSWVJUVlddYmpxdnyEhYmPkJOWlpmcmZqbmpuYlpSRko6N
-iIWDf3h1cW1samZiX11cWFhYW1hVV1VVVlZVV1JTUVRVVFRTUlBRUFNWUVJTVFVV
-VFdVV1hbW19hXl5eXV1eXlxcW1tbWllWVldYV1dVWFZYWVpbW1xcXVxcWlpYWl1a
-WllYWFpcWllbVldXWF2JtMTP1tzg4+Xn6OrqSElHSkdDQUxIR0NFQz86O0I7Oz8/
-Oz08Q0I9QENAQT08QD1BQj9BOzs6Ozs/PT06PTs8ODc8Ozs7Ozw7PTs5Ojo9Ozw9
-QURCQz49QURCQ0FAQENDQ0FBQUFBQkVORkhFQ0ZKSkVERUZGRUZGRkdHRkVGSEZI
-RUNERkpJR0RDRUZIRURDQ0JER0VDQ0A7QENAQEFAUEdFRUZHRkdHR0NEQ0JDREVD
-QkNCRUhHRENERUNGP0FER0lHREVGRUhGSUtMUExNUVJVV1BSVVZVUVVTTlBPT01N
-TE1PUVZYU1FRUVVVWFFUWF9TTU5MTVBOT1NWWVRVWFZQUVNUUk1HQjw6ODg2MzQ0
-MzQ1MTU4ODc1MTIyMjIzNjg2NDY0MzM1Nzo4NjY2NzQ3ODs5NzY5Njg4Ozs5Nzg5
-Oz9DQD8+OjlBQUA9O0BIR0FAQURAPkNCQkJERkhLRkhIR0lHSElMS0lJSkhLSk1J
-REVMT1JTU1BLUlZXUE1OT1RTT0tJS01KSktKS0lNT05SU1VTTUtKTlFVVFZNS09P
-UFNUVVZZVFNVUUpJSEhHRkE+PUBBREM/QENBPzk5NjQzMzIyNDIyMjQ2ODQzNDEx
-LS8yMjMzNTEyMzI0MTEzMzU5QEZNVFhXWltcWFhYV1teWl1bWl5gYV9dWFNKSUdJ
-Tk9JSEVPTlBPTEtGR1FIREVFQ0VAPz45PDw6OTs7Pjs2NDU4NTY2OTc4NDQ2NjMz
-MTU2NTU1NTQzNjUzMjUzLzEzMzQ2NTg9QkVHSUhKTlBWVVteXFRUVlROTk5QT0lE
-R0dNTk5LTE1FQ0ZKT1deX2RqbXN5enZ6en2Ah4SDgYOJiI2OjpGMkJOTlZGQi4uL
-h4SAenRqXVdRT1RdXmNygouNgGpncH1/dnR1a212cnF5b2Nzh4VzbGhneoiAbGyC
-jol2b32KhXJmdoN8aHOJjn1zh52gloaGk6GkloJ3fY6XkYJ1fIuXkoFubn6Qlox3
-cX6MmJaJho+ep6ailZCYo6ekmoyMmKGikH2AkpyilYWGkaSppJOTnaKbiXpzendm
-WllaV1lTVVRTU1FTUFBRUFBPTUxMS01NTE5QUlRTTlBPUE5SUVJOUFJST09PUVFS
-UlFQT05RUU5PTE1PTkxNSktPT1NOTU9QVFJRUFBNUE9NTkpPUFJRUlNRTk9RU1JQ
-T1JSUlFUUlJVVVFRUVJWV1hYWlpfX2FkY2RscnJyc3Z6fX1/gIOFioyNjo6QlZaT
-lJSXlpeVmJeWlJWRkZKRjo2OjIqLkI+MjIyMiYaGhoF+dWloZV1cYFhQUVFRUFBM
-TE1MS0xOUFBQT09MS01OT1FOT01PU09MTE9LTVBOT09PT1FQUlNSUlddYmlvcnmA
-hYuNjY+QlJmYl5qcnJqYmpiTkIyJhoSBeXZ2cG5qZWJgYF9eXFtaVlVWV1ZVVlVV
-UVVWVVNRUlVQUlFSUVBUVltaVlZXVllYV1lYVVdcX19fYF5eX1VaXVpXWlxZVlVW
-WFZUVVZYV1lXWVlZWlxeYVxaWVxbXF1ZV1daVldXV1pXVVZYW4W2xc/Z3eDj5Ofp
-6epJTk1HR0RKRUQ+QkJEQD0/QEI8PEFBO0FAQj4/QEJBPD87PUNCPzs+PUA5PkM/
-PD1DQjo+Pj9AQz88PT0+PDo7OzxAQD1CREFDQ0BAQT9EQT9AQ0NERENBQkJDQUJG
-RkVDQ0dIRUNDRUVFREdIRUZFQ0RFR0dKRkVFR0ZEQ0NERkZGQ0FEREVGR0VBP0RB
-Q0JCPUBHRj9DQz8+QENDQ0VCPD1BREJCRUZHSUdFQj9AQj9BQENAQUNCQUFERURG
-S0xLS0lMTE5TT1FSUVJSVlJQVVBTUFBQTVFUU1VOT01LTlNVTk1OTVFPTUtIS0tM
-TU1RVFBQUE1OUk5PTkVCPzw5ODo0MjI0MzQ2MzQ1NDMyNTMzNDY3NjY1NTM0NTk5
-OTk4Njc2Nzk8Ozs6NzYzMjY4OTc4OTg7PT0+Pz47OTo7Oz0/PT5CQj09QkRERkNH
-QkBDSklFRUZKSUxKSUtKSktLRkdJSktHRkdISE5ST0pSWlhTUU9PUFBMSk1KR0hL
-TFBQU01OTE1OUFJPT1BPU1daWFNRUE9NT1BSV1hSUlZTUFNPS0lLRUNDREVFQT4+
-Pjw9Ozg3NjQzMDEzMjY1NDQ0MzU0NTM0NDQzMzMzMTM1NTY0MjQ1NDY8QUpPUlVV
-VVVWVFZZWlhcW1pdW1hdXFpZU09QUU5LTUhIRktMT01KRUZLTEhDRUZDRUA6OTg+
-Qj86PD85Pj06NzUzNDY0MjQ1MzA0MzI0MzA0NDUzMzU0Njk3NTM2NDU1NTY2Njs8
-PUJERkhIS01TWlpXUVFUVFFJSEpLS0lGRklNTk5JRkA+RUxRVlldYWZscHR0enp1
-eYGCgYKJiImJioyPlJGRlJWSkY6RjYeDgXt1b2RZUlFRUFJOVmh+hoJxYmZ0f392
-cWtcZmljbW5eYXeCdGJoZGh5f21daoGFdV9ccH94YF5xeW1eZ36Bb2yDmJuMiIqc
-paWVgYGNmJ2XiIOLl56XhXd0hZWXh3RseIyRjH52g5iipJuPjpqlqKiakpihqaKV
-h4mVpaCTg3+NmJuSh4qWnJiFdHR3dGVbV1VVU1dVVFJVU1FQTk5OTU1LTk5NTk1M
-TUpNUE9RUVBPUVFQT1JRUFBPU09NUU9NSk9OUU5MSUhNS0tLTE1KSUlJS01NTVBS
-U1JOT1JSTlBOUE1QT1BQT09PUFJRUlFOUFFPUVJRUlBPUE9RUlhVVlVUVVdZWVxg
-YGFiZmZqbnN4d3d8fYGBgoSHjY+QmJeWlZqXlpOZl5aXlZWVlZKSkpOPjo6PkYuK
-iYuIhIB6dG5iW1xcXFlaVVNST09RUE9PTlBQUk5PTU1RUUxLTk9OUFFPUFFPUVFP
-TE5MT09OUFJQU1NVUlVbXmlucnd/hIqMkZCRkpKXmZmYl5aVl5WSkI2JhoN8d3Vx
-b2tpZWRfXVtbWVpdW1xZVFJSUlJTU1NUUlNTUlBRUlNVVVJSVlRVUlZWV1dcaFxb
-W1xZWFtbXFtdYmRiVUxYXV1bWVdYWFRXWFdWV1haWlxdXl5cXV1dXV5eXlxcWVdY
-V1taVlZXWFZXVFpehLbH0Njd4OPk6Ojq60tJSUhHSEZFQ0JFRkhIR0NEPj5DPjw+
-QEBBPT09QUNBQj08PUBAO0A7PjtAQD05Oz0/Pjk8PT1DPzo9Ozo9Qz09PT9BRj5C
-Q0FEQT9CQUJEQkRERENHRkNDQkdEQkJDRUZBQ0dGRURERkdKRUZHREJDQkVJTEdF
-Q0JIS0dIRkdCQURGQkNEREVISEBBQkNEQj0+PEBBPj1DRkNAPkNBQUA+QD5ARURF
-RUZHR0VAQEE9QT0+P0BAQkNAP0FERUdFRkhJR0hGSU5LTE5RVVJUVVRVUVBOTlJT
-UlVVT0pLT0xNT09OSlJSUExHSktPVFVOT1RYUlBQUlVTS0pHRUBCQjw6NjU0MDA0
-MjY2OTU3NTI2NTU3NDY1NDg2NjY6PTo5NzU3Nzg3NzY4Nzg3Njg4ODc4OTk1ODg9
-Pjw9PDo5ODc5Oz5BQkI/Pjw+QkJBQkNFQ0hIRkVFRERIQUdKSUxMSkVESE5QSkpH
-SEpMTlBMTEtTWVlPUFJQUlBOTExKSktQVVVTU1FNTE1RVlJPTlBUWV5bVFNQU1FQ
-UVBOTktPU1BKTUtNTUtMSUdHRkE+QD05PDw7Ojo5Nzc0NDQ0NS8vLzMzNTIzMDEz
-MDAzMzY1NDIzMTEzNTUyMzU8RklQUVJTUVdVU09TU1JTWVxaV1pbW1hRTk9RSEdR
-TklJT1NTT0pISEdKSE1RTkRDPTw7O0BAPDg5Ojs6PTYyMzEzNTI1MzQ1MjAzNDAy
-MzIyMjUzMzM2NTY1NzY3NzU0MzY3OD1BQ0JERkhJS09VV1ZUUFNQR0RMS0hLUUpK
-UE5LTUtGPj5GUFNWXGFhZ21vbnJ7enh7fYSFhYmQioqLjIqOkJCPjpCOjouLh4GA
-fXVsY1pUTE5SUUxUa3l3aVdWY3R4c2xoWVhkYmJrYVtqe3RiZGRdZnp7ZlxuhYVx
-W11udmlXWW1zX1RneXBdZX2LhXN0gpaeno6Ah5elpJqQjpulpp6Qg4iUnpmGdHCA
-jpOLeHCClZyYiYCCk6OppJeUmqmtqJ2UlqKpp5qFfYuUlo17eIKLintvcXVuX1lX
-VlJTVVNTUVJPTVBSTU1NTU5LSUtLTUlKS0tMT09MTk5NT05QTk9WVFFQUE9PT1FQ
-T1BNS05NTEtNTUtLS0tLTVBNTU5NTlBNUE1NUFBST1BSTk5QT1JRU09PT1FTUE9O
-T09OUVJRUVFUUlNTUVFQT1BPU1ZUV1lWVVxgZGVpaGxvc3R3d3p7f4GDiIySkpOV
-lZWQlZmZmZiZmJubl5SUmJaTkY2NiYeHhIJ6dm9yaFhVV1ZeY1xTVFRRUVFRT1BN
-S1FQTU1MTUxPT05NTE1PTk5OT09PTE9QT0tITFNQUlFUVVRaYGhucnmAhYmNjpGS
-lpOSlJubnpqWk5COjo6Li4SAe3RvbWllYl9hXltZW1tbW1dYVldVUlJSUVFTUk5P
-UVVUU1BSVFVWVlVWWVlfWlhYWVpeXlpaW1xaXVpeW2NdWGpvUFpbWVdXWFdWVVdY
-WFdXXFxZW1tcXltdXl9iXlxbW1lYWVdXWFpZWFdXWVhWWF+Ju8jR19zg4+Xn6err
-TE1JR0VLRENERURGQURAQUVBQT4+REI+PEBAPj09PTw/Pzs6PDs9Pjw7PD87Ojk6
-OTk+OkA7Ojo+OTk7PztCQkJDQUJBREI+PT09Ojw+P0JCQUNEQ0VERUJCRkZCQkNC
-QkRESUlEREZJRkZGR0ZGQ0FCRUdKSEVDQkNIRUZDQkVDQUBCREVFRUpHRT9DQ0ZD
-QDw9QEJCQUNGR0ZCQEFCQkRAPkBGRkRDQ0VFRUI+PT1APkFDQD0+Q0FBPj1CQkJD
-RkdHRUVJSkhKUVZXU1NVUlBRT1BOUVFRUE9IR0hLTU5NTUtNV1VUT0xLTVBPUk1K
-UFFOT1JRVFNQSERDPkBCPTs2NDMwMTUzNDQyMzU2OjczNDM0NTc3Nzo4NzU2ODc5
-Nzg5NzY2OTY2NzU1Nzg2Njg3NzU2Nzk5Ozs6PDo4Njo4PUFDQkA8Ozw+P0FBR0hI
-SEdEREVCQ0ZHRkRLSEZIREFGSU5NSk1LTE5SVFVNTE9RUk1QUVNVWVdWU1FNTVBU
-VFFQTUxRUFRbV1JQU1VWW1RPUVNTV1xZV1RST01OT1BPT0xNTEZISkdERT9APkA8
-QD84OTY1Njk1MjEvLzAyNjMzMjAyMzM0NjU0MTU2MjMzMzIuNTQyMzg7Rk1NTk9N
-VlZXVlVUVFJTVVVWVVpbWU9KTExHQ0tNTEZHS05MR0xGREZJTkxKRUNARkNDQD49
-ODg7Ozw5OTcyMTU0NTY2NTY1NDU1MzMyNTQyNjU3ODs1My8xNDMzNjEvMjU1ODs/
-QT9KTk5SU1RUVlVUT0pHRkhKSUpOT01QTElJSkU/QURLU1ZcYWFnbHFzdHh6fn+E
-gYWJh4mJiIqKjo6OkJGPjpGPjo2LhoN7dG5mWVJPUVdOTVxye3NZTFBhaWpjZWNa
-YWdcZ25gYnd8bmRraWNzgnpucX6Gf21cZXh3ZlNYbG1dWmx9dFxec358bWl2jJKQ
-gniClaKfkoiLmqmqopKLkqGln4+Af4yYlo97eoOTmJWHdn2On6Gai4aXp6ymmZOb
-pautoI+JkZaUiHVwdn15bWZkaWJYVVZTVVNTUE9RUFFPTU1NSk1NTVVRTkpISExL
-TU5OUE9MTlBOT1VSUlJST1BNUVBOUFBQUE5OTkxLT05OTktMSkxOTEpNTU5NTU9O
-T01OUFBSUFFPTkxQU1JPTk9PTk9QUlFSU1JTVFJSUFJVVVJSUFBRTlFTU1VVWVRU
-VlhdW15gZGlpb3J3eHx+gIGDh4mOj5GRk5GTlpeZmpiWlpeYl5GTk5KRjoiFhoaE
-fHNuc3VrX1lXT1tmXFFQUFFQUU9RUExOTEpMTU5PTE9RTktNTk1OUU9QUkxKTE1M
-TU9QTlBQVVhXXWNob3l/g4iKjpCRkpSVlpmYmZiZlpCMjY6Ig4B8eXJvcGllYF9d
-WVlaWlpYV1ZXVVVVVlVTUlRUVFFOUVJRU1RSUVJSUlFSV1ZWV1hYW1dXWFZZWlxd
-W1hZW1pdXVhTVltYWFlYV1hbUlNYVVZXVFJXWVlXWl5fW1xeXlxbXV5bXFtXVlZX
-V1hZWVtYV1ZYXI26yNDY3ODi5efp6upJS0hKR0ZIRENISkhIQ0E9QUJARkFAQUNC
-QDs8PTs+Pjs8PDw5Ozk5P0A8PT05Ozk7PT49PD04OT88OTk5PUBBQEFBQ0FBQUE/
-OkBAPT4/P0BAQkNDRUpJSEVFRkZGQ0BAR0lHR0ZDQ0dFRUZFRkdIREVISUpIQ0NE
-RUZERENERkNCRUJDQ0VJSElHRkRDQ0I/QEFCQ0FBQERHSEJAQEBEREJCQERGSENA
-QUVEREFAPj1AQkA/QkFDQUJBQUBGRT5ARkVEREZIR0dOUFFRVFVQTk5NUlBNTlJS
-TUtJSEtMUVFQS05SUlJRTk5NTk9OU1FRUlBNT05PVE1FRENBPjw5Ozk2NDU2NDU1
-Nzk4Njg4NDc3NjozNDU4Nzo2NTQ1NzY2NjM4NjM3OTg8Oz86Ojc3Nzk4ODc7PDo7
-ODs9Pj09OTo6Pj48Oz5AQUBCQURIRkBGR0JAQ0pJSUlHRUZHR0ZCQUFCR0xPUElO
-U1JVVE9SUlROUE1PV1xbVlNUVFJUVVZVVk9NT1BNUlpYUVJST1FRTE5PUVZYW1pZ
-U1ROTk5VVE1MSktLSEVFSkpKREFAQEE9ODc3MzQ3NzMzNDkyMTAzNTU2NDQ0NTQ2
-NjQ2NTMxMDAvMC8wNTIyNDc+SU1MT1NXWVpcWVdUVFRVVFNWWl1dV1FPTEdGSEpP
-R0RFS0lGRkZEQUJDRENCRUJGR0I9Ozw5NzY1NDY2Nzc2NDEzNTUyMzQyMTQ0MTM1
-NDUyMzY1NDYzNDM0Njo2NTIxNjg5Oj5AR01TUFBQUFJTVlNSS0dGSU5NS09NTE5O
-SUlLSENCR0tPVVtfZmltbnB2eHp8fn17gYKGiIqMioqSj5GRj42PjoyJjY2IgX53
-bmNZU1FTWlZZbnl4Z1JRWWVpaWRoZF9raWV4dGNwh4NzcXhxb4CIgXaClJSKbGd0
-gX1mWmBwbV1gcHxwXWR1e3ZnaHaKkIV0a3eMl4+DeoGSoKCZjYuUo6aglYiNm6Kg
-l4mEjpmblIR0eo+YnJOCgZCgpJyPipShq6qgkpSanZuLeHJ3enFkXV5hXVlVUlNU
-UlJRT1FSUFJPTU5MSU1OTU9NTUpITEpPTE1QT01MUE9QU1RRUFBNT01NT1BPT1BR
-UE5NTExMTUtLS0xOS0xOTUtMTExOTk1OT05OTk9OTE1OT09SUE5QT09OS1BRUlBO
-UFBQUVFTUE5QVFJTUlRcVlVUU1NWVFNWVlZVVllcXmFka25ucnd5fn5+gYeIioyO
-j5GRlJaWmJeVkpOTlJORkJCLjIqLiYeAeHV1d2tmZl9ZXFtWU1FPUE9OT1BOTk1N
-TktOTk5PT09QT05PT01PTk1PTU5NTE1RU1JRT1NYXGRpb3Z6goiIjpCQk5OYmpqb
-nJyZlpaTjYeHgn17e3duaGVlYl5cW1lfXVtaWFdYWFZWVlRTUVJSVFVST1JQUFZW
-VVRRU1JRU1hYWVlbWllcWlVWWFleXl9ZXVlbW1xUY1NfWVhXVldVVFZYUVJUVFNW
-WFZbWFhYWltcXlxcW1xdXFtZXVdZVlRWVVdUU1daWFpdjLvI0Njd4ePm5unp6UdL
-TkdHSUpGRklGQkZAOzs+PENBQj9BP0BBPTs4Oj09PTw9Pjs8PkBCPjo5Ojg8QEE8
-Ozo8Ozw5Ozs8Pj89QUBCQUA+PD8/QkBEQkI+QUBAPT0+QUNHQ0BAQkRDQUNDQEZE
-RUdFRERERkZFRkhIR0lHRUhJRkZDQkJDRkJGRUJGR0VCQkBCQ0dFRUVEQkBCQ0JC
-Q0FDREBCRUdJQUFAQ0REQkJAQENFQD9DREFDRURBQUBBQ0NAQUNEQkA9PkFCQUJF
-SERCQ0ZHS05QTk5NTUpMTlNYVU5KSk1PUlFJTE1TUk9PTlNUUFJPUE9QUU9SU1RT
-VExMTk9VUUlFQj4+PDs6NTQ0NjY1NTU4NjkzMzY2NTc4NTM0NzM1NTY2NjU9OzU3
-ODU2Njg9QDw6ODc5ODg2N0E9OTo8PDs1NjU4PDg3OTw7Ojo9PUFCQUE/QkVEPEFE
-RURISkxGSEpEQkRLTEdCQURGSFFSUExQUVFPUVVWVlNRTVFXW1hUVFRWVldZVVRT
-U1NSUE9TVVNTTU5PU1VTUk5VVVJWWltTUk5MUFdTUlFNR0pKSkhLSUZDQD89Pzw8
-ODc7Nzo4NjMzNTIzNDExMTA0NTc0NTIzNTMxMjAyMjIzNTYyMjEzNDdASUxNT1dc
-W1xZVFNYWVlaWFdZXV5ZUVBKRERLR05JSkVJRUVJTElEQkFIRkFFQ0RCPDs8Pzk0
-NDs5ODc2MzUzMjMyMjMzMzQ0NjQzMTM1NDY1OTc0MzM0MzQ2NzU0NDc1ODc5Oj9A
-SE1PUlFRT1BTV1NNRkZPUE1MSkVIUUxLTU1JQz5FS1RVWl5iaG1yc3V4f36Ag4SD
-g4eIjJCNjIyOjYuLjY2Njo+LioWAfHZrYFdXU1VXXXGDhnlfUFBeZ2tnaG9oam5m
-Z3ZvYnWEe21vdGltg4l4cISWlol4doKNiHJkcX11ZWyAh3Zlbn5/c2NjeYyPf2to
-doWJhHNreIqTkoh7fIucop2Oi5SfqKOXjY+WoqGah36Gj56djX19jp6clIWBjJ2o
-pJqPlZygnI+AeXl7b2JYV1pYU1RSUVFRU1FRTU5OTUtOTk9NTE5NTU9NTExNTk5P
-T05OUE5PT05RU1NQUE9OT0xPTU9TUU5OTU5OR0hFR0lLSk5OS0tMTE1JS05NTU1N
-UE9OT01MS01NTU9RTkxPUFFQTk9PT05OUE5QTU5PUlJQUVFQUlJTUVFSUlJXVFVT
-U1VXWFpZWl1jZGZrb3Fzdnl9foCAgoaJjI6RkpORlJmYlZSUj5CPkY+Qj4+Oh4GB
-fnx3cXV4b2NhW1dWUk5NTU9OTE1LTU1LS01OUFBOTExNT01KTVBST05NTU1QUlNV
-VlZXV1xla3V9goSKjo+PkZeZm56ZmZqZmpeUkouFf399dnFybGdjXltbWFpYVVda
-VlZYV1ZXWFZSUVZVWFhUUVBPUlNSU1NRVFNRU1NUVllYWltaWVdYW1tZWVxfXl9c
-Xl1dX1VdUFtbWV1cWlhXVlRVUlNTVVdYVldWWFZaW1taXFxbWlxaW1tWWFtbVVhZ
-WFZWVVZUVWCJvMnS2N7g5OXn6OnrSUhLR0dIR0ZEQ0xCQkA+PkE/QT48PEE8QD9B
-Ozw7PDw8Ozg8Pz07Pjw6OTc5Pj09PDw8OTs6Ozw5ODo+Pj0+PDo/QEE/Pz4/QEBB
-Q0FDQkBBQUA9P0NDQUJGQ0VDQkVEQ0FCRURERUZHQ0NGRkpJTUtIRUZDQkNFRENC
-REVGRUZHRUNCQUI/RUdISEVARUNGREJCQUJGRkZIR0hEQkNDQ0JCP0NEQkJCQUFA
-QUtGRkE9Qj5BQUE+QEFBQT08PkJBSENGRkRDRkhIS1FOTktNT09RVFhVU0xHTE5Q
-Uk1KTU5SUFZTUU9MTk5OSkxPUE5PVFNRTExKTlNVTEQ+Pz07Ojo2ODYzMTIzNDI0
-NTI1Nzc5ODY2MzU1NTU1Nzg2NTg4ODY5NDM3Ojk7OTw2NTY1NjU5OTg7Nzk6Pjk2
-Nzc0OTs5OT1BQT0+PEA+QEJBQEVFRkdHSEtNS0lKS0tGRUpNSENERUhIS1FOTlBN
-TFBSUlVVU1RRUFRZVFNSVFRSVFNWVVRWVFVXVVZTUkxLS01SVFNSTk1PUVFVVlVS
-TUtQVFBQTklJTU1MSkhGRENDQkJDRUM7QTg4NzY2NjYzMzEwMjIwNDMyNTQ1NDQz
-MjMyNTgyMzU5Njc2NjMyNztDRkhMU1xhYFxUVVRYXVhaWlhYWFRSTEhESExLVUhF
-RUFERUdIRkNCQkVHQUNCRD07PD08Nzc7NjY3ODc6Mi8wMzY3NTM1MjE2MTIzMzMw
-MjMyMTQ1MzIyNDU2Nzg1MzM0Njg6PURFR0ZKT1BST1FVU09MSFBSUE5OR0ZJR0tN
-R0M/QUNIUVVaXWBlbG5xc3h7gH6BhYKDhISNj4+MjYyMjIqMjIyOj46Lg4B7dGtd
-U1BNUlpkf4uKeGFaYm9ta2Zsb2ZqbWBlb2RccHlsYXRsXmh+gXBwgo2Pg3h9jI+G
-dW59g3podoiKf3F+i4h6cHiIlZOBc3J+jI6CcGl6jpOMfnNzhJOVkIaBjpujnZKK
-kZ+pqqOSi5GfpZ6Ofn6Om5mQfnuImp6ZjoaJkZufkYJ6fn1vYFdWVVJTU1JSUlJQ
-T05PTk5PTkxOSUlLTExNT01RT1FQUlBNTk5NSktPTU9PUE5RUVFQUk9QTk5PT1FQ
-Tk1PSk5JSUlMTU1JS0xMT1BOT05PT01NT0xLS0xLS01OTk9RT0xOT05QTk5OT09N
-UFBPTU9QTExPUVBPT1BSUVJSU1ZVVFFRUVBSVVZXV1pbX2VmaWtsb3J3eHh8foKG
-iYyNjI+QlJSVlZOPkpOWkZKSj4yLh4WGgH17fX96cGtjX1lXV1NQTU9OS05PTk1O
-TU1KTE1NTEpMT05PTU1PTk1OUE9PVlVVV1tia3N6gYaJi46RkpOWmZycmZicmpSQ
-jouIgXx6dm9raWZjYltZVlpeWllZV1VWVlVWWVVVVVdXVFRSU1NVVFVTU1BTVlJQ
-UVJXWFdVVllYW1tbWVhbWllZWltdXV5dXl9dWFlPUlhaWFlZWFZYWllUVVRWVFVW
-VltZWFpYWFpbW11cWltaW1xcV1paVVNSUlZWV1taYYm8ytLZ3eHj5efo6upMSENJ
-SERFSUtHQ0VCPTs7P0I+PkA+PENAQ0E/QD47O0A/Q0E+QUI8OTw/Pz4+PT07QT48
-Oz0+Oj4+OTk8PTs8Ojs+PEBCQENBQj9CP0NGRUNDQUJEQkFER0ZEQkM/QD88QEFD
-RkdGRUZEQUFGR0hKSEZFREE+PkBCQkNDREdERUVDQEBFQ0RGSEpJRERGRkREQkNC
-QENDREZJR0RBQkNFQkJCQUJDQUA9PkFAQkRBPUA7PkA/QUBAQUBCQkE/Pz09QEND
-RkRLS0dMTlJSU05SUVNWV1VSUU5OTUxPT01NUVNUVFBPTlBOS0xMUU1OTU5WVVFL
-SEhNUVFLQUFCQTs7NzU0NTY5NTMvMDM0OjU0OTk7MzIzMzIzNTYzNTc1NTQ7PTc5
-Njg2OTc4OTc5Nzg5Ozg1Njk7ODo8PTw4Nzg5QEI6OTw/QT48PEA/PkA+QkFERkpM
-TUhKRUdGRkVFSk1MSkRHR0hMT09TVE9NTE1RUlRPUE1MUlVVUlJST1BPT1VaWVZX
-VVZXWlROUU1NTVRXV1NPUU1QVFFUUlJPS1JUUU5OTkpPTUpHRkNBO0BBQ0ZFQ0A9
-O0c7Ojc4OTc2NzQzOTMvMTExMTQ0MTI2NTUyNDM1NjUyNDIyMjM2NzhCTVBVXF9g
-XFdTVFdWWVdaWVlaVlVRSERET1FVTkpFQkVFREVBREJDQ0JAQD8/Ozw5Ozg7Ozw6
-ODY1NzY0MzEzMzIzNjU1NDE1MjU4NTMxMzY1NDY1NzUzMzU1ODc1NjQ2ODg8PkRI
-RkdOUU9NT1JRUE9OTE9TUk5LSEpKS0xLQ0A+REpOVFxfYGJpbW9zd3x9foKChIiG
-hYeKioqKi4yMj4qLiouMi4iBfXduYl1WTVBNUWeDjYVwYWZyd3NsZmpoYGheUVZg
-U1RjZFpeZl1WY3FqXWJ7hoJva3uKkXtwcoOFdWt8jop7eIeQjIB5hJihm4d9hJCX
-lIl5dYKUlYx5bXSFkI+FdHGBkZeTh4GPnqWon5aSnqqoopGGipadmYp6eomZnpKA
-eX6KkpSJenZ5dGlfWVRSUFNRU1BRVE9PTkxMTk5NS05NS1BOUFBOTU9QTE1OUVBO
-S05QTExOTE1NTEtQUlJPUE9PUU9OUExOUFBPTUpLTExKS0xNTk9PTlFNTkxOTEtK
-SkxOTUpMTU1NT1BQUE9QTVBRTk1OT05LTFBST1BRT09PU1JUUlFRUU9RVFNST1BQ
-T05RVFZWVFVXXFxgZWdoaG1wcnZ7foCFhIaGiY+QkZOTkpOYlZeUkpGSj5CQjYqH
-hYF/gHx0c3BrZmJgWFNSUVBRUlRST09OTE1MS0pOTUxNUU5PTk1PTktPUVFTWFte
-ZGx2e4GJjpKTkpOVlpuXmJqXl5iVkYyHgX57dG9raGZmYF5dXVtYV1hYWlpYVldZ
-WFRVU1FRU1VVVVVXU1JTVFFQUlRTUVNRU1VVVldYWl5ZWlxhXFxaW1tbW15cW1xc
-XVxcWlhYWFZWVldVVlZXV1hWVlVUV1ZWV1hYV1pZXFpbXF1bWlxcXFlYWVlXV1lX
-VVhWVlZehbzJ0dnd4uTm6Ojp6klKSElMSENDRkpCQUE9Pj08PDw/Pz86Pz87QEA9
-QT49Oz49PT5BQTw7ODo9PDk5PEE9Oj07PDw7PT49PkI9Ozs8Oj0+PUFBPUJCQT9B
-QUFCQEBAPUFESEdDQ0RFQkRCQUFBQENDRUVER0dERENFRkZFSEREQkFAP0I/QEFD
-RUJDRUNBQkNFREVJSEhFRkVCRUdCQz9BQkFERUhJQ0JEQUJGQj9AP0BCQkRAPD5A
-RkE+PT4/PUBAPkBAQUA9PD1DPzxAQ0RFR0dMS0xQUlJPS01OTlRWVFNWUk9OTVFO
-UFFQUVFTU1BPT1FPUVNTT1BRV1dRTkxLTU1QUUpIQEBBPDs4ODUxNjY2NDU1ODQ0
-MzQyNjUyNjQzNTU1NzY2NzY3NTM4NDM1ODQ1ODk3ODw9QD0+Ojs7ODo3Ojw8OjY6
-Ojk7PTk2OTw7PD0/PT07Pjw+Pz5CSVRLSkhGREVHSEtOTlJRT0VJS01OTVBQTUxL
-TU5QVVVUTk5SVVhTUUxOTU9RVFZXWFVRUlVXTk5MTk9QU1ZWVlJQTkxPVlZOT01M
-TlNTT1JQUU5MRUVGRUVCPz49QD48Ojk5QTw6Ojg1NjUyMjIzMjQzMi8zMzE2MzIz
-NTUyMzM0NC8yMzEzOjE0NjtDSVFTWFpYU1NTVlpZWVtdXlxZVlBLRUZQU1RRUkdD
-REhESEdFRkdHSD9AOzw7PTk5Ozk4NTc4ODY2NDY2MzMzMzQyMzIxMjU4NTU1MjM0
-NjQ1MTQzMjU0NjQ1NzY2ODU3Nzc6PUNJSk1PTUtNU1RMTEhJS1BQS0lLT1NNS0hG
-Q0JFSU9XW15fZGZscXZ1eX59fYSFiIiKioiHio2Ojo2Ni42MjIqHhYF5d3BjWlFO
-SEhSZnx+b15gbHZ1a2ZhXlJQWU5GUFRKSllaT1pfVlBlcWhdaX2BdGBhdIeEbmNw
-fXhkZnqKf3FziJCLfXqElZ2Zi4KKnKWckIaIlp6fl4B0eImTkIBtbnyLjol9e4ma
-oaGckZGdqa2mmpGXo6SgkYGEj5ydjXlyeYiMiHptaG5uZVZRUVNRUVBPT09RUE5P
-TE1MTU9RUVFQTU1OT1BUTktPS05PUFBRT1JRUFFRTE1PTlBTUE9QS1BQTk5MT1FQ
-T09PTUpHSktOTktNUVBPTk5MS0xNSkpLTEpLTExNS01PUE9PTk5NTUpPUU1LTFBM
-TE5LTk5OTU5PUFJSUFBOT09QUlJTTVBQTFBQTlFWVVdXWFpdXmJkZ2hucHN1eXp6
-fYCEhIqPkJGQk5SWlJWWlpOVlpaYkouIiYSCgHx7eHVxbGdjXFdTUlFQUk5NS0tK
-T05MS0pMTExPTk9ST05QU1FSVllhZ3B4e32AhoqOk5eYlpqbmpqdlpSQjouHhYF+
-eXRzcGhoZGFfX1pcWlhXVldVV1hZVFZXVlRSUVVSUVNUV1hWVFRSUlFRUVVWVVdX
-V1hXWVlbXVpaXV1cWVhbWllbXl5gX2BeXVtbW1pXVVNTVFRWVlZYVVVVVFdXV1dX
-WFlaV1haXl5ZWllYWllZV1VUV1pVVVdYWVlZWWGGvcrS2d7h5OXo6OrrSEhMSkpL
-R0dBQUFBPz9BQTo6PDw8Oj07P0JCQj48Oz07Ozs8Pj85O0E8PDg4Ojk4OD08Oz1A
-PT05Ojw9QUFAOz1BQURBQEI+PUA+QD9BQUA/PTs8P0RDRUZEQkFCQ0RDRkdGQ0NC
-Q0dGR0NCQEZHR0hIQkJCQkBCQUJBQkNAP0BDRUhHREVHRUpLSkhFQ0RDQUE/QD1C
-QUA/QUNDQUFCQ0NBPkFCQEFAQEE9PkA+PD48Ozs9QEA8Pj48QkFAQT4/QEFCQ0ZQ
-SElHR05SUk1KSkxMUFFQUFBOUFVRUE9RT05PTU1PT01LT1FST1JUUVBQUk9UT01O
-UFVVT0VAOz08Ozs3NDQ0MjM1ODYzMzQ1NjU1NTY2NTQ3ODg2Njo4NzU1NTQ0NjY3
-OTY3NTY7OTo6PD87NjY5NzY4Ojw7OTk4Ozs5PDg6ODs7PDs+PD9CRUVIRUVGR0dH
-RkNFRkNFR09QTU9PR0ZKTE9TUVFPTUpNUlJUU1VSUVBVU1BQT09VVVVVU1FSUFJW
-WFlVT01OTE5QVFNVVE5LTlBWVFJMSEdMT1FSUlZVUk1KSEhKSkpCQEA9PDs6OTk5
-PTs5ODk3NDMyMS8vLjM0NDUzNDQzMzMzMjIxMDI0NjIyMzc2NDU3NTpFS01TVFJV
-V1dXWlhZXFxcXFlZVU9IQ0tQUklaTkZARkdHSkhEREZHRkVCQz4+P0I8OTc3OjY1
-NjU1NDczMzEyMjIxMzM0MTEwNjQ0MzMyMjQ1MzQzMzI1Mzc0NTc3OTY5PDo6QUVJ
-TElMUFJXUU1KSERITEtGS01QUEhGS0U/PUBGTlNaXWNkaHB3d3p6foKDg4WGhoeJ
-iImKioqKi46Qi4uKiomGgHp1cWxcVU1KRlBndm1YTVJha2VjXFNTRUVRSUNMUkdH
-VE5MW11XW3JwY15tfHprXGR8hnVdXXF4Z1Nhe4FyYW2Ah4Fxb4KSnJSJgo2bo5+R
-iY+epqacjYSJlpyUfm5xgIyOgXZ1h5WbmYyGjZmosKeYlZymrKKYjo6boJ6PeXBz
-foJ9bWJgX2BeVVRVUk9QTk1RTlBQUE9OT05PTktOT0xJTEpLUE1OUE9OS09LS01O
-T1BQTE1NT05PTk9SUU9OTE9RTlBMS01SVE9SU01JS05LS0tQTk9QTU5PTk5MTU1M
-TE5OTk5QT05PT05UVU9NSkxNTU1KTE5NS0lLS09MTE9OUFJRUlFRTk9QVVJRT05Q
-T1FTUFJWVVNTV1VZWl9iYmZoam50c3Z2eXx+gYOFi42Tk5KRk5aUk5iYmZiWj4yN
-i4WDgn59e3l1b2hmYFpUUVBQUU1LSUpMTU5MTEpLTEtLTk9SUlZVUldaYGhveYCD
-iIqLkJKVlZibmJeWl5eTk42IhX57d3Nvb2xnYF5cWFhXVlpdWlpXU1dXVVRUUlJU
-VVJTU1NUVFRUVVZUVVRUVVNUWFZTU1JVV1dYWFlYXFpcXFtcWlpdW1tcXV5fXl1c
-WldYVldUVFJSUlRTVVdWWFdVVFdXV1laWVtcWVlbWlpcXVlYWllZVlZXVFZWVVZS
-U1VXWH+7ydLZ3+Hk5ujp6+tNSkhGREdFRkNAQkE7OkFFPD48PD1DPjs9P0A8QD05
-Ojs9Ozk8Ozo7PD44Nzo5Njk5Oj48Oz04Ojw/Pj06QEA9Oj5DQUJAPj49QEE+QEI/
-QEE+PUJAQEBCQ0REQUFDQ0ZEREJDQkFBQ0ZDQ0RFRERFRkZGSEhHSEVCQUZDQD8+
-P0NEQkVHQkZIS0pJTENAQ0A/PT4+PD09PT8/PT9BQ0NDQkFAPz9BQUA+QUFBQEBB
-Pjs5PEFCQD08Ozw8P0I/QkJBQkRERktHSEtMUlJTTU5OTlBTUlFPTUxNUk5TUE9O
-TEtLTUhKS0tNT1NRU1JQTU9VU09SUElLT09LRUE+QD88ODg2OjU2NTk3NDQ0MzM0
-MjMzOTY1MjY0Njk6Rjg2NDU1ODUzNTY4OjQ0NDUzNTg4Ojs5Nzo2NjQ1Ojk5ODk6
-Ojs4Oz1AP0I/PTs9P0BDR0hIR0VHSEdGRkdGRUNHTFJSTUxLS0tQU1BRUU9LS01O
-UlRVVVFOT05UUVJSUVBTVlZWT1FSVllZXVZWU1RQT1FTU1NTUEtRU1hZWFFJRktQ
-U1JPUVVTTElHSEtOTERAQD0+O0BBOjo5ODk3Nzc2Nzc3MTE3NDU2NTQ1MjE0MDEv
-Ly8zNjMzNTYzNTMyMzQ6OTxCR0pNVFdeWllaWVhVV1dYWFdWTktHSEpTTkhGRkVD
-RUdGR0VERURAQD5CQD49QD86Njg4OTc2MzAzMzQ1MDE0NjQ1NDUzNTMzMjU0MzI0
-NDY1NDQzMC4wMzQ3PTc1NDY4Ojs9QUdITU5RU1dVUk1ISUtJSEZLTU9OTExHQz46
-QEZNU1VbYWdtcHF0eX1+goGBg4aFiIuGhoaLjo2KiYqLh4yHh4F/e3dyZlpSSkhK
-VWpsXU1KVV9iWVZQUU5APUhIQ0dKREpUUVFhZWBodXlvbnqBfWpkcIaGb1pjeXZi
-WWl8e2ldaXp9cF5ieYmMhXZ0iZmcloiHkqCmpJyPjZehopmIfIKMko+Cc3SKmJiS
-gn2GlqWmnpWSnKaropCLk6GnpZSCdnd9fXRnYWFfWVRUVFRVVk9PT1FOTE1PTU1L
-TkxLSUxMS05LTk1QUlNPTU9PTlJOUFRRUFFQTkxOUE1LTE1LTE5NTVFNUExMUU5K
-TE1NTUxKSkxMTElJS01MTk1OTU1LSUtMTkpMTktMTE5NTk1LTUtNTUxPTVFOTUtM
-SktNTU5NTk5OTU5OUE9PT09OTU5PUVJRUVRSUlJVU1JVVVNUVF1dW11iZ2tvcHR1
-dnp6foGGiY2PjIuTk5OVmZmWlZeVkJGMhYWIiYSAf3l2cG5sZF5aVlRWV1NRTEtO
-TU9PUEdJT1BPT1BTU1JXXmRtdHmChomNkJGUkZWVl5mbmJWTkY6Kh4aBfXZycGxm
-ZmJeW1pYVVNXWVpZWllZV1NTU1NTUFNRU1FSU1NVUVJTVVZWVlRSUVNTV1ZXVlhV
-V1dYWVxdXFxcXFtcWllbXV1bW1xcWVlYVlZVVFFQUVNRU1RUVFRVWVVWVVdYW1hZ
-WllaW1xcW1dWWFlbXFpXVVZVV1daV1VUVllhiLvJ0drf4uXm6Ojq60lKRUdFREND
-RENCQT8+RUM6PUI/Pz5BQkBCQUI9Pj47OjxBPTw+PTxDQj87Pjw7Oz47PkZBOzs8
-Ozo8PDc6PkFBOjs8QEA/PDxAQEJDREJBREBAP0JCQUFDQkE+P0JAPz9BQ0JERUZD
-QkVEQ0NBQkJEQ0NFR0lKSUhDQUREQUFAQkZFQ0FAQUZHSEVEQkBDQkBBPz49PkFA
-QD47PUBAQkJEQUFAQUFAQUE+QEJCQkNAOjs8P0A+Oz09PD1AQEVDQT88PkBDSUpK
-TE1UVFVRTVJPUVRTUU1NSklMTE5OSkpKTElFSEpMTExNWlRKSUlQTU1LUFBQTUtQ
-T01KSERDQDw6Nzo3MzMzNDY1Rzk1MjM1NDQ1ODQzNDQ2Ojc4NTY3ODg5ODYyNDc5
-OjUzNDU0Njg8PTk3Ozk0NDg8Ozo/OTpANzo6Pz9APj09PD1AQ0A/SEtJSEZIR0ZF
-SEdGR0lKT05OS0hJTVBNSktLSktKS0xQVFNUT05MS1BVVlJQUVBSVVRUVVFTVFZa
-WFVTUlFSUlNVVE9PTlZVWV1XVFJMSk5TU1JSUVFMTEpLS09LSEVCREFCQEA8ODo7
-Nzc4ODM1Njc2NzcxNDI0MzY1NDIzNDEzNDMzMzQ0NDQzMDI0NTg3Oz9BREtSVFtX
-WFpaVFNWVVVTVlZWU0tJSE5ORkRBQ0dFS01LRUdBQ0RAP0A5ODlAPzo3NjQ2ODUy
-ODU2NDY0Mi8xMTIzNDIzMjQ0MzQ0ODg1MzU1ODIzNzc0NzY3Njc2OTo4Oj5AQ01R
-VVNWWlpVU0pJTU5LQ0ZMUlBNRkRCQkFARk1WXWNmamxtcnR1eoGEhYeEh4mMiYmK
-iYuMjo+Ki4uRj46Gg4F9d3FiVk1KS1JibmlQS1NfY1pVU0tSUUZFTlRJTFBLT1pY
-WGhta3GEhnt9iY2Ec3OCjYtza3qDel5edoN+ZV9sfn5rW2N5iIZ5bXGFk5aHd3eH
-mp6flYqLm6WroJGLkJuclYh7f5CdnY57dYGTnpuSgYeYpKeajYeNnaajlIR+gYV/
-c2RbW1lZV1JRUVtTUlBQS01PTk1OTk9NSktKSU5PT1FVVE5NUk9ST09PTk9SUlRS
-UE5QUk1QUE5MTE1NT05NUVBPTU1NTUxMTU1PTUlGSUxLTElNTE9PS0pLUFFMS05T
-UVBOTU1MS0pKSU5NTk5LS09QTktNS0tPTUtMTU5NT1BLSkxRUlhPTE9QT05RUVJQ
-UVBQUFNVU1NUV1NSUVVWV1heYWZoam1vc3h6fn6Cg4aFhIyOjpGUlJWTkpaYk5GR
-jo2PjImDfHt7fHVya2NeWVlUVFFQT05OUVJSUlJRTlBQUFBTWWFpbniAg4mLjo2R
-lZiXlJaXlpeXkJCLhoN/fXt2dnBpZmFfXFpaW1pZV1hXV1hYWFVVVFJTUlVTUVFR
-UFJSU1RUVVVVVlRVV1lXVVRVVVVUWFpbWllZXV1cWlxbXF1cX1tbXl9bWltbV1tY
-VVVUUlBQUFJSVFNSVFRUVlVVVVVbW1pbWllaXV1fXl1ZWlpYWFlYVVZaWFdYV1da
-W2aMucnR2d7i5Ofo6evqSExKSUdDRERBQT08QD1ERElEPD09Qz4/QkA9Pzw+Pjw9
-QUVAOzs+Qj5APjo7PTs9OzxAPzw+PT5BPT07Ozk6Ojw9Pz1BQEI+PUBCQkE/QT1B
-PUBDQ0VFREVGREVBQUZHRUBBR0REQUJEREdERUFBQUJCQ0NERkZHRkVERkNDREZC
-REQ/QEFCQkNHREQ+PT8+QEE/QT88PkBBQUVCQEBDQ0VBPj1CP0A+QUFBRkVDQ0A+
-Ozs9Pzs8PT9BPT1AQENAQEI/QEJHSkpNTFJTVlJPU1NPTUlOS0hISk5QTU1ISUtL
-TEhITE5NTEtNTUVDRktJSk1LTk1LSk1OTEVGSEA7OTk4NTc1ODg1MzRLSzY1NDM3
-NTM0NDY1NDY4Njg4NTY0NTQ0NDIzNTY0NDUzMzY5ODk5PDw7OjU1NTc4Nzk6Ozg7
-Nzk/QkA+PDs8PkRFREVDSUlFREdIRUJISU1NSEhJTUxKSUlLTUtIR0ZFRkdISk5Q
-UFBMTVBRUlNXVlVSTlBQUlFVU1NTUVZXVlJSUFFPUVdYVVBSUVFXWVlbWFFKTU1Q
-TE9RUElHRklNTkxIR0NDPzxCQzs5Ozo7NzQ0NjY1NDI0MzQ1ODQzMTMwLzMxMzY1
-MzIzMzIyNTQzNDU0NTc3OTw/RU9VWl1YV1ZXV1hYVlNZV1lZUE5IS0lGQ0RCSEpO
-SUpHRkRFRkZCP0BCQD1ANzo7NTY1NjU3NjY1MTY0NDU1NDY5NDAzNDQ1NjUzMzAy
-MjY0NTI0LzEzNzM2NDc1OTk6PUJHSk5SUVZZWVhVTUxNTktNS05RUU5ISUI+P0JI
-UVZZXmducHBxdHh8goaHiImHh4eKh4iIiYmOj46PjY6LiIaCf3xzaV1UTklLVWx0
-aldZZG1nWlRWTVNWSUdUU0dOV1BOXlZWZGVhbIF9dXuGj4J3fY6Qh3d2hY2Ba2yA
-i4Nsa32FfmhbaX6JhHNrdYmPjntsb4KOlZKCfISUoaSbi46Zo56ckYqQnJ+glYN8
-hJKZmId6gJGcnI1/fomYnZiJfHyEhYBxYFhXVlVVVlFPUVFPTk9PTk5MS09OT05O
-T05OTE1QVVZSUlBSVVBQUE5PUVFQT1NRUE1NTE1OTk1LUE1OTk5OTlBRTExLTEpL
-TEpJTEtOSkpNS0xNTE5NTUxPT01LTFFQTktPTUpJTUxNTE1NTkpJTEtJSUtMTExN
-TUpGSUtNT0xLTU5RUVJOTlBQUlJUVFJSUlRXVVVSUVRTUlFRUlRUUlVZXV1iZmZp
-bm1xdXl7fX2BhomMjJCVlJaTlZWUlJWUkpCOi4eEgYKAenp3cGpkXVhUUFBPUlJN
-T1BOUVFUUE5QVVpjanF3fYKIiIyQkJOWmpaWmJeWk46MiYV+eXt1c29raGRfW1pZ
-VlNTV1laW1pZWVhVVVdTUlRWUk9QT1FRUlRUVVZXV1VWV1VXXFlYVlVVVVhaWVRa
-XlpZWFlaWltdX2JgWltbXl1bWVtYWVhZVVRST09QUU9QUFVXU1ZUVFZWU1RXVlld
-XFtaXl5dW1xcWFdZWVhWV1dXWVJTVlVbZpK5x9DZ3eHj5ejp6epIS05KREdDPkFA
-PUVHQEBFQEJBOzs8QkJCPTs8OTs8Pj49PDs6ODk5QD4+Pj04PT47PDs5Oj0/P0FD
-QEJAQD4+Pz49QUFCQD08PUFCQT9CPz49PD4/RUZJR0VDRUNDQUNDQz9ER0RGRUZH
-RUZDQkFEREFCQ0ZERURFREVEQUREREVDQUFEQURDQ0JBQEFAPkA/QT89PTo7PTxA
-REE/PkFBRENAPz0/PD1APj4+P0NBQD09PDw+Oz8/P0FBQz9AQ0BDQEJHQ0lLS05O
-T1NTU1FXVU1LTExKSElKTktKSklJTEpMSkpKUFJSTkxJSUtISEtNVFVNSUlMTEdL
-SkdGREE7Ojo0NDM4NDMzM0BLNTU0OTo3NjQ0NjY1ODg4NjQ0MzQzNDQ1ODY1NzQ4
-NDU0MzQ1NTc3OTs5Nzc1Njk8OzY8OTs8Pz0+QD9AQkA/RERFREJFRUVHSklDREdL
-SkdLSUZISkpKTEpJSUlHSUhHRUVGRkpLT1FSU1FUVFVWU1RXUE9SVlNVWFZUU1RU
-T05NT1JSVlZVVFBRTVFWVlZWUU5QUk1LS1JPSkhJS01OSkVIQj0+Pj0+Ojc4Nzg1
-NDQ2MzExMTE2MDE0NDIvMjEzLzI2MjQyNDExMzIzMzIxNDU1OTc1NzhCTFFUWFhW
-VlZXWVtbV1tZXFtTT0tMSUZDR0RGSUxKUEpPTkpEREFCQEZCQD47NzY4NjU3NzQ1
-NzU0NTEyNTQ2Nzg1NjM0NDY0NTMzMzQ2NTU3ODIzNTM0NDEzNDQ2ODc7QUZLTU9W
-WFtYVlNQTUxSUVRRTEpLT0xIQTxAQkNKU1dgYWlrcXN1eX1/goWGiIiJiImJh4mJ
-io6Oj4yKh4iGgYF7enNnXFJPTU1fdHlpXmVzcWRbW1ZPUlhIR1RVSkhST1BVT1FY
-VVBhbGlhdIKHeXN9jo+Ac3uLj35weIyRhnF0goiAcW15iI6FdGt7jI6HdGpyhZCR
-hXdyfJKbmY6Dh5qkoZ2UkJelrKqbjYeRn5+WhnqBk5mVhHVwfI2SjYB2dX18d2lc
-WFJVVFJVVVFRUU9QTU5OUE1NTU5NUFFOUE1OS05QUFBRUlFSUVFTUVBRUE9OTE1M
-Tk1OTkxLTU1LTE5OTEtNT09MTUpNTUpMS0tKTEpOS0tJS0pLTkxLS0tMTk9OTUxN
-TUxLTEtLS0tPUE9QTk5LSUtMSklLSUtLS0xOUEtNTkxLTEtMTE1LTVBRTlFRUlJS
-UlNTU1JRUVFVU1ZTU1NRU1ZWWFhbYl5jZ2lqb3N2dXl+goeKio6PkpKTlJSUlJaR
-kY+PjYyIh4WGfXp5dnFsYmBdWFRTUVBQTkxMTVBSVFZaX2t0fICCiI2PkJKWlpeX
-mJeVkpCNioWBf3x4c25qZmVkYV5aV1laVVJWV1dWWFhYVVNTVVVVVFRTVlVUU1RU
-VFlWWFlZWlhYWFZYV1VZWFhYWllbXV5gXVlcXVpZXltdX15fXV1fXl1bWFhWVlRW
-VVRTU1BOUVJTV1dUVFVVVVZTUlZaV1hbWlpaWlpfW1tZV1hWWVhXV1VXV1VZWlpg
-lrvIz9fd4eTm5unq6klHSkhGRkNCQkRIQkZIRUNCPz47OkBCQjw8PTs8PTw7Pjs5
-Ojs9Oz0/Pjw9Pj06Ozw7NzY6Ojg6PkNEQ0VDQkFAPT5BQEFAQEE9P0FGQj4/PT5A
-PD9AQUJGSUhFQkNFQUJAPT1CREZCRkdGRENCRURHRkVGRkVHSEZGQ0NDQUJCP0FA
-QUE/QkNDQT5CQUBCQUFCQD9AQT1APkZSPTw9QEBBQD9AOz09PD0/PDxCPz49QD9A
-QDtDQD49PkBBOTo9QEJDRUxKSUtOUE1MT09RUVVXUk1OTU1OSUdNTEpHSEtKTEhJ
-SElNS0xNSUlJRkZCSEhPUUxKRktMTEtLRkdEQkA4ODQzMzQ0MzIxNDs2NTI0NDMz
-Njc3NDQ0NTU0NTMzMzM3MzQ3NDU0MzE0Nzg4Njk4ODk4OTg2Nzs+Ozo5Njg8PT47
-ODo8Ozo8PkBAPT9AQkRFR0ZGSEVERkdHRkVGRUZGRUZFRUZHSUtNTUpJSUlHSEtO
-UlFPUlBRV1JUU1FOT1JVVVRWXFdYVlRNTU5QT05PUlVOTk9NUFRaVFNUUVJUT01O
-VFNJRUZKSkpGQ0NAPj4+PT07Ozk4NjY1NDQ1NTQ1MzQzMjEzNzIxMzY0MzIzMjMx
-MjAzMjI0MDE0NTY6ODYzNTdCS1FUVlpYWFddXltaVlhZV1JNTE5OSUJGSUlEQkRN
-S09OSUNDQERCQj8/Qjs6Ojk3NTMzNjU1ODI0NDc3NjY2MTQ0NjMyNTIxMjUyMjAz
-NTQzNDMzMzM0NDU2Nzc2OTg9Q0pMSE5YWlROS0lISEtMT1BJR0dIRkdFPzs9Q0pQ
-V19lamtxdnp8fn+DiIaHiYuLi4iJjoyLi4uKi4qIg4WEfnx3cWdZUU1MUF5ubF5e
-bnhxYVxiVUhQVkxJTlBGRUZHRUpHRUlLR1FfVk5ecnNkX3CBgXJpeISAbmh8i5CC
-dH2Mj4Z2dYiUl459eIiZmZB7c3qJk5GHb2x9kpSNf3V9kJiYkYiLmauxq6GUlJ6m
-qJqJgImYnJaCb2p4ho2GdmxscnRqYVlWVVVSUlJUTkxNUU9OTk9MTUtNS0tNUU9Q
-UE9SUlFPUFBRUlJSU1NSUVBQUFFPTU1NTktMT05OTktNTUxNTUxNTU5PUEtIS0tK
-Tk1MTU9MTk5NTU5LS01OTU1OTk5LTUtNTUxPUExOT05QTk1MSUtOT0xKS01KSklL
-S0pLSkpMT09MS0hKS05NT1NUT1BOTVBRUU9RVFVUU1FSVlJRU1RUUlRTV1pcWlxe
-X2BpbW5zdXV8gISFhoqRjZCRkpSZmZaQkZCPj42LiYmFg397dnNtaGVgWlhUVE5T
-UU1PT1JWXGJrc3uAhYmPkZOWlpeUlZOUkZGNioaAfXp3dXNpZ2RgX19dW1hYWFdW
-VFRWWFZWV1RTVlRTVldUUlZWVlRTUlJUVFVXVlVWVlhaV1VXW1hYWl1dXV1gXV1c
-WlpZWltbWlxdXVxcXl9fXVtaWVRSVVRRUVRSVFNTVVVWVFJUV1dXWFZXWVdWWFla
-VVZbW1pcXFpZVVRWWFhYVlZWVFVVVV+WvMjR2N7h4+bn6OnrR0hHRUhFRERERURB
-PUNAQkVAQEM9PDo+PT0+PkJEQD87Ozc4PDw5OTg5Oj0/PTw3Njk5Njo6QEFAQEI+
-PD0+PD1CP0FAQUNAQkFBPkFCQj9CQT0/Pz8/QEJFRkVEQUBBQUJCREJDREVGR0VG
-RUVCRkdKSEhGREZHREI/Pz9BP0E/QEJCQUJBQkVFQz1AP0JGRUNDQUBCSkE/Pjw9
-QkNAPUBAQUA9PDw+PUFAQENDQkVFQ0A+PT1BPj4+Pj49PUE/QkdKSERER0pLSE1O
-UVROTVBRVFBPT1FFRUtOTk1OUFBKSEdFR0pMTkhDRkZFRUJERUNIR0VJQ0lLSUdH
-SEpFQjw3OTc1NDQyNTU0NTU0MzQyMzY0NjY1NzY2NzQ0Njc0NTQ3ODk2ODk5OjU3
-ODk4OzY4ODo5ODk+Ojc2NTY2NzpAPTw7OTs+PkA8P0FCQD07QUVGREVEQUVGR0VF
-R0ZCRUNDREVHSkdLUE5OTU5MTk9QTlFQUE5LTE9TUVNTUU9TUVFRTk9TWVtZU0xN
-TFBOT1FTUFBOTU9QVFpVUlBUV1ZVVFZVUk5KSUdHR0dBPT09RENCPz1AOzY1NTY6
-Ozo2NTU1NDUyMDExNDQzMzIzNjQ0MjQ0MTM0MzExMzY2NTQ1MDQ2NjlCSlJUVldY
-WVxcWVhXVVFSTk1LUE1KRElERkNCQUZIRkZGQkJBPjtARkNBOzs8ODU2NDU3MzI1
-MjE0MzUzMzIzMzU3NTI0NTMzNDY0ODY1MDAxNDY4NTc2NDU3Nzc4Oz1BRUhKSVFU
-U09PTUxISUxJSEhCQERGREM8QUNGS1JXXWNsbnN3eHuChoaFhoeHiYyLi4qLjY2L
-ioqLiYqHgoR/enVsY1dRTUpOW2RVS1RqbGVgV1xVRUZKRDtCSUU/Q0JAREM/Q0RB
-S1VPTFtnYlRXbntyYV9td25cYHSCg3VufI6RhnV5jJmZj4KAjp6kl4qAiJWZlYl6
-cYKSlYt3bXaGko+BdoCYqq2nnZWYoKeqno2LlaSnnIZxcHuFiHxtaGNoZ2FbVlVW
-U1VQUVJUUE9MSkpKTk5OTEtLTktPT09OTU5PT05UUk5PUVBRUVVTUVJRUlBNSUpP
-UlBPUFBNT1BMS09QUE5MTFFNSkxLSk1PT05NTUpOTUxKTVBMSkpKS0tNTE5OUFFP
-Tk5OUk9OS05NTE1OT1BNTUlKR0VISk1MS0dMS0xMTEtMSkhITU1MTU9OTlFOT1FQ
-UlNQT05PUFBRUlZTVFJTUVZXV1hXVFdbXWJmZ2tyeHd7fX+Bg4aKioyPkJGQkpGR
-kpCPj5GMioqKhoN/fHh1bmhmYVtWVVRUU1FTVl1manJ4fYWKjJCUlJmYlpiUkpKL
-hoOAfHp2dXJta2NeYl5bWlpXV1ZYWVlWVFhVVFZVVVRUUlJUUlRUVFhXV1hXVVRU
-V1lVVVdaWVxZV1dYWFhaXFtcXFxZWV5dWltbWVxeXV1fX11dX1xZVlRTUVBPVVVW
-VlVSUVNSVldTVFRTVFRWV1hXWl5cWFdaWVlZW1lZW1tZWVhXV1dYVFRUWVdZZqO9
-yNDY3eDi5ejp6upJRUpIRkhHSUZHQD9BQ0hCQkVDQEFCPz4/Q0VER0JBRD8/Q0A7
-PT48OTw3ODlAQD07NjY3Nzg5PD09Pz8+Oz4+Ozw9QkJAPj8+QD88PUE/QEBBQENB
-PkE/QkRGQkBAQkBCQ0JDQ0VDR0pJSEJDREBCRUlJRUVEQkRGQj89QEE/PkBCQj9A
-Q0NBQUNDQUA/QkNDQT8/QkBAPD9CPztAQD0+Ozw7PkE+P0JBQENAQkNBQ0NERD9A
-Pj0+Pz4/QEFAQEBCQ0NGRkhLSkpKT1JUVVFQTlBQT09PTUhDSk1NT0xNS0dFRENA
-RkZISUZHSEdDREVFRkVGQ0NESUtMTUdDRkZAOzo0NTQ1NDYyMzczNDQzNTk3NDU0
-MjM2MjU2NzY2NDM4NTM3Nzg4Njc2NjU2NzY0NDQ1Ojg5Njc6ODo6OTo6ODs9Ojc6
-OTo/QT4/QUA8O0FDRUREQUBAQ0hJSUdLSkZDRD9DSEVGR0pMTU5STkxNUVBRU1VX
-U1BNT1NTUFFRUU9RTU1NTlNTVFVOSktJSU5RVlJRTU5OTEtTU09OTU9RV1ZUUlNU
-UklHRERGSUVBP0BAQEBBPDw5Pzc4ODc7ODc2NzU0NTQzMTAvMjA3MjM1NTUyMjI1
-NTQyNTQ2MzQzNDU0NTY3NjtHUFBVVFZWV1hZWldSUlRVTlFZTklGRkRGQkVJRkFB
-QkZBQkFAPzxBPjw7ODs6NjU7NzY5ODU0NDEzNTExMzYyNTU0NDM1MjIzNTUzMzQ1
-NTc3NjQ0MjI1MzQ3Nzk3Oj1BSElKTk5KS0tNTU9PTElJR0JFQ0RHQjs5PkVMUlZf
-YmtwcnZ6e31/goSJi46Mi4qKjIuOjYyMjY2Li4aEf3x7eG9gWFNMSE9dXE5FVV9d
-XldPUUxBQkdGPENEQD5AQUJDQTtCR0NIU05KWWZeTlVud2hVWWZvYlFWbHl0ZF9y
-hYV2aGyEkZKIg4OQnqCYi4iRnKGek4uIjpmZi3ZxgY2TjHhwd4qdoJqQjZahp6Wb
-jo6dqamejH12f4iAcGRfX2JfWlZTUVRQUU5PTlBOTk5NSU1OTk5NTExQTkxOUE9O
-UlBSUlBRT1JTUlFSUVBQT09OT05QTk5OUFFQUlBMTEtNTEpOT09OTE1LS0xPTkxL
-SkxNTktMTU9NTExMT05MTUhLTUxKTE1NTU9NTUxLSUxMTk9NTE1OSEpLSklLSkpN
-TE1NT01NTU5NSkxPT09NT09OT09OTU1OUE9RUlNRUVBRVVNTU1NVUVJTVlVWVVhd
-XltfYWNpbnBwcXZ4e3+EiYmMkJOSkpKQkZGSkY+PjYmIiIaCfXx6dHBrZl5aVVRT
-UldgZ2x0fICDiYyQlJOVl5aWkpGOi4eDf314dHBvb2tjYGBdX11bWVpYWVlYV1VV
-WFZUUldUUlBTUVJTUlNWVVRWVlRWV1lXV1pWVFhZWFdbWFlbWVdYW1tZWlpbWlxc
-W1pbXVtdXF1eW1tbWVZUVFNRUFBTUVJTU1FRUVFRU1JUUlNTU1VYWlpYWllWV1tb
-WFRYWFtbWVhYVlRUU1NTVFZXWFl1rr/K0tjc4OPm5+jp601HREVFRUlDSUlEQUA/
-Qz49PkBBQUVEQT5CQUJAQj5CQTw/QEJEPz47Ozo7PDw9QUA6PTk3PTw6Ozw8PTpA
-Pz0+Qj0+PTw7PUJAQT5AQ0FDRUFDQkNAPkFBQkRFQj48PT8/Q0VGSEVHSEdEREVI
-SkRERUZBQUJFRERAQT9EPT89PUBCQD5EQEBAQUJCQUE9PkBAQUJAPD08PT1CQ0Q+
-PTw+QERCPTxCQ0VEREdHRURFQkFEP0M9P0NAP0BDRUJAREVFSEhKSk5MS01PVFZV
-UExMS05OTk1IRkhKTU5MSUZGSUJFRkVDRkxITUtJRERERUZISUZGR0RGSUhISEtH
-REE8Ojk1NDIxMzc1NTQzNjU3Njo0NTIxNDI2ODY2NjQ1ODUyMzQ1NjY2ODk3NTY2
-OTg4OTo4Nzc4OTw9Ojo8Qjs7OTw5OTk7PD9CQTw7Ozw9QkpKTEdCP0BGR0hKRUVH
-Q0JBQUBESEhFRUlPVFRQTUpNTU1RU1dRUFFTVVJQUldWU01OT01PUVRSUlBKR0xL
-TVZWUU1NUk1PUlNRS0xMTU9TUU9PT1JOS0VGSUlLSElFREE9QUE/PD07Pjs4Ozs4
-Njg3NTQ3MzQ1NDU1MzExNTQ0NjU1NDU3MjQ0MjE1NTI0Mzc6OjM0ODxCTFNUU1VV
-VllZWFJQVFNTU1dSUExJQERFRkZHQj5BR0JFQ0VBQDs7Ozc5PDo7OTk4Nzc0NDY2
-NTg6NTY2MzI1MjMzNDE0NDE1MjEwMTQ0NjY0NTIyMzExNDc4Ojk+PUFHSU1NTUxP
-UE1KTEtKRklMSENDREVEQD9DR05SV15jaWxyeHx9gYOChIuLiYyOjo+MjYqMjo2J
-iYyIhYN+fHhya19WUElKUFlTSE1dYVlUSEZJSD09RUI9PURDPjxBQEBCQkJFRkxY
-VFJdaGFUYHF3ZlVZbXVpVFptdWpZW2+AemVaZHuGhHVveY6XmI1+go6gqKGWjZGW
-naCTgYKNlpKHdm56ipWUjH19jZ6gnpWNjpymppyNhIOGi4ZyYmBgXlhXVFRRUE5P
-TkxNTU1OUExMS0xOTU5OTE1NUFFOUVBNTFBPT1FRUFFTUlFRTlBQT09NTFRRUE9N
-TVFQUVBPS0xLTU1OT1FNSkpPT09OS05NS01MTEtMS0xNTU5PT1FRUExLTU9NTU1M
-TE5JS0tKSk1LTExNTEtLSklMS0pKSUdLTUxMTU5PTVBNTU9OUE1LTE5PTUxNTlBR
-T1FSUVFSUVFRT1BRUFNVU1NWVVNTVVNXW1xdXmBjam5qbG9yc3l9gYSJjI+OkZKQ
-jpKSmJOPjIyMjIeHgoGBe3JuaWZjXVxbX2dxeHyBhIeNkJCSlJaVko+QjouEfnt5
-dnRwa2prZWFeXFpaW1pZWVhWVFVUV1ZRVFZVVlVVVFNUVFRTVVZXV1ZUVVZWWVVW
-WFVWVlpYWVxcW1laV1hbXV1bWl1cW1xbWllaXWBgXl9dW1hYVFJST1NSUU5QT1FR
-U1RXU1JUVFNTUlRTUVRXWVhYWlpaWVxZWlVZWFpbWlpXVFRUUFJTVVZZWXCrvsnR
-2N3h5OXn6OnqSklIR0NHSEdJTEhCQD09PjxDQ0JHQ0JCPj0/Q0JDP0JDQ0NEQD89
-Ozw7Qzg6Oz07PT5HPz9APT05Oz0/QT08PT46Pjs8PT9AP0FCRUFAQT9BREM/QUNB
-QkNEQ0A/Pj4/PkJCRkVGREdISEVDQUFCQkVBRERCQkRDREA/PT9BQEA9QEFBPj4+
-REJCQkNBPTtAQENDQD0+PUI/QEBCREI/P0A/QkI+QENEQ0VGREVERUZDQkNCQ0JC
-Q0JDQkBDQkRFR0hLTU9OUU5QTk5UVlVSUVBRUlJSTEZGRElGR0pFRkZER0lKTktK
-R0hLSkdBPT1BRUtKRkpJR0RFREVERURFQjw7OTMzMjQ1NDIyMDQzMzQ0NzUyNTU4
-NzU4NDU2NDU1NDQzMjM1NDU2NjY5NzU3ODc3ODg2Njc5Ozw7Ozo6PDk7OTw9Pz8+
-PT9BQj48PkNJTk5QSEVCRkdJSEdHRkNCQkFEREdGRkJIS01NSktKSExNSU9QUE1Q
-VFVZU1BSXFtXUVJQUlJSUVJRTkpKSUxNUFJVVFFRUFFSVlRRS0tOTUtMTVBOTk1N
-SEhKTU9IR0hFQjw/QD09QEJBPD44OTg1NDMzMTM2ODc1NTY5MjI1ND5DMzU4NTE0
-MzQ1MzY0MTQ1NDU2NTQ2OD5ETlJXWlhXVVdXWldVVVFRVlVVUEhBR0lMSERCQ0VF
-R0RCRENAQDw7OTo6ODc2ODc2MjM1NDQzNjczNTI2NTE3NzMyMjEyNDM0MzEvMTc4
-MDIzNDczNjo5ODs5ODw9PUdGSUlRUU1KSUxPS0ZFUk1MTUpDRURBQURLTlRZXmJl
-am5zdHmBg4OGiYmKjouNj4yMjo6OiomJh4ODf356d3BqX1FNSktVXVJQX2NeUUtF
-QUNJREFFSEVAQUlKRUhFSE5GQ0xMTlZXVWVuZ2BuhH9uY2x+fmxfZ3V5aFhfcn10
-X1Vjd4N5Z2B0iI6JeGx2jJugmo6Nl6GmpJiOjpihoIx4d4WTmJOEdXeFl5iTiHyF
-k52glYd+gIiKg3JiXF5bVlRUU1BTUk9RTk1MTk1OTU9OTExNTk5NTk5PTlFPUFBQ
-T1BQUVJPT1FQTlBPTk5PT01RUE9OUU9PT09OUU5KS05QUFBQT01NTUxLTk9NTU5M
-UE9LTUxOTE1MTFBPT05OTk5MTU1MTkxMSklMS01KTEpNUElJS0pISUpJSUpMSkpM
-TEtJS0xNT0xNUk1RUE5OT09QT1BPT09PTk9SUU9TUlJTUFJSUFJUU1NUV1hUV1VY
-WFhbXF9hYmRobW9zdHZ6fYCFiYuOkZGVlZWTlZWRkJCQk42MhoSBfnl3cG9raGhq
-cXl/hYaIi4+Pj5GRkZCPjYyGg4B6dXRzb2ppZmZnYF1bXFtYVlZYV1VUVlVUV1pY
-VVRUVVZXV1JUV1hWVFZVVVVVVlZWWFdZWVdZWltXVVdbWlxbWltdXFxdXV5eYF1c
-W1tbXFpZW1lZWFNTVVJQT1BQT1BQT1FPT1BTUlJUVFRVVFVVVFZXV19dXVxaWFlW
-V1dYWVtaWlpXVlZSU1ZYV1pcX5O9ytHZ3uHj5ejo6upISUpKSEVJS0hJR0lKSUdI
-RUVFQ0FFPj49PD04PD47Oj4+QkE+PT46Ojs+PTo5PDs8PEhBPDw+Ozs8Pj09QEJA
-QkM/P0FBQkBFQkJARUVCPkBAQEFBQEFBQ0RCPz9APz4/QENBQ0ZFREhGQkNBPUBB
-Q0FCR0VBRENCQ0JCQD8/PT49QENDQUE/QkRCRUFAPDs+QUI/PkA+REE/QEFFREVE
-QkNBQkNDREVFRkdIRkhFRENCQ0JDRENFQEFCQkBDRURKTU5ST0xPT09NTVJUVlZZ
-WllaWVdVTEpJSUpLS0tJRUdLTE1PTkpISElHQ0FBQkVISkhJTElCP0VHRUZDRUVC
-PTs2NDQyMzEyMzEyMjM1NzY3NDQ1Nzc0NDQ0NTY3Njk3NzQ2Njg6ODg3Ozk5Nzk3
-NjczNjY5ODg2NTo6Pzs5Ozs6QD9BPjo9QkE9PTw+QkdLTU5JSEhGTEpJRkdGRUhG
-RUJDQ0VHR0dISUpJSElHSklLUFJSUE5RVlpYUlFVVFBRUFFRVVRSUVBRUU5KSkhJ
-U1tcV1JPTlBVV1NQS0hLTUxOVFRPTklHSEtSU0xKR0hCPDw9PEFEQ0FAPDg3NTM1
-NDY5NTY3MzMzMzYyMTM1Mzc0NDYzMzczMzMzNTU4MjI0NDMzNDI2PUNMVl9dXVlV
-VlpYVVVUUk1NSklJTEtKRk5KRz1BQ0RCSERBQEdBQz44OTk6OTo4NjY1NTQzNDIx
-NDQzNjQ1NTQ2NDQ0MS4xMS8yMjUyNTI0MTE1Nzc3NDc2OTc9Pj8/RkRESk5PUEtK
-Tk5IR0ZGS0xKR0RFRT45QkhOVllgY2hsb3Z6fH6BgoOFiI2Mj5GOjo+QkZGPj4uL
-i4eCf3p1cWZeUUlISlddW2JtaFlLRz49RUlIP0NKSURHS09JSUxIS0pBSU9LVFVT
-YGhmYnSEg25jcoaBcmhxg4NzZGp7fnNlX2x/gnZlYnSHi4FtYmyBj5KGeXyLnqOc
-kIuSnKSfl4mEkJ2eloZ3eoiUl4t7cHmHkY+CdHF5g4J6a2NaWVhWVlJRUlJNTU5O
-TU5MT0xOUE9OTU1NT1FPUE9MUFBRUE9OTk1QUk5PUFBNS05PTUxNS01NTk5OTU5N
-TU9NT09QUFBPT09MTE5LSk1OTkxOTE1QUE5OTUtMSktNUU9OTk1OS01OTUxOS0tL
-SktLSktKS0tKSUlHSktMSkhJTU1PS0pLSkdJTExLTUtOS01OTU1PTk9QTEtOT1FQ
-T1BOT1FSU1RUU1JRU1FRUlFVVVVWVVVUV1ZYWlxfYWBhZWpscnN3en+Bg4mMkJSX
-k5SWk5OSkZCSkI2MiIaEf316d3V2dXZ6foCGiIuOkpKOjY2MiYaDgH59enZ0cXBq
-ZmVjYWFhXVxcWllbW1pZVVhYVVZWV1VUUlRWWVNVVFdWV1lYV1dXVlVUWFdWV1VW
-WFdWVldXWVhdWllbW1paWllbXFxbXF5dXFxbWllXVlZSUlVVVVRRUFBTVFJPUFNS
-T09UVFRZVlZXWVdUVlhYW11cWltbWVdYV1dWVldVVldXWVdVV1VUVVlcibrK09nd
-4OPl5ujp6klKTUtGRERERERCRkZFRkdEQz1BQT89P0E/Ojk7PDk6OTk8P0I8Oj8+
-PDo6Ozk5ODg5PT06PDs8PT1BQUNFREJCP0BBP0BDPz9AQkBBQkA+PEFBQEBAQURG
-REdCPkFEQENBQkM/QkZGQj89QkJAQUBERUZEREZAQEJBP0FCQD9BQzw9QEJBQ0FE
-Q0NDQUM/PT8+QERBQEFCQkJGRENEREJAQEBDQkVCREZJR0VGRkVCQkNAQEJFRUVC
-RENBQUJCQ0hLTE9NSkxKTElLUFJST01RU1ZVU1NRUUpLSlBNTE1MSkpNT09LS0dI
-R0VBPUJERkhJR0lISkQ/QEJDQ0JFREVCPzczNTMyMjAvNDI3ODU6NDU1NDAyNTk3
-Nzc0NzU3NTQ0MjM2MzQ2Nzg2Nzg2Njc5NTc8NjM1NjY3Nzo+Ozs5Ojo/Pz45PT9C
-Qj49PkBBRUVISklHREJGSUVCQkdGRklFSEhISkhGRUhJS01MTkpOSEZMU1FOTU1S
-WFdZVFNSVFBVVllXWFNSUlBRUU9NTExRVlpUU1NQS1NWVFFQTk1SUFFUUlFOSEdM
-T09TTExIRkRER0I9Pj49Pjs8PDU1NzQ4NjY2MjI1MzQxNDQyMzQ0MzMzMTMxMzUz
-NDM0NDU2MzMwMjQzNDY2PEVRW19dW1lYV1hYVVZUTktISE1TTkxJTUtKQ0BDSUhH
-REI/QUhDPjo5OjY0ODk5NjM0NzY1MzM1NTY1MzY2OTE0NDUzMzMzNTUzMTU2MzI0
-NDg3Njg8ODk8Pzw8QkNGREdMUlNPS0pPTU5KSUhJTElHRklEPDxARUtTXGBobnB1
-e4B+fH1/gYWOi46Ojo6Njo2QkY6LioyLhoGAfHZsY1xSSEVJUFVbZmJZS0VGREJJ
-VFFIPUJORT9GTUlGSEZGSktJTUtMTUtXX19abH5/cGZyfnxvbX2Hg3dsc4KEeWZn
-eo6NfmlofIuLfmpkcoWQinptc4WQlIqDg46YoJ6Zh4iXqqukkIKHk5yZiXJudYSK
-hXVpZ3B4d21mXlhVVVhUUFJUVVNPTFFQTE5KS01PTU5QTE5MTlBPTkxLTE9PTEpM
-TE9NTFBQTk1LSUxNTUVLTE1OTU1MTU9QT09NTk1OT1JPT05OTUxQTk9MTEpKS01N
-S0pNTU5MTE1PTk5NTU1OTk5MTUxNSkxMTU9MTk5LSktLSElMS0xKS0hITk5MTk5N
-SEhLTEpMS05MSktLTE9NT01OUVBRUU9OTU1PUU9QTlBUUlBPUlFSUFJSVVdTVFRX
-VVVWVlhcXF1cYWNobHB0dnl9hIqOj5KTlJWZlZaWkpCRkImIiYuKiYWEhISAhIWG
-iYmMi46Oj42LioiFgoB6dXN1cnBvbGlmYV9hX19cXV1cXFtbWVhaWVpYWFhXV1VW
-VlphVlRUVFdXWlxZV1ZXWFhXV1RVVlZYVldXV1dXWVhXV1hYWFpZW1xcWFxdX15c
-WltcWFZbVFVRUU9SVFRRUVFQUFBRUVBSUVNTVFVVVVRVVVRXWFdXWVhYW1lbWlhX
-VlVSWFlXW1laWFZWVFRWVFmGtcrR2d/h5OXo6enqSEdJREJHSUlDQkRAOz1ERkhA
-QD49PT89QEA6Ojo5QUM5OTc4Oz49Qj8/ODo5OTo9PUA6PTo9Pzw+Q0VGREVDQj8/
-P0JARENDQT0/PT9BQj08PUA/P0FCQ0RERkFAPj8+PkNCQkBAQUFBQkRAQkRDQUJE
-RkVFRUFEQEBBRUJAQEJCQj5AP0BBQEJCQ0REREVBPT4+QUJBP0VEQkFBQUFDQEFD
-Q0RDREZHSUhGQ0JCR0dERkNCQkNGRkhIRUZCQUFBREpNTExJSEZKSklNTUxJR0hJ
-TU1OTE5MSUlHSEhHR0dKTVBRTklKTUhHRUJCQ0hNTEhDQ0dGRERFS0ZHSERCQ0U9
-ODc2MzMyMzIyMjQ1NjU1NTY4NzYzNDUzNTc2NjQzMTEyNTU1NDg4Nzc3NjQ3NjY3
-ODk5OzQyNDc0NTg5OzlAPz47PTo8Pj0/QT48PD1ER0ZKR0ZCQUJGQkJAQkRDQ0VG
-SkxISURDR0ZMS0xNTUtJSUpMTEhHS05UVVVTU1RYUlNTWFpYWFpYVVJQTU9OT05U
-VFJQT05IT1RUT1JQUFJTVVFSUU1ISExOTU5LSkpKS0ZDQD07Pz48Ozw5OTg4NTU2
-OjQ0MjEyMjQyNjU0Mzs1MDMzNDMyMTc0MTMxMzQ0NDEyNDk1NjU6PElOVltbW1xZ
-WVZWVlJPTUZHS09ISkpQTEhFRUNDRUlBPUBARD04OTo2OTo5Ojs4ODQ1NTg5MzEv
-MTEyNjczNDUzNDQ1MzQxNDQzMjQ1MzU1NjY1NjY6NDg/QTw8Q0dFRk1QTkdHSk1S
-UFFLSUxMS01NRz86OT1ITldfZGlwdHd4e35+gIWGjIyLioyPjo+Oj4+NjYqLjImH
-hYF8enFlWFBJRkZJTVtkVlBJQk5ORUlVVk5DPUVFQkBERj9BREFERD9GREZIQ0dR
-UVFdcHJhXmt3dmZmdYWHdm5zgol/bmyGk5aHdXeHlJaIdG99jZGGdGxzgo6OgnV0
-gpKUlYp/h5enpp+Uj5egopuLenN8iYqCcGNkaHBrYFpXV1VUU1BQTE5RTk9PTk1O
-TVBNSkpLSUxRT0xMTE1PTU5OTk5QUE5QT01NT09MT09PS0tJSklLTU1MTU5MTE5P
-Tk1LS01OTUxNTlFLTU5MTk5NTE5KS0tKS0xMTE1LT0xOUU9OT05PTU1MTkxLT05P
-T05MTUxLTUtPTU1MTExMTEtLTEpOTE1PS0lKS01MSk9MTUxLSkpNTVBOTk1OUk9O
-Tk9MTU1QUFBQTk5NT01OT1BUVlhXWFZUU1JTVFZYWVpcX2Bla29vcHV5gIOGiI2R
-kpWZmZWSkpSUkZCNi5CQjo2KjZKNjYyNjZORkJGNjIqEf4F7eHdxb29tbGVkZWBf
-XV9dX1xdXVtZV1tbVlVVVFRXV1lWWFdXVVhaXFlXWFdYWFhbWFlZWlVVVFVWV1ZZ
-WllXWFdWWltZVlhaXFtZWV1dXF1cXFxaWVhZW1dUU1FTUFFRUlFRUE9TUlBSUFBS
-UlFTVVVTVFVVVlRWV1dUW1lZWlxZXFdYWFZXWlpYV1hXWVlYU1RVW4S2ydLZ3eHk
-5efp6upJSEpMR0RITklEQD5DPz5CREREQT5AQkJBQUBAQT89QD09Ojo5PDo5PT9D
-PjxAPDw9Ozk+PT0/PjxAQkJBQ0NBQ0A9PkVCQ0I/Pz5AQkJCRT88Oz5AQEBCQEJG
-QkJAQkE9QD09QUJDQEBAQEFCQkBAQkNDRENBQkNBQ0JBP0NGRUVCQ0NBPT5CQT9A
-QkRGQ0FBPkA/Pz5DREdCQENCQEZGRUQ9PUBDRUhISUlGRENCRkdCQ0Q/PUNGR0ZG
-R0FARUVHSEpKSkhLS0xMR0lISUpKSEZGSEdFRkNGR0VCQj5CQkJESktHRkVHR0VG
-REM/Q0NDQERCQkJCQ0pKS0lKSUhGSUQ9NTQ1MzQ0NTQ1NDM0NDc3Nzg0MS8yNDMz
-NTYzMTM0NDs3NTY8Ojk7NTU1ODc2OTs5NTQ2NjY0NTk3Njs+PDo8PDs4Ozk7PDs6
-OTg5PD9CREhGR0E9QkBDQURBR0RAQ0RISEhJSkpHRklITUxPTElJTEtKS0lKSkpN
-UE9VVFhUUFFTVFdZW1hVUFBOUVBSUFJSTkpKSktRUlNRU1BOUVFPTU1OTUxQUFBN
-S0tMT0tKTEdBP0I/QEFAPjo5Oz48Ozo4NDQ0MzM2OTQzNDExMjM1MzQwMjAxMjIz
-MzExMDEvMTI0MzI2NzQ7PkVPVVlaWFlcWldUU09KSkVESUVFR0xJSUdHQ0NKTUVA
-PDw7Ojs7OzY2OTg6ODs+OzY5ODI4NDIzMjIzNjM0MzI0NTQzMzIvMTc1MzY2NDY2
-Njk1OjY2OTxBQT9EREVIT1BKSEhNS0pMT09PT05IRkZCQkI5PklUWmFnanFzd3l8
-goSFiomJjo6TkZCMj5KRkY2MioeKi4mCfHp2c2VZT0pFQkhPWldRUU1HTk9GR1BX
-T0E6PkA9NztDPz0/QT9CQEBFREJDQ0tHSFJhZFRRYnFrV1hsf35tYW+AhnpoboSU
-lImAh5Wam41/go+bmo58cnuOlJGCcXCAjI2GeHJ8j52glo2Rm6aooY2DgoqQkYZx
-Y2NmamRbV1RUVFZUVFFTUVBQTk5STUxOS01MTU5NTU5PTk1PUE9OTEpNTk5NTE5O
-TE5PTU5NTU5NS01QTk5OTEpLTExNTk1NTk1PUFBNS05NTEtOSUpNUEtMTU5NTkxM
-TEpLSkxKSUpMTU9OTExNSkxMTExLTE5PTUtNUFFNTU1OTEpMT09LTk9NTE1MS01L
-TE5OT05LS0tNS0xMTE1PT09OTUpKTE1PT05PUU5MTFBQTktTUVNPUFBTVlZUVVZU
-UFJTVVhWV1pbWlthZmdqbnN2enp+g4iOkJKUlpWSkpaVlJOTkpWSlJCPkpGSlpKR
-kIyPjo6MhYB9enV1c2xraWdmY2JiX2FhXF1dXVtbWlpZVlZUVFNXV1hXVVNWWVNW
-XFpTXF1bWFhYWVpZV1pYVldYVlZVVVZYWVlYVllbXllZV1lYWVhXWlxeX19cW1ta
-WFVXVlRQUE5SUlJSUVFRUVZTUVRRUFFSU1FSU1ZUVFdVVldXV1pZW1tbWFhZV1dY
-V1dXV1dWWFdRUldUUlJYhbPH0tre4ePn5+nq6k1MSk1MRUhISUdHR0JIR0hEQkND
-QkZFQkFBQ0E/QEA8Q0ZBPT4+PTs8PDo+Pz5CQj4/QkA7OkE/QT5APkNDQkE+Pj5D
-QkJBRkJBP0BCPEE/PT9CQkA+QkBBQUNCQUNBP0A9Qj9BQEFAPT4/Q0ZFRkZEQkNE
-QkA/QD5AQEI/P0NCRElDPT0+QUA/QkFEQkBAP0FDQT48PUFCRUZGSEdJSUhHRUNB
-QkJEQ0hMS09IRUNER0RFQ0FBR0pFQkNFQUVISEdHR0hIRklLSUdFR0hJSUtMSEdJ
-R0hGR0VGREE/QEA+QUNDRUZFQ0ZFQkJDREI+QkFCQ0A8O0FDR0dHSEZFREVJRD42
-ODQzMzYzNjc1MzE1MzYzMzY1NDI1NDQzNTQzNjUzNjY3Njc3Njc1NDY3NTU3ODg5
-Ojk2NTY2OTc2NTo6Ojg3Oz48Ojo3ODg4Oj5AREFBQ0lHRT48QUBCQkRAP0BDREdE
-RERISEdJS01PUFBQSUtNTktHR0ZISEdLTVJYVk9OTU5NU1VXWVVQUU9QVVVUVFVQ
-SkdJSk9RVVVQS1BPTU5MUEtPT09RVFBRTU9KSkhERUQ9PUBAQT09PTg6Ozo4Nzc5
-OTY0MjQyMjIyMDMxMTQ0MDExMjIyMzUyMTMzMDAxNDMyMzU0MzU4OkVRVFdZXV1c
-V1RTT0lISEhFQ0BCR0lMRERFRUdHQDs5Ozk3ODw4Njw4OTo3Nzc4Nzc2NzY1NTMz
-NzQxNDU0NTY1NTY4NzU0MDUyMzY2Njc2NTc4OTk6OTs8P0FDP0dNUE5LS0tLR0lN
-TUxMS0tIRkhIQD5CSFJcY2ptdXd4foKFhoWJjYyOj46OkZCQko+Oj42MioaGhYJ+
-e3ZtY1ZPSEZHS1FaVFJTUEdLT0RBRE9LQzw6OT1CPUFEQD5BQ0FDQEFAQT5CR0ZF
-S1lZTk9fa19QVmp1b11TYXh+dGBofo2MhICKnZyXiYCKn6ajmYyHjZ+flYV1eIWP
-in10bXuMk5SLg4eTpaSdkYaGjpOThXNpZWpkXVVUUVJUVFVTU1FRUU9NTU9QTk1P
-UE5NTU9RUFBQTkxOTU5OUU1MTUxMSktKT05OT09OTlBOTVBQT01OTk1OTlBOT05M
-TktOTU9MS0xLT0pLTkxMSUhLS0pMUFBPTkxNTUtLTExMTk5NTktLT05MSUtMTk1O
-UU9MSkpLTU1NSUtKSktJSktPTkxMTEtKTk1MTlFNT0pMTU1OTExNT1FRT05RUkxO
-Tk1PTVBNTU9OUFFUUlJSUVZTU1NSVVVWVlVXV1dWVlhZWFhaXV9lbm5xdHZ4fYKJ
-j5CQkZKRk5OTlJeRkJKVlZKTmJiXlpSTkY6NjYmBfXd1cm9ubmpkYGBiYGRjYWBj
-X19cW1lXV1ZUVFVUVVNWVFJTVFNVWV9YWFZXWVlYVlZWWFZXWlhaVlRUVVRTV1hZ
-WFdYWl1cW1pbWVtaWVdaWlteW1taWFlXU1FQU1JQT0tOT1BPTlBPT09RUVFRVFFR
-T1FWVlZTVFRVV1dXWllaW1xbWFdXWFhZWVdXVldXV1hWVFRTVFqOtsbR2N7g4+bo
-6OnpTE5JSEhHRklHR0RESEtERURCQD87QENFPkJBQD08QkJIRkg8Ojs6OzpBQz09
-Pzw7QD9CQUBDQD89Pzw+Qj9BP0BBQkBER0VDQEBBQEBAQUJAQEBAPT4/Pj48QkNC
-QkNDQD08QD9CQkVAPkdFSUlLSEdGRURCQEJHQUI/QkRDQUZDQz9AQz5BQUFEREVC
-QUI+P0JGREBAQUBESEdISEZISEpHRkpGRUZGSE1OS0hDQURGRkREQkVJSEhFRkZJ
-S0tMS0tLSUhFRElGQ0NGSU9MS01KSUdGRENDSUpGQkRAQD09QUJDQj1AQkRDRENA
-QEVDRkQ/PDw/QkBCQENCQURCQUE/Pjg2MjU3MzM0MzU0Njc3Nzc2NzY1NDQ0NDM0
-Nzc1NDUzNjY6NzY3NjQ1NjU1Nzg4NzY4OTU3NjY4Nzk4Njs7Njg5Ojs7ODc9Pzw+
-PUBGRURCQEdCOz9AQj9BQD9AQkRIRUZEQERFS01OTVFSUVFNTU9STEtHSUxOSUpK
-T1RTTUxMTE1MU1ZTUExLTVBSUlBPU1NPS0pLUVJUVVBQTU1KS09OSkpMSk5TVFRQ
-T01KSUZFQ0JAPT09P0I+PTk7Ozk5Njg5NTU4NjAzNDM0MS8xMzQzMjQzNDMxMjEy
-NDQyNTI0NDU0MzQ1MzM4PUZQVV1bXVtaV1VSTkdDSkRCQEBFSkpKR0VEQ0ZFQDw7
-P0I/Ozo6PDo2NjQ3NzcxMjU2Nzg5NDU0MzU1MzY3NTM0NjU0NjMyNTQ3Nzg2ODY0
-NTU1ODg6Ozw9QkNGSU1RTE5OSkRHSlBPSUVISUxOSEdFQ0RLVFxjZ2x0d36Bg394
-iIeMjIqLj4+TlJKOj5OSj4uIhoODgoB4dHBkWFBHQkZKWltSTVNQQ0ROQzs+Q0xI
-Pzo7PEJAQEVFQ0JDQkVGREhHR0VHRkNNVVFJT2BnXE9VZnBsWFBgcXRmV111fntw
-b3+VmY+Cf4mbpaKXkI+XpaWdjYSJkpePgnBufYyQjYJ5eY2cn5uIfIKMlY6BcGho
-aGBZU1NSUFBRT1BRUVJNUFFRTk1OUE5LTUpMT09NT1BRTkxSUU5MTU1NS01OTE1O
-TE9PT1BNTE1PT09NS0pKTU1OTUxOT1BLTk5NT09OT09NT01KSktMSUdISUpKS0xM
-SkhKTExNTU5NTEdISktNTUtNT09NTVBSTUxLTExNTFBOTE5KSklMS0tNTU1MTEhK
-TU5MTkpKS0lLT01MTEtMTFBSUU9QSklMTk1OTU9PUU1PUk9PUFJRUlNVVVNQU1VU
-VFNWV1VUVVhWVFVYV1tjZGdsb3N2fH+GioqMj5CRkpKTkZOVmJeYnpybm5mampaR
-jImJhYF8dXJybWxrZ2VkYWFgYGJiXVtcXFpaV1dWVldUU1FWVE5PUFVXVldaXFVX
-V1VYV1hVUlZYXFlZWVZXWVlXVlVXWFhYWllXWFpaXFxbW1lZXFtfXWFhXFhWVlVT
-UVBOT1BOTk1QUVFQUVBLUFBOUFFUU1VWU1VTU1VTVVRZWFlaWFdYW1pbV1pbWVhT
-U1ZXVFdVVlZVUlRXXYy2x9HZ3eDj5ufo6elOS0xJR0dHR0hERkJEQT1BRENBQEE/
-P0NEQ0Q+P0JERD9AP0I9PT4+PTs7OTk5Ozw8PTo7PUFCQUA8PDs6PD1BQU1JPkNE
-REJDRkZBQEFAQkJIQD89PT1BRUJDRUNDRUJAPUA+QD5AQkNEREhKSUhIR0dDQkRD
-REdGQ0FFSERERkRDQkQ9PUJDQkFBQEFDQUNAQkRHRkRCQkRFRElGRkhLSEtIR0dH
-S0ZHSU5PSUdFRUVEREVHR0ZGRUVFSElHTE9NT0lFSEZGR0ZFREVIS0pISExJR0RE
-Q0RKQ0NDQD49QEA8Pj09Pj9CQkFBP0FEREA/REE+QUBARUA8QD1BRUhEQkE9OTY1
-NjIzNDAzNDU4NjU2ODc4ODc2MzU1NzE1ODY2NTQ0Njc5ODQ1NTU0Mjc3OTQ4ODY1
-NTY4Njc1OTo3Nzo5Nzc6ODg5OTo8PUBCQUE/Pj5CQT8/Pz9BQ0BAPz1AQ0RERUdI
-Q0dNTk5NT05QT1FRT1FOTU5MTk9NSUtOUVBMSUlOUFNWVVJPTUlMUVJSUFBRUk1O
-Tk1RVVNMS0pKTE1QUlFMTkxLTU5PUVJQTEdEQkNBQkVCPT4/Ozs+Pj0+PTg6OTk3
-NTU2NDI0MzQzMzMzMzQzNTQyMjMvLzI0MzQ0NDQzNDU0NjY3Ozg7Rk5VWF1dWlpX
-VVNNS0RBP0BBQUNLUEpHQT9AQUNBPEFCQT48OTk5ODg4ODY3Ojc0NjEzMzIzNjM0
-MjM1NTgyMzMyMzc2NjQ0NTY7OTU0NTY2Ojg1OTU1Oz9ERkZLU1JRUFFMRkpNTUZH
-Sk1KSElJSkRESE5VXWRrbHN3eX6CgoGIh4iMjY2PkZOUlZOTjo6Rj4qGg359eHRx
-aV1RTEdGR1FeV1JJTkpCRkxKQTs7QkVEPT9ARUpEQ0hKR0lHRUhIRUxJRklLR0tY
-WU5PXmddU1dqbWdUUmJub1xRWm95cmFldoaLf3Bve5GcnJCKkp+jpJ6TjJegoZ2N
-fnyIl5WLfnV3h5SVinlvdYKIhXhpYmFgWlZVUFVTUFFTUlFRUVNPTUtNTU9PUVJQ
-UE1OUFBNUExLTE5OS0pMTUtKSU9OTk5NTFBOTk9OTEpMTUxLSkxMSktNTU5MTE1R
-T1BQTE5OT1FPT0xMS0tOS0pJSEhJTEtLTEpMTU5LS0xMTUlKTExMTUxMTUxNUFFQ
-UE9OTk9PSk5OTU5LS0tMS0tMTE1OTEtKS01MTEpLS09PSk5PTU1NTVBOTE5LSEpL
-TE9RT01NTk5PUE1PUVNRUFJSUU9QUlNUVVJTU1FRVFVWVldaWVlYXmRpbG9ydnl9
-g4mMjY2OkZCSkpOZmJianZmYmJiZlpKNiYN/eXJycWxqZ2ZiYmFiYWFgYV9gXl1c
-XFlWVlRTUlJTU1JTVFFPUVVXVVVTVFZXVldXWFhTVldZWFhaWlhYVFhXVVVZWFlY
-WVdYW1lZW1tdW1lZWVxaXF1eXFlUU09PTk5OUE5QUE9OT09RT05NUE9QUVFTU1NU
-U01PVVRWUllbWllaWVdYWVlaWVdXWVdWVFdVVVZVVlVWVlhciLnI0djc4ePm5+jp
-6kRCREdIR0hFRUdEQENBPkBBQ0REPj1CQkA/QUZAPEI/PTw9QDo+QD5AOjs9Ojk7
-Ozo8Qz1AQ0JCPT4+Ojs9QD9KR0dARENCPkBDQ0RBQkNBP0RAQD49Pjs+QkFFRkVD
-REJAP0FCQkJAQURHR0ZHSUlHRURFRERDR0dHRUNFRkZFQkNBPjw+QEJBQ0FDQ0ND
-RENFRkVHQ0JDREZJSElHSEtLTEtKSEpKR0dHS01QTE1MR0NCRkhFR0lFSEhGSEdJ
-S01KR0hISElGREdJSEZNSUZDREhFRkdEQ0NFRENBP0FBQUM+Ozk/QEJAPzw9PT5A
-Pz1AQUJAQT86Oj48QENCRkZFQ0E7NjMzMTIxNTQwNDc0NDQzODY4OTQ0NTU0NDE1
-NzU1ODE1NzQ1OTg4ODU5OTU1NDQ1NzI0NDQ1Oj46Nzk4NTo6Nzo4OTg6OTo6PT5B
-QT5AQUA+PUJDQTw/QkE/PDw+QUVHR0RBR0lLTU1PTE9OTlBOT05MTE1TVE9LSE1O
-TklGSE5QU1NXVVFLS0pPUVBOUk9OS0tOUVFRUE1NS0xJS1BSV1dTTE1OUFBRUlNO
-TEVFSEZEQ0REPT09PTw/Ozw8Pjo8Ojo4NjM0NDIyNTM0MTM1MjIzMTM2MzQyMTIy
-NDMzMzAvMTIwMTU2NzY8RU9TWl9ZWFhXVU9KQUNBQUFGRk9NTUZEQURAQUFEP0I9
-Ozo6Ozw7Nzc4ODY5OjU0NzM3NDMzMzI1NDEyNTUyMTE0OTU4ODc0MjU5ODc2MzU0
-NTg5NTk9QURFRkxUV1BLSElHSk1PSERJS0xKSEhKREFFTFNdY2xvcnd2fH9/gYWH
-iYuMjo2Lj5WVlJWYkZCOiYeCg3t2cXFoW1JMRkZETlhWU0ZBREM/Q0Y/PDk/SUtJ
-PDg8RUQ/P0VDP0NERElFQkVCRk1NUF5cU1VmbmNZYHN8cF1YaXZvYFFgcnZpW2Bz
-gYN0ZmNzjJWQhHyKmqWmnY2PnKannpKIjJagn5WBenyJk4+CcGhteHx4bGFdXFtX
-V1VTU1RQTk9QUVBPTUtNTU1OTlBRUlBNSk5NS09MSkxLSkpMTUtJS0xKSk9PTkxL
-SklLTUtOTk1KSk1OTkxKS01MS0xNT0xOTk5RUE1QUU9PTU9NT1BNS0xLSkxLTE1O
-T0lKTU5NTVFOT01KTEpKS0tKS0tNT05NS0xLTU1PTU9PT05OTExMT0xJSkxLSUxN
-TktMUVRRUlFQT1BOUFBNUVBNTEtMTEtNTUtMSk5NTExPT09QTE9SUFBQTlFPUVJV
-UlVUVVFSU1NYWVdVVldaW19hZGdsb3R5foGChoeKjZOVl5mamZmamJeYlpKSkIqC
-fHd0cHFva2dmZmZiY2JhYV9eXlxfXFlYWVVSU1BOTEpLT1JTUFJQVFBSVFZZWFRU
-VllWVlhXV1lYVldXVlZTVVVXV1pZWl1bWltcWVtbWVtcXFtbWllaWl1aV1JRUVJQ
-UVFRUlBQTU5OUFBQU09SUlJPTk9PU1RTU1JSUlZYVldYWlpZWFpcV1ZWV1lYWVlY
-WVRVVFVXV1dXWl6JuMfQ2N3g4+Xo6enqR0dEQkdIRUNESUZCQkVBQ0A8PkRBPz5B
-QD9CQUNEREM/Ozw+QUE+Pz07Oz08PDs8OjtAQUBDQ0FAPz4+PUA9OkRQPkM9Pj87
-Oj9BP0JAQT89Oz9BQj86PDxBQ0JCQ0FCQT8+PkFBQEFCQUREQUBFRkZERURFRUND
-R0lFQkNDQ0REQz9BR0ZCRUVGRkZJQ0NHQ0ZJRkdFRkdFRURESE1NSklNTEhKSEpJ
-SUpJSk5OTE5KSEZGSEZFSUhHS0pGR0dHSUxJRUlKSEdFQ0VFRUlKSUZERUVGREJA
-R0ZEQUJCQEBCQj9BQUFCQDw7Ojw/Pz1AP0FFQUNCQTo4Ozg8QEJDQj5CQDk1NDIw
-MTQzNTYyMjAzNjc3NzY1NjY2NzUyMTIxLzI4NTM1NjU3OTgzMzM2OTQ6ODM4NTc2
-Nzs6Oz87Nzk8Nzo5OTo8Nzc9PDs6P0BFQUE/PDk9Qz8/PkNDPz87PkFERUZITEpG
-R0ZISklKS0xNTkxUUVJQT1NPTk1NT05QS0lHS05OT1NUUEtJTE1QUU5QT0xMS05P
-UFJVUlRQUFJSUlBSWVhSTlFVVFJRUU5RSkdISElKRUNAPz09PD09Ozs8OTg2NzQz
-NDY0Njc0NTc0NzYyMjQyNDY3NjM1NDMzNDQzMjIzNC80Mzg7Ozg/R1BZWFxZVVdV
-Vk9IQkM/QkdFR0lKRkQ/RENBQkBAQkA+ODk5PDo6OTo3ODY0NzU3NzM2MTAwLzEy
-NjIzNDE0Njs0NDg2MzU0MDQ6ODc3OTY1NDY2NTw/QUFDTE9STEhISEZHS09MSE5M
-SkhHSkZEQkVKU1tjaHBzdoCAfYGDhYaHi4yMkJOQkI+SkZCNjYuLi4aBfHVybWVd
-VU1GRENKTk5LPzxFQz08PD08O0NMUU1AOzo6PUM9Pj09Pj8/QkFBQENDR0tNWlpS
-U2Z2Z1dgdn50ZWR0fnZkXGZ2emtcYHSDfnBjZXiJkIt3cn2OmJaRhYucqKedkYqS
-oK2qnpCKjpKTj31vaWx3dW9mXFdYV1lWV1RTUFBOUFBPTU9NTUtOTU1OT05PTktK
-S0pJTE1MTExLS0pLSUlJSEhJSUtNTExLS0pKTEtMTEpLSk1NTUxKTE9OTU1OTUtO
-TU5OTk9QTk1PTU1OTlFPTElNTU5MTU9NUk5NTk9OTExLS01KSUdLTE5LTUtMTkxM
-S0xMTk9RTUxMS05QUE1OTk9LS05OSkpSU1JRUU5QUVNQU1JRUE5PTk5OTk1TUk5P
-UU1NS05SU05PTkxOUFFQTU5PU1JQWlRRUlRWU1JUUlNSVVRXVlJVWVxdYGRma21v
-cnV6goiLkJSZmpmZnJuWl5KTk4+Mhn11cHJsbGtmZGRkZmZhYGFgYFxZWFlYV1hU
-VFJTUE1MSk1MTVFRUE9PT1BSUlFUVlRVVFlaVlVZWFdWV1ZWVldWWFdZWVpZXF5c
-XFhZWVxcWlxhX2BcXFxZWFZTU1FQUFBPT09QUlFRTU5OUU9RUFFSUlJSTlZTVVZT
-U1RUVFZYVldZWFpXWFpZWVlXWVlZV1ZWVlZZVFVVV1dXXou5x9LY3eHj5ufp6upJ
-R0FBSEpHR0ZHREJFP0JCQkBEREJCPD5CQT5CQj5BQ0I9Pj89PURAPEFAPT09PDk6
-PDo/PkFCQkM+Q0BAQ0BEQUQ/Pj09PUI/Oz8/QEE+PD1APDw+Pz4/P0RBRUBERDw/
-P0JDQkFAQ0NBPkFAQkJDRkVEQ0BERUZGRkdGRkJGSEhHR0hIRUZFSU9FRURERUhE
-REZGSERER0RFRkhMTVFPSUlLTEtHRUpLS0tNT1BNTEtMTEtISElKSUZHR0pKSkZH
-TU1OSElPTEpJRUlISUlJSERCRkdEQUFBSEhEQUFBQEBBQDo9P0A7PDs5Ojs/PkFG
-Q0dEQkBDPTk5PkE9QEVHRkVDQTk1NjYzNDY1NzYzNDM0NjY3NDIxNjkxMjs4MjM0
-NTQ1OE45NTU4ODc0NjY2ODc1NzU1NTc4Nzg8Oj06Nzg8Ozw8OT07Pjo7Pj9AQj9A
-QT09Pjw+PD5BQkE+PUBARURHSElKSUZDREdKSUpGRUlKS09QUk5PUE5OTk5OTVJO
-TktNT1FTU1VOSE5MUFBTUVFQS0xPT05RT1NWVk9NTlBRT1RZWVRRUVVVU1FQT05L
-SUhLSkpLSUpDPj89Ojw6Ozs3ODk3ODc5NTUwNDM1Njc1MzM9MzIzODk1NDY3NDI2
-NDMxNDAyNjQ0MzQ2OEBETVNVVVZaWVdVVFRPRkdCRUNFQ0Q8QUBCQD08PUBEPj88
-OTc4Ojw+OTczMzU2NDQ5NjM0NDEyMjMzMjI2Nzg4NzU0NjU1NjY4NDc6OTs4Nzc2
-Mjc6Oz9BQURJTk1KSUZFSkxLTUlFR0tHSEdJRUE+RVBYXmNqbXF0e3qAhIOEhomH
-iIiKjI6RkJKRjouMjI6NiYR+eHNrZF1SS0VCQUdISEc8OUJFPj1BSkpFPkFKSkU8
-ODo7QENAPj9BPT5BQUQ/QkNGTUlQU01PYGtjVGB3d3BjZHiGf2xicYOLe2ZofIqF
-cmZsf5GThXNygZCSjX92fZOdmpKKjJOlqqWflZOYoaCXgG5nbHFuZmBaWFRUVVNS
-UVFRT1FNTk9JTE5PTktLSktPTE5MTk5MTU1OTElLTUxQT1BNUEpISUpJS0pLTExK
-S0tKSUxQTE1OTktKTE5NTU1OSktLTE1QUFFQTE5PTkxOT0xJS01MTk5NTktOTE5R
-Tk5SUFBOTUtKS0pKT01NTk5MUFFMTEtLS0xOT09LT1BPTE5NTE1PTk5KTE1QTU1P
-T1FSUVBTUlFQT09PTk5PTk5PUU5MTE5OUE9OTU5RUFFQUU9NTlBQUFJRUFFRUVVS
-U1FSUVFTU1RXVFNSUlRWVlxbXV9gYmRnaW93fYCIjZGUk5WWlZGSkI2NiYR/d3Vx
-aWdlZWRkY2RmZWVfWVxZVllZVVNSVFNVT0xNTkxKSUhHSktNTUxPUlRSUlRWVlNT
-V1dXWFZVU1RUVldXVltXWFpbW1pbWlpbXFtcXV9jYWFgYWRgWVZUVFFQT1BQT1BN
-UFFRTlFPUE9PUVJVVFJTUlNWVVVUVVVUVlZTVlZXWVpaWldaXFlYV1taWldWVVZY
-VlVUVlVXWVpfi7jJ0tjd4OTm5+nq61JIQkdGRENFRkRFQ0BDPkFERENGQz06PTxD
-Qj5AREE8PDg6PDo5Oz5EQjo/PD9APDs+Q0NCQUJDQ0E8QEBAQUA+Pz08PDw9Qj07
-PD8+Pz1BQkFCOz89PT9BPj9BQERDQkJCQUA9P0BFREFDREJDQUJDSEBAQUJGRkZG
-R0VDQkJFRUdHREJAQURGRkVHS0lOSEhHTEdDREJGRUdLSExPUU5LUExLS0pHSUhK
-TExOT1BNSkdITE9LS01LS0lITEtJSUhPUE9MSkhJSElISUhKTUtFQUREQ0FBP0FC
-Qj88PkBAQEA9PEBAPDs6PDo6Oz5BQ0BBQEJAQD09Pjw/QUNERERGQ0VAOzYyMjM1
-MjY1NTY1Njc5NzU0MjQ0NjYxMjUyNzw3NzU3Rjo2Nzk2ODg6NTA0NTg3NTU0NzQ2
-NDQ3Ojk3ODg6Ozo+PDs7PDw9PUJDQTs8QD0+P0U9PUNEQ0A/PUBBREhJRkRGSEZE
-Rk1OSktKTUlPSk5NTElLTVBNSklLT01PS0xMT1RYV1NOTkpNTlBQU09OTk1QUFFP
-U1RVTEpLTk9QTllZVFBOUVBNT05NTkhGSEhJSkxOS0ZCPT48Oz07Ojk3ODg5Nzg2
-ODUxMjE3NjU0MzI0MzM5ODIzMTIxNTQzMzIyMjE1NTQ1NzY2OT1IT1JVV1hXWVVU
-UktFSUhOREVBRkNGQkM9RD08PkM9Pjs7Ojk6Ojg5MzEzMjExNDQ1Nzk5ODYzMjIz
-NTUzNDQ3NTY3NjU1NzU1NTY1ODo3Nzg5Njk7QEE+RUlLTUtJSE1LTE5OTUhGSUlJ
-SEpGQUJITlhdYWZsc3V3eXyAgYSJjImHh4aKiouNjIeHiouPj4yKiIV8c2tfV1NN
-SEM+P0JBRUQ7PUdIQUJHUUhBPTw+QTw4Nzg4PT5BQEJDQT89QUdEQUJCQUJIRERS
-XlhNV251aFtieId+cHR5h4qAbXKEj4x8cHmOnZ2Of3qHlJOMeW56jJWSg3p9jaCm
-opWSmJyjo5iGdm1wcW5hWFlXVVNSUFJRUFBRUlJRUExMTlBLS01OTE1MTVBMS0xM
-Tk1OTk5OT1BST0xLTEtMSk5LSktLS0pLTEtLSFBNTU9NTk5LTU5MTFBOTUxOT05P
-UFBMTkxMS0xMS0xMS0tRTktLTUlMUU9OTUxOTk9NS0dKTkxNTUtNT1BQT1FNTE5N
-Tk5PTk9PTVBPS05LS0xMTU9PTk9OTk5OVE9NT09PT05PS01NTk9QUFNOS0tMTUtN
-TlBRTE1OT01MTlBPTE1OT09PUVJSVFdSVVhWV1JTUk5VVVZUVlNUVVhYWVlbXWJp
-aG5ydX+GjJCRkpCTjY+OiYaCfnVycGplYWRjZGRmZ2ZiYV1aWllYVlZUVFJST1BN
-S0tHR0ZFQURIR0lLTE9QUlJUUVRWVlRSVlZXVlhWVlVWWVdYWlhYWVhYWFpcWVtd
-XFxdXV9eXl1dWVlXVFJRUVFTT01PTE1MTFBOUE9OTk5RUFBSUlBQUVNRUlNUVVRT
-U1JSVldXV1ZbW1lYWVhWWVhZV1dWWFdXVFRSUFRTU1mCtsrS2d3i5Obo6errSUZD
-Q0RIR0ZGRURGSkRHRkQ+PkFEQUE8QEE7PT9AQj89Pzk4PDo7PDs4Nzk6Oz1CQUBA
-QUVCREVFQkRBPEA/PkBBPT08PDw8QUA+QEA9QEA9QD1APTxBQUBAQUFDRUFEQ0I/
-Pj1AQEJEQUE+PkBCP0JERURAQ0NBQ0NFRUdDQ0VGRkNERUNCQUI/QUJHSUpHRkVE
-S0hGSkxIR0pJSUlJS0pMS0xLR0ZGSUtOUE5NTkxJRkVLTEpKTk5KRkhLSktLTk1L
-S0tHR0tJR0lKR0dHSUdEQkRDPT5BQTw+PDw7Oz4/QT48PTs4Nzg5Nz47Ozw/PD08
-Pj8/QUFCRURDQ0E9QEFDREA+ODY4NTQ1MzU0NzYzNzY1MzQ0NTUzOTQ0Njk1ODk3
-MzU0NzY1NDQ4Ozc2NzQ0NjY3Nzg1OjUxMjU1Njc8OTg5Ozo5ODg4ODc6OkFAQDs5
-PDw+QTw9QENEQj88PUFFRUdHS0lJRkNASEpNTEpLTU5QS0xJSUxLS0dHR0xOTkxN
-TU1UVFdZXFdTT0hMT1FUUlJNTlFRUlBSU1FPUE5QUU5MVlhUS0tMSkhOVVJOSURJ
-R0pKTExKRj89PDo9QDs5Ojk6Ozg3ODU0Nzk2NjEyNDMzNjQ0Mzs9MDMzMjMyMDQy
-NTQ0NzQ0NTU2Nzc6O0JGSlBVWVlYV1JPSEBBQkZCRERDQUdFQz48Oj1AREU5Ozo7
-OTk8Ozg1NDYxMzMzNDc4NTIyMjUvMjQyMjY2NjY1NTc2MzQ2ODw7Nzc4OTo5Ojg6
-OTs8PUJFS01KSUdKTE5PT1BMRkVGR0ZHQkNBQkdNVFpgZW5xdHZ4enx+g4aHiYiG
-i4qHjI2Ni4WJi4uLioiGg393b2ZaUkxIRENCQ0JERT46RktGQkFERUM9ODk9Pzw6
-Oz5BR0ZAPUFFQkFDRklGQUFCP0NDPUlWUUdRZ3JiU1hyfXhnZHWGhXlsc4SQjH10
-g5mio5aMipegnpSAc4CQk4x9b3WGlJqYiIGJlZybj39zcXZyamNaUVJSUlBQU1BR
-T09QT01OT09OT1FPTU9PT0xOSkpRUE5NT05PT09OS01PTUxNTEpKS0tKSkxLSktO
-TE1MS09NTE5OT05LS05MTExOUE5OTE5PT05NTU5MTExLTExPTU5PTEtMTkxLSUlJ
-R0xLTU9OT0xNUk9LTUxNS0pLS01OTU5MSUpNTU5NTE5PTk5LTE1MTU1KTFBPT1FM
-SkxPUk5OT09RT1FSUlFQUFFPTlBNUFFOTlNQTlJQT0xLT1FOTE1SUlFSUVBRU1NV
-VVVRUlFRUVRYV1VUVFVUU1VZV1lYWlxfYWRrcnZ8hIaJiYuMjo+HfXl1cm5rZ2Zk
-Y2JgX2BgX2BeXFtWWldXVFFPT1FLS0pKRkNERERDRURHRklJS09PUVdTVFZUU1RX
-WFdXVVVYV1VVV1hWWFhXWFhZWlxbW1laWFpbXF5fXVtaWFZTU09QUlBNTEtQT01P
-UFBNU1BSUFBQT01QUlRRUlJRU1RUVVRTUlRVVVlbWlpbWFpaV1dVWFdYVlVWV1dU
-UU9QUlVaXYevx9HZ3uHk5ufp6upHRUdDREVHSEhISEdFR0U/R0c/O0FDQUA8PTpB
-QkJAQDo7Ozs8QT06Ozs9Pzo4OkBCQT5BQ0VAP0BBQ0RBP0A8PUFBPDw9QUBBQD49
-QUBBQ0M/QD1AQEJAQUdGRENEQkFCQUREQj4+QkNCQUZBPDxBQUNHRkNCQUFGR0RE
-RUZFR0hDQkZJSUNAP0BAQEVJSUdFRkZJSk5MTUtGSkhISUhKSktLTU1JSUpLSk1Q
-UVBMTElISEZMS0lLSUhHR0lJS0pLS0tHSkZFRklGRkpJRkVHRkRCQUJBRUZDQ0A+
-PDpBQEQ9PDk7PDk4ODo9PT47Ozs8PTo/Pj1EQz4+P0VARUI9PEBCPDg1NjczMzEy
-MjQxNDQ0ODc1MDA2NTw3OTk1NTczNjY2MTY4OTg1MzU1NjY4NjQ0ODY1NzU3Ozc2
-NTc2NDI3ODk5ODg4Ojg6Ojg/PkA9OTg8Oj0+OkBBQ0ZFQDxCRENGRURHSktLQ0JG
-SlBMSkxNUE9OTkxNTUpHRUZETFBRTk1MUFJVWFZcW1ZRTEpOT1RSUVBQUlJVWFdR
-TktMTk5NTktPVVJMS0hNTkpNUExIRkhISkxNT0tIQj09O0A7PDo6Ojk4Njc3Njg4
-NTY2MzIyMTAzMzQzMzAxMzMyMjE0NTIuLjAxMjIxMjY2Nzc2Oj1JUVVZV1VWVFVH
-RUI/Pz4/Ozw+QT5AQD07P0JDQEE9PDs5Nzg6ODs4NjQyNTUxMjMzODQxMDU1MTE0
-NDM2NDg0OTY1NDQ5Ojc4NTc1ODo5PDs8OUBDQkhOUlBISUpLTExOTUlGQ0RGRERE
-RUU/RlBXXWJna3B2ent7fHyAhYiIh4aKiIiJi4yKioqJioiKioiCf3pyal9TSkVC
-Pz1DR0hUSz9BSUxBOz5AQD06NzxEREU+NzpGRkI6OTo/REJFRkZDQUJHSkRDRE9P
-Rk1jaVtNVmx1blpZaHp8bGJnfYmEdW9/lqCelo6SoKinnY2Gj5mYjH1xeIaSlId2
-b3mIlJWLeW5wcm1lWldRT1FRUVFRT1FPT1BQUVJQTk9QT0xQTU1LS01MTUxNTU1O
-Tk5OTk5LTUxNTkxLUk1LT09NS0tKSkxPTU5MTUxLSklLTE5LS0tLUUtKTUxOTk1M
-S0xPT09MUE5NTE5KS01QT01LS0lNTUpHSU1OTktKS0tOTktMTEhITUpOT05OUE9P
-TUtKTE9NTk5NTE1LSkdQTk5LTk5RUFFQT0tOTk9OTk9OTE1OTk9PTlFPT05OT09O
-UExPUU5PTExNT1JPT1BSUlFQUlRRUU1SUlZUUVJXVlVSUVNRVFRWV1lZWFlXV1hb
-XmNocHN3fH2Ag4OChH94cW9vbGVlZ2ViYWFgX1tdX11eXVxaW1VRUU5QTUpHR0dI
-RkdFQUJDQEJDSElKTVBTU1RXV1VWVFRWWFhSU1VTV1ZZWFdVVldbWltbXFxcWFdc
-Xl1fYF9eWlpXU1JPUU9PUE9QUE9PT09OTU5VU1JSUE5OUFJSUk9RUlFUVFRTVlNV
-VFZWWFlaW1xcWFlXV1lYVldZVFdWVFZXVVVUWFlfja3F0dne4uTn6Onr6kVKRUlJ
-RUhLUEtJR0ZHPkBCRkdFQ0JBQz48PT89PD0/PDs8Pz9BPj1APDg9ODc2Oj4+Pz9A
-P0FCQD9EREJCQkE/QEBBPj9AP0E9QD8+QEFEQUA9QkFCPz9BQ0NEQ0FDRUNCP0FB
-QD4/RUJDRkE/QUNHRUVEQkNFQUVHSUlERUVDREJBRkpKSUlHRUdGR0pJSEpDRUVH
-S1JOTEpIR0lKSkpKSExPUk5OT01NU1FST01MSk1LSUhJSkpISEdISEhKSktISEtJ
-RUBAQUFCQ0VEQURFQ0A9P0E/Q0RCQkA8Oz5CQj4+PT4/PTs8Oz48P0A5Oz09QDs/
-QUBBQT0/OTo9Pjw8Pz47OTUyMTQ2NTc3MjQ0NTQyOzs1NjM0NDUzNzk1NTM0MjQ0
-NDU3NTU4NzMyMjk0NDUzNDU1NTY0NDU4NTc3NzQ3OjY1ODs8Pjs5ODk6Ojk9Pzo6
-Oj0/QkJHREJEQD9CQkNBQkNGSkhFRUlNSkpJSk1PTEtKSk9PTVBKRkdJUFFNTE9L
-UFFXVltbWVVSU1FRU1RSUVJSV1dYV1JPTE5PUFFQU1NPT09PTUxKSkxOTUlFREhF
-SUtNTkZCQUBAQT07Pj88Ojw3ODg3OTg1ODU2MzMyMDEzMzIxMjMxMzM0MTQ1NDQ0
-NTQzMzQ1NTk5NjY2PERNU1ZWVVRUU09LRkFAQT45OUBFRUM+PDk4Oz86Ozo4Ojk4
-Ozg4ODo4NjczMTIwMzQyMjEzNjMyLy0xNTQ4Njc4MzQ1NTY5ODYzNDMzNTk9Pz8/
-QERFSVJSUE1KS09RS0xPTEpGSUtERUhHQ0RFTFJdZ2drcHN3e31+foCChIeFh4qH
-hImLjYuKiYuIioiIh4B8eXZsYVhKR0A9PERLSk9PRkE+Qj85ODxAQz87O0FISkI7
-PT5FQDs4Ozs/Pjk9Pj4/RUhLR0ZNT0lGTWBkVUxXanFlVVFgbnFkVF9zfHZlYnSK
-mZiMiJOiqqyhl5KcpKSWh32DipGMgHBqcYKMjH9tZmVlZF1ZVVJTU1NQUFBQUU9Q
-TU5PUlFOTlJRTExMTUlLTU1OTk1MTEtLTE9PTlBQUE1MTExLTlBUT1BPTU1LS0tR
-Tk9PTUxMUU9QT05QTU9QTEpLUVBOTktMUk9PT1BQUVJQTktLS01OTk9MSk5LS0lL
-TU1MSkpNS0tMTUtMTElKTU9NUlJQT01QTktMSUxPTU5PTkxNTk5OTEtKS0tOUVFR
-UExNT0xPTE1MTk1MTkxPU1JPTU9SUE9OUE9OTVFSUE9QTU9PTU1PTk9RUlFRUVJX
-VFNTU1ZUU1JPVllXV1VVVFZZV1VZV1haXWBla25zc3Z7e3t2dXJta2dlY2FiYF1e
-XVxbWVhZV1paWFdYVFBOTUpJSUpHR0dHSkpLRkVFREJHSUpMT1JSVVVUVVRYVVhW
-VFZVWFdWV1ZYWVlYWltbXV5dXVpZW11dXFxdXV5cWFdSUFJOTk1NT05NTU9QTkxP
-UlNRUFBRTU5RUFJSUk9RUlJUVFdXVFVXVFlZW1lZWlpZW1lXV1hYWFhXVlZVVVZV
-VFNVWWSLsMTR2d3h5OTo6erpTEhIS0dGSkdKSkdFQkA+QkNCQ0FCRUdEQUFBPz87
-O0BDRENEQEI9PkI+QT04Ojo7Oz0/PD0/PkJFREFBQEJBQ0NAQUFCQ0M/QkI/QEBC
-QD8+PD5APkI9QkJBP0NEQUFGQkJAPkBAQUJDQ0dFRkRDQkNDQEBAQ0dDREdJRkJD
-RERERUVERUdKR0hHREdJS05KSUhHR0dJS1NRTUtLS01NT0pNTk9QT09PT01MTk9N
-TEtJTU5HR0VJSUZJR0dGR0hKR0hERUdEQz9AQ0FDQ0BER0U/PD5AQEFCQj0+O0A6
-OT1AQkM7PEBCPTo7PTxCQD89Ozw9QTw/PjxAPjw8ODtAQD4/QDo4NjU2NDQ2NTIz
-Ojc4NjU2NzM0MjY1NTY0NjgzMzU1NzY2OTY4Ojc1MzM0NDU3NTM0MjQ3NjQ0OTY1
-Ojc2Nzc2Nzg5ODo+QDg6OTw4Ojk6OTo/QUFAQkRFQEI9PT07QEJAPj9DRkZISEpH
-SElHTE1KS0dGTU1RUEtLSkxPUVBOT01QTlRUW1ZVT1BTVFZQVVdVU1RTU1RTU1FR
-TU9UUlZVU1RPSktNTktNUE9OS0pMTEhITUpIREE9P0M/PTw9PT09Pzw5Ojk5OUA2
-Nzc2MzY2Njo1NDIzMzEwMzMzNTU0NDY1NjQzMTIwNDc0NDU1N0FJUVlXV1hTUE1I
-QT4/PT0+QUU/PT8/Ojc8QUJBPzw+OTs4NzU4NDY2NDg0NDMvMS0xNDIxMTM0Mzk4
-NjU3NzY3OTk5NTk2NjgzMzU1NTk6PDs9PkRJTlBQTlNRTE1QU1ZNSEpMUE5GR0VG
-RklJUFhfZWprb3R5eXyAfn+BgISDhYaGhIeKiYmLjImFhYaDg396dW9fWE5JQT9E
-RUxIRUhGQkA9PkA6OTxHRkM8PEFHRkA5OTtAPTc3Nzk5ODpBREFARk1NTFBYWFBZ
-ZWZWTFdscmRWVGVva1xTXW11allYaoCLhnp8i52lpZiRl6OpqJ2SjJGXmo6Ac2t2
-gYeCdGNeYWFgWFhYVVFTVFRSTk5MTU5OT09PUFBPTk9QTUpKTkxMT05QTUpJTU9P
-Tk1MTU5LTU1NTU9OTU9PT1BRUE9OTExNS05MSk1RTU1QUVFNS1BPT01OTUxOT09M
-T01LTE5OTk1LTEpNS0xNTE5MSUpKSktNTUxLSU1OTU5LSUpNTUtMTk5OTk1OUFBL
-S0lLR0xNTUtPUU9RT1FSTlFNTE9PUFFRUE1OTk9PTEtLTE1NTU9QUVBLT01OUFJO
-TE9QUk5MTk1OTU1PTk9PT09RVFJUUVJVUFBSVFZTU1RVUlJUVFRTU1RUVFdVVldY
-XV5hZ2hqbWxucXBtbGpmY2NjZGJgYWBeW1lbW1lUVVZUUlBQT01LTEpLSUhJSkdG
-SEtJSEZFRUVHSEpOUFRTU1NTVFRUVVVUVldYWVlZVldZXVxaWltZXV1dYF5cXlta
-WFhVVldTUlFPS09NTkxMTUxOUE9NTU9OTVBRUlVRUE9TUlFPUVFTU1VUVFVXV1VX
-WFdYWlhZWVpZWlhUVVdYWFdXVllWVVZWV1RWbZWxwtDZ3uHk5ujo6epOUFFOSEhH
-R0VDREVIREVFQ0JBQUE9QkJHRUFBQDw7PkJGQUA+PDs8O0E8Ojk/Oz07OzxAQEJC
-Q0dGQ0FCPkFDQkBDQkVEQkFBQkBAQkNAQkBDQUJDQkJAQUJCREVERkhBPkBDQ0BA
-PkFCQUFIR0ZBQkJBQUJAQUNFR0ZCQ0VFRElJRkhITExIR0lKR0hJSkpISkpLRklJ
-TU9RUExLT1BQUFBPTk5OU1NRUUxJSUpNS0pHR0ZGSUhIRUNGR0lISEdHR0hBQUFE
-RERCQkNCQ0NEQkBBPkFBQUFAPj8+QkE8Ozs8PkVCQT8+Ojo8QEZDQkJCQUBEPTs+
-Qj08PTk7OUBCQUE/Pjw4NTQ0NDM2NTYzNTo3NTEzNDQyNTU2NDUzMjU5Njk1ODU6
-NzY3NTIzNzQ1Nzg2NzY1NTQ2NjY4Nzw4Nzc7OjY4OTY6Ozo8OTc4Oz84ODs9PkJC
-REJAQUdHRUFBQT9AQD5DQUJDRkZJRkVCR0pLT0tIRkZGTEtNT09OT0xLTk9TTE9P
-UlBQUFFQUVFWUk9RUlJVVFRUU1NTWVNPT1RSVVBSUE9LS0tLSEpQUE5MTExMSUlK
-SEdDQT8/QUFBPTs6O0BAPjs+Ozk2Ojg4OTU3NjU5NDIyMzc0MjEyNjQzNDY2NDQz
-NTMwMDEwNDY0MTAwOURNVFtZVFRSTENDRUY9PENEP0U9O0FBPTw/P0A9PDs5Nzk4
-NTAxMTU2NTIxMzIzNTUzMC8xMTM3MjIyNDQ1NjUzNDQ2NzQzNzc3Njk3OTs6Ojw+
-P0ZTUU1KT05HR0tMUFFNTU1MSUtKRkA7QkpSV2BmaGpvdXh+fnx8f4B9gIODh4WF
-iIeIiIqIh4WDgoKDgn51bWJZU0xFPz0+R0g8PD8+PTs8QkM+O0NOSkI7OT5BQDo5
-OT49PTk6PENFPkJER0ZFRkxLT1leVV5scmJZZHJ2altdbXhvXFZhcHNoV1Zrf4Z8
-bG2DlJ2cj4qTn6annZORmKChm4x6dHyFh39wYV1eX1taVlJTU1JTU09OTU9OT1FT
-T05OUU9MTU9PTk5OUE5NTVBPTk1OTk1NS01NTExMTU1OTkxPTUxMTU1PT05QTUxJ
-RkxMUE1LTFBNTlBMTk9NSkxPT05PUE9NS0pNT05MTU5LTUtKTEtMS0tQTk1MSk1N
-TE5OTUxNTEpLTUxLTU1NTEtNTU1PTExJSEpJSExNSktLTE5NUFNQUFFPTEtMT01P
-T1FOTUxPUE5MTU1OUFBOT1FPT09QUFFRUFFRUE5LS01PTk9PVVNQUlNVUVFRUFNU
-VlFUVVRWVFVRVFRRUVZVV1dcVlNUVlhYWVxeYWNjY2ZmZ2dlYmJiY2JgYGFhXl5b
-W1xbVlZWVFJTUU9RTk1MSEhJRkZJSUdKRkhHR0ZHQ0VHR0tRU1RTV1NTUlVUVVdT
-VVdVV1ZVVVhZWVpcXFpbW1xeYV1bWltdVlVTUFBRT01ST0tKTk1NUE9PT05LSUtO
-TlJRT1FSTlRSUlBRUlJTUlZUVFRXVVRVVVZVWFhaWllYWVhZVlVUVFJTUlNWVVRU
-VV5glLTFz9ne4uTn6Ojq6k1NT05KSEZFREVHRkZGRUE+PkJCQUA8PEJDQkBBQ0A/
-PkRFQkFAPj89PkBJQDw7PT9APj9BQ0NHSUVFQj8+PT1BQj5AP0NFQ0NBQUNBQEFB
-Q0REQUNBQkBDSlFJQ0VERUJBRUVEQD4+QEBAQ0lMR0pERUpIQkRJREdIRkRBQT9B
-RUdIR0dITEpGR0hNSkhKSUxOS0xMS0hJTFJSTU1OTU9OUVBOT1BTU1BPSkRESEtJ
-RERESEhEREVCREFFR0VGQUBCREJAP0FBREZDQkNFQkQ/Oz0+QD8/Pz49Pjw7Pzw6
-Ojo8Ozs7PDw6Nzo/QkVFQ0JAQEE9PkBAQ0E/Pj08P0JGRkE/Pzg3MzUzNTg5MjMz
-MzMyMzE1MzIzNzQ1NTM2NDU2Njg4Njc1NjYzNjY3ODQ5OTo3NDU2NDY3OTk4Ozk4
-OTo6PDY3OTk5Ojc3ODY3OTk4Nzo+REVBQ0RFR0VCP0A9QURAQkBBQ0NDRUNFRkdJ
-S05QTk5DREhJRkdMT09NR0ZMUFFNUFFSVFFSVFFSUldTUlVWU1NPT1BTVFJVTlFO
-UFFUVE1MTk9TUUtHS0xQTExJSUlJSEtLRkJAP0FDREM9PD9AQUE/PTw6ODo5OTY3
-NDIxMjI1NDMzNDQ1NjIzNDMzMzEyMTEyMjU1NDIyNDUwLzM2PUVLVlpVVVFJQkRB
-QkA8Q0ZBTD5DPkJDPDw8QEE6ODY5OjQzNTMzNDQ3NDUyNjQ1NjY0MzI2NzgzNDMz
-MTMyNjg3MzU0MTMxM0M7Nzk3Njk8Pj5CREpMTU1NSEVGR0pMTE1NSkVGTExGQUVD
-SVJYXGJnb3J0dnd7fn18f4GEhYiGiIqKi4yMh4WGg4KCgYB9e3VqYVhRUElCPkFC
-Qz0/Ojo7Oz9CQj85P0RIRD85OTs9ODc5OUBAPz9DSEdFRkFFSENESUlNVVlTWWx0
-aF5sfHtxaG55fXZlXWp3em1dXnGBg3dlbIGRlo5/eImanp2VjI2XoaOdjISAgIeE
-eWxiXl1bWFdUVFRVU1FRT01OTk5RUU5OTk5OUVJOTU5OT1BMTVBPUE5QT09MTU5P
-TkxRTk5OTUxOTUtMS0pMT05PT1BQTEtJSE1OUFFNT01NT0xNT09PT1BOUE5STUxN
-TEhJTkxOTExLSElJSklMT01NTU5OS0tMTU1LTEtMTExLS1BMS0pKSUZJTExOTUpJ
-S05MS0pLTUpJTU5MTk9OTkxMTU5MT01MUU5QUk9OT01RT1FOUE9LS1BRTk5QUFBS
-UlBQTUtLT1BRT09QU1NTVFNQUFBRUlNRVFJVVVVSVFRUVVFQUlFSUlJWV1haWVhX
-WVxdXV9jYGBhYGFhX2BiYl9gXl9cW1lVVldXVlNQT1FRTU9NTU5KSENER0lKSU1J
-Q0VGRkZIR0hLTE1QUVFTUlRTU1VXVlJTVVNVV1VUV1hcXFtcWltbXF5iYFpcWFtb
-VFNRTk5PTk5PTVBSUVBPTUtMTkxKTVFST09NUlJQTk9PT1BQUFBSUlVUV1ZWWFdX
-VldcW1hYV1ZXWFVXVlZSUlVVVlZUUlNWX2WPt8fQ2N7h5Obo6unrUVFRUEdFR0RE
-RUpISkZEQ0I7QD9CP0BAQDw+O0JEREFBPj1DQD0/QUI9PTtAOjk6PD1AP0FBREdI
-RkA9Oz48PD1AQD9AQURERUNBQ0VCQkNGRURDQUFDR0VKWUtBQkJDQUJAQkNCPT08
-Q0NGSEdMSEZGRUZERUVGSEpHRUVFREVHRkdESEhJSUVGSExMSkpITExPT09MTU5M
-T1JPSUxOUFBRUFBRUk9NTkpHR0dFRUVDQkBDRkdHRUNAQUBDQj07QEI+QD9BQT9B
-Q0NFQkFCQ0JCQ0RCPz9AQ0BAQD9EQkFAPTw9Ojo5PT09P0BAQ0VCQUI+Qj1APz0+
-QUJEQEBER0dHQEI+Nzk2ODU1MzU2NzQyMjIzMjAyMjQzNjc0NTg3NDI1NDc5Njo3
-NzU0NDY8OjY2NTUzNzQ0NDQ3Njc4Nzg0NTc3NTc4ODc2OTc4PDk5Ojo6Ozs7QD9I
-RENBQD0+QEFEREVCQ0hJRERGQ0VKSUdITExOS0hJSklOS0lKTUlHSUlLTk9MTlNP
-UlFRT1BSVVNUV1ZXUkpKTlNVU1JQT01QU1VST01PUVZWU0tMS09QTlBMSEVJS01L
-R0REREVGREE9QT48Pj9APDk5OTg3ODYyNTc1MjIyMTExODIzNTY0MzEwMTEwMzM3
-Njc1MzMyMjU1NzY3QkZPWFlWUUxKQzs+QUM/Qj07ODtCQ0NGQEI9PDk6OTs4OTc2
-MzY1NDc1MjY2MzQ2MzE2NzY0MzEyMTQ1MzM1NjQ0NDQ2MzY3Njk0NzQ3Ozs7Oj5F
-TExPUk5KTEtGR0xQTkZGR0tHR0M+QERMTldcZGZsc3N3e3p5eoCAgoSGhIaGhoeK
-jIiGhIaFhIKCgX95dW1jVktHRENBP0BCQTw5Ozs5OTk7Qz45ODxAPTs4PDw9OjY6
-QEVGPTg9Q0JJP0JDRUREQkRNUU1Ya3BkXWl7fXBnbn6Ce2tmc4SEdWltf4uLfG5v
-hpSYi3d0hZick4R8gpCam5iLgoKJjIh7aWBfWFZTUVBTVFNSTlFRTVBPTU9SUVFP
-UVFPUVNRTlFQTkxNTU9PTE9MTU5NT1FQTktMTkxMTExMTExJSUpOUVBPUE5NTlBM
-TE5PT09NSktOUE1PT1NPTU5OT05MTVBPTU1OTU5OTE5OTkxLTE5OTU9LTFFNTE1M
-S0lMTE5NTkxJS0lJR0ZESEhKSktJSUxLSExLS0tKTExPTU9OS1FPT05LTktPTU5R
-UE9RUVBQUFNWUlJRU1JPTE9PUVBMT1BQTU1LTEtKTE1KTE9PUVBUU1FOUlFQUlJT
-UVFRTk1OT05SV1hUUVNUUVJYVlhXWllZWVlbW15fXF9fXl1dXltdXFpbWFdWV1hW
-VFRTUlNPUlJRTU5OT09ISElIRUtKSEVIRUJEREdISktMTU5PTlRVU1JUV1dWVlZV
-U1ZXWFZVW1xfW1xcXVxbW19fXFlbWFNRUU9PTU5QUE9OT01OTU9OTk1MUUxNTU5P
-Tk1OTU1NUVRPUVNVU1RUVVVVVFJTVFVXVlVWWVZWVlRSVFdXV1dYV1RRUVRWVVRY
-X5K7ydLa3eLl5+jq6upNVFFSTUlIR0dFRENFQj89PkM9PUNDPD5BQUE5PEFBPz1B
-PDtAPTo8PTw6PTo7Ojk7PT49PkJCQENERD5BQkBBPz49Pj9CQ0VKQj8+P0FFRURG
-REFCQEJEQkFFRENCRERFQkFCRUJCP0NER0lIR0hISkhIRUZFR0dITU9KSEZDQ0NK
-S0lJSkpKRUdJT05OUlNSUVJVUE9NTk1QUkxKSktOT1BOTE1JTEtKRklHRURGQ0VB
-Pz9ERUhGQz8+QD89PDs9Pj8+Ozo6PDtAQkRFQ0RDREJCQj9CQEJBQUBBQUJAQUJC
-Pjw+PEA/QkZDRUFBRERFQEJBQT1AOzxAQT9CREJDRURAPz1AOjc2NjIxMzc2NjY0
-Njc0MjI2Njk1Njc0NTMzNzEyNT01NjU0MzY3NDc3OTg2Njc2OTc4NTU4ODk4ODU3
-ODU0NTY2Nzk6PDk4OTw6Oj85OkA+Ozo8QkVEQz4/QENESkhERkZIRENHTkxMSUlK
-TkxLRklNS05NSkxOT0pKSktMTEtJTFRUU1VRUU5PT1VVVlZTTEtPU1NWVFBQT1BR
-UU9NTk9TVlJQTU5NUU9NTk9KSERHSkxKRUE6QURBPTs8Ozw+Ozw4OTc7ODg3NjY2
-MTI0MzEzMzExNDM1NTMxNDEyNDMzNTU4ODQ4MzQxMjI1OTc6QUtTWF5ZVE5FPT5A
-QDxEQD84OjxAPkJBQDo5Ozc6Ojs4Nzo2NTY2MjIzMTI2NjMzNTUxMDIzMTQ1MDIz
-NjYzMzMzNTc3MzU3NjU4ODg5Ojs8PUlPT05NTUpJTEtLUU9NRkVER0tHQj5ARUxT
-VlthZGhrcHJ1eXt8fX99gIaGhIWGhoeLioqGhYWFg4GAf3t1cGdZUUlFQEBAPT8/
-P0A/PDk6PDk5OTc1Nzs7PUI7PEFEPDo8Q0Y/PDw/QUM6OT1AQkI8OkJDQkxhZVdP
-Y3V2bWhwf4N6aGVyg4R7cXaFlJOHfIKTm5mMfHuKl5iKfHV7iZGPhn59foaIgnJf
-WVpXVlJUUVFQUFFSUVRRTU1LTExPUVJPUlRRUFBRTE5PT0xLSkxNTk1MTk1LTU5O
-TktNTk5OS0tKS0tMS0xNTE1OTk5NTU5NT05OTk5QT0xPTEtLT1JRTk1OUE9QUE9O
-TExMSkxOUU5MTEtMTk1PTU5OTUtNTk9NTkxMTE9PS0pHSElHR0dGSExKS01NTU1S
-TU1PTE1PTU1NTU1OTk1OTE1QUU1LTk1STkxQUlJUUlFRT1BSUVJPUE5MTE5OUFBO
-S05NS01LTE5OTVBRUFJSUVBRUE9TU1JTUk9RUVBST1BTVlZWW1hVVVVUWVxaWllZ
-Wl5dXFtbXF1bW1tXWVdXWVhYWFlWV1RRUFJQUFFQUE5PTEhJR0hLSUlHR0dIRkdB
-QEBFR0ZJTFBOUVJQUlVYV1ZVVlVXVFdXVVdZWlhaXl5gXVxeYFxfXltcW1hST1BN
-Uk9MTU5PTk9QTk9QTU5OT05PUE9OTU5RUU9PTU5QUE9QUFJVU1VVVFZWWFhWV1ZT
-VFRVVVdVVFVVVVZXVlRVVlVUU1NRVVdsoLvK09re4+Xm6Onq60xNT01MSk9MRkZD
-QUJGREA+QkFAQENAPDtCQUA+PD5BQD08P0E9PDw7OTg6Ojw9Pj0+QEA+QUNDQkVH
-RUZEQkJBQUA9QEBBQ0hAQUBDQ0JBRUdIRkVHRUVDRUVCRENNTUdGQ0dGRkJAQUJD
-RkhISEZIR0lKSEdJS01QT0tMSURERUhKSEpJSkhJS09SU09PUVFSU1NVUlBMTVFS
-Tk1JTU1QVFJNS0lLR0dHQkNFQUBCQ0BAPz9CRUVEQjo6QD9BPzs6P0RBPz1BPERE
-R0VFR0hGRUZCQEBCPz9BRERCRURBPUJCQEJDRUJBRkhEQkFDRkVESkREQUBCPD0/
-PUFBQkBFPTs8Oz46Ojg2NTY2MjIzODU4NDQyMzg3NDY4NjYxMzQ1NDg3OTs3MzIw
-ODk3Njc3ODc2Nzc3NTc3NzY1NTg4ODs4ODg4ODU4OTw8ODg7Ojo5Ojo5Ozo/PTs8
-QkI8PD1AQURGRD9CRkpJSUlLS0lJSEtLSkhHSElOTkpNSEtOUE5LTU9OS0hLTVNU
-V1FQUFRVU1NTUFBOUFBRU1VUUlNTUVFVU09QUFFUVlZRVFFRUFFNTU1JQkdKSUhG
-Pz09PT88PDw9PT06Oj87Ozg4Oj06Nzk3ODUyNjQ1NTQ2MjIyMC8zNTQ1NjY3Njc1
-NjQ0MjM0NTlDOTY3R01UVlRTTEI9P0A8OTw+PT1APDs8Pj4+OT07OTc2ODc1NTU1
-NTQzNDMyMjEyMjQzNDs2Nzk6OTY0MTEzNTQwNTU1NjY2NzY4NzM3OTg6OzpASVBV
-UEpNSUhRT0xJS0hGR0dISEZCQkBITVRWXWJna21wb3FzeHp6fYKEhoiIiYmIh4eI
-iIeIhoWFhYWCfHRxaF9WTUZFQEA/PT5ESUlAPj07PDw5Nzg5PTs6QEI+QEJCPTpB
-REI6PTk8PTs6Ojs+PTw+QkM+RFRXTEdTaGlcWWFzenJjYXSBgHRqcoeSkYh/iZuj
-opSHiJSemox7dXuHjIh8cW92f3tzaFxVV1dVU1NSU1JRUVBOTk5MUE5JTExOT1BN
-UE9PT09RUFBPT01MSk1MTUxJS09PTUxQTU5NTk9MTUtMT01NT05OS0xMTU1NTU1Q
-TUxOUVFNTU1NTkxNTU1QTk1MTU1KSUpLS01MTUlKTUpKTU9PTE5MTkxNS0xNT01N
-TUxNTUxOSExKSU5OTU9MSk1KS05QUE1PTUxPTEtMTExLTU1KTkxMT1BQUFZRTk5L
-TE5OTlFRU1BOT09NTU5RT01LSUxMTUpMTk5QTk5OTk9PT05QUU5PT01PUVFSVFJP
-UFNUVVhVVlJRUlVVV1pYWVdYW1paWlhcXFtdXFpZWllZWVdbVldTVVdWWldVUVBP
-UVRST01MTU5MSEhJR0VFREZGR0dGRkVGR0hJSUtNUFRVVFVSVFdYV1dWWFZVVVZX
-W1paWlpcXV1dXV5cW1tdWFhXVVFSUk5NTE9QT0xNUFJRUFJQUFFOT1BQTU9PUExO
-UVJRUVFSU09TUU9UVVZZVldWVFNUVFdUVldXVVZXWFlXVFRVVFVUUlJVUVJUWHyq
-vMnS2d7i5ebn6OrpTEtMTklGR0tGQ0FGREBERkdDQkA+QElGQT4+Ojw9Ozw8QEBA
-QEE+PkA9PDo9QD89Pjs/QEFAQ0JDR0pFREI/PkFEQj4/QURCRUREREVDRUhGRUZH
-R0NCRUZCQkNAQkNERUVFRENFREJBQEVJS01KSUtMTExLTk1QUVFPT1FOS0pLTEpM
-TUtKTUlKTVBUUVNQVVVTUU9PU1JPTUxMSkpISUtLSUxKR0dJSUhEP0E+QkNAQT48
-PEFDQz89Pjw6QD8+PD9CRUE/QUE+QUNER0dCQkNHREJBREJDREJGRUJIR0JERERB
-Q0dJSERERkRERUZHRkZLTEhFQEBCQ0A/QUREQUZAQUM7Ozg3NTc2NzYzMzI3NDU3
-Njg2Njc3NTM0MzMyNTc5Njc4OTQ2NDQzNjc1NDdCOTg3Nzs/OTQyNTk3NUY8PDg3
-OTo6PT06ODU3Nzo6Ojo6ODs8PTo6P0dEPzw9QEFBQ0NBPj9CQ0ZHRkhGSUhHRktJ
-RUZKSUxLSUpLTU9PT09QUlBNSUtNUlVUUlRWW1pXUlBPS0xTU1BUVlJTUlJRUFNW
-WldZVFlXVFFTUU5PTklMSEdJSEhKR0RBQEFAQT88Oz5AQjs5Ozw9PDs4NzU5Ojk1
-MzQzNTQ2NjU0MjQ0MTA2MTQ1MjMzNTQzMjM1Njc4NjoxNDc7REtUVlNLRkA7Pj06
-PT4+Qj07Ojo6ODk2OT08OTU3OTc1NTY3MTMyMzEyMj0yMTQzMzM0NTU2NzY0MjI1
-NzkzMzU2NTc3NzU3ODg4PTw8O0BHS09MTFBKSk5QSkhJSExHR0hKR0VEREdKT1Vc
-Y2dscG5xdnV3fXt+f4GAhYSDhYeHh4eGhYSEgoSFgX58eXRtZVlPR0JAPTo8QUBA
-QD87OTxAPjw/OzQ5ODg9PDw4OD9APDo+PTw8OTo+QDo3OT1AQ0NERkNFT09IRVJg
-X1JMWGltYVRWZ3V1Z19pe4mJf32JmaSglo6ToKajlIV+go2OhnhsaGtxdGxhW1ZV
-VFRUVVVSUFBNUFBQUE5NUFJTUlJSU05NT1NRTU9RUE1OTExLTUxOTlJPTE1PT0xP
-T05OT1BPT1BOUU1MTUxLTEpLUE9QU1NUUk5RUVFRUE5MT05MUFJQUFNMTU1MSUpK
-S0tNTU5NTEtNUVBLSU5NTEtNTExSTEpMTExLTExOTE1NTUtOTU1LTktMSkxOSklI
-R0pLTUxLS0xNTEpLS0tMT05QUVBPTExMTE5NUVNPTU5MSktLTE1RT01PTUxLTE1N
-TU9NSkpNT01PUFJRUE1QUVFOTVBRTk9SUlFPUVFRUlJQU1dXWFZWVVtaXV5cW1xc
-WlpbW1pcWVlYVlZVV1ZVVVNTUlNXUVNSUVFRT05JSUtMS0pJRUNFRkVHSEhISERE
-RkdHSk5PUlNUU1FTU1dVVlVWV1ZYWVhZXFtdXFlaXl5gYV5bW1xaV1lUUlBRT05O
-S0xNT09PT1BPTlBPUE5QUFJQTlBQUU9PUlFRTk5QTk9SUVRWWVhaVlVVVVNUWFhZ
-V1lcWVdbWFVSUFFVWVpXVVRUU1FUg6+8ydHZ3eHk5+jo6epNTU5MQ0VHR0dESUtF
-RUZDRUZCQkRERENDRUM8Pz5CQkE7Pz08Pz49Pjs7OTs5OTg6PTw/REZGSEVGSUlI
-Q0NCQ0JCQkRDQkFCQ0VIR0RDQkFAQUNFREFERENBQkFBRERDQ0JBQkNDSUVCQ0VJ
-S0tKS0tKSk5NTlJRT1BOUk9NSUpMT01NTU5PTU5MT1NRUlRQT1JQUVFRTEtKSElJ
-RENFRElHSUdFRURHSERFRUZCPj5APzo4OTw/Pzw6Oj1BPjw7PUA/P0E/REFBQkJC
-R0ZDQkNEQkFCQkBBRUVIREZEQ0REQEFAREdFREZJSEVDREVESE5PSUlESUZDQUFC
-QUNDR0VGREFAQTo3Ojk1Njk0NjU2NDU2NTUyNDUzMjEyNTU0NTIyMjg2MjI3NzU0
-MzQ0NDY3ODk2Ojo2NzQ2Nzc3QTk3Njk4Njg9PD05Nzk9Pjs6Pz48PDo5OT1BREVI
-QUNBQUBAQEBBQkJCRUVGSEdKTUpHSkhEREZGSkdLTExQU1BVUVJNTk5OUFFSTE5O
-UVheX1pUUU5MTlJUWFNTUlBQUFNPVFtZWltXWVVUUlFQT0tJSUpFRkhMSEdGR0M/
-PkFFQj0/OkJBQj07Ozw6Ojs3ODc4NzU1NTQ0NTY2MzY3NjMyNDMxMjEyNDQ0MjEz
-MjQ5ODU1NTc1NTY7Sk5UU0pGPkNCPDo8QDo/OTw/Pj47PDo5Pz86Ojk1Njs4OTU0
-NDY0Mi4xNjAxNjg2NTY8NzM1Njc4NjM1MzQyMzUzNDU2OTY2NDQ3ODg7PkVNUExK
-SklHSEtJS0pJSkxMSkpHR0RDR0tPVFliaWxub3V0dXh8gYF/gYKDhIWFhYaGh4iE
-gYODgICAfXt5dHNrYlRJREE6ODo/Qjo5OTk1Njo9Ojo0NTk3ODo6OT08PDw7ODs9
-PD06OD1BQDxARUVERkdIQ0dUUkpHUFtcT0pXZmddUFJgbWlXUVpwe3ZpaXqOmJqQ
-jJajqKWbj4qNmJOJdWdlZ2loY1pXV1ZVVVRTUlBQUFFMT09NTk5PT1NSUVJRT05O
-Tk1PTE5OTExNSk1NS0tLTU1KTU5NT09QTk9OT1FRUEtNT0tMTU5QUE9OUU9PUFBP
-Tk9SUU5NT1BOT1BNT09OTE9OT05LTVBOTU5QUU9PT1hOT09OTExNTEpJTU1JSUdJ
-SktJSEtKTk1LTExISkpKTU1LTEtHR0pMSktMS0tNTU1LSEtMTEtPS0hNT05MSk1N
-S01OUFBOTlFOUE9LTlBPT05OTUxNTkxMTE1LTU5OT1BRUFFSUU9PT1BST09NUFNS
-UlFPUE9UUldVVVdVVFdXWFxcX1xbWlpYWFhZWl5bXFlUV1dRUVFSUE9PUlFTUE5O
-T09MS09LSElKSEdJRURIRkRFR0VGRUZGR0hKTFBVVFNRVFVUUlRVVVVUVVpaWFpb
-WlpbX11eXl1eXF1bWFdUU1JQUFBQTk1MTU1NTk5PTk5RUE1OT05OT09NTlBSUVJU
-UFBPT09OT1FVU1JUV1dVVFVWVFVXWV1bW1pZVltaV1VWVVhVWFZWVFRTUVeMsbrJ
-0dnd4eTm6ejp6k5TUUtMRkVIRkZGRklKSURCQEJBQ0BBPjw/Pz8+Oz09PT46Nzw9
-PTk5Ojs7PTw7OjpAPT9BREdJRERFREJCQkJAQ0NCREREQkNIRUZFQ0REQ0BBQkRD
-RURIRkRAPkJBQ0NBQUFCQkRHQ0RERktLSEhGR0dFSkxMTlBPT1BQTkpKSUxOTkxN
-UVJTU1FSVVFQUlNTUUxMT0tJSEpFQ0REREJEREZIREZDQkRHSURCR0ZAPT47PkA8
-PDs7Ojw6Ozw8PT0+PT9CQERDREFDQkRDQT9ERkRDQ0RBPT4/PkFEQ0NER0hIRUNF
-RkJDRktKRkVHRERITk5MSUVHS0lFREdFTUlJQ0ZGRkA+ODQyMjAwNDMzMjQ0NTE0
-MjAyMDAzNTQyNDU2OjY1NTc7OjY2NzY1NTMyMjY5Nzo4Nzk3NDIzNTY4NDE1NDQ5
-Ojg6ODg3NTxAP0E/PTg6PTs9PUFDQkVEQj89QEJDQj9FQUJEQkRER0pIR0dISkZG
-R0dMSU1PUFFPU1JTVE5OTU5SUU5KTU5PVVtcXFlUUlFQVFZXU1RTTlJSUVFQTVdX
-WVpYVFNQUE1OTlBNSkZERkhEQ0FBQURGR0NAQTs9PUNAPzs8OTs7ODc2Nzc1Njc3
-NjczMzIxMzM1NDIzMDIzNDMzMTEzNTMxMzU3NTUyMzUzNDhCTlBTUUtCP0M6Ojk8
-OUBBPz5GSUg8QT9APjs6PTs6NTU1MzU3NjIzNTIvMTM2NDg1MTIxMzMyNDY1NTQx
-NDM1NzYyMjU3NjgzNDY2NzxBRE1NSktQU01LTExMSkhKTk9LUEVEQkNFSlBWW19k
-aGxyc3N5e3yDhIGGg4OHhYWEhYaHhoSDhISAf4CBgntzbWphWlBHQDs7Ozo8Pzk4
-NTY3ODk5NDc2ODg1Njk5PDo2Ojo9Ozo9Qjs5QEM/PEFDRURHSElIUFdfUU5aYl1R
-TlRiZVlQVGFsaVVMWmp1Z1lcboGJh3t9jZ2kn5GIh5Calot8cGZoZWFeXVlXWFZU
-UFFRVFVPTUxMTU5QS0pOT09OUlBPTU9QT01NUFBOT05ST0xMTU5MT01PTExPT05Q
-Tk1PUE5NTUpMUE9QUVFPT09QT05OT05OUE9NTlBOT1FPTE5NUVNOTVBRUlJRUVFR
-T0xKTk1QTkxOUVBQUE1LSklKTE5NS0pLS05OS0lJSk1QTlJNTkpJSktKTElJSktL
-SktNS0tMTE1KSktLSk1NTUtKS01OTUtNUE1OT05SUU9SUFBRUVRRTEtKSkxNTk5O
-TE9MTE9QT09QTk9TTUhLTU5OTkxOTVJQUlRSVFFTVVZUU1ZWVllZW1pcX1xaW1tc
-V1RVVVhZWFlTVFFOUFBST0xMTlRPUE1LS0tKTE9NSkhISEhIR0ZISklHTUlIRUdG
-SEtOUlJUVlVVU1VUVFVUVVlVUlVYXV1dXlpdXF1eYWFdXV5ZVFFQUE5MTEtMTUxK
-S0pLTE5MTE1LTk1OTU5QTk5PTlFQT09PTUxMSlBQUVJRUlNTVVZVV1ZZV1VWWllY
-WVhaVVVYVlZVVFJWVFNVVVVWXJKzusfR2t3h5Obn6OvqUUxHS0lIRUdEREZCQkdG
-SERBREE/Pjw/REJBQD9COzo5PTw8Ojw+Ozo7Pj89PDs8OTg5P0FCRElHQkBAQ0RC
-QUJERERAQUNEQkRCRUdFREJCRkRESEVDRUdIREVEQEJDR0RAQkNFRUdIQkZISEpM
-S0xJSkhGSEtMTU1NTE1NSUtMTFJOUFNRU1FUUlJUT1FWV1VSTk1NS0ZFREZHQ0JD
-QkNDRUVFQ0BFRkZDQEFBRUA9Ozk6Pj8+PDw/PT08Oz0+QEE+QUBAQEJCRkNCQ0RF
-RERFR0dFQz8/P0BFSENEQkRDSUlGQ0dGREVLTVBPTExOS0dMTk1NRkRHSkVDSEhK
-SENAQkVDRUM/NjI1ODU0NTQyMjE1NTUzNDQ3NDEwNDUyMjc8Ojc3NzY1NzQ2NTY2
-MzM0NDM2Nzc6Nzc4Nzg4OTQxNDUzNDU2Njc4Ojg1NTs7Ozs5PD48PD0+QUREQT1A
-P0A/QUVGQ0JCQ0JCRUdKSkhHR0tKSklISUlHSU5RU1ZXUlJRVFBOT09RU1NOT1JY
-WVhbWFZVVFJTVVVTWFdUT1FVVVJUV1ZWVldYUk5PS09SVFBNSkhKR0NDR0JCQ0dG
-RUNAOz1BQkFAPjw6Ozw6Ozc3NzY4Nzk1NDMzMjEyNTY2NjYzMjIzMjMyMzYzMjUy
-MzU1NzQyMTI2Nz5HTlBNSUdCQDk6OTk8PUA/P0ZDQTw/QkI4Nzk4ODU2NzU0NDc2
-MzE0NTU4MzU0My8yMDM0NTM0NDY3NDMxMTY3NjUzMzU4NjY1Nzg5PENDSUxNTVVQ
-R0pLT05LTE1MTEtIR0ZFQ0RKU1ddZWZscXV5ent8fIF+goOFhIGCg4SFh4SGhYWE
-hIJ9e398dHJsaGVhWEhEQTw6OD1ART0/Ozk5ODc5Ojg4NzQzNTk0MjY2ODo6PTxB
-QEA9QEJCPT5BRENKTU9MWF5VVmBqZ1VSXmtvYFVYZm1lWU1YbnJlWlxwfoBzZmp+
-kZiUh36BjpWWiXhwamdkXlpbVldYVVFTT1FPT1FOTE5LTE5NSUtNTUpNTk9LTVBO
-UlBQTU5QTUtOUU1NTU9NS0xOTUtOS0tOTE1PS01OTk1OUU9OTk9PT1JPUE5NUlJO
-UFFPUk9OTU1PTk1PTk9NTU9PTU5PTk5MTUxJS01OTU9PTUxNTU9PTktKSkxLSk1M
-Tk5KSkhJTEpJT1FPT0pKTEpJSkxJSUtMS0xNTElOTU1NSkxNTE9QSkdKS05MT09P
-T0tKS05TUlVPTkxNTk9KS0lJS0xLS0tKTU9OTE5QUk9PUVBQTEpMS0tLTU1ST09R
-UFBTVFFTVVRRVVdVWVdWV1tfXV1ZWVhWVFVWVFZWWVVTT01MUE9QUUxPTk1MSkpJ
-S0xIS01ISUlISEdIRUdLS0dISExHRklMTlFRVVVWVlVXU1NSVVVWWVtWV1lYXV1d
-WlleXl5eW19dWlZTUE1OUExLS0tOUU1OTktJTExLS01LTFFMTU1LSkxOTk9QTlBS
-TVJRUFRVU1RUVFRUVVhXWFpYVllZWlhaWlhVWFhWVlJUVVRWVVZXVlhYi668yNLa
-3uHj5ujo6upMSElRTklHRkhHRURFR0dCQUBAPkI+QkA8QkM+QUA8Oz46Ozw9PDs8
-PDo9PTs+Ozo5Ozs5PD9CWFpIRkREQ0JEPkJBQUJAQEI/Pz8+Q0VHRUNERkZJQ0JC
-RkdIRUJAQ0NFRkZDQ0ZHRkRER0ZISUhMS0hJSUlJSEhMTk5KTExOTU1NT1FPUVRP
-UFNVVFJSVFZWVVFOT05JSUlFSklMREI+PkE/Q0NDREVJRkVBQkJBPzs7OTk8Qj09
-PD8+Ozw+QUJAQD09Pz5BPz9FRkpKR0JBQUNDQUBDRERBQUNFRERERklHR0VDQ0VK
-Sk5QT1BOT1FPS09QUFBNR0VCQ0ZFREdDQT4/QkVHQzs2NjU0MjUxNjY0Nzc4NDc2
-Oz44MzQ0NTU3NTQ1NjQzNTUvNDU1MTUzNDU0ODY1Nzg4NTU2NzU4NTQ3NjU3ODc2
-Njk7ODc8OTk6Ojs/Ozs9PT1CQ0BDQkJDQT9BQ0ZGRkFERkdJSEpKSUtMSUtLTUpI
-REZJTVJVWFlWUFNSU01QTUxQUVNSVlpbWFpYWVVYV1FSV1hXU1JSV1tZWllYV1VR
-UlNRTUxLT1VYVVFLSUpFREJERUVDRENDQEI9QEA/QENAPDw/Pj45Nzc3ODU2NTY2
-NDMyNDMzNDI0MjMzNTEzNjU0NTU0NjY0NTc0MjM2NjM0NT9HUVFJTEI9Ojg6ODw5
-Oj0+PT04OTk8OTY2NTc3NTQyMjU1NDIyMjE2NTM0MzM2OzQyNTc1OjMyMDEzMzEx
-MzQ2NDQ1NjY1MzQ1NTo7QERITk1OS01OTkxITkxNS01LSktIREVESk5UW2Bqa3B3
-dnh6fn5/gX6BgoWDg4OEhYWCgoWFhIeEhYB4dHZ0dnZxa2RaU0VAPj03Ojs+PT5C
-Pz06NzU4Nz01NjU2OzQ1ODY2Nzc3OEc7Ozo6Ozw6OTk8PUBHSUxWXVZRXWlmXFpo
-dnhsXWRxeGxgW2R0d2hdYnWDf25hZXqNjod5b3KBi4mGd21pZmFZWVhZV1ZVVFFO
-UFFQUE9QTUxLTE9OTk5PVFFRTU5PTk9RUE9NTU5OS0tLTE5LTU9LTU5OT05NTk9O
-Tk1OTk9OTk9MTFBNT0xOTlFOTkxPT09QUU9ST09MSUpKSUxPS05NTlBPTElMTkpL
-TEtMTktLTExOT05NTk9PS01MTk5MTUtJTEtJS0tLTVBLS09NT01NUlJOUFBMSUxL
-TE5MS0pMTEpKTUxOTUtMTEtKSUpOTExOTUxQT1BOT09MT0xMTEhLTU1MSkpKSktL
-T1JQTlBSUlFQUlBRTU1OT0tMTVFOTE5QUVJUVVVTU1NRWFtUVlhYWlpbXFpZWltZ
-WFRRUU9PTE1OTVBOS0xNT0pNTUhISElLR0ZHSUtLSEhJR0hHQ0VHR0hJSkpKS0xO
-UFNVVldYVldVVVZUV1xYWlpaWVpdX1pcXV1gX15cXFpXUk9NS0xNSUpKSkxRT05O
-Tk9OTkxNTVFQTk1NTU1MTVBQT01QUVFQTk5QUVJTVFVUV1VVVlhYWFZZWFVfWFlb
-XFhTU1RRVFRVU1RRVFVaWFWAprzL09rf4uPn6Onq6klKRkZJS0tGQ0NEQUREQkFD
-QkRKRkJBQj5AQj5APj9EPzw8O0JAO0E8OTw7PT8+PD05Oz0+Q0REQkdHR0VHRkJD
-QUNBQUNDQ0ZDRkNFREZIR0RHS0lKR0RFRUZKRkVFR0lERUZGSUlGR0VGR0hMTEpK
-SkZGTklISUdJS0tNUFBTUU5OUVFNTUxOT1BPU1RVU1JPT1BQT09LSElISURCQUE9
-QUFCPkRDRUpGPz0/QD9APTs+PDs7Ojw8PUBAQkJEQ0FAPz09QEJBPD5DR0pIQkBF
-REJDPz5CRENERURDSUpKSkY+QUxISEhMT05NUlFYVk5PS09NTEtLT0pHSUxJSEdF
-QkBBQkdEQTs2NjczMTMyNjY2NzczMjMyNjM1NTc1ODM0MzM2Nzg1NTU0NjU0MDIy
-NDU0MjQ2NDQ3NTU3NzY1Njg2NDI0NTU0NTc4ODs7Ozg7PTs8Pjo+P0NERj8/QEA+
-PkFCRkZHRkVGRkZFSElHSElGR0lNS0hFRU1PUVRXW1hSUU9TU09PTUtNUVBUVlpZ
-WlVSUlZVVFRXWGRWUFNXWVxeXV1XV1NRTk1MR0ZOU1lVUkpHR0lHSEhJRkVBQEJB
-PT8/PUFCREM8ODk7PDw6OzY3NTY0NzQzMzQyNDQyNjQyNTUzNjIwNTMyNTg0NDM0
-MjEyNTExNDU5O0NPUUdORkA7PT44Ojk5OkE9Ozs4Nzg2NjY3ODc6NTY0NTY1MzMz
-NjU0MzAwMzs1MjU1MjU3Ojg2MzQ4NTIzMTQ3ODk3ODs0ODQ4Nzw/SFFVUkxLT1NR
-TUhMTU5HSktKSUhKRkhPU1deaGtvdnl7fX2AhISEg4SDhIWDg4WFhYeFhISEhYWA
-fXd4eHVzcnBrZmJXUEdCOzo7OTdAQEFAQDk1Nzc6Ojk7Pzk3Njg4OTk3Ojs6PTo8
-OTk8Ozo6OzdAQkVFRk9WU0tXZGdbVWJ0eG5jZ3h+emlndoCBdWlwfoiGeWdrfYyL
-gHJobHZ7d3FoZGNiXlpXVlpXVVNTUlBPTk1NTk9OTExRUFBQUlBNTU5QTk5OTE1L
-TExMTE5OTU5OTk1OT1FNTk9QTlBPTE9RT05PT05OT01NT09PTk9RTE5MS0xOT1BR
-T01QTk5PS0tNTU5NTU1PTVJPT05NTExNTU5NT0xPTExOTU9PT05LSUpOTUtLSktO
-Tk1NS0xQTUlJSEpLTE1NTExPUEtMTExMTk5OS0xNT09MTEtNUE9QTkhJSktKTExL
-S01MUVBPUExKSk1MT01KSktLS0tNTU1NT1FQUU5PTlBRUlVQTUtNTkxPTVFQUVFR
-U1VUVFNXVFZXWlpXV1lVV1dZWVpZWldVUk1OTk1MTU9PUFRPTU1OTU1OTUpJRkVH
-SkdISUlKR0ZIR0dGRkZISUhKSUlMTE5RU1RUV1hWV1lZWVVXWFdWVlhZWVtYXGBh
-XmFjYF1dWlhTUk9OTEhLSUpLS0tLTE9PTU5SUVJRUU1NT1JOT09OTlBNT09PT1BS
-T01PU1RVVFJTVVdVU1dYWVhXXFdXWFhZW1ZRVlRUU1NTUE5RVFNWVmSfvMvT297h
-5efo6errSExPTEdGSUNERkpFR0RDRUVKRkVGQ0RAPz0+Pj9HQkBAPTs+Pz0+PTo8
-PDo7Oz4+QTw9PEFHQkVFR0hISEhEQ0BBRERFRURISUlHQUFBRUdISElMS0lJRkVF
-Q0dIR0dHSUhGRkZHR0pKSUhGR0hKSE1LSUxMS05ISElKS0pKUFBQUE1OUVBRT01N
-UFBRUVJPTU5LSUtISUpLSUZERUJBQD4/QUFCREZFQ0A+PT0+QEBAPDw9PTw5Ojk9
-QD8/P0JCQUE/P0NBRD5DQD9AQkdBQEFCQT9GQ0JDRUZIR0ZGRktJS0dDSEhJSE5N
-VFFOUFFOT0pKSkxMTEpMSkdOTUxHRUdIR0pGRkVBOjY1NTU2NjY0Mzk4ODUzMjQz
-MzM1Nzc1NjY4NzU2MzM5OjU3NjU2NDM2NDU1Nzc3NTc4Nzg3NzQyMzU4NDE1Nzg1
-ODw6Ozo6PD0/QDk5QEFAQkZGPkJCQj9AQUJGRURGSEVEREVHREhJR0RERktKSElI
-SkxPTlRVVFRTU1NQTktLTUxPTlJTWlhVUFBSVVRVUFRTU1NTVFZWV1pbWVdWVVRQ
-UU9LSE1NUFFSTEhHRUhJTEpIQkJCQkNBOz09QENBPjk6Oz4/Pjs7OTY0NjY0MTQ0
-MjE1NTg6Njg1NjE1ODUzMzI0NDU1NDMyNDIxNTU5Nzc2O0hRS1FLRD48Pzo8Ozw8
-QD07PDk4ODo4OTk4OTU1NzkzNDQzMTU1NDUzMTM0MjI3MjMzNDc2NzY1MTUzMTEy
-MzY4OTk1ODU2ODk9OjlHUlNSTUtNT09QSklMSkpITExKSkpJSlBYXmRwbnV4e3x8
-f4GEhoaGhYSHh4WIiIeFhYWDgoN+gH6Bgnx4dnZ0b2xqY15WTEU9Pjo5NzhBPT45
-OTo6OTc3PD05OTY1NzU0NTU8PTo2PDg8Ojg3OkFAPj5DSEdHSU9HQk9eW1JRYXJ1
-al9qe4B4aGVzhIN3cXeFj5CDdXqGlJCDc2ltb3NxaWJdXFhYWFhVU1VUUk9OTE1M
-TE1PTE1MTU9OTk5RUVFRUVBOTEtMTEpKS0tLT09OUE9PUE9PUk9OTk1OTU1PT01N
-TUxOTVBMTk1PUFJRT09OTU1NTk1OT09QTU5NTUxQT01MTE9OTE1OTk9NTE5LTk9O
-TEtMS0lKSEtNS05NUk1MS0pLT01IS0xOTExMSU5KSUpISktLTFBOTExMSkdKTEpL
-Tk5PTU5MTU5LSk1PTk9NTUtMSElKS0xLTEhLUU9NS0hIR0lKTUxKSEpNTVBPT05L
-T1RTVFZSUFJTUU5NT05OTU9PUVRTUlFTU1JTVllaWVVWWFZVVlhWVFRUVVVSVFFR
-Tk5OUFBQTk9RUVZPTE1MSUdHSUlLTUhKSktJS01HSElLR0ZHR0lISkhKTVBRT1JV
-VlhXWFZVVFRTUlVYWldWWFhZWlldX2FgX2FfXltXVFVSTk9NTEpOTUtNSkxMTk9P
-Tk9PUVBRUE5OTlBRTU5OT09QUE5RT1NUU09RU1JTU1JTVVRYV1hXWFlXVldWWFVX
-WVZWU1dVVFRSVFZVUlRYW5a7y9Ta3uHl5+jo6upOSUdKSUlJRUVKSkdEQUNHR0NG
-Qj9DQ0JCRz46OT9APz9AQEA+P0E8PT09PTs+QT5CPj8/QkBAQ0VESEZFR0ZHRUJE
-Q0ZFRUdJSEhDPz5BRkdMSUlKSklFQ0VKSkdLSUxKSklIRkZGSkpLR0RFRUhJTE1P
-TE9NS0tJS0tLTE1MUFJRUFFZWFJST05RUVFQTk5NS0pIRURFSUlIRUFBQkFAQUNB
-QT9BSEdEQj07PD0+Pzw9PjxAPT0/PT09PD8/RENAQT9CQ0dFQ0BDQUBAQ0VDREVD
-QUFCQ0ZFQ0dJQkNISUlMSUpLS09QTlBUU01PUFFXUExLTU9NSEdLTE9OSkdFS01K
-SkVCQkA4NzY3NzY3NTY6Ojo3MzQ4NDQ2OTo3Njc2NTY2NzU0NTU3NzYzNDU0ODg4
-NTI1NDY3NTQzNDk3NjQ1Nzc5OTo6ODg4Oz9CQDxAPkA+QTw9QEJBQUA9QUJBQENB
-QUNDRkREQkRFQ0ZKTUhJRkREREJCRkhKSk9OTFJQUU9PTlFRTE5MTU1KTlNXWVZV
-UlNUU1NRVVhWU1NRU1RTVlZUWFdTUVFUVFJMSkdJTVJOS0RDRUhMTU1LSUhISERA
-PT5AQ0A8PDk7PT07Ozo5NzU2NDU0NjQ0NjU1NTY1NTU0NDU1NjU2NTc2NDY2NTg2
-MzI0NTY3NDc3Q01KUFRHPT4/P0BDQEA9PT46Njk1OTk8Pjo5NDU3OTU1MzA0NDYz
-MjU0My8yMDIzNTUzNjQyOjcyNjU1NjQ4NDM0OTg2ODQ4Oz06PUVOUEtKS01XWFJN
-UFJSTExKTUpNTkxRVmBkbWxvenp7f4CFhYWHiIiLi4yOjo6NiIR/hISDhIB/gH5/
-fnl4dHNybGdkZFxUTkVAPT46Nzk7Ozk5OTk5ODk3Nzg2NjY2NzQ3NTc5PDg4Ozo6
-ODg5PUJJQUBDQkNJSkZBSlZUS0paaWteV19yeG1iX3F8fHVsdYqTjoJ7f4qYloZ1
-cXNzcWxkXVpWV1hWV1ZTVFRTUVBNTExKS0lLTUxQTExLSkpOVFFPTE1MTU5OTkxI
-TFBNTFBPT09PT09PTU5NTU1OTVBQT01MTktKTU5OTVFRUU1OT01NTk9PTk5MTVBR
-UE5LTE1MUE9PS0pMTU1OTk1OTE1NT01OS01JSkpMTEtLT05KSUlKSU9LSk1LSUlL
-Tk9OS0hLSEhOTUpNS0tOSkhOUkxJTUxLTExNTEpMT0tLTEtKS0pNS0hLTExMTU1K
-S0xNS0tLSkpMTEtLT05NS0xOT09PTk5SUlJRVVFPT09SUE9PTVBPT05PUFJSUVRY
-V1ZWVFZYWFZVV1RVV1dTUlJSUVBRUVRSTk9SVFRTTk5QVVNQTklHSEhIREhHQ0RJ
-SklHR0lHSUlJSkpISkZISUlNUFFSUlNUV1VXV1ZTVVZVVVhXVllaWlxZW11dX2Je
-W15aV1RUUU9RUU1OTEtLTk1MSk5LTU9OTk1OUE1MTU1LTE1NTlBRUFBTT09OUFFQ
-UlBSUVBTVVZXV1ZXVlhWV1lXV1ZVVlhXWVtXVVVUU1NTVVVYWlddkrvJ09rg4+Tm
-5+nq6k9MTEtIRkdITExGRURDQkE+QD9CRUc/QD4/REJAOzxAOj5AQTtAPT47PD89
-PDw/P0FAPkFCQkBBP0JGRURERURBQ0RDREZJS0pHRUREQ0FER0hLSUhJSUtHRUVJ
-SEpLT0xIRUVHSUdIR0VHR0hLSklKS05NS0pKTExLTElKTE1OUVBRU1VYVFBPT01Q
-UVFOTlJTT09JR0dJSEVDQkI/P0BBPj49PkFDQz9BQz88QD8/P0E/PTo8Oz07Ozw9
-OkFCPj4/PD49REdIRUNBQEFAQ0RFRkZEQUFFRUJDRENDRkdHSExPT05NUk5PU1JQ
-VFRRTk9NS0lMTU1MR0hJTElGSUpISUdDQkJCPTQ0Mzc2NjY3NzY3Nzc0NjI1Nzg4
-NjU0NDc3NjU0MzQ1NDM3NjY4NTc4NzY0NTQ0NTI0Njc1NjY5NzQ2OTs6Ojk5NjU4
-OTs8OTg5OTo7Ojo/RUA/Ozo7Pj8+PD5AQkdFRENBRUdJSklJRURFRURHRUFDRkpL
-UlBPTU1MTU5PS0tLTEhITE1OT1JZWlJSUFZZV1JWVFNST01PU1NTVVNSUk1NTlJU
-VFJPTkxLS01LSERIR0pNS0ZGTExIQkE/RUVEQD8+Ozk9Oz07OTs4NjMzNDEyNDY2
-MzU0NTI0MjE1NTg1NDY2NDIzNDU2NTY5NjUzNDY2Njk5RUpST0dAOzg7PTs6Pzo6
-PUBCOzk8Pzk4OTY3ODo5NjY2NzQzNDQzMTQxMC8yMzMyNDg2NDI2ODk1MzY1NTM1
-NTIzNzo5PDg3OTo7Qk1QTEtMT09PTU5SVlVLTUxQUlRVVFhiZmxvcXJ3eX+DhYWG
-hoeHjI6MjI6OkI+MhYOEhYmFg39/fn99enp2dHNuamdmW1pRSUJAOzk7Ozo8Ozg7
-Njg2Ojg4OTk4OTY5ODY3Njc1ODg6OTo7PDw6PkE/PT9DREhOSUNKWFZJSVdoZ1ZP
-V2dtYFNXaXd1aGh1goyGend7j5WQg3Z0d3t1bGFdXFhYWVVUVFRSU1RSTlFPTUtN
-T05QU05QTk1LTVBNT05OTVBPTkxMT1JRTk9OTE9NTEtOUFBNTU1NTE5NTExNTVBQ
-TU1LTU5OTkxMT09OTlBTUU1LTktLTE9LS0xKTE1QT01OT05PTE5QTUtMTUxMTkxL
-S01OTUxKS0xNTktLT0tKSUlJSkhKTE5OTkxLSEtMTE1NTExLTUtNTUlOTk1PT0xM
-TEtOTE5OUk5LTU1MS05STk5NS05NTk5NTkpNTkpMTUtLSkpMUVBNTU9NT1BOUFBR
-TlBQTU5OTExNUFJOT1BPTFFSUFBPVVVXV1dTU1dWV1NTVVNSVVRSUFFQTUxPUU5P
-TlFSVFFRUFFPT1BMS0pKRkNDQ0dISEdJR0dGRklISUpKSkpJSUxNTU5QU1NSVVVV
-WVdVVVVXWVpXWF1eW1lcWl9eXV5dXF1cWldVUlBOTkxLTExNS01OTU5NTkxMTE1K
-TE1PTk9PTU5PUE9QTk9PTE1SUlJTUlVSUVNUUlJVVlVaV1RVV1ZVWFhZWVhYWV1Y
-V1dWVVRSUVVWVVVYWl2Lu8nU2t7g5Obo6OnpUU5MTEdISExLTEhERkNDQ0ZFSERB
-QT89Ozk7P0Q+Ojo+PDxAPjw7QUE+P0E+Pz47Ojw/QkRDQUNCQUNGQUREREVHR0NG
-R0xNSkdHRklHSkhHSkxMSkhISUdHRkZHSU1MTkxKSENHRkVIR0hKSktMSEhKSUZI
-SUtKS0lJSkpJSk1OT1FUVVVRUVFTU1JQUFJVU1dSUE9LSUVHR0FAQj8/QEBCPjw/
-PUBAP0A8PD5BQTs7PUE9PTw5Ozs7Ojg8PkJBQUBCQEJBQkRGRkJDRUNFRUpKR0ND
-REVIRURDRkRGRkdITlBTU1BOT05ST1NTVlFRU1BLSklLSUlHR0dKSkhLSUhDQ0JB
-QUVAPDk2ODY2NTY3NzYzMTU0NDg2NTk5NjUzMTM1NzQ1NzY4ODc1NzczNjc5Njk3
-Njc3NTU2Njc2NTM2NjQ2Njg6OTo4Ozg4ODc3Nzc1ODk6P0BBPTw5Oj07QUM+QD9B
-RERFQkJFR0tIQkVGR0VFRURDQz4/RktQTU1NSkpLTU5OUEtLSklNTk9RUlJUUVZX
-WVlaU1FWVFFPUE5SUlZWVFFOSUtNU1JUVlNTUE5PSktKS0lKR0RFSEtNTEVCQT9C
-QkREPzs6Pjw6Ozk6OTc1NzMzMjM0MjI0NDQ1NTU2NTI2ODk2NTIxMjM2NTY2ODgy
-NTI2Ojc0Nzw8R0xNS0I7Ojg5Ozw9PDs5Ozs8PUA+OTk2NDc3NTQyMzM2MzI1MzU0
-NjU2NTUxMzM0NDM3NTM5NjU3NDY2MzAzMzU3ODk4Oj87OTtETVJOT09KSEpNSVFS
-UExMUlVXWFlbYWVncHN5e3x9gYWHi46Li4+PkJCOkJGRj46Ni4mIioqDgX+AhIF9
-eXZzcG5tbGVeWVRNR0E7PT09Ozw6OjY3Oz06Ojo4NjI2NjU6NzU2Njc5Ojs8QTs9
-Pz08PD8/QEhIS1FNRU9bW0tJV2VkVk1XZGRYUFVrdW1hXGZ6f3hsaXeFjIp9cm10
-d3VtYF9aV1ZWU1NVVFNRT1BPT09OT05ST01PTlBPT0xLT05PTk5NT1BPTU5OTE5N
-Tk1OTk9LSkxNS01LSk1PTk9PTktLTk5PTkxNTE5RUE5SUlJOT01NT09NUE5MT05M
-S0xKTExOTk9OSUxMTUxMTEtMT05OTkxMTUxOS01MTkxOTk5MTUtJSUxNTUtMS05Q
-Tk5QTktNTU1LSU1MSk5OTUpNTUtLTUtOUFBPTkxNTk1JSkxOTUtKTUxKSUlLTUpI
-SUtLSkpKSkpMTVFQTlBQUU5NTFBPTU5QT1BQVFNRTk5PUFJRUlBPTk5QUlNQU1VW
-WVpaV1NTUlNTU1RSUVFOT05PT05QUVFSU1JOTk5QUE9QTUtLSUpNSUVEREdJSEdH
-R0tJRkdGRUhHR0hLTE5QUU9QUVVXV1hZWFdWVFNUV1hWWFhaXF5iYGJiYl5dXF1a
-VVJOTEpPUFJNUVBNTE1NTE1MTk1NTE9PT09PTkxMTlFRT1JOUFFPT1BTUlBSWVNU
-UVNVVVRUWFpbV1RbV1daWltYV1lbWlhUVlNSUlFTUVJVU1RWWYG7ytTa3+Ll5ujo
-6utPTE1LTU1JRUVERkZEQ0RIR0NCRD9BRUNAPjw8QEE7PEE+QT0/QT1APDxAQDw8
-OUFDQDtCQUA+QkJCRkRFQkNHRUVHRUhJSktKSEhKTEtKSEhISUtJSUlJSUdGR0pI
-UFFPTkpKSElIRkZJSk1MUUxJSEdGSUdJS0xKSklKS0tLTU5OUlRVV1VTVFNVVFJS
-WFhWV1ZPTU1JSEdDQkE+PkE+QD9APT8/QD1APkA+PD4/Pjw8P0I7ODk6Ojg7Oz1B
-QUNDREFAQEBBRUdISktJSklHR0hHQ0ZHSUlKSktLSkVGSk1QVU5RUVBMTE9QU1NO
-TUxNUEdHR0tLSUlITE1ISElIRUZHRkJCRT86OTc6Nzg2MjI1MjY2NDQ0Nzc2Njc0
-NDEwMzI0NDQ2NTg4Nzg3NTY2NzQ1NTU5Nzc4NzU3NDY5OTY3OTk4OTs8PDo2OT5D
-PDk4Njc4Ojw8Ozs8Pj48Ozs9QEJDPz9AP0RCQEJFSEdGSEVDRklIRURDQUNDSkxK
-TkxJTEtLVFFQTlFRT05NUVBUU09NUVFTU1FQUFFSUE5QVFNQU1NSTk9KRUhOUlNU
-VFBPUU9OTktISUxLR0hGREdJRkBDRUE/PkE9Qjo4Oj87Ojo7OjUyMS8yMzI0MzU5
-Njc1MzQ4NTQ2NjY0NzQ1NTQ1NjY2MjU0Njg2NTY4Oz9ES01NR0E/Oz9APTg/QTg7
-Ozk7PTw5OTk4NTM1MzMyMzI1NjM0NTUzMzg6MjQ0MzEyNTg3NzQ5NzY1Mi8xMjAx
-MjUzMzY5OTo7PUdNUEpMS05QTUtJT1FQUlNZXF1hYGNmZ21xeX2AgIGBhoqLjY+Q
-j5COkJCSlZSSj5GPi4eHiomJhYN/f358eHVzcHBtZWFZVVJJRUM9PTs4OTc3Njk7
-OTU3OTg2Njo2NjY3NjQ3NzY0NThAOTw6Pjw6PEA/QUdPVlRPWGhjVVBdZmVYUFlk
-ZlxRWmx1bmJeanp9b2JhboKCfHJqa3BybmhgXVlZVVRUUlRTVFFSUlFOTE5PTlFM
-Tk9RTlBMS05QTkxPT1NPT09NUE5QT09QUFFOTk1NTk5PTFBRT09NT1FQUU5LTk9P
-UE1QTk5PVFNOUFFMTE9RTk1QUE5MTVFOTU9NSklNTExLTE1KSk5OS0tLTktMTk1M
-TEtKSUxLSktOTU1LSElMTExKSklLTU1PTUxLT01MS0pISUtLTk1LSkhJSktOS0xL
-TExMS0tNTk5NTkxMTEtKTU5NSkxMS0pLTExJS0lLTExMTk5MT0xNT05RS0tNTU9P
-TlFUUFJSTkxPUFBQUlBQUlBUVldXVVdcWlZTVVJSU1BSUFBPTVBOTlBPUE9SUE9O
-TEtNT09RT05LS0hFR0tKSkhFRENERUVGR0hIR0VGR0lFSElLTk1QVFVUVldVVVZW
-VldWVVZYV1tYWl1fYGBfYF9fX15XV1tXUU1NT01PTk1OTUtMTUtMTE1MTE1OT1JP
-UFJPTUxMT09NTVBSUVRSUVFRUVRSUlFSUlNUVVZaWVlXW1xcWllaWFhVVFZUWFdW
-WFVUUFRRUFNXUlRdf7vL09re4uXm6Onq6kpLS0xJSEdERkRFREZGRkVEQUI/Q0E4
-Ojg5Oz87QD89PD0/PT09QT4+QEA/PT06PEE9QD5AQ0JBRkZEREZERUhKRkZJSEhL
-S0tHSUdKSUZIS05OT05MTEdISUdHSEtOUVFOTU5KSUhHSEpPT05MT0tGRUVGR0ZK
-TEtLTU5NT1BQUVRXWVpZVlVSUlZUVFRWWFlYUlFOS0tGR0VFR0RCQUFCQDw8Pzw9
-QT06PEA8PEA+QUJAQUNDOzk7OT4/PT1DQURFRkVBP0JCRUlKR0hLS01LSUlISUxJ
-R0dMTE5LREhJTVNUT01NTU5KT1FQUkxKTUpKSkdJSktLUVBQUVBOSEdFR0pJR0dE
-QTg2ODg2NjQ1NzQ8Nzc2NjUyNTQvNDQyMjQ1MzM0NDMxMzg3NDY2ODY2NjI0OTU2
-NDQ2NjY5ODY6Ojo4ODo7PDw+OTs6PDo6Ozc2OTw6Pjs7Oz07PEE8PDw6QT07Pz08
-QEJFRUZFRUZHRkVISElHRUJDTUNES1BLR0hNTU9OTU9OT09PUUpOT1JTT1BOT1NT
-U1BRVVRVUE1PUE1OT1FRUEpISUtQUk9NUVRWUVBQUFBSTkpJSUpIR0dGQ0JEPz5B
-P0FAPjw8PDw6Nzc0NDUyMzQ3OTg2NDM0NDQ2NDU0NjU1NjQwNTExMTQ3Nzg4NzMz
-NjQ3NjY2NT1ETE5GQj08QDw8Ozg5OTo4Ozw7Ozo7OjYzNTc1MzI3OTQxMjUyNDIy
-NTY2MzMzMzMzNTU0NTE1NjQzNDMxMTExMzY4Nzk7Ojo9SE5PT0xNUVFMSk1RUVZb
-XV9jX2RnaGxwc3V5gIKAhISGiYiMkJOSk5SSkJOTk5CRk5KPjIyJiouKhYF/fnx5
-eHZzcGtrZF1bVk9JR0E+Qz05NjU8Pjw+Ojo7PDw7Nzc5ODk7Ojo7OTg3Njg7PDg7
-QUVAPkBBRktWVFFebWlYVWVxbmFcY2psZV5kb3VwZWZweXduYWBren51amVkZ2dl
-YlxbWFhXVVVTU1NTU1BOTlJUUlBQUVNPTkxKTlFQS0xNTUtLTk5OTU5RUU9NUE5N
-Tk1KTE1LTU5MUlZQUE9RUVBPTE1NTk9PUU9NTk5NT05MT01RT09PTExOT05OTU5O
-S01JSEtLTk1NTk1NTUxNTU5MT09PSkxNTktJS0lKS0tMSk5LSktLTExMTUtKTk9O
-S0xLTU9NS0lKS0xLTExMSUlHR0lNTU1MTE1LS0xLTExLTU1RTEtLTkpHSEdLS0pM
-S09PTkxLTkpOTE5OTlBPUFFQTkpMTUxNT09QTk1MT1BQTk5RUFFUVVVVVVZXVVNQ
-UlJQUVBRTU1NUE9NT01LTk1JTlFRUVFQT1BPUE9LTU9KSkpFRklNSkhERENFRkZG
-R0dHRkdJSElLS0pNUVNVVFZWVlRTUlNTVVZXV1dWWlpcXVxeXF5eXV9eXVxXU1BO
-TUtOUU5OTUxOT05NT09NTExPUFBNUFJQUFJRUU9OS01QTk5OTVBQUFFTU1BRUVBR
-U1ZWV1RXWlhaXVtaWVlYWVhVVlZVV1NUVFZTVFVRUlZWVluDvMnR2N3g4+Xm6Onq
-S0hJSklGRktHTkhJRkVEQ0I/QURHQUM7NTY8QT0+Q0BCQUI+PTtJRz88Ozw8PT07
-PTw+PT1CQ0VGS0tFRUdGRklKS0hJRkdKRkdIS0xMTExNT09QS0pLRkVISEhLS05O
-TkxKTUtKS0lLTUpNTk5LSkZGRUZKSUlMSktNT1JUT1FRVFZZWFhWU05PU1NTWFlX
-V1RTUk5LSkpKSElJSUJBQ0REQkFAQz48Ozw+QUI+Pz89PkBCQkRCPjw8Ojw8QD5B
-RUJDREI8PkRHSUpJS0tJSktLS0ZJS0lISUtNUU5GRkhNUVJSS01OTUxNTU9OUU5P
-TUxQTUpMTU9OUE9SVFNSTk9PS0ZHRENDOzg3NjU2NTMyMzQ2MzQzMDI0NTk0NTU1
-NDY1NjY0MjI1Nzc2NDUzMTU1NDQ1MzM1NTY4O0A6OT03ODU2Ojs3Njc4ODk8Ojs7
-Ojo6Ozk9PTw7QDo7PkJCPjw7PUA/QD1BQkJEQkdFRkVFRERIS0dGQkBBQUJES05L
-R0lIT01QTU1PUFBOT09UVlRRT09SUVVUTlBSVVROTFFTUFBPUFJRTEtLTlBQUFNR
-UlJTUlNST1RRTktMSEhHRERERkdBQT1AQUJAPj09PTw4NzUyNjg2NTU2Njg1Njc1
-NTg6NzU0NDU3ODU1MzMzMzM1MzM0NTQzMjM0Nzo6Oj9ESkhFQT1APTo6PTw3Ozo7
-OD05Pjo3MzEzNTU3ODE0MjQ3NjMwMjQ1NjMzMC4zNjc3OTY0NDI1ODM1MTAvMDEz
-NTk2OTk6Oz1ETlNRTk1RVFBQU1VYXF9hY2VlZ2tvcnV2eHp+f4SHiomMjIyPk5CS
-lJSTkZCTlZSVkY+MjYuNjo2JhoWBfXx+e3VycnFpZV9aVk9LREI9QD5APT4+QD85
-Ozc7OTk7Nzg4Ozo8OTk7OTo8PDs6Ojw8QEFCQ0VHTFJUU11ral5gbHZzaWRuenhw
-a293e3Rsbnd+em5nZm96enJnYWBlZmBfX1pYWFdTUlNQT09QUFBQT1FPUFBRUExO
-TUxMTU5ST1JRT0tOTExQTk5OTk5MUE1NTU9NUE1MTlFQU05NTVBPTlBQUU1NSk1N
-T09OTE1MUExNTU5NT05MTE1OT01NTE9MS0pKTE9QTlBMS09OTVNOTVBNUE5LS0tL
-TEtJTEtNSUtLTUxLSkpLTExLS0xOTU5LTE1NS0tJSUpMTU1LSkpOT1BJSElMTk1L
-TkxJSEpLS0tKTU9QT01MS0lMTEpMSklMTUtJSE1OTU5OTU5QTlBPTk9QTk1OUE1N
-Tk9RUVJRT05PT1BQUlJTVFVUVlxWVFJTU1JQTk5NS01KTU1MT09KS09PTU1LTU9O
-S0xOS01MSUlGSEhHR0dGRkZIRUZGQkNFSUlKS0tLTU5OTVBSV1ZYV1ZVVVRXVVdU
-VFhXV1dWW15fX15dXV5hX1xcV1VPTUxMTUxMTExNT09NS01NTU5QUE5OTkxMTk5O
-TU9QTk9NTU9NT09OT1FMT1BRUFBOT1FWVFVTVVRXWFpaWFZVV1lXWFZWV1VSVFRV
-U1BQU1NTUlJWWH66yNHZ3OHj5ufo6upPTU1LSUdCQkZFREZIR0c/QEE+P0ZDPj88
-Nzo8Q0E7PEJDQkA+QEFAPj88O0BAOzo6QD49PUJDSElIR0lKSUdFSEpISElISElK
-SklJTU9OTk9OT1NOR0hLSUlJSU1MUFJQTktISUxLTk1KSkxKS0pGRkVGSktMSUtM
-TExNTVBSTlBSVVVVVlZWUk5RU1JQU1ZUVlJST0xJR0dISEZLSkRGRkdGRkFDQ0NE
-TEhAQEFBQT9BQ0RFQ0NCQ0VBPUFCQ0NCQkNEQ0FCRUZJSEpKTktMTk9OS0lRUU5N
-Tk5RT0lJSUxQVVNLSUtMTktLS0lMTE5MSEpLSE5OTE1OTk1OUVRUTUhHRUNDQkA3
-NTY2NjU1NjU2MTM1MjM2NjM1NDc2NTY4ODUzMTIwNjU0NDQ3NDk0MjQzNjUzNDM1
-NTc3NzU4Nzg3NTQ2NT4/OTc5ODs8PTo7PTs9Pz0/P0A+PT1BQUNBPkBAQ0JCP0FB
-QkNFRURDREA/Q0hLS0pEQkJFR0VFSk9RTVBPT09QTE5PUE5RVFdXVFJPTE9QUFFR
-Tk9TUlBPT1RPT05OUFFQTEtOTlFTU09OUVVSUVJSVVZXVE5MTElIRklJQkNBQEFD
-Q0BAPD09Ozo3Njg1NDs2NTU0NDYyNDI0NjY4NTM0NTc3OjY1MzYyNDU0MzE1NTM0
-ODc6Ojg9QkdLS0lDQD4/Rjs5Ozk6Ozw6Ozw6OTg3NjQ2OTY1LzI2NjY1MzY1NTQ0
-MjAzMzE0NDc3ODYzMjMzNDAxLjAzNTY1Njk7Oz9BQElQVFFQVVNZWFhaXWBhZGVn
-amxsbnN0ent8gYKDg4SIiYqKi4uQkpWTlJSSkpOUl5eVlI+Qk5GQj42GhoODgX16
-d3Z1b25pZl9aU1BKRUFCPj4/Ozc5OTs4Njs6ODs3NTU2PDs4ODs6Ozs6OT8/Pj08
-QUFGS0xPUFRSXWpqX2Jscmxsa3aBgH53eX6Bfnp8gYWAeHNwd3p7cmtmZGZkX1xc
-WllWVFNRT1JPTk9NTU9PUlJQUU9RUk9RT0pLTlBRTk5PTk9QT09OTk5NTk9OS0tL
-TE9OTk9PTExOT1BPTE1PT09OT09NTU5OTkxNT05MTk5LS0tOT0xMTElMTU1MTE1P
-TU1MTU9OTVBVWU9KTElLTU1MS0tJTExMTE1MS0tMTU1KSU5MTU1MSktLS0xNTk9O
-TUpLTU1NTUxLS0xLSkxOSktNTEtLSUtLTE5ISUtMS0lNSUtPTkxNUU1NR0lKSktO
-TVFRS05OT01NUFJUUlFOTU1LTk5NTU9PTExOTlNRUVBQUU1QUlNUVlZWVVdVVVVW
-VVFPUE9NT0xIT05QUU9QT05NTExMTExLS05MTUpISkhFR0hFR0NGSUdFRURGRklK
-SkpJS0xMTk9RUVJWV1dXWFhZV1dWVlVUVFNUVldaWV1eXV1cX1xaV1dVU1JMUFBN
-TEtMTU5OTU5KSkpMTk1OTk5OTU1PUE9LTU5NTUxQT1FPTE9OT1BQUU9SU1VSUlFU
-VFdVU1RYV1dXV1hXVVlaW1lZWlhXVVVYVlFSVVRVVFdeh7rH0Nnc4OPm5+fp6U9N
-TkpIS05KQ0JDR0ZHR0VDP0I9PkA9QEA+OkM9Ojw7QD48Oz8+ODo5Nzg6PDw7Ojo9
-PEFDPj9DRkZGREpKRUtISEhISklKTktKSkhMTVBRUVNOT09PS0pJTEpMTVNTVFJS
-S0lISUpJTktJSUxKSUlHSEdJRkpOTFBNTEhLT05QUk5RU1FRU09OT1JSVFVRUVNW
-U1NVUE9OTEpIR0hMTUtNSUZFRkNESEhNSkZGREFCQkJDSUZHR0VGRUJAQ0hISEZG
-SkdCREVLTElHSkxMTEtJSk1MUE1OUU5LTU9OSUpKS01TUEtHR0lMSEhGRUZMTEpL
-SklLSktLS05QTlBSUlJHRUZEQUNBPTQzNzc1MzU1NjQzMzYzNjY1NzQyMzQ3NTQz
-NDExNTYyMzEzNDQ1NDc3ODc4NDU2NTM1NDQ1ODg3NTc1NjU5OzpCOTk6Pj0+PT1B
-Ozs8Ozw+RT49P0BCREM/Q0NAQUE9PUNDREhMSEZBQUFERktOR0JCQUBFS0dISEpN
-UFFTVFBRTUtLTlRZWldTUlBPUFFPU1FOTk9ST05OUFBQU1RQT09SUFBRU1dYVFJU
-VlZST09VWldRT05NTEtNTUtFRkhFQkVFQUJAPDc2NDY3ODYzNDU1NTU0MzU1NjQ1
-MzQ3NTc5OT5ANzYzNTU0NjQ2NDQ2NDQ1NTU1Nj1ARE1QS0VBPjxAOzo4Ojk5ODtC
-Ozg6Ozs4ODM2NDQ4NDk2MzU0ODc1MzQ1NjU3NTU0OTc1MjE2NDY5ODM4NTY0NTg5
-OD5BQkJESVNVVVRXWVxfXVxgZGlpa25wcnJzdXh6fn1+gICCg4aHh4yJi4+PkJGR
-kpOQkJKSk5eblpaXlpSVkI2KioaFgH59eHl1cm9qZGBbU09JRkE+PUA6Ojs4Ozk6
-Nzs3NDY3OjY9Qz07ODk5OTc7QUQ/Q0FDRUpQU1ZXWFhiZ2dhYmtzc2xud4GEgH1+
-gYKBfYCGi4h/d3l9fnx1bWZpaWZgX19bWlVSUlJQT1JQT09PUVBOUFJSUVJPTk9Q
-Vk5RT1BOTlFQT0xNT1BVTU1NTE1OT1BSTlBSUE9OT09OS0tMTUxPT01ST09RTk5M
-TlFOT1FQTUxMTE5OTUxOT05QTE9OTU1NTUxLTExLT1VSTkpJTExLTk1OTUxMTUxM
-TEpKSUpMTE5TU0xNS0xLSklLSkpKTVFQT0tOT09PS0pLSUhKSUxQTkxKS0xMTExL
-TUxJS0tNTUlKSUhJSktRSklHSUlMS01NTUxOTVBOT1BOTVBSUU1OUFBPTk9OTU9P
-UFNUVlJRU09QUU9SUlNXV1RWVFRUU1BQUU9OUFFLTU1OT0tOT05NTVBOTk1MTU5M
-SUpJSUlHR0ZGRkVDQkREQ0ZJRUVKSUpLSEdITU1MU1NSVFZZVFZXVVVVV1dXUlVY
-WFZYWlpbWVxcW19dW1pXV1NQT01MTE5NTEhKS1BOSktMTU5NSkpNS01NT05MTFBM
-T09OTlBRUU5MTVBNTU5NTlJWV1dUUlFTU1NVVFRTVVdZVldXWFhWV1dVWFRSVlZX
-VVNQU1JSVl+KusfQ2Nzg4+Xn5+jpTUpKSkhLSkdHR0hEQ0RGR0RDQkRBQkJEPjs7
-QEFAPzw8PTk9OTk7ODc4OTo5Ozs7Ojw/QT0+Q0ZIRUZGRkZHTElJSkZMSkpKR0hH
-RkhPUE9OTUtJTE1QTk5OTUpNUFBSUk5IRkdKS01LTEtMS0pISEhJR0RGSUtMTk9M
-R0xPUk5QUlBSUlZUUlJRUldXVFZVVVVUU1NSUk9NS0hHSEtNT01LSEZJR0hJS0lK
-SktGSUZJR0NISUpMS0lDRERFSExLSkZHSEhGRkxNUE1MT1FRS0hKSklKTFBST0tJ
-S0xKS0tJTlBOTEhISklIR0dIR0xNTUxLS0pKS01MT09PS0xIRkRFRUdEQkQ9Ojk0
-NDY1MzU1NTUzNjQ0OTc4ODMzMjMzNTIzNjU1NDM0NDMzNTUyMjU3PDg1Ozo1NjQ2
-NzY1NTM7OTY2ODg4ODg7OzlAQEE+PT08QUFAPkJCQD8+QENDQkFAREE8PkBBQ0VE
-SEtKR0ZHSUZDR0hDQUJAP0RJS0tHSElMT09SU1VRUVJRWFpbVVNTV1FQUFBRUk5N
-UVNSTEtOTlNQU1VUUVRXVVBTWF5XVlRTVVBPTU5NUk9LTU1LTE5NS0ZJSkhFQkJB
-QUA7OTU1NTk4NTU2NjQ1MTUzMzc4MzQ1NDc6OTg6QDw6NTM0NDU3NDMzMzI0MjM0
-MzE1ODxFS01JQTw/Pz48Ojg4NjU6Ozo5NzY3NTk4OjQ0NjQzNDY1MjU1MjM2NTc5
-NTY1MzY1Nzc2NTU2ODUyOjs3NTU1Nj08PT5AQkVJUVNWV1dbXmFkamhubnBxdnZz
-dHd6d3d8fn+BgIOFhomJiouJhoaLi42OlJeYlJKTlpmXl5iUlJOSjYqLiImFhYSD
-fHd2cnFqZ2FeWFVNSEhGQUA6QD4+Ozw4OTg7PTo6ODg3ODo6Ozs+Oz1FSktQUU5R
-UVRZXmJhYWdvbGZnbnV5cXZ5fYSEgIKHioWAgomLioJ9gYODfnVvbWpnZWJfX1pa
-VVJUU09NTk1OTU5RUlBPUlNSUU1PUFBPUU9TUE9NT09RUk9OS01KTUtOTk9RUE5N
-TlFQUFJRT09PTlFRT1FQUE9PU1FOTU5NTU1NT09OT09NS01NTE5QTE5PTU9RS0pN
-T01MTE5TVE9LSUxIS05NTU5PTEtNS0pJTUpJSUpMT09PTk1MTEtPTUtKSEtMS0tI
-S0xST0xKS0pLTUtHRkdJTExLSk1LSktOSklJTE5PTkxNTUtLTU1JS01JRkhMTE5N
-S01MTU1NUFFOT1FRT05QUUtNTlBNUFFQTFNSUlJSUU9SU1RXVlNTUVBUU1FRUU5M
-S05NTlFOSUhJTk9NTk1OTU1PUE1MTExMTElJRUhKSERCREZJRkhHSEdGSEtKSklK
-S0tLTVFVVVdXV1hXWVZVVFVXVVhXWFpbWFlbWlpcW15hXltZXFhPUEpNTkxLSElJ
-TE5NTU5PT0tMTE1RTExOTE5OTE1OUE5NUVBPTU9OTk5PUU5OUFBSUlRVVlZUU1NU
-UlJVVFZXWFlZVlhYWVxXWVtYV1JRUFFSUFBRU1RXX4i6x9HY3ODh5efn6elJRkVF
-RUtNS0pJRkhKR0NCQUJERD8+QERCOz1EQT89OD48Pjw9PD06OTk4Ozo4PDxAPD06
-O0FBQUVFR0dGS0hISExRSElKTUpHRUdGTUtPUE5MS0tKTVFSUU9MSktNUVBOTkxJ
-RUZIS05LTU1KSEhHR0dHR0lLS01OT0xMSExPTlFTVVJSUlVSUE5QUlNXUlFTV1dX
-VFRUT09ISU1OTlFTT0xMS0tISEpKS0tJSUtIREZIRUVJSk9OSklKTEtKTE1NSUlK
-SUhITE1PS0tOUlRQT0tLSklQT05LSElITk1LS0lMTExNS0dHS0pIRUVHSElKSkpN
-U1BNTFBTTklFRURDRkRGRURDRUA8NzU0NDY0NDM0NjU0NDQ3NTI0NDA0NTU2Nzc2
-NDc1NjczNDQ3NzY1Nzg7NzM4NTk1NDMzODs2NDc3ODk3Nzo8Pjw9PTw9PUA8Pj49
-QD49PTw+Oz1DRUZCPjxAQUI/P0JFRklJSEQ+P0VKSEtFSEZFRkVJSU1OTExPT0pN
-S09TUU9PUlFVV1dSVldZVVJSUlJQTk5RUlNST1FSUVNSU1RQU1dZVlRVW1lWUVJT
-UE1JSklNTE1TTUlNUEtHREZKSEZCREVEQTs3NTk5ODY5Ojo4NDIzNDc4Nzg1MzM0
-Njk3NzU6Ozc0MjUzNDU4NjQ1MzY2NTw2NDQ3OT9ISktJPjtAOTg5OTs6ODg7PDs5
-NzMzNTMzMzM0NDYzMjQ2NDU0Mzc3NDM2NDY1NDQ0ODg4Nzc0NDc2NjU4OTg3Ojw7
-QENHSlBUVFdXWl1jY2ptbXR4dXd3en98ent8en6Dg4GFhIOGiIiIi4qIio+Uj46S
-kJOVlpWYmJeZlpaUlJCNj4yMiYqGhYh/eHp2dHJtaW1nY1tYVE9KR0RDQUBAQEM/
-QTw9Pj4/Ozo6OzxAQkJCSE5RU1lZWl5eYGZna2tqbnV2cnJ0eXx+eX+Gh4WDhoWI
-hIKEiIiIgXx/hYWAeXJva2hnZmBgXllZU1BOUFBSU09PTk5MUE9OUVNRUFJQUFJO
-T05PT05QUE5QT01OTk1OVFNSTk1RUlFRUE5RUE9PTlBPU1JTU1NRU1JQUk9OTUxM
-S01RUlBOS01NTk9OTU1PTE9OTU1RUEtNTUlJTUxLS0tJS0xJS0lKTExNTUxMTk1L
-TE1PTE1NTU1OTkxLUE9PUU5KS01MS1BOTEtNTE5NSkxISkxKSUlJTU1NTEtLS0tL
-SUhMTUxLSUhLRkdISkxNTEpJSUtMS05QTk5MTU5TUVBQT01QTkxMTk1OTk9TVVFR
-U1JPT1BSVVRUV1ZTVFFQUVBMTU1NTUxLTUtLTEdJR0pLTk1QT01SUE5NT01NTkdJ
-SEdHRUdFQUVGRUZFR0hLSEhNSkpJSkxMTE9QUVRVVVVXWFhWV1VVVFNTVFVWWFdY
-WltdXF1eYF9dXVtYUlBOUk5MSU1LSUlLSkxOTE5PTUxLTEtMS01PT05PT09OTUtO
-Tk1MTExNT1FOT0xQVFRTVFZVVVVVVlZYV1VWVlhWVllYVldYWFlXWlZVUlFUVlVW
-U1FVVlZXg7jI0Nnd4eTl5+fp6VFLTE1KR0hKSURIRUZHREJBQz5BQUFCQ0BDP0FB
-Ozw5OkA9Ozw8Pz06ODc5Ojo6Pjw+PT9BPkBBQUFERkZJSUpHSkpGSElKSEpISkpN
-TFJVVVFLTVBOUVRTUU1LTUxNS01LS0xJTk5LSkVHSElHR0hHREZJR0hMT09LS0tK
-SEpKTU1QU1FPT09OTk9RUlFUU1NWV1VTVVJQT01OTU5OUFZTUk9MTEpLR0lMTEtK
-SkxKSUlNSklNSkhJSU1MTVBPUFBRTUpMTExKTUtKSktNUFFQTktOTE9PTE9KSUpL
-S0tMSUZGTExMSktNSUlGSEdJS0pMSk9SVFJRT1JQS0VERUVHRElHR0FCPjY0NTcz
-MzQ0Mzk0MzI2NDU0MzQ2NzU0MzQ2MzY2NDI2NzQ0MzU3NjY3OTY3ODc2NzY0NjU4
-OTYzMzo7Ojc6Ozo6ODg6Ozs9QD46OTo9PT0+Ozs9QEFFRkI+Pzo+P0FFR0ZGR0lH
-RERBRkxLSklHR0RGREZJRktKSU9RUU5LTU1MTUxMT1ZUWFNWVVZUVFFQUFFTUVBU
-UVFSV1RTUFRXVlRTVVtUUlJWVVNST01NT0xPTU5MTEtKSkhLSkhIRkhJRURFR0ZA
-OjY2Njo/PDY5NjY3NjQ3Ojk4NTU2NjY0ODc1NDc/Pzk2NDU3OTg4ODc0NjUzMjY2
-ODg8P0FESEtJRUA9OTg7Ojc3ODY0NzY1Oz06NTMxMTMzNDMyNDM3NTQ1NTg2NTA2
-NTQ1MzM2OTo3NjU5ODc3NTM1Njk6PkJESUxNVlpbXmFjYmVqb3FydHd4fHt7fX59
-foB9f4GAf3+Cg4KEiIuLjY2MioyOkpCTmZeYmZmZl5eUlpWWlZSSkY+FiYiIh396
-fXt7eHRzc25qZmJeXFlWWFBLSkpHSElGRUE/QkNAQEJCQ0VLTE1PVlpcX2Vpbm9t
-cnJzdXV3fHt6fHyAgH98gYaIioyKiYqLiYeHiYmHg358fn14dXJwaWZgYF5bWVlX
-VFJVUVBRUFFOTkxOS05OUVBRUE5NTlBOT1BQTk5NTk5NTVBRUFBPUFBUUVBSUVFS
-UVFVUE9NUFJQUlJSU1JRU1FRUVBNTE1NUE9OUFBOTU5PTk1OUE5NTU5OTE9RTk1N
-TExNT0xOTUxNSkxNTktLTEpLSkdJSkpJTUtLTEtQTUxMTExLSkxOTk5PTEhISk1O
-TU1NSktOUE1JSkxJS0xKSUtLSkhKS0tLSktKS01MSkdKSkxLS0lKTlFMSUpMUFBM
-TE1PTk1RUFJSUFBQUFJST0xNTE9OUFBQT1BSUU9SUlVTUFFPT01OTUxOS0lKTElG
-RkhGSUhISExMTU1PT01PTU1OTVBLSEZHSEVFQURFQ0NBQkZJSUxMSkZGSExMS05R
-UFRTVllZWFVUVlVZV1ZZV1NVVVVVVldaWVpcXl5dXF1bW1hSUE9SUFBQTk5PTUxR
-UE1RTUxOTkxOTk5KTU5NUU9PTk1NSkpKTE1NT0xOTk1QTExSVVRRUVVUVVVTVlRZ
-VFRWWFdZWVhXVVVWVVRVVFZWU1VUVFNWU1NXWV6Fu8nR2N3h5Obn6OnqTE1LT0tK
-SEVEREFAQEVISUQ7PD06PTo9PD1DPkJDQkI9Pj09OkA6Nzg4NzY9OTw5Ojs+P0JB
-QUFBRUNAPkNGSkhGRkdHSkpLSElKTE9NTlBST05OT1BOTk9LTEtLSktLS0lJSkxL
-SkpJSUpISElJR0VIR0tKSUxLTE9LS0pJSktMT09MTEpKTVJQT1BSVFVTVFVUUlBS
-T09RU1FOUFFSVFJRTk1MTEpISUtOTE5NTElHR0pIR0hLSEhKSkxPTk5OUVBOT0xM
-TE9OT1FMSk1RTlBRTU9LTE5OTUxIR0hITUpIR0VJTFBMSk1KSEhLS0pQUVFRUlNU
-U1NSUFRRTkpMR0dGSEdDQjo6NzU2NTMyNToyLzMzNTMzMjM2NjQ0NDg2NDY1NDM1
-NjU1NTU1NDM1NTU1MTQ2OTw2ODY0MzY4NTc4ODg3ODo5Oz04Njg6Ozo/Ozs3Nzo8
-Pz07PT9AQ0BFRURHP0I9QENERENFRURDREJDSElOTEtIRERHR0tMS0lOUFFRTU1R
-T05LTEhNVVVXU1ZVUU5OTk1QUVBTU1FSU1JYWFdUV1dUUlVWWVVWT1FRUlBRT01L
-TE5NTEtNTU1KSEpHSkZGSkpKSUdISEQ/ODQ6QkE9Nzc3Ozk6NjU6Ozo5NjY0NTc4
-OTo2O0RCOjkzMzQ0NzU1NTY2NjQzNDlAPDw9PUFGRkVCPzs4ODY1Njg4NDM1ODs5
-OTo4NDMzNzc5NDQzLzU3NTYzMTM1OTU1NjczNTQ3NTQ2NTY3OTk4OTk6OkBFQ0xN
-UFJXYWRkZGVoanFydHZ5e35/fH16fH58e36Ag4KBgoWFg4aIiYuLiIqMkZCTk5WX
-lZGTl5iYmJiYmZmYmZaUkIuMioeEhoeFfnx7eXh2cG9tbWpmY2FfWlhZV1lXVVFO
-TkxKSEdMUFFTUVZXWl1hY2ZpbnF0d3Z2eXp9e32BgoCBgoSFg4aHioyMi4yNkZCN
-jouJiYKEg4F/gXx3dHFuamdkX11aW1hUUlRRUE1MT1ZUWFJPTUtOUFJPT09OTlFR
-TVFTT01MT05PS0pOT1BOTVBOT1BPUVFSUlFRUFFRUFBPUFJVU1BQT09TUU9QUlJO
-UFNPTE5NTE5PT0xLTEtMTE1QU1FNT1BNTUxNTk9OT0xMTE1OTU5NTEtKSktFR0tJ
-SUpLS0xPTkxMTU9OTEtLS0hGSUxNTk5PTk5MT09NSkhISk5MSUhJSUdJTEdKS01P
-TkxMTExKTEVKSktLS01MTk9STlBQTk1NTU1OTE5PTk1MTkxMTUtNUFBOTE5NTlBR
-U1BUVFJTVFVUUU9OTktMS0pKR0tKSEdHR0VGSUpHSEpNTU9LSkxOTkxKS0pIRkRG
-RkdHQUNCRURDREREQ0RIRkVJS05PTk9SVVNWV1lXWFlYWlpWVlVWVlZXWFdYW1xb
-Xl1fX11aWVpVUE5MTU1MTU9OUVBPTU1NUE5OTUtOTUpKT01MSk1PS05MUE5OTk9M
-TkxKTU9PT1BSUU1MTFFVUlJSU1VWWFZVV1dWVFdZV1ZWVFJTVVRTU1VSU1VUWFVV
-VVZcZJe7ytHY3eHi5efo6epKS0hLT0lJRUhIR0BDQUJCQUJAPkE5Oj5EQ0JBRUVC
-PztBPj87PDo6NDc7Ozs7PDs6Oz5CPkJDQEFBQUBCRERGRkVISEpGSEpIRktLSk5M
-TU5OTlFNSklLSktJSkxNSUhHRkxKSElIR0VFRklKSktJSEpKSkpMTE1MT01NTUxL
-TUxPTE1MS0xQUFJRUFJWVFBVVFNQUVBSUVJUUU5PUlFQU1FPUU9PS0xKSUtPTU1N
-SUdGSkdLS0xMTE1MTExOTlBPTU1OTUpITE5UUExMTU1OTFBNSUlJTUxLSEdKRUpJ
-R0dHSktPUlBOTEpGSEhLSk9PU1NSVFVUVFNSVVVSUk9LR0lKSkZBPzo3NzY0MzIx
-MjY4NDU0MjU0MzQyMzQ6NzUzNTI0NTUzNDMxMTM0NTQzMzM2NDc6NzU1NzczNTU4
-Nzc5ODc5Pzg3NjY4Ozk7PDs+Ozk4OD5BPj49QkVEQkVDSERAPjs8QEBAP0NFRD9A
-QkZISUZJTUdDQkRKTExMSkhLSkxLUFBQUE1OVk9UUk9OUFBOTExMU1dWVFFSUkxS
-VFVPUlNWV1lUVldaVlVTUU1PTE5KSUpOT0xNT05NSUpJSkRFRkVGRkhIR0dFQj84
-OTxGSEA3NTc7PTo4Njc8Ozg1MzEyNDc5NzdARkk8OzYzNTQzNTYwMzM0Njg4Nzo7
-Oj0/QENFRUA8Oj07Ojg2ODU3ODY2NDo8OTY1MjAxLzE1My8yNDQ2MzMzMzQ0NzU1
-MzMyNjQ2NDI0NzQ5Nzo5ODs/QUdLT1JWV1tiZGdna25ycnl8e3p9f4CBf32BfX9/
-f4CAgYGGiYmLi4mJiYiKiY2PkZWXlpiWlpmcmpuam5iZmJeYlJGRkIyMi4mIh4WE
-gX18e3l2dnNzcW9qaGdoYmBeXV5fXlhWVlZWWVxcX15dZWZma3BucnV3en18fn5/
-foCDhIWJiYiJiomJioqPkZOPj4+OjZGRj4+IhIWHh4WAf396dXVvbGdjXFlVUlNT
-UU9PT1FRUlRXUlZRT1FQUlFQUFBQT1JQT1BPTE1OUE5QUFFRT05PTk9PTk9TUVFT
-UE9PU1JUU1JSVFNRUU5QUE1QT1BPT1FOTVBRT01PUE9MS0lLTEpPTk9OT01NTkxO
-TE5QTUxOTElLS0tMTUxNS0xMSklIS0xQTUxMTElJTU5OTEtMTkxMSUpHSU1NSk5N
-TU1LS0xKS0xMSUlISEZISklNTUxPTUtKTEtKSkxOT0lPSUtJS0xOT1JRTk1LUVVN
-TVJQUE1NS0tLTVBLTkxLTlBUUVBPUFNSVFVSUVJUUFBMSk5RTElKSkhLR0hISEhF
-RUhMS0pKSk1KTU5OS0pMTElKSklFRERBS0ZDQkJBQ0RCRklHRUdJR0hJTE1RUlNU
-V1dUVllYWVdXWVpZVVVXV1ZTVlZXWmBeX19cYF1aWFZQTExPTU5NTExJTEtMTUpK
-SktNVE9PTU9MTEtPTEpQTlBOUVBMT09PT0tLTk5PUFNTU1FTUFNRUFNVVlZVVVFU
-VVdXV1lWVlhVVlRVVFZTU1FSVlVWV1NTUlZpqL3I0dne4OTm6Onq6ktLR0hHSkxK
-SEdGS0pEQkRGRUNAOkBCQT9CQD48Qj4/QkE/PTk6Oj0+Ojw7Ozw8Ozs4PD08PkRC
-Pj9BQkRDRkhGRUZFRkZKSEdDRUhJTE5PTk5MTU1JTEhKR0hKSklJRkZHRUdFRkVJ
-R0hKSUlIS0pKSk5PSUVFSUpKSkxNS09QTlJWUU9PUFJTVFJSVldWVlJQT05PUlFT
-T01NT05OT09PUVFUUE9NTEtJTk1NTE1KSUlMSEpJTlBOTUpKTk5MTlBOS0pKSUdN
-TU1OS0tLSkxOU0lJSUdKTEhIRklIRUlLSUhJSklMVFBPS0lGSEpOTExOU1FTVFRV
-V1BOT1FTU1JNTVBNRj85NzQ0NDUzNTM2NTQ0NzQzMzQ0MTMzNTQyMTI3NDI0NDQ0
-MTMyMzIxMjM1MzM0NjU0Njc4Nzc0NDQ5NTQzNjg4MzE0NTc4PT44OTo9Pjo4Oz1A
-PTw9Q0VEQkZDPTk8PT1AQUBAQkE+QD9CRUhHRkdFREVEQ0dKS0xJSEZITFFRTlJQ
-UU9OUFNTTkpMTVBMTVFTVVhVVVBNTU9RVFJQU1VWVVZaWltYVVJRTU9NTU5NTE9R
-VFBNTU5MS0lIRkZGR0ZGR0dJRUA9Ojc4RU5LQDk1NDU3Nzc3NTY1NTIyMjEyNTU0
-NjxFRjk0NjMzNjk3NDA1MzU1NDI0NTY4PEFDQ0ZEQjw7PTo6OTc5OTY5Ojg1NDM2
-NzExNDM3NjQ0Nzg1MzI0NDM0NDMzMTY2NTg2NjU3OTc3Ojg3Njs9PkNITVBTWF9e
-ZGVoamxudXd4fH5+fn5+gIF/gYGAgISEhYF/fICHiYeGh4aHiImLi42PkJSTk5qa
-mpqbnpubmpmYlZWTkpKVlpGNjpCKhoeFfn1+f355d3l3dnVzdHRva2lqa2tnZ2Vj
-ZmZnZmhpbW1tb3V5eHh7e3+AgIKEhYaEh4iLi4yKjY+OjYqKjZCPkZKTlJSVkpCQ
-jo2KiIiGiIOBgH56dHJtZmNfW1hPUVNRUFFPT01PUVhWVFRRT1FQTk9RU09RUVJQ
-T05NT09PTlBPUFFPTk9PT1BOTk1MS01QTlBRUlFTUU1OUE5NTlBQT05ST01OTU9O
-UE9OUU9OUE9PTk5PTEhLTk5PUE5NUFFOT05OS0tNS01KSUpLTUxMS0tPUFFRTk1L
-TExNTUlLSkxOTU5MS0pKS0tMSUtKSktMSUpISElMTExNSk1OSUlLSkxLS0xOSkpL
-TExNS01LSk5NTFBQTk9OTk9PTlBPUlJRUE9OS0xOTUxMTE1LTEtLTE1OUVBRUVNU
-UlVRT09OTkxNTkpHSEdHR0dJSEdIRkRGSUlKSUdHTU1NSk1LSUpJSEZGR0NEPz5B
-QkNCREFCRklIRkdKSUxOS01NUVNWWFhYWFlZWVpaW1ZVV1hXWFdYV1lWWFpaXGBf
-X19bWlZUU1FRTk5PTkpOTk5MTE1LS0tIS05OTkxMTEtMTkxMTE5OT05OT09OT05R
-T09NTU1NUVBRUlJRUFVUVFZWVVZWV1dZWVhVWFdVVFRVVFZYWFNSUlNTUlRTVFVQ
-UmKWvcnR2d7h4+fo6erqTE5OS0pMTE5FSkhHSkZFREhDREBEQUJCR0VDQjw8OT49
-QUI9Ojc6Ojw6PD07PTs7Ojw+QT47QUM8PkBEQ0ZERkRERURERUZISkVERUhMS05P
-TU1NSUdKSUlJS01NT0xHRURDRUhKSktPTVBMSkhKR0hJS0hGR0lJSEtOT0xOSU5R
-VFdSUFFPUVJQTlFWV1hYUlNQS05QT01LUFBPTk9OTk9RUE1OS0tJSk5MTk9LTElI
-SkxPTlBNTkxKTUtNTU9OTUtMS0pHR0lIS0pJSUlLSktISElJSUtNTUtLSklJSU1M
-S01LTE1QTlBPSklHR0lMTlFSVVdYWVdUUU5OT1FRVFFMTUtDPjk5NjQ3NjM2NTU2
-NTU2NjU2MzIyLzE1NDU1NDQ0Njc4NjQ2NjM1NzY2NDI2NDUzNDEzMzU0MjI1OTo4
-ODg6Ojc3NjczMzU6ODo7ODk7OT05PT5AQDw8QkdIR0VBQ0VERkNDQD9BPkFERENC
-Rk1FQkJAQkdGREVLTkxFQ0hNS0xLTU9RUU1RVFBMSk5PTU5NSU9RVFRUVFFOTlBQ
-S0pLTlBUVllYWFVTT0xNTk5OTE5OUVJRTEdLS0tISElHRkZGRklLSEZCOz02NDhF
-VE1DNzY0MjQ3NzU3NTUxMDQ1NDc1OTc0OTw8OTg3NDQ0NTY1NzY2NDMzNDM1NT05
-Oj1CQ0JIQD45ODs5OTY2OTk5PDk3MjIzNDQ1NzczMjM0MzMyMzU1MjU2NjY2NDU0
-Njg2Ojk4Nzg6OzpCQkNITExQUldfYmRma25wdnh4f399foKEhIOEgYWEhISGiIeD
-g4F9fYSEh4aJhIaJjJGQj4+QkZKTmJiXl5icnp+cl5aUkZWTkZOTkY2Rj4uIh4SD
-f4OJg4GBf35+fnx7eXt1dHRycXRzcm5tbW9ubnBxcnN1ent9fn+BgoaIioyOjouL
-jY6Rj5CSlJGRjoyQk5GQlJOTmZaTl5SPjYyMi4uJhIN/fHd3cW1pZWJdXVxWUlJR
-UVFRUFBPUE5RUE9PUE9PUlRTU1JSUE9UU1FQUVBOTk5OTk9RUFBRUE1PUk1OTk5P
-T1BQUU1NT09SVFNRT1JPUFFTVFFPT1JPUVJRT09PTk9QUU9MTUxOT05LTU1MTExN
-TEpLS09OTEtKSklKTEpNUE5PTUpISUtMTk5PT01LTE5NTE1JSkpPTkxMTUxLTExM
-TUhJTUpKTE1NS0pJSUlLSUtKS0pISkpLTExMS0lKS0lLTFBRT01OT05OUFBOUE5L
-TUxNUFVPS0xLTk9OTU5MT1FQT05OUVRUUFJRTU1MTExISEhERUhJR0VISEZHRkRE
-RkZISklGSUhJSktKRklJR0dFRURGRUJAREBCREVFQ0dLSUpHSU5OTkxOUldXWVha
-V1hXWVlbWlhZVllYV1dYV1lYWlpfYGFdWlZWVVJQU09LS0tJTUtNTEpKTU1OTkxK
-S05NTktNTk9MTU5NTk9QUE5MT1JRTU5PTU1LTFBSU1RSUlJRU1RVU1RVVFRWVlhX
-V1dWV1ZZWVpaWFZXWVVWVVZSVFVUU1JTW5C9ytLZ3uHk5efp6etMS0lJSUxIREJE
-RERHSUVISEtFQ0Q/REVIQD8+QD9CQ0U7OTo9QD09RDk6QTw7PTw+PUA/QTs9P0VB
-PkBBRUVIR0VEQkVFRUZHSEVERkdKTVBPT01OTUhIR0hISkhLSklJQ0RGR0pKSUpK
-SEVCRkhHS0dFR0dKRklMTE9PTk1MTUxQUlRQS0tOTU5NT1JVVlVRT01NUFJSUU5N
-T1JRTk5PUVJSUVBPS0lHSEtNTEpJTk1MTExLTEpNTE5KSUpKS0xKS01MTEpJSUpL
-S0tMTUpNSEpISUpNTlBRUU5OTUxMT09PTUpLSktMTlJVT0ZERkhNUVZaWlpZV1dR
-UFBSUFJQUE5OSkM6NzY1NDU1NTI1NjQzNjY6NjUxMzM1MzI4NzY3NTQzOjY2ODc5
-ODY3Njo4NTYyNDQ0MzMzMzc4NDY3NzY2Nzk3ODY1NjM0Njg6PDs9PDo6Pj08PD08
-PD1AQkNFREFCQkNIRUNEQ0FDRENARUVHTEZGREJCRkhJSExMSkxISEpJSEZKUlVP
-UFZUVE9OT09PSktPUU5PT09OT0xPTktJSElMTlFQVFZYVVVUVVBRTEtMTExTU1FJ
-SkpNSUhKSklERUdKTUpKR0A9Ojo1N0RRUEM4NTMyMjU0LzM0NTg1MzI0NDg2NzY1
-Nzo3OTY1MjM1NjU1NDU0MjM1NDY3NTU8PD5BRUZDPj48Pj46PDw7Pjw8OTs0OTU3
-MzY3MzAyMzM2NTc5ODc6NDMyNDUzODU1Nj06Ojo6PkFFRUZMT09RVVpdX2Jmam1w
-cnR1dnV5fn1/goaFhIaFhoiIh4uJiIWGiISAhIaKiIaEiIuNkZGPkZGSk5GTl5ub
-np+hm5qamZiYl5OQlZWSkZORkYqKhYiGhYaHhoOBgYGDgYGBgoB8eXp3eHp4d3V2
-dXp4e3t6fX5+gISDgoWGiYqMjpCSkpWVko6Vk5STlJKWlJOVlJSUk5WTkZCRkpOP
-jo2PjIaFhHx+enl2b2tjYV9dWldTVFRSUlBOT09PUE1PTk1NTk9OUFFQUU5NT1FP
-Tk9QT1BOTU5OTlBOUVNPUFFPT1FNUE5OTVBOTlFSUk9QUVJTU1JPUlFTVFFSTlFQ
-UFFSTFFPT1FQUlZRTU9OTExMTElKSUlLSktIS01MTUtNSkhKSklLTEpKS0xLTExL
-Tk5MS01OTE1NS01PUk1NS0xNTk1MS0xOTkpJSkhJSktMTUpLSkpLR0pLS0tMSEZI
-SUlKSktKSElLTE1MTlBPTU9RTk1MTE1OTExNUFNST05NT1FOTU1NTE5PTk9RUFJR
-TUxLS0xMSkhJSUhJSUVHRkZGSUZHSEdJSkhISkhGR0ZGSU5NR0pGRUJBRENDQEBB
-REdFR0NERExMS01LTE9NUVJTWVhZWVtZWFZZWFtcVlZXVlpZVlhaWVxcXF5cW1pY
-VlVPTEtNTEtMS0xNS0lKS0lLS0xKTkxKTExLT1BPTk5NTUxPT09OUE9RUE5PT1BM
-Tk5PUFFRVFRUVVRRU1NSU1VWVlZWV1hWVVdYVllbWVlZV1dXVVhWVFRWWFdTUlZf
-jbzJ0tje4uTl6Ojq6kxKSktXTEhDQkRGRkNFREdHRkU/PD4/PTxCPzw/PT9DQTs4
-NDo+QEI9QDs7QDw7PTs8PTs+PEA+Q0A/Pz9FREJER0ZFQkRCRUNEQ0BCREZJSk9O
-SU5MSEZLSEVIR0dJSEVFQ0hJR0hHSUpKRkVDRUVGRURGR0ZFSEhKTk5MSkpNSUpK
-TEpLSU1PS0pLTE5RU1RRTk9TWFdST1FRUUxPUFFSU09PTk5JSEZHSU1MT05OT09P
-S0tGSktNTE1ISUpKSUhHRkpOSUlJS1BPTUxPTU9OTk1OS09QTktNTUxMTE1RUFBP
-S0lMS0tOUFBNSU1KTFFVWF1dXldVWVVTUE9PU05NSUxIRkA+ODUzMTc2MzQ1NDQ4
-NDM2NDQzMzIyNDMwMjc2NTU0MzY1NDQ1Ojc2NjQ0NTc2MzI1NDY0Mzc4OTU0NTg4
-NjU1Nzo6ODk5Oz08Ozs8Ozs9PD9BPz5BQEFFREJCRUhDRkVISURDRERFQkNGSEtN
-TE1IRkZISk1NS0lLTEtLS0tLSkxPT1JQU1JSUVFQT0tJTlFSUlFRUlFSUFBRTktH
-SE5NTUpSVldYVlJTUU1NSkpMSlBQT0lKS0xKSUlJSktHSUlMTExEQj43NzY2QU9R
-QDk0NTU3OTc3OjU0NC8yNDQyMTQ1ODkzNzc0MzMzNDQzNjU2NjY2NTczMzU3OTw9
-PUFCR0ZBRUJBQD49Pzw8Ojw6ODQ0NTM1NTY1NjY5Nzc1Nzc5ODc4NDMyNjc3OTg6
-O0I6PkBCRklNUVRTWFpcXWRiZGhucnFwcnV3e318gICGh4iIh4aGi42JioyMjIuJ
-i4aGjIqJhoyOjo+MjI6RlJWWlZaWm52cmZ6dnZualZeZlZSUlJGSkJKQkI2MiYmJ
-h4eLh4SGhoWCgoOCg4KEg4N+f3x5fX6BgYOCg4KChIODiYqLi42Ojo+SlJSYl5eY
-lZaYlJGSlpWYmZWRk5SUlJWOkZSSkY+Pjo2LjImEfHt6dnNubGdfXl1dWVhVVlVT
-UUxPUlBSVE9OTE1MTVBPTU5QTkxNTU1LT1BRTk5OTk1PUVFQUVNQUlFPUFFRUE9Q
-T09PTk5PT1JRT1FRUFJWUk9SU1FRUE9OTUxNT05NTE5PUlNRT01MSkxLS0pKSkxO
-T05NT01LS0xMSUdJSUlJS0hLSUhITU9OTk5OTUpITE1QT01OT1FQTk5OTU1LTUxJ
-R0tKSktKSkxJSk1KSkpLSk1LSUhIRkhISU1LSkpKTUxMT05OTk9RT05OUVNPT09P
-UU9NT05MTU5PTk9MT1BNTE5OUFFQTkxLTExFQ0dFRkdGSEhISEhHRUZMRkdITUlE
-R0pJRUVHSUlJSUtLS0hJREVEQkE/Q0ZFSElGRkRGSUtMT1BQT1FUVVRXWVpbW1hX
-VlhXV1pdXFlaXF1aWlxeXV1fXFlZXFlXUVBTTk1MTkxMTExNTElJS0tNTUtNTk1P
-TE1PTE1OTE9NTlBPTE1QT09PTlBPT1BOUFFTVFJRUFNUU1JVU1JUVVVSVFZZWldY
-VldYV1hVVFJSU1VXV1ZXVlRTVVVVUl2UvMnR2d3h4+fo6OrqRklJSUxGSEtGR0RD
-Q0A+QkNDQD5AQ0E+PT5APjtAPz8/Pjs8Pjo5PT09Ozg5Pj0/PDo5PDo9PkRGP0BE
-PUJCQkJFSEdKREZFQ0FBP0JFSEpMSktMTU5IRUdGRklJR0hGSEdKTUlJSEZISEhG
-R0ZFR0hJSEZFSExLTE1LTUpJS0dHR0hKT05MTU9PTUlJTlBUVlVTU1NWV1BQTk9R
-UU5QTlBTUFNOTEpISUlLSktMTk1MTU5PTEpKSUpLTE5NTUtJSEZHSUxKRkxNUFBR
-TExPUlZWT0xNT01LT1JTUE1MTU1NT1BKR0RITk1QUU1NS01OT1RZXV1eV1BSU1JR
-Tk1OT05QSEhFPTg1NDMyNDMxNDUzMzUzNjUyODc2NDY1NDc2NjY0NjUzMzMzNDM3
-NDQ0NjY2NTY2NjY1Njk1NDY4Ojo2Nzs2NjU3Ozg8Ozs8Pz08Ozo5Ozg6QEFCQj8+
-PUE9Pj1CRkNERkVHSUVCQ0ZHSEZLUVNMSkVGR0ZKTEpLR0ZJSk1MS0pMTU9QTk5Q
-T1RSUk1MTU1RUlNRUFJUV1RSU1NPSkpJS05LTlBXVVBOUFFTVFBRTExJTEtMTEpH
-SUpKS0xPTkxMTExMSUZCQjs3NTg+S09EOTU3ODg5OTQ3OTc3OTY1MTY1NDY3OTY4
-NDc4Nzg1MjYzNjc2NzUyMjU0NDQ5OjxARUlHSUNFREdDPj06OkA7PDo3NjcyMzQz
-NDQ1Njg2Njc4Nzc4NzQ2MzM0Njc3Pj5AQ0BDRUhNUFVXWVxgYWBgZWdrbnJzdnR1
-dXl+gIOCg4WGiYqJiIeMj4mLi4yJhYSIhouIhYiKkJCQkpGLjJCWk5OVlpqcn5ud
-nZydnp6XlJaXl5SSkZCOi4yNjYqIiomJh4mKiIqJiIiJioWIh4iJiIaAgX+Bf4KD
-hYWGi4qGh4WIjZKQjpCQlZaXl5OWlpaYmJWVk5eSlJOUkZGSk5CSlJOTkpGPkZCN
-j4uMhoWDgnt1cG1paGZhXlxbXFtXVFRTVVFRTk9PUE5PS01OTlBRUVBNS0tOUFJM
-TU9OUFBOTlBQT09QUE1QUFBOTk5QUVFQUE5LTU9QT1JRT09OUFJTU1FRUVBST1BP
-Tk1OTk9OTU5PUVBRTEtNTE1MSktKS01OUEpMTkxJSElJTEtKS0tJSktISEVGSUpK
-TE1KSUZHTE5PTk1MTk5hV05MR0hJTExOTk1LSkhNSEhJSUxLS0pLSElJRUVISUtM
-TVBNSk9NTkxNSkpOT1BNS09QTlBOUFJPUE5PT09OTk5PUlJNT1JPTk1PTExLTU9K
-SEdFREdFRUhMSUlJSElIRUVESEtJSEhGR0ZFR0lGRkNCREhIRkRJRkNBQkA+QEFF
-RUdMSkRIS01PUE9QU1RWVlZbWllcWlpZWVhYW1xbW15dXlxYW19jYV9dW1lYVlFQ
-T01NTEpMS0tOTEtLS0tMSEpLTU1NTk1LS0pMTk1NTUtJTEtMUFFPTU5MTU5QUFBP
-T0xOUlFQT09QVFNQUlNXV1ZRVFNTV1hWV1dWV1daVVRSUVFTVVVVVlRVVldVW5G5
-yNLZ3eHj5ujo6elQUEtIS0dGSEZERENDRERCREVBQ0lGQ0hFRUFCRzxFPkFBQkQ7
-Oz09Pzw6PTg6Ozo6Ozw8ODw8PD0/Pj0+PUFBQ0RBQkNDRUVEQkFBQ0pIR0VERUZJ
-SEVFSElLR0dCRUVHRUlMRktISEVFR0RERUdIR0ZGRkVHSUtMSklKSUhJR0dGREdI
-S0xPUVBOS05PUlVZV1lWU1NVVVNQTU1SUlBRUFFUUE1JSklHSElLTk9PTE1QT05R
-TkpKSkxLTE9STk1MSktJSElKUE9PUVJPTFFPVFFOSU5PUVBSVlhTUE1PT1NSUExJ
-R0hMU1NPT05NSkxQUlVbXVtWT1NTU1NPTE5NTEpHQ0A5NTM1NDM0MzM0NTY0NTQz
-MzUyNDMyMTExMzQ2Nzc1NjQ2MzIzNjc1MzQ2Njc4MzQ2NzY3Ozs5NzU0NTc3Njo6
-ODg6Nzc6Ojo6OTw7OTg6OTs7Ojo5PT88Oz48P0FFREJCQkNEQkA9QkRESEdLTExL
-SkdGRUVGR0pJTEhNT05RTkxOTkpKSk1RUlJRTkpNUVFRT0tOTVFVVlRSUlNKSktN
-Tk9PU1NSTk5NUVNTUEpISUlNS0tMTkhHSklMS05SUUtLSEdHREA+QDUyNTlIT0c6
-NTU3OTcxNDY1NTY3NTc2Njk3Nzc1OjY3OTk2Ojk1MzM0MzUzNTMyMjQ1MzU2OT1B
-S0tMRUlIS0M+Oj08Ozo6OTY2OTc2NDYzNTU4ODg4NjU0NjQ2NTc1OTQ4NTg8QkRK
-S05PVVZZXV9hY2RjZGZpa21zdHZ1c3h+fH2ChoaIiYqKjI2OiYyOi4uOh4iGh4KH
-iIqMh4mLi46Rj46Ji46Tk5KXlZSXmJ2YlpmZmpeVlJOTlZONiYyOiYuNio2NiomH
-iomJiYyKio2JiImIiYqLhoiFhYaFhoaGhoeHjIyJioqPkJCTk5SYmpiYl5mUlZqT
-k5KUlJSUlJGRkpGRkpKTlZSSkpCTj4yMjIqFgoKCfXlzbmtoZ2BdXFpYWFRTVFJP
-Tk5QTlNRTU1OTU9PTU9QUVBUTU9PT01MS0tMTE1MUFFQT09QUVBNSk9OSk5RUlFP
-T01OTU5OT09NTk9RT09RTlBPT05NTk1OT09PTU1OTU5OT1FOTE1MSk1NTE9LS1BO
-TkxMS01JR0pPUE1JSkpLSkpJS0tJS0tLS0pPTkpMUUtNT05MSU5NTktNT05OTk5N
-S0xKTUdHSEdISU1NS0xIS0hJSUtKS05OTEtMTk9OTUtHS05MTlBPTE5PT0pQUk9Q
-T0xQTk9NT1BOTUxNTUxLS0lJSEpLS0dHSEZGRUZHREdGQ0dHRUVISUhIR0lKSEVD
-RUdJSUZFRkRHRkdHRUNBQkFBQD9CQ0JHSUpISEtNT1JSUlRVV1hXV1ZYVlZbWFta
-WllZW1dYWVtbXWFcYGJiYmJfVlVTTk1LTU1PTUxPTExMS1BNS0pLTE1MTEtMTExK
-S0pMS0tNT0xLSktMUE9PT1BPTlFRUE1OUVBPUVBPUVRUVFVXVVNWVlVUVlRUVFVW
-VlpZWlpaVVNUUlFVVVVYWFZVWldbgrfI0dnd4OTl5+jp6EpGRElKSktISkpEPz1B
-Q0NEQ0VFRUZHSElHQkhDQ0A8Q0BCQTk6Ojs5Oz46Oj05OTs7OTk5Ojo6PD08Oz08
-Oj0/QkJBSEZGRkE/Pz9ARUdJRkNCQUFBQ0VESEhIRUdIQkNGRkhKSUZFQ0JEQ0ZI
-R0dHSEVFQ0VHSUpLRkVDRkRERkJHSEhIS0tLTE5PTE1PU1ZSUk5PT1NZUE9MTk5L
-TUxLT09LTU1NTUhHSE5TT1BQTExOTE5LSk1PT05MT1BOT01KTUxNTU5NTk1OUVJU
-UVRSUVFOTE9PVVZaWFVVVldVUU9PTUxNSUxRVFNRUE1PTk9TVFhcWlhPTVJWVFFQ
-T1FQTUhFQTw3NDQ0NTU1MzQ1NTQ2NjU2NTQzMi4wNDI1NjU2MjMzNTc3MzE2Ojc6
-Njc4Njk2Njg5Nzo3OTw+ODU3NjY1Nzc5Njs7Ozk7PTs6OTs6ODU1ODo8Ozk8Pjs8
-PUBCP0FAPTs5PTs/PkA/PUBCRkpNSklHRUNDREVHTUhIRklPUlRPU1BMUUxLT05S
-UlRSUFBOUU5MS05LT1JUV1pVU01MT09RUlNSUU5KTE9WV1ZPTUhLS01OTU1OSkdI
-SklJTE5QTUtKSUlISEU/PDUzND1HSEA6NTMzMjo2MzM0NTM4OzY1OTc3NzYzNTc2
-MzY4Ojo4OTMzODU2MzI0NTc4Ojg6PUFJTVBKRkhGQUBBPzo4OTg5NzY1Nzc2NTU0
-Nzg2NjU0MzY2Ozc5QDo2ODk8PUNITE5TWVpdXmFjZWNlaWxucHBtcnNzdHh1en+D
-goOFhIeMjY2NjI6Oko6MjY+Ni4eFhYWGiYqKjoqJioyLjY+MkI6RlZeWlJiYlpmZ
-mJqYmJSUkpSSjoiQj4yLi42LiImLioaGiYuLiYmKiYmKhoaJiY2Ni4aJiIiJjJCM
-i4qLio2PkpKRkpOTlJaVlpmYl5SUlpOTkZOVl5SSk5KUlJKRj5KPjY+OjJCQjI2J
-iYaEgIF7eHRxbWlnY2BdW1hYVlVST1FNTVJQTUtOTlBQTkxOT05RUFFRUU1OTUxL
-TUtMTExLTU1NT1FPTk1OSk9NTE9PT1BPTk1OTk5NTkxMT0xNTU9OTk5OTkxNTk5O
-UE5NTk1QTkxOUlFQTUxNTU1KTUtNTU1OTE5NTEpLSU1PTk9LS0lKTEtLTUpMTExN
-T0tOUE9OUVBOT01OTU9OUFJQTk1NTE1NSUpJSUhLSktISEtMTEtJTFBPTktLSUhI
-SU1OUU5NTU9PT05OT1FPTU1OTkpOTVBLTFJNUk5NT0xNTE1LSkxJRklLSUxJSkhG
-RkVFRkRDQkFBQkNBRkhDREdHR0ZJR0ZCQkdGSUlFRkdFRUdKSEhFRkRBQUZGREdJ
-SUxNS0tPUVVVV1lZWlpaWVdXWllXWlpZV1hZWFhaXV5fXmBfYGBeWllWUk9OSktK
-TE5NTk5OTUtMTlFPTk9NTExKSktMTFFOSklKS0pHSktMTU1PTk5NTk9QUFFQT0tM
-Tk5RUFBSVFNTUlJVVVZWV1ZXV1dWU1NUWFlXVlVXVVZVU1VVU1NUUlVSUFiOusjR
-193g4uLl5unpSExHRUNCQ0ZGR0ZHRUJCQ0FDREM+Q0NEPkBDQkI9PDw/RkRAPTs3
-Ozo9Pzg8Ojs6Njc7Ozk6Ozw8Ozk8Oj47OjxBQj1CQ0FESEE/QkBCRUdGREE+QEFF
-RkNFRkNGRURCQkNHSEhFRkVDQ0RHR0hHSEdFRUVER0dGSUdGQ0VDQ0ZGRkJCQkNI
-SEdISk9OTE9OT1BNTExOT1BOSklKSktOT0pKSE1IRklJSElJSElMTU1MSklKSElK
-TE1RUExLTE9NSkxMTU1QUE9PTk9QUlJRUlJTVVFLSkxPVFVUUVZTV1VRTk1QUkxQ
-T09SVlRRUFFPTlFSVlxaVVNQUVFRU1RQVFBLRUI+NzY1ODk2NDQ0NjQ1Nzc3MjM0
-PDMwMTA0NTU1NDM0NDU1NDY1NjU0NTQ3NzY3NTY1NTQ2Nzk6OTs4NzU2OTc1NzY2
-NTY5Ozc8PEE7OTg5Ojg5OT88PD4/PDs8Oz49Pj07Ojw8QUBAQD07SUFGSUpLRkhJ
-R0lFSE9LSENFSUtLU01QT01OT09RUlNRVFRWUlBRTkxMTU5OT09NUU9PU0xLSk9R
-U1NRT09QU1VWUlJQSkpMUFBRTk9LSkpMT01NTEpHS0pIRUREQkA8NzMzOEBHRDs6
-OjUyNDU0NjU2NDYzNjc5ODc0NTc3ODY4ODQ3Nzc1NjY2NzY2MjI0NDU1ODk7P0ZM
-UEtFREBBP0Q9PTs5ODk4NTc1NjQ0NTQ1Njg2ODY3OTg4OThAPT1BP0NGTFBTV1hf
-YmVpZ2hoaWpobG93dnNyb3J0dXp+f4KEhYaEioyLjY6NjJCNjpCQjIyLh4WJioqJ
-jo2JjIyMioqMkJCSkpCTlpaVlpOXmJaZm5eWjZSUlpORkpGOi42KiouKioiKi4iG
-h4mJioeJiYyKjIyKioyMi4mJhoiNkJKNi46Qj5KTlJWWk5WXk5SYlpeal5WTlJKS
-lJGTk4+QkpGRj4uMj5COjo6Qj5CPiouJh4WBfnl1dXNta2dlX1xaWlpXVFJRUFJQ
-Tk1NTU9PUE5OTk9NTExNTk9PT01QUUxLTkxLTExPUVJRUlBQUFFRTE5MTE5NT1BQ
-TU1NTk1NTU5MS0xMTU5NTE1RUVFPTU9OUE9PUFFNTkxOT01OS0tNTkxMTEpMTk1M
-SUxNSEdIS05MTU9MTUxNTExLSkxNTlFPSklLS0tLTEtMUE9OTExMTFBOTUxNTE1N
-TEtNTk5RTE9KSUxKS0tNTE9OTEZITE1NTk1PTVBRUU9PTE9OTE9OTlBOTExLTU9P
-UVJQUE9PUE9MSEhIR0dISEhJRUdFQ0VGRENDRkVFQkJAQklKR0NCQkZIQ0RGRkdF
-REZGRkdHSUtJRkZCRUFERkRGRkdGR0dIT09PTlBRU1haWVlaWllbWFhZW2FbWVpa
-WVhXW1tdXl9eXWFfX1xcW1dST05MS0xPTVBNTktLS0tLTE5MT01LS0xMSk1OSUpI
-SEpISUlKTU1OT1FPTk9PUFBOTU1OTk9PTk9RT09QVFRXVlNVVVZWVFVWVlZXV1ZV
-WVZXV1VVVFVUUlVWUVNQUE1PVY67ydHY3eDj5Obo6elIRklHRUdHRUNERkVDQURE
-Q0dCQUZEQ0NAOzw/REE7PD07O0NDQEA8PDs7NzQ5Pzo3ODc4Nzs7PTk6PDs/Ozs6
-Ojs7Pzw9Pz9APkNBPD0+Qj9BQz1AQUFBREVFR0ZKSEM/QkdIR0ZFQUJCR0hGSUlG
-RkBDREFERkdIR0RBQkNDQkZERENDQUNESkxKTVBPUU9OTUlKSEdJS0pKRkVJTEtJ
-R0NER0pIR0lMR0RGR0pNUU5JSktLTU5KTE1PTUtLS0xLT09PT05QUE9STlFTT1BS
-VVZXUUtIS0xRU1JOT1NVUk9MTU5QVU5QUVJUVlZSUU1OTk9VV1ZTVlZVUkxPUlFO
-S0lHQjs5NDc2NTYyNDUzNTQyNDU2NjQ0MTM0NDI3Nzc2NjU1Njc1Njc2NDQ2Njc4
-ODg4NTc1NTc1Nzg5ODY1NDY3ODc4Nzc3Nzg4OTk6OTg5PDw+PDk4OT48Oz88PTo7
-Oz09Ozo6OjxDQz9BRUFCREhOTUpHQ0RETE5MTExGSUdHRUVKTE5QTlVUVVRUUlBV
-WFVTUk9NTlBOUE5MSUlMUFBSUE5OTVBRUlRTVFNWUkxLT05OS0dJTFFRTkhMT1BR
-UE1KSEVISkZEPkBDQzw3NDI0OEJAOjc2NTI0Njc0MDQ0MzQ0NTQ3NzEyNDQ1ODk6
-ODc3NzQ3NzU2NTMzNDQ1NzU1Nzg7RkxMR0JDQkFBQTw9Ozk2NjU1MjUzNzU4OTg1
-OTo5Nzo3OTk8QEJER0lMT1RVWV9jZ2doaW1ucG9ub25wc3N2dnVzc3V4fXx+gIWK
-iIqIjo6Ljo6Sj5KOkZCOjYmHiIaGiIuPjouOjpGPj4+SkpGTlJKSlJSRj5SUl5mc
-mpiKio+TlZKQkI+LjImKh4iHhYaGhYSHiIiIhYeHiI2PjoqKiouKi4mFjI+OkZST
-kpKRkpGUkZOVk5KUkpOVl5WXlpORkZGSkZCQkI6PkZCNjYyNi4+PjIyNjI6MjYuJ
-hH56enhycW5qZWNkYGFbVVxXVFFPUE5OTk5PT1BNS0tMTU5PT05MS0xMTExLTk9N
-TFFOS05QTk1NS05PUE5PTk1MT09PT1BPT1FNUVFOTUxOT1JNSUpLT1FSTVFRUFBP
-T05SUVFQTExOTk5NS0xNT0xMTExOTUlFR0pQUFBMS0pMTE1LTk1NTk1KTEpJSUpN
-TUpLSUpLTUtLTU1LTFFMR0tMTU9LTE9OTU9OS01LSktJSEpKSEpKSkpLTUxOS01O
-T0tPT1FQT05NTUxKS01QUU5NTU9NTlJQTk1QUU5PTUxIR0dGR0ZERERFP0A9P0ND
-QUREQ0dKRUNGREVFRENBQ0FDRUVIRURER0hGSEhGR0ZFR0VDQ0NHRkVCREZHSkxM
-TU1QUVRVWFlYWVpbW1xaWVlbdWZcXFtaW11eXV5gYWRkYWFcWlpZU09PTk5KSkxQ
-TE1NS0tLSkpJS0tNS01LSklKS0pMSktMTExNT0xQUVBPT05NTk5MTE1NTExPTk9P
-UE9OTlJRUFJRU1JRUlVVVFZYWFtaWFdVU1ZWVlZWWFdUUlNSUU5OT1JWh7zJ0djd
-4OLl5+jp6klLSktNSkdGQkVLSUVFRENER0RCREFCQkE7Pj08PTs9Ojw4OT5CPTo6
-PD08Pzk4PDk3NzY5Ojo5ODQ4OTw7PD06PD88Pz89PD5CPz47Pjw9PDs6PD9AQ0E+
-PkBCQ0NDQkI/Q0NGQ0FBQkRGSkdHRUZGQ0NCQkFEREZEQUZGSUhHRkdEREVFREVJ
-SEpMUU5LS0pLSEdKSUlISUVEREdGREVCPT5CRUZFSEVDQkNESktLTExMT01LTU1O
-UE5OTEhLTU5OUE1QTk9RVFVSUlFPUFNTWFpSTkpKTE5TUlJQUlhXVk1JS05STVFR
-UE9UVlNPTk1PUFVbWVVUVVZTUFBTUk5HQUE7ODY1Nzk1NjQzMzU1MzY0NDU1NTUz
-ODQ1NDI0NzszMzQ1MjU1Nzg5ODg5NTw4NTQ4NTg3NjY0NzUzNjk6Nzc3OTg3Njw1
-NDk6Ozw8PD09PD8/PD47PDs9Pj04Ojo6PTxAQj48PTxEQkJBQkRDREdJSEZEQ0dI
-S05PSUpJRkZERUlMT09RU1BRUVBUUlVUVFJRT0xOUlVRU1BKSU5PUEpMTkpITVFS
-U05OT1BPTUtJTktMSkxMTEtMSklNUE9PTEpGQ0hIRURCQUA9ODUzNDY2OT48Ozs4
-NTQ2ODc4Njk3Nzg7OTU0NzQzNTY2ODk6OjY2NjY2NzM1NDc1NTE0NDQ1NzxCSUpJ
-Qz0+QkFFQD87OTY0NjIxMDEzNzk5ODU3ODk+Ozw+QUFDSE9SU1tbXl9iZmdsbG1v
-cW9wcXR0cG5ydnh3eHt4d3p5e4CChIaFhoiJi4yLjpCQkY2Oko+MiIuNiImLjI+R
-j4qLjZGPj4+OkJOVlZOVk5WTk5eYmJiXmJmTkJGSkZCPjY6NiIeIiIaEg4SEg4qG
-iYaGh4iKi4iIiYmJiYqLioiKiIaPj5GTk5ORkJGRlJOSk5ORkZKVlJGTkZOTj4+Q
-kZOOjpCQkpCQj42LjY2NkI2MjI2NiIiGgnx3c3BtaWViYmJgYV1ZV1BNTk5OTk1M
-Tk1MS0tOT01LTExOS0pOTVBNT1BRUFBPUU9NTU5OTk1OTU5PT1FPS0xNTVFPT1NR
-T0xNTU5MTU9QT05OTEpMTk9OT0xQVFBQTkxMTU1LTU9OS0pOT05MTEpKS0xLSUlI
-Sk5PUE1LS01KS0lITU5MS01LTE1KS0tNS0lISEpKS0xMTE1MTEhJSk1MT09QTk1M
-SUtKSkxKSEhJTExKSEdJS0tNTk9PTk1PTk5OT1FPUFBPTk1MS01OT05LTUxLT1BR
-UVBRUFJPTExJSEZGR0NAQEFBREVFREVDQ0RDQ0VGQ0NFQ0VHREJBQUVGQ0VHRUJE
-REVERUREQkBERkNGRUVERUZDSEdIS0tPTVBRVVpZWllaXFtaWlhcXFxfXFpcW1xe
-XF9dXl1hYmFkYF9cWVRRTkxMT09LTExLT09MS0xLTk9NTkpHS0xMTE1LS0tMTExJ
-Sk5QT09NTk9PTUtJTUxNT1BMTE5PTU5RUFBQT1FQUlNTUVFQUlZXWVhWV1lXVFRV
-V1ZWV1hVVVZWVlRRUlNTUVqRu8jR19zf4+Tn5+jpTEtKSUZGRUZCR0VHSEpFQ0JF
-Qzw9PT5BPj5CRUJAPD5CQj09PD8+Qz89Oz88Pj45Nzc4OTk6Njo5OzU7Ozw8PD46
-Oj08PTk7P0E+PDo8PDo8Ozs6PUBBPz49PkFAQUNBQ0VGRkREQUFCQ0RFREJBQ0ND
-QkFEQkRAQD9DRUpNSklISEVDQ0FFREVHRkdJSEZFRUZHR0lLR0dEREJDRUNAP0BB
-QUJFRURCQUA9QUNIS01LSElKSEpLT1NRUE9MSk1QUVBPTUxNT1FSU1NVVlRSU1VZ
-WllST01QT05RT01PVllZU01JSU9RT1BRUUxOVVNQUVBRVVxfWFNUVlVSU09MSUNF
-Pjk2Nzg1MzI0NDQ1NTY4OzQyMjQzNjU3NjM2NDMzMjY0NDMyMzUyNTU3ODs7NjU7
-Njg2NTQ2ODUyMjMyNDY3Nzg4Nzg1MzY3ODo5ODo7Ozo8PUE/OTs8Pj4+PT06OTpA
-QEBEPjo9QkJCREFGQ0tRQ0lJRkJCQENISUxFSEdGTEpKTkxPUVBPS0xOT09TVE9P
-TU5RUU9PT1FPT01KSlBSTkpMUlFPU1VXT01OUE1OS0xMTUxNTUpKS0xKTEhNTE5M
-SUpKSUdGQENDRD85NjU0MzI2OTxBPjY4Nzg5OTo5Njk4NTc1NDc3ODc1NTY3OTk3
-Nzg5ODY2MzM0MzUzMjQ1NTY5OkJITUtEPz8/PD49Ozo5NTMzNjg2ODY0Nzk5Nz5A
-P0FERkdNSk1QVVtfX2RmZmlubHBvb3FxcnRvc3Jxc3N0d3V6eXx5f319gYKCg4aI
-ioyMkYyMjY+QjYyQjoyJiImKjYuLkJGOjIqNj5GQjY2OkJCTlJSVlZOTmZeWl5eX
-lpWVk5STlI6JiImJhoiHhoSHhIWEiYiHh4eIiIuKioiKiImKiIiHi4uLjo2QkJCV
-lJGRkZGSk5KSkpKSj4+SkI+QjpCNjYuPkI6Pj5CQi4qIiomJiYmMi4mKiIeEgYJ9
-e3Z0c29sZ2RhYGJgX1lTUE1PTk9TVEpLTE1QTEpMTExNTU1QUUxMTk9PTkxRUVBQ
-T09MS0xNS0xPTlFQUE9QTlBQTk9OTlBPTUlLS0xIRktKTFBRT0tMTU1MTEpKTU5M
-TEpKTU5NTU9OUE5MS01NSktNSkpOT05OTVBQTU1RUk5LSUtJS0lLTExMTExMTkxK
-SkpJTExNS0pLTEpKSElNS0xMTk1NTk5NSUtOT09JS0pKSUhHSElKS0xOUFBNSkpN
-S0pOUE1PT01QTEpJS01OUE1MTE5MS0xOT09PTUtJR0lGSEdISERAP0FBPTw/REJE
-RUVDQ0RGRUVEQkBDRERER0dFRkJFR0A+PkNDRENDRENFR0VERUNGSUlGSEpNUFBQ
-VlZXWVtbXGBqYlpaXFxcW1tcWFtcW19fYGNhYWBjYmNiYFlWUk9PUExMTU1LS0xN
-TExMS0tMTUtMTUxLTExKSEpLTEtMS01OTkpNUlBQTk1OT05NTkxOTVFRUU5PUFBP
-UE5PUlNRUVJVVFNTVlRTUlZWVFRVU1RXV1dUV1VTVVVTVVVVVFVTV5G8ydHX3ODi
-5ebn6epNTEpIQ0RFRUVFQ0FER0dAQENBPjk6Pj5AQEA/QUFCQ0RFQjw9PD09Ojk2
-PD04Ojs+ODc5Nzo5Oj08ODk7Ozo5ODo6PD47OTo7PDw8PD07PT4/PT5AQT09Pjw+
-PkBCREVDRkRFRkFAPz49QUJDQEJEREZCQkJCQUJAREVHR0tLS0pFR0VERUNEREFD
-RkdGRENER0ZGSERCQ0JCQ0I/QEA/PUA/QUFAQUJBQD1AQkVJTEpLSkxKTUxNT0xP
-UlFST09TUExPUFBPUVFTUldXVFRUV1hcXVhVUlFQUU9PUVBVWllUUUpLTE5QT0xO
-T1FUUlFQTlBVXGBeXFpZU09UVVFNR0FAOzc3NTQzNDMyMzYzNDQ1NjY1MzU2NTQ1
-MjIzNTM1NDY0Nzc2Nzg1ODg6ODY1NDU3ND8zNjc0NDM1MzI2NTM1OTY6OTU0NDU2
-Nzk6ODs7OTc6PDk9PDw/QEJCPkA6PDo7PUA/QjxERENDQ0NDSEhFR0VDQkA/QENJ
-SUhHR0dGRkdITU5ST0xISkxQT09RTE1MT1NVU1FQUFFNTEpNTU9OT05NTk9SVFVM
-S0tPTUpHSk5QT09MSklJS0xOSEdHS01KSUlNS0tGQkRERT03NTMzMzQ0OTk5PDo5
-Ozo5NTY3ODk4NDg3Njc3OTo4ODo4NzY3NzQ3Njg3MjU2NDQ1LzQ1NDk7PkdOTURJ
-QT89Ozo2Nzc5OTc3Njg4NjY0NzpBRERIRklMTE1SVFldYWhra25vcG5ycXN1cm9y
-cXh0d3Z2dnZ5eXZ3enx9gH9+fn6ChomKjpCNjY2Rj4yKjI2NjIuHhYiKiYiLkImK
-j5CPlJSWkY6OkJCSk5KWlZOUlpaWl5eYlpSPkZKPiomIiImHiIeJjIaFgoSCgoOG
-h4eIiYiHh4mKiomJio+MjI6QjI6PkpWSkpCTkI+RkJCQkZGSkI2OjI+Ojo2KioyR
-j4uKi4uKiIiJiomJjIuMjoiIh4Z+fHp3dnJvb21qZWJjX11aV1ZTUFFRUE9STk1P
-UVJPT0xOTE5OTUtNTU5KTE1MS0xMS09OTE9OTExMTk5PTEtKSkxOUVFQTU5MS0xQ
-TUxMTU9LSlBNTk9PS0tJSklGSk1MTU1PTktNTU1NUFFNTU1MSktMTFBRUE9OTFBO
-TU5PTEtNTEtIR0pKTUxKSEdKTUxMTklJS0lJSUxNTUtMTUtMTk5OSktNTExMTEpL
-SUxNTUtNTUpOTUxLTkxPTExNT09PTExNUE1OTUxOTUpNTExMTU1OTUxOTU5QTU9P
-TktLTElMSEVFRUJGQ0FAPz5BP0FEQ0NGQ0NHSExJR0JBREREQkZERENGRkZCQkFA
-QkJERkhHRERFQ0JEREVHSUhKTFBPU1VXWltcW1xaXWleXFtcXFlcXmBcXV1fYF5g
-YmFiY2VlZGFeW1RPS0pNT0xKSUtJS0xNTE5RS0tNTEpPT0xLSkpOT01OTElISU1P
-TU1NTk9KS01PUUtLTUxNT05RT1BPT01OUFNPT09RVFVUUU5SUlNUVVlZWFRUV1ZX
-VVZUUVJPUVBSU1FRUFFVgbnI0Nbc4OLk5ufp6UtLR0VHTE9FREdDQUVJSERGQkM/
-PEZDP0NBRD9DQT1APkFEQEA3ODk7Oz45Oj86Ozw4NTc5Ozs6Nzo4NTY4Ozk7Oz49
-OTo5ODk7Ozk9Ojo7Oj06Ozw/QD88PD08QUFDRUhIREJEQEE9Pj09QkRDQ0JDQkBB
-QkJFRUNFR0hLSUlHRURGQkJGQ0JAQkJEQ0RDQ0VFRkJDRUNEQz8/PT4+QUFBQEE9
-Oz8+PkBAQUJGRk1PTUxLTk1NTk9PUE1PT1BQT1BQUk5SUFFSUVNQUlJUUVJZWltd
-XFlRT05RUVRVVlVaVlJOTU9TTkxNTU9RUVFQTU5PVFVWW1lZWlpbVlVTUlBJQjw6
-NzQ0MjQ3NzQyMjYzMzU2OzUxMDU3MzEzNDQ0NTQ2NTM1NzU0NTk2NTg2Njc1NTQy
-NTIzNjUyMjAzNDQ5NTQ3Ozc3ODg4ODY/Ojo5ODo8PDw+Ojk5PUA/Q0E8Ojo5Ozw7
-PD9APD8/PTw9QERGRkdHQ0JFQUBCRExISEhGRUtHSUlMTU1RTkhKTk5OS0xPTU1S
-U1hUUk9OUU5KSUtMTU9NTktLTFRQUUxNSU1NS0lLTUxMTE5KSkpOTU1ISUZOT05J
-SklMSkdDQ0RGQTs2NDI0MzQ0NjY7Ozg1NjY1NTY1NzQ1NDU6OTc4Njc6Nzc4OTU2
-ODc1NzU1NTg1NTU3NTQ1MzdBRUpLRERDREA+PTg2ODQ1NjY2NzY5Oz8+QURIS0xN
-S1BSVl5cXWFqbG9ucHR1dXVzdnd0cXJydnR1eXl3dXZ1cXd6fn97e39/g4WFh4eJ
-jI2NjY+NiYeGjIuIh4mKiImJiYuKiYiKjJCRjo2NjpCOi4+QkJORkpSVl5WUlpWV
-j4+Sjo6LhYiIh4eGhIeFg4GFhIKAg4SHhoWFhYqIiYmLjYyJjYuLjoyKi5COj46Q
-jo6Pj46Nj46QkY+Pjo6Oj46Njo2JiYuNioaEhIeHhoWJiIeHh4iFg3+AgHx8fHZz
-bm1qZ2ZlY19dW1hVVFBPUFBPTVBSTUxPUE5OT0xNTEpOTlJPTEpLSUpOSktMS0hL
-TUtMTExOUE9PT0xMTk1NTVBPTExLS01OTkxNTE5KS01MT05LS0dFTk5KSUtOTEtM
-SkpMTk5PT01NSk9OTU9MTk5NTU9PT05MTE1NS0xQTU9NS0tJSEhJSkpLSklKTUpJ
-TEpMTExLSklLTE1OS0xNTUtLSEpITExMSkhHSEhJSUhLTUxMUUpOTE5MTk1OTUxM
-TU9QT09LTElJTk5PUU5MTkxPTU1OTU5MR0tJR0dHRENCQTxAPT0/QEJAQkBDR0RF
-SENFR0hFQkVBQUI+PUJFQ0JGREVEQUNBQUFFRklJRENERURHSEpMTk5PVFVSVFhb
-W1lbXVtZW11bXFhbX11cXFxdXl9fYGBgY2RlZWVgX1pUUU1NSkpMTU1OTEpJSktL
-SklMSEpKSkpMTUtKTEpNS0pJTEtLS0xNT0xJSUpNTExOUU9ST05OUk1QUU9PUFFQ
-T09PTlBSU1JTUlBSUlNVVlVTVlRTT1FSV1NTUE5NUlFOUlBOUVqEt8fQ2N3g4+Xm
-5+npT1BNSEZJSElLSklDQUNDRERBREZHQ0RDQUNAPjw9PzxBPzw+Pzw5Oz46QT06
-Ojg5OTg4ODs5Ojo5Nzc2ODk5OTg8Ojo8OTY5Nzg6Oj48Njg8PT1AQ0FCPjw7Nzs7
-QUJFRENDQ0FEQEA+Qj1ARENCQkFFRERERUVDRUZBQkRHQ0VIR0RCQUFCQkJCPD1B
-Pz5AQUNCRUNARUJAQTw9Ozs+PkJAPTo7Ojs5P0NEREhGSEpLTE5QTU5NTEtKTE1Q
-VFFRTlBQUFBRUlFQUFFRUFFNTVBTV1pZV1NVVVRRUVRUV1lZU1FRUlBOTkxOTE1P
-Tk1KSlBRUlFSUlRUUlVWU1FTTEQ9OTc1MzY5ODg2NTY0MjEvNTM0MzQ2OzY1NTQ3
-MjU2NTM1NjMzMjEyMjQ1NDI1NTY1NDMxMjQ0ODc3NjI1NzY3NDQ2MzM2NzY5ODk4
-ODk3ODk4ODk7Ojk8PT0+P0BAPkA+PUE/QD5CQj5APjs8REVEREZFQUJCQ0NGSEdE
-RUhKR0hMSU1RTlBOT05QUk1LSUpNUFVVVlFQT0lJSUhKTkxMTE1QTU5MTktMSkpL
-TE9SSUpKSk1PTkxMTFBRT0lJRkpPT0dFS0tKR0VHQ0JAPTc0NTU2NzY2NTY5ODo4
-ODc0NDIyMjU4Ozs6ODk4Njc3Njk6NTQ1NjY3NzU0NTU0NTQ1NzY0NzxGSUlEREZD
-Pz43ODo5Ozs4NTg5Ojo+QkNHSE5NUVRWVFdaWmBiZWltcnRyc3Z3d3l2dnl1cnV0
-cnZ6eXh5eXh3dnt7e3x8f4eFhIOFhYaHh42Mi4yMi4iLh4uLiouKiYyOjIqJh4mL
-jpCRkpCOjY6MjpCOkY+TlpaVlZaVlJGPk46Rj4qChYaIhoeEhYODg4CDg4SDhISF
-iIeGh4eHiImKi4mJh4iIjI6LjY2Ki4yOj46Ljo6PjpCPjIyJjIuKjYeKiomIi4aG
-iYqEg4OEhIeJhIWEhIF+fX1+fXl4c3Nva2NlZWNfXFteXVtTUVBQUE1MS09QT1BO
-Tk1IS0tKSkhMTktMTU1OTkxOT01JS0xMTkxLTEtMTVBPTE1OSk1MS0xNS0tJTExQ
-T09OTE5NTktLTUxKS0xKTUxLTExLSktMTkxNT09OT0xMTE1PTUxNTU1MUE9NTk1M
-T05RTk5MTk1MS0xKS05QS01KRkpJSUpLRkpKTExJS0xNTk1MS01NS0xKSkpOSUpP
-S0tNSktMTExKTEpMTktOTExPT1FNSkxMTk5LSkxPT0tKTk5NTExPUVBRUk9PTEpL
-SUlHRkRCRENCQUBDRENGRkJBQEREREVERUVERkRDQ0NFQUJAQENEREFCQD9BQkBB
-QUFDQ0RDRURFSEhNTk5PUFJTVlhaXl5dXFpeX2BeW1lcW1lbWlpcXV9gYGNjZGVn
-aGVjYmNcWFRQTk5OUE5OTUtMSktMSk5MSklHRklJTExPTU5NSUtKSk1LSkxMS0xL
-TUtLTU1NTU1NTk5PTExOT09PT09QUU9OT09QT05SU1JQUVRVVFNTVVlUVFJSUVRS
-U09SUE9NTFFOUVNTWo26x9HX3d/j5ufo6epLT05HQ0lHSkdER0dGRUJCRENCQERC
-REM+Oz1AQD48Ozo+PD1BQkQ+Oj4/Ozs3Oj0+Ojo4ODk6ODk6Ojc1Nzc5ODc6Ozs7
-PDg7Nzc5OTU4Ozw8QD0+PUA9PDg9PkE/P0FCQ0BAQUBAPkBCPUBDRkNEQUJDQ0JC
-REVDQ0NCQEJGQkFFQkdCQEBAREA/Ozs7Oz5AQkJDP0VBQj8/PTs6Oz88PDs6Ojk5
-Oz5AQUNGRkVDSUhLTk9LSklHSk1NUldUU1FNT0xPTU5PTk9TUE5NT05KTVFVU1BN
-UVRUU1RXU1NUVVVST1NUVU9NTk5MSkpLTEtJS1BRU1JPTlBUU1RRUk5GQDw4NTQz
-ODc5NjU0NzUyMzIyLzAyNDUzNDU5Nzg6OTQ0NDg1NTMwMTI0OTM0MzU2NzU0NTM0
-NTM1Nzk4NTc4Oj03Nzg6NzY3Njc3Ozc2ODg1Njo8OTc4OT0+PEA9Pj1APjw9Pz5B
-QEA8Pjw8PkFDRUNEQ0BAP0E/RkZHREVISU1LSExQUlFPT0pOUFFTTk9MSk5SVlBP
-TklMS01IS01PUE1OS01NTkhJSk5LTE5PT1NSUE5NS0tKSkhJS0tKS0tHSEtLS0lI
-SktFRUdIQj4+Ojg2NjMyMzY0Ojc4ODg4NjI2NjY2Nzc3Ojc2ODs4NTQ3Nzo6ODk3
-NTg5OTc0MzU2NTc6NTMzPEVLSENERUM+PTs6Ojc6OTU2OTxBRENFRktOVVdWWVle
-XF9jZGVoamxsbXFydHZ3dXNzdXN1dXJ0dXJ1d3N2eX17eHx+fXx+goWEhYCChYWF
-hIWFh4SEiIeFiYmNjYyLjo2NjImJiImMjY6Nj5GRjI2PjZCRk5OVk5aWkY6UkZGT
-kI+QjYOBf4SHiIuGhYOBfYOGhYeFgYKGh4aGg4WFhYmKiIeFhYWIiIaFh4qIi4yJ
-iYuLjo2MjY6LiouKh4aJiIaIiImJhIWJiIeDgYSGhoSEhIKBgoB9fHl4d3RzcW5s
-Z2NiXl1ZV1hWUlRSUVFPS01LSk5LT05NT05LS09TUExLTE1QUFBQTU9QT05MTE5O
-TExLTU5LTUxMTU1KS0dKSklJTEpLT01NTU1NTE1OS05OTE5MS0lISklJSktMTExM
-TUxMTk5NTEtNT1BQTU5PT01NTU5PTk5PTk1MTU5QUU5MTUtJSkpKTEpOSEdJSUhN
-SktLTE5PTk5PS05UTk5NTExLTE9MS0tLS09NTE1OT05KTExOTUxOTkxPUE9LTE1M
-TExNTU9UU05OTk9SUFRUUlBSU1FPTklJSEZFQ0VDRD5BPkNDRENBQ0RCRURBRURE
-RUdDREJEQkA/QUBAQEJCPT5BREJFRkVBQkJDRkREREZISktMTlJRUlRUV1pcW1pb
-XV1cW1teX11eXVpdXlxgX19gYWVjaGlqamRjYlxYVFRPTk1PTU9MTU1NS01LTUtN
-SUpJSUxMS01LTktMTU9NTE1LTEtMSktMTExLTUxMTEtNTU5OT1FOTlBQT1BMUVFQ
-UFJQTU9RUlFRUFJSVFRUV1dRVFNRVVNSUlFQT01OUVRRVFRZj7zI0Njc4OPl5ujp
-6khISklESEhIRUdER0NCQkZCQkFDRkI9PEBBQkNDQEE/PTk5PD09PDo/PT06Ojo7
-Ozo+OTk4OTg4ODo5Ojo4ODg6ODc2ODc6PDgyNjc5OTg6Ozs7Ojo+PTk5ODxAQT8/
-QD9BQT5AQEJBQkA/QEJEQkRBQEFBQUNDRENCQEA/QkNBQj9BP0BBQD1APTs7Ozk7
-Oj5AQUE/PTw6PT4+Ozs7PD08Ojk6Pj49PT9BQ0RDQkRES0tMTE5IRUtKR01RTU9N
-TUtKTEtISElNT09OT05QUlFSUFFSUE9PTE9PVFdWUU5SUlBPUFBRUE9NT09RTUxK
-TExOUVZVU01PTU1TVVVXSkZAOzg2MjI0Njc3ODc2Ojc3MzIzNDI0MjA0NjY3OTg2
-NTY0MzQ1ODcyMjQxNTIyNDU0NDc3NzYyNTI5OTs3NDY5Ozg5OTc5Ojc2OTc3ODc1
-OTc1Nzo9ODc3ODk8Ozo8PUBBOjg5Pj8/QD9AQz9APz5BQ0RBPj8+QERHS0tJSEZI
-S0pLTU9STk1WUE9QTk9PUE1RUlRVUk5JSktLSk9MUFBVUE5MTUxMSUtMT05NUlBM
-TVBPUFJMTUpKSUtKS0pKTElKSUhKS0tKS0ZHRUVAPDo6Ozg1NDc3ODg3Ojk3NjM1
-NDUzMzY0NTQ0NjY5ODg5ODg0Nzk5Nzk5Ozo4Nzc1NTY2NDY1Nzo6RU5OSURGQj08
-PDk5OTc2Nzs6O0FDR0pNUFNXWlxfX2FhYGZnZGdqaGluc3J2d3Z2dXV0c3JvcXZy
-cXZ1d3l4en97e3x6fHt9g4eEgYGCg4aDhYWEgISEg4KHiYmKjY6Oi4qJiYyKiYyQ
-jo6Nj5GPjY+Tk5WVk5OTkZSUlJKRkpOTko6Oh4WCh4iMhIJ/f32AgoOEhoaDgYGG
-hYOChoKEhYmHhoaHhoWFgYOIioeJh4OEiImKh4eHiIeGiIWIhoqMhYSGh4aKiYSD
-hIJ/g4OCgoOBfIGAf3x3dnNzcHBsaWhlYmBiW1pWVVNVVFNPT05OTk5LSkxNTk1M
-T09PTExOS0tLUExPUE9OS0xPTlJPS01RTUxPUE9OS0tISkpISU1LSUpKSElLTU1J
-TU1PS0xOTk9MSUlJRkdIR0dJS1FLS0xNS0xMS0tMTk1JSUxMSkxOTE1OTExPTkxO
-UE1MTEtLS0xMSUtKSkxNTk1LSEdFSkdHSUtKS05PTU1NUVBOTUxPT01LSU5MS0tK
-SktKSk1NTlBOTk5PTExMTk1MT05MUE5LTk5RUE9PTU1MTlFQUFBTVVNPTE5OTElI
-RUNEQkJDRUZCQkNERUZFRUJBREZDQENCQUBAQUNDP0FDQkBAQEREQUVJSUdHRUQ8
-QURGQUBISEhJSk5PUVNWVVZZWVpbXltcX11bXV5fYWNiYWBeYGFgY2RjY2lobHBx
-bmlkYV1bVVJQT01PTUtLS0xMTU1MTExOTE9QTEtNTU1MTE1NT01LSUxMTkxJSkxP
-TU1MS0tMSk5PTk1MTkxNT01OS09SUlJRT1BQTVBRU1RQUFFSUlFUVVJSUVJRUFJT
-Uk9OT1JRUFJUVVuPusjQ19zg4+bo6OrpSktJSE9NSkVFSkdFPz8+Q0VDQkJDQ0M+
-QUhFQEFHQj4+Ojc8Ojk7Oz5APjs/QDs4Ozs8ODQ2NTc3Nzg5Ozs6ODk5NjQ1OTk4
-OT03PDk+Ojg4Nzk7Oj47Pjw8OTo/OzxBQz0/QEBCRUVEQEFBQUJCQERBQT09QUFC
-QkFAQkBCQUM/Pz8/Pj08Oz09Ozo6QD44OjxBQDw8Ojc4Ojo9PTxCQT04OT1CPj5A
-QUBDQ0E/QkJERUlMS0dNTkpNT09NS0hJSUxKSkhER0pOTEpMS01PTklKTlBNSklK
-S05MUVNQUVFSUU5LTU9NTU9NTE1KS0pKTlFSVFRVVE9LS01TV1JJQjs2NDMwMDQ0
-NTc7NzU1OTc3ODYzMzIzNDQzNDg4MzM0NDQ3NDM0Njc0MjUyMzg3ODc2NjU4NTM0
-NjM1OTY3Nzw5OTs9Nzk3Nzg2NDY4Nzk3OTs5Ojo6ODY4Ozg8Oz9AQT48PTo8PD4+
-PUJEQ0M/QEJERkdBQUFARENESEdHRUlKSU5STlBNUExMTUxOT09QUFNUVlVPTkpH
-Sk9MTEtOTVFOTEhKSk1JSUpOT1FSUE5QU1BOUlFRU05LTU5OTEpKSk1JSklJTEtF
-QkRGSURBPzs4NDI1OTc2MjQ1ODQ2MjIyMDAyNTIyNTY4Ojw6OD06OTY2OTo1OTg3
-OTk5OTg1NjU1NDM2ODlAS09JR0U7PDs7Ozs6Ojo+Pz9AREZKTVBWV1xdXV9fYGRi
-ZWRmaGpubG5ucHBydXV3dXJycHR0dHFzc3Nyd3N6fX14d3l/gH+AhIOBgoiHhYaF
-hH9/gYOHiYmLi4qMjoqKjYuLjY+NjYyQkZGRlpOSkZOUk5WTkpCRk5OSlJOPkY+N
-ioqKh4eKiIeDg35/foCAgIaFhYSBgIGHhYODgoaHh4iEgoKFiIaHhYeHhYeIhoOD
-hYaHh4aHh4SEhoqKh4qJiIaEgYGBgoGAfnx+gIF/fn5/e3x9e3x8c3Ftbm1oZmFg
-YF1ZWFhWUlJOTk9OTk1LTU5LS0tOS0xLTk1OS0pMTU5OTk5NTlFQTU1PT1JPTExL
-S0xPTkxKTEtMSUhLTExLSkpOS0lMT0hKTExKSExNTElLSUlLR0lLS0lLS0tMTExK
-TEtJTE1PTk9QTExOTExNS09KTU1NTVRQTk1MTkxJS0tMSUlKTU9NSUxISExJSUdH
-SUtPS05QUFBNUE9OS05RT0tMTElKTktPTk1KTE5OTk9OT1BQUlNPTExOTU9OTU5N
-TFBOTk9OTU5PUE9RT09OTE1NS0lLSUhIQkNDQj9BQUBCQkM/REJDQkBDQUFBQUJB
-Pj5DQkFAPUBBQ0JAQUJDQ0FCREJCQ0lFQ0NFR0RFSElNT1BWV1laXl1eXV1cX2Bf
-XV9eXl5hZGNjY2RgZGRlZ2dnamxwdHZ1dHBsZ19aVVJSUE1MTU9PTk5NTk5NT09Q
-UU1MT0xNTU5PT0xMT05LS0pKTEtLS0pKTExMTEpIS0tNTExPTU5NUFBOT1BSU1RR
-UU5PT1JSUlFRUVJUUlNVVFhSTU1SVVNRT11VT01NUVJZbIu7x9DX3ODj5ujo6epN
-TUlKTElHRkdJS0hFP0A9P0hFRURAQkA+QkBBREU8Qj4+Ozw4Ozk6PUE+Pz07PDs5
-Ojo5OTQzOTc5Oj08Ojk7Ojo1NTI4OTc3NzY6OTk3NTU4ODs7OTg7Oj47Oz0+PD4/
-Pz1APT9CQ0NEQD4/QUA9PT1AQkFEQkNBPj1AQUBEST9CQUM+Ozs6Ojo7Ojo+ODQ5
-OTs5Ojk5OTk5PD48OjtAPTw9Pz9BQEJDRUM/QkNCRUdISEhLTExIS0tLTEtLTUlJ
-SEhJS0tKSkpMTExJTVRNTExPTUxKSEdJTFJPTlJPU1FQUU1LTFBPUE5MTktISEpL
-TE5NTk5MSklLSlBRUUtCOzcxMjI1MjM2OTs3Njo3OTc1NjI0NDIxMzY0MTM0NTc3
-NTY6NTQ0ODM1MzI1NDg3NzUzMjU4NDQ1ODgzNTU5NTc2ODQ1NTQ2NjU2Nzk4Nzk6
-OTk3Ojs7NzU5OTk6Ozo7OzxBOz09Pj08QUJDRkVBQkNGRUI/PEBCREVFQ0VFRkZF
-TlBPT01PTExOUFFQUVFQTlBSUkxLSExPT01NSktOT01PTlBOUU9OT1JSUlFNTUtP
-U1FRTlBOTUxNT1BQTUlLS0hISUtMS0dFRUVDQ0BDQT04MzEzMjMzMTQ0MzM0NTQy
-MjMuMDI3Nzo5Pjs6Nzo6ODc1NTk4OTczNjo5OTU1NDY0NTU4QENKTEpAOzs5Oz08
-OTk5PT1DQ0dHS1BUVllcYGJjY2RjYmZlZmJma2trbG5wcHBvcXNycHNydHR1eHVy
-dHRzdHh5enl7e318goGAgYKEg4ODg4ODgYSDh4mNi4uLiomJh4eJioyLjJKSj5KU
-lJOTj5CRkpWZlZKSk5SVlpOSlpOQkoyKiIiEg4WGiIWDgX+DhISFhYWFhYWGgYOI
-hoKBg4SFhIOFhYWDgYSAf4OFgoKFg4GDg4SFhYWFgYGHhoqJh4iFhYJ/f4CAf39/
-fX19fXx/fHx8d3Z0c3Rxbm5raWJkYl9dWlxXWlRRT05OS0pKTU5MTU5MS0tLS0pL
-TE1KTE1PTE5PTkxPTUtLTUxPUU5LTk1LSUtITE1MTUxNS0pJSktKS0pKSElPUEpK
-S09NSUxLSkpOS0xJS0tKS0lJSElMSEpKTExLTVBOTFBSTUxMTEtLTEtNT05OT1JR
-T1JNTEtNUFFOS0xLSkxNS0xJS0pKS0pJT0xNTE9QTE5LTEpLSkxLS0xMTEpKTU5N
-TE1QUU1NTk5NS05PTktLSktMSUtNTE1LTE1MT0tLTE1QT1NRTklISkpHRkdISkRD
-SEJAQkJDQ0ZFREZDQUA9Pj9AQkNBPj89Pz48PzxBQ0FCREI/Q0NESEVFQkZEQj5B
-RElNR0dJTU9TVlhaWVlcW11eXl1fXl5eX1xfX2FoZGRjYWRlYmRlaWtpaHJ4fHx9
-fHx0bmReWVdWUlBPTE5PTlBRUE5TT05NTk9NS0tNTE1NTEpMTEpJSkpLTkxMSkxM
-SU1PTE1KSktNTEtOTU5OU1NQT1BVUlBPUE5QUVRSUlNTUVJTVFJUVVlZU1FSUVJS
-V1NTT1BUU1diiLvI0djd4OPl6Ojp6k9NUVBPTkdBRUVJSkJCQEBGQkdDQz08QEJE
-QD47Oj9BQUVBOzc7Qj5BPz87PTs8PDo5ODk5Ozs5ODo3OTs7NTw6NjQ2OjY3NjU2
-Nzc4OTo4Nzo3OjczOTs6PEI+Pj49PD8/Oz9EQEFCREJBQEA/Pj88PkJERUVCPzxB
-PT9AQUFAPkFAQUFAOjo5Ojo8Ozk5OjY4NTk7OD89PEA+Ozg7OTc9Pz07Oz09P0JA
-Qj8+QkVHSktKTEtKUEtMTEpISUpMTU1MSUpKSkxNTE1MSktNS0hGSElJSk5LTUhG
-SEtKT1NRVVVRS0dHTEtLTElKTEpJSElISEtJTElGSElKS05OSUI7NjUyNDU2NTM1
-NTQ0NzY2NDQzNTUzNjY2NTc5NDM2NDM3NDg3NTQ0NjU1NDQ1MzY3NTU1NDI0Nzo4
-Ojg3MjMzOTk3NTQzNjYzNTY4Ojc3ODY1NjY4Ozo5PDk5Ojo6OTo8PDw+Oj47QUFA
-QkFDQ0NEP0JCQ0I/P0JDR0NDRkBAQ0ZQTU1MTUlHSVBQUlBOUUtPT1FTT01NSk1S
-UEpKS0pMTk9RVFNQS09SUVJUUU5LSk1RUU1LTUxQS0tMUlBNR0lLS0pJSUpKSUVG
-RUVHRUJEQj05NzU0NDI1MTIxMzMyNjc3NDMxNTU6Ojk9P0BAOzo6OjY2NjU1ODk3
-NTY3NTY3NDMzNTY5QEdLRkI9Pjo6OTo4ODk9QURITE1QUlVXWl1eYWRmZGFeYWNk
-ZmpsbnBua2tub25yc3BvdHJucXR2dXt6d3N2fHh1cnd3e3t8fn19gYSHh4WCgH+C
-g4iIhoiIiIiIhYeIjI2Ni4qNj5CQkJOTkpOTlJWSkpKSkZGQk5GUlZaSkpOQi4qL
-iYeEhYeEhoOEg4GEhoWEgYCEhoWDgoOAgoGCgoOFhICAgoCAf357fX5+fX16fIKF
-hYSDfoCDhYiHjIeGi4iGgoR+fXt9gH6Ae3l9fHl4eXV0cG9sbW5taGVlaGdlYl5Z
-V1ZRUE9QT0xNS0pMTkxMTEtLT0xLSk1OTE9MSkpKSkxLTU1MUU1OTkpLTUlITE1K
-SklJS0pLSk1PS0tNS0lLTU1IS0lJSUhJSU1NSklKTUxKSElJSktLTEtKSkxLSkxO
-T0tISkxMSktISUhISkpOT05OTk1QTk9QUE9PT01NT01LTUxOTExMTk5JSUpKTU1M
-TktOTk5OTEpITElKSEhKS01NSktNTk5MTlBPTUxLS0tJTExMSk1PTEpQTU5OS0tM
-TEtNT1BQTk5OTk1LSEZGRkZHRkRGSEFAQD9AQUFCQUJEREBFQT0/QT5CQ0NAQUE+
-Pj8/Pj0/PkFGRkZGSUhHSUVEQkFCQkJIREdJTU5RUFFWWFlZWllcW1xaXF5fXl9j
-X2JiYmFjZGRkZGZnY2ZrbHBxdXqBhIiLhoF8cmphWVhYUlFRUE5OTU9QTU9PUE1Q
-TUtJSkpMTEtMTUtLTktOT05NS01NSktMTEtMTEtLSktOTUxNTU9SUlBPUE9SUU1N
-T05QUE9RUVRUUVRUU1NWWVVSUE1NTFFSUFJSUVBTV1mMvMnR2Nzh4+Xo6OnrUE5R
-TExLR0JCQ0NGRUVHSENDSEVAQTxCQ0NAQj09PTw7Oz49PTo/PD1DPjo+Pj08PDo5
-Nzk8QDo9NzU0Nzg5Ojw8OjU0NTg4OTk1Nzc5Nzs8OTg1Nzk4Ojw5Ojs6Pj8+PD5B
-QkRCQEBAQENBREBAQUBBQUI+PT4/PkBEQUBDPD05PD48PD83OTw7PT09PDk9ODQ3
-OTs7Ozs7PDs7O0A/QT1APEBCQUFCQkdFQkI/Q0ZJR0tPTkhMSkpMSUtJSUtMTU1L
-SklNT01OTktRTUtNSUdERkhKTU1LSUdGREVFSE5QUEtJSUdJSEZJSEtHRkVFRUZJ
-SUxKTUtMTExKSUdFQzo2NDQ1NDQ0MzQxMzUzNzYyMDM3NzQ1NTU2NjczMzY2NDU0
-NDMyMjQ0MzY3NjU3OTMyNjM0Njc5OTk3NDU1MzY2OTs3NjQyNjMzNjczODg1NTc6
-NjY3OTs5OTw6PDo6Ojo7Oj08PUA+QkI+PUBCRUQ8QEBBQD4/QEZFQERDQURCREtO
-TUtMSkVHTE5QTUxMTVBOUVFRT05NTlJTUk1OTk9MUVRXVE9OT1JUVFdUUE9KS1BQ
-TUtNTU9NTUxPT1BJSEtPTU1PTElGR0hISENFRkdDPjw6Ojk0NTIzMzUzNDY4OT07
-Mzc4OTo8Pj89Pj4/PUA7NzY1MzU3ODk6ODc3NTc0NTc1NTtESU1EQDw9PDs6OTk6
-PUJER0tMTlBTVVhbXFteYWFiZGRlZ2JmamttbG1ua2tucnNxb3B0cnJxcm9xdXJv
-b3V4d3Z3dnd6gHp8fIB+fn+Eh4N+f4KEh4eIiIqOi4yNiYqKiYyNjIyNkJGOkJKV
-mZiVlJWTk5COjo6Qjo+RkJKRkpCKi4qJiISDh4iGhoOCg4SEgYGAgYCEhIWBgYWD
-hIGBf4GBgX1+gH9+fXx7d3Z7fXx6eH1/fn1+fYKIh4OFhoOFh4OGhH58eXt8enp5
-dHV5e3l1dXJycmxubGllZ2RkYGBgX1pXU1RUVVFQTk5OTEpKS0xKSklKS0tOTEtO
-TElMS01NTkxKTU5NT09PTkpMTE5JSktKTklJSkpOTlFNS0tKTUtJRUdISEtMSUdH
-TU1NTUpLSkhKSUlJSElKSUlKTEtLS0tLTEpJS0pISEpNS0pLTk1MTk9MTE1NTExN
-Tk1MTUpMTVJMS0xOTUtMTEtMTExMTE1LSkxKTExNTEpKSEdHR0lISkxOTkxLTU5O
-T01OUE5MSUhNTUxQUEtLTExLTFBPUU1PUE5PT1BOT05PTUxNS0dGRUJBQUFDREBB
-P0NAQT8/QUFCQTo/RUA/QkBAQkE/QEE9PDw9Pz0+PUBDQ0FERUZEQ0RCREZHR0dF
-R09OT1FTV1lYW1tbWFxcW11gX15eYGNiYF9hYmNlZWVlZGhnZ2lvdHh5foeNkI6M
-iYWAeXBoYlxaWFZWVFJTUVJRT09PT0tLSktMS0tMTUtLS0xJTE1OTU1PTk1OTU5N
-T05MSk1OS01NTEtMTUxMTE1PTkxQVVRQT05OUlJSVFRZVlRUUlJTUVdWUE9QUFVU
-U1JRU1FSVYa7ydHY3uHk5ufo6upKTEtOSUVLSEFDRUJAREREQERERUNAPUJDQkA/
-PT07OTg8PUFAPD46ODo5Ozw8Ozw5PD88PDs8Ojo6NjU3ODo7OTc5NjczOzo4ODY3
-OjtAPTg5PDs2Njg4Ojk7OTo6QUI+QD1BPkBAP0E+VEhBQ0FDQ0NDREVEPD9BQEBC
-Pj1CPz48Ozo6OTw3Ojw+Ojs7OjY4OUI9Ozw6OTk5Ozs6Pj87OTo8PUBDQUJEQUdE
-RkNESEpLSUxNTElJR0tMS0hIR0xPT0xMSUdKT1JUUU5NTktKSkhJTE9PU05JSEVE
-R0dHSUpMSEdJSUdFQ0NFRUJDRUZGSUZISU5RUE9OTU1JSENCOzg0NDc4ODU1NDY3
-NjQ0MzMzMzY1NTQ1ODg0MjI1MjEzNTUyMTMzNTE0MjQ0NDY1Njc3MzY6Nzc3NDQ0
-MzQzNDc6ODQ3NzQzNDY1Njg3ODk6OTo8Nzc1Pz4/QEI9PD4+PDs7PTs9QD5CQj87
-PUBBPTw/Q0ZDPT0+PUJCQ0RDQ0VFR01MTE5QS0lKTE5OTkxMS0lLTlVRUlJSUlFO
-Tk5QUVFOUFRUUk9SVFRWVllVUlBQUFFNS09NTUxNTlFOTklJS1FTVFJOTUlKS0lG
-Q0VGRD88Nzg3Njw2NDIxNTk2OTg3ODc5OTc6Oj0/Qj49PDo7Ozs6ODg4Ojc4ODk4
-OTk4NzY4Nzc4Oz9HSEhAPDw9Ojs6PT0/RUdJSk5PUlVUWFhYXFxcYGBjZmZpZWZn
-ZGNpaGlrbm5wc3BuaWlwbG5wbG1sbnJwc3V2dnR2dnR4eXp7enyDg4OIhoCAhYWF
-h4WHh4aHioqKiouJi46Oj4+Oj5GRkpSWl5aTlJWTkJCPjpCTj5CSkZKOjIuLjYqI
-h4iJioWDgoSDg4WEgYKDhISEgoKBgoF+f4CBg4OBgYCCf3t7eHp5eHZ5enh4eXh2
-eHx6fICDhIWHhYGAg3+Bfnp7eHd1eHh5dHBzc3Rxb3JwamlpbW1pZWNeX2FcWlxY
-U1NSUE9NTUtKSUxLSktNS0pLSUhKS0lJTE1KS0xNT0xMTU5NS0xLTElITExMS0pJ
-TU5LSUhJTU1MS0xMS0pMSkhJS0tLR0ZJS0hKSkZMSkpHRkhIRkhHR0lKS0xNSkpN
-TEdLSUtKRUVJUE1MTk1NTExMTEtNTExKSkpMTE1OT1BQTUxMTkxPTU5QTU1LTE9P
-SkpNS0tLTU5LS0pJRkhKSEpLTEtNTktKTk5NTk1LSUlMTlBQTU9MTE5NTU9STkxP
-T05OS0pMTkxLTEpLSkhGQ0FBQEFBQENEQUFAREJAQT09PkBBQ0JAQEBAPEJDPjxC
-PDs9QEA+QEREREFCQ0JCRUVGQ0RFREhHS1BRVFZaWFtbXVtcXV1eX19gYGFhZGRj
-ZGNhYmVkY2dqam1rbnF6f4GDi5KSkY+OjIuFgHpzcWtraGdhXllVVFJPUVJQTUxL
-S0tKTU9MTU5MSkxJSktKTU9MTVBNT09NTkxNS0pMTE1PTUxOTkxMTUxOUE9QVFJS
-TkxNUFNTU1JTU1FPUlFRUlRVUE1OT09NT09OT1FXf7fJ0Njd4eTl5+jp6UdLSktO
-T0lMSkVERD5CQkNIRkNCRkdGRkFBQ0A8PkI6NTg8Ozo7Ojw7Pjc2Ojw8OTs6Oj08
-Ojg6NTQzODk5OTc2Njg4NTk8ODc5Ozg7PDs6PDs6Oj04Njc6PDpAPD8+QEA+Ozw9
-Pj49QUBDQUNCQERBP0JDQkJARD89QEBEQEE/QD45Ozg4Ojs7PDo6Ojk3Ojo4Ojw5
-O0A5QT07Pz46PDs+Pj09PkNDQ0JDREdDSEhHTE1LSkpMSk5PTk1MS0hNTE1OTEhG
-R0tOUE1NSUdIR0VGSkpJSkxLS0pIQUNFR0hNTklFRUVFREJBQ0dGRUNFSElKSEhK
-TlFPT0hNTE5IQEA8NzQ1Njg5NjU1NjY2NTQ0MzIzNDU1NjQ0NTEwMzU2NDU1Mzc2
-NTU3NjY2MjM3NTU2NTY7OTg8ODczMzIxNDo4NTU1NDY4ODc2Ojg6NzQ2Njc5Ojs8
-ODg5P0A9Pzw6Ojs6Ozw5O0A9PEBAPDo4NzxAQkJDQz86PUFERURDQ0VJQ0ZHSktN
-TlBNTUpNUE9PTEpKSk5OUlNTUlRSVFRSU1JTUlJOUVJRUlNWV1hVU1NTUE9QUk5N
-TFFOTExLT01MSUpLT1JVVFNSTEtKSkpJRURCQDs5Ojk1NjczNDc3Njo6NzY2OD86
-Pjw/PD4+Pj08Ozk5Nzc4ODk5Njk6Ojk7PTo6OTk3Ojo8QkpOSEU/PT4+PUFAR0ZJ
-TU5QU1ZXV1hbWlpcW1xgYWBgYWNhYmVqb2tsbWxsb3BwbmxqcXJtbm9wb3FubG5v
-cXh3dHN2dnh7eHl4foB/foGBgH+DhYSDh4qGiISHi42MkI+JjY2MjIuQkpGRlZaU
-mZqWlJSTkZOSkY+PjpKRkJCPj4yMj4qHh4qJiYeEhYWEgoKCf3+AgoKCgoR+e31+
-foCDhX58fH19e3p8d3d2dHZ1dnd0eHl+gIKAgYGBgoB/gIF/fXx/e3l4eHh2dnh1
-d3d0cW9tbWpramdoZWJjX15fXVpYVlVUUk9PUUxPTktMSUxMTUpKTUxKSEhKSUxK
-SUxNTUtMTE5OTUtKS0pLSU5RSkxKS0pMTExNTU1LTEtNTUxMSklLSEhJR0dHR0lL
-TEhISkhKS0lJSklHSUdKSUhJSEpJSUtKS0lJS0pJSkpMS0tLS0pJTEtLTE1MTUxN
-UE5OTU1TUExOTk5MTExKTU1NUUxOT0tLS0pLS01MSkpIS0xMSUpLSEhKS01PTUxL
-S0xNTUpMT0tMTkxNTU5KTEtMS01OTUxOTUxNTUtISUhKS0dHSERERUVFRkNCQEJA
-QEBBQT49QD09QEFAQENAPz08PUA+QENAPDw/QkVGRkNDREJCQkVER0VGRkZISktQ
-UFNWWVpYWFlaWl1dW15gYWBhYWJiYmFjYmJkZmdmZ2tucHJzeH+EhoqNkZCUkpCR
-j42Kh4aBf3t7eHNvaWVgXVlXVFBNTExOTEpLS0tLTEtLS0pLSExPT01MTUxMTlBP
-T0tIS0tMSUxPT05PUVRPTk5SUE9OUVFRUE9RWE9RT1FTUVJSVFVWT1FPUlJPU01N
-T1FVVFZ7ssnR2N3h4+Xo6OrqS0pHR0pMT0xKRERDREJGSENHRkNCQEVEQT87Oj5B
-QDk3Pj07Pjw7Oj47PDw5Ojs6OTo8OTk6ODY5NzY2NTo5OTs5OTo5ODs6Nzc6ODxJ
-ODc4Ojo1Nzc6OTk3Ozw7PD07PEA8Pj5BREVAQkNDQkNEQ0I9PkJFQz9AQz8/PT1D
-QD0+Pzk8Ozs6PDs8Ojo7Ozs6Ozk3Nj06OjtCQj49PTs9Pjw+PUFERUJBQUREQkxH
-SUtMTE1MS05RT09RUU1LSktKSkpOTEtISEpNSUdGRkZGRkNES0lFQkdGR0FAQUVK
-SktJQkJEREZCP0FAREpGR0hHSkpHSUxPU1FLR0dLS0lGRj0+Nzc1NjQ1NTU2Njc3
-NjU2NzQ3Nzo1MjEyMDE2MTI0MjEyMzQ0MzY4NTQ1NDU0MzE0NDgzMjIyMzUzMjUz
-NjY0ODc4NjU1Nzg5ODg6Ojo6OTc1Njg3ODk4Njg6Ozg7PDs9QDo4PT89PDw7PD46
-PD1BQkE/Pj89QERGSEpKSkpISklKTk1LS0xNTVNTT09LS0xKTlBRU1NSVVZVVVJS
-UlFRUVBSUE1OUVdZVlBQUk9LS01PTU1OT0xJR0lMTEpJSk1PUVJUUVBKTEtMS0hK
-SEdFQjxAPzs6Nzc3ODo9Ojg3OTg5ODo9Pjw6Ojw9PTk6NzY5ODY1NjY4PDw/PT08
-Pj1BPDg5PT5ES0xNRD48P0A/RUVGSk1RUlRXVlhZWVhbW1paXmBdXWNiYmNjYmls
-aWltaWdnam1sa2xwcGtwcHFsbWxxdHBvc3V0dXd5d3p8e358e3p+f356foKEhIWE
-iYyLh42Li42Ljo6Oj4+Qj5KRkZOUlJaYl5KTkpWUkpWSko6SkpCOjYuMjI+QkIuJ
-jIuLhYWChoSDgICAgoGDhYKCgn5+gIGBfXt+fnt3eXp5dHJxcXBwc3J1dnV0dXp7
-f357fH6Af35/fnt7enp5eXh4dnZ1dnh1cW5tbGtsbGlnZ2NiYWJjXlxbW1hXVVNU
-UlFPTkxLS01KS0lMS0tNTktISklITUxLS0tNTk1NS01KTU5MSkxKSk1NSkpMSkdH
-SU1NSUpNTEtJSkhLTUtNSEpKSUtKSklKSUVIR0lKSEZHSEhISEdJR0lJSUpJS0pL
-SEtRS0hLTkxLS0xJS0tKTElQUE1OTk1LTUxPUFFPTktNTktKTk9MTExLTk9MSUlM
-SUlLSktKSlFPTU1NTU1OTU1MT05SUFBQT09PUE5OTU9PUExLSEhISEtOTU1LSkxL
-SUxPTElJSUpJSUVGRkVFQ0RFRERCQUJBQD89Pj0/QD89Oj4+PT9APj8/PTs7PkBB
-QkFAQUNDQEA/PkNMUEVHRUhLS0tOUFJUU1daWVtbWVpeXV9cX2FiYmRjYl9laGZl
-ZmdoaW5ubm9zd3h9gYKGiIyPkJSUk5SWl5WSjY2Mi4iGhIJ+eXJuamNdWFNRTk1L
-TUxMSkxKSk1KSkpJTEtMTEpLT05MUFFNT01OTE1NTktOUFJPUFJTTk9OTk5PTExQ
-UFBQT1NTVFVTVVJUUlFTUFBQUE9PT0pPTk5RV3ypxtHX3OHj5Ofo6OlGRkZES05M
-TU9MSEVHSEVFQ0VEQ0NFQz49REFAPD1BOzo9PkE8Ojs8Ozs9Pj06PTY5Ojw+Ojo2
-ODk3NTk5Ojo5Ojw6ODo4NDs5OkA6NjtAODc3NTg3Njc6Ozk5Ojo6PD08Pj5CP0FD
-Pz89QEFDREFDQD88QEBAQz8+QT49QEFCPzo8PTs7OTg5Ozo6Ozo+Pzw5Ozk3Nzo6
-PT4/Pzo9PD4+PkI9QEhHQkFFQ0VGSUxLTEpJS0hJSExMUk1LS0pHRUlJRkhIS0ZH
-R0dHRkVBQ0RERERGRUZAQkVDQz4/Q0lNSkRBQENGRkRCQUBERkZEQ0dJRENGSEpM
-TktJRURIRUFCRT05NzY0MzY4NjY3Njg3MjU2NjU3Njg2NTc3NDMzNjc1ODczMTY5
-NTIyNTQ2NDQyMjk2NDQzNTczNjM6Nzk0MjU5PDg3NDc2ODg5ODs7ODY0NDU2NTg3
-Nzs3Ojo4OTo5O0JAQT04Nzo7Pz48Pj08P0I+PTw7PT1ARUdHSUdMT0xOSUtNTlBM
-SklJU01LS0lLSk1OTU5PU1NVVVJRU1BPTktMUVFSTUpNUlRTU1JPTEZITExKTU5N
-S0tGSEtJR0RJS0xOUU5MTUtLS0tMUE9NTElFQUNDQjs6Ojo7Ozk5ODc4OTo9PT48
-Pz47Pj0+Ozo7Ozs8OTc1ODw5Ojo8Pj09ST48Ozg8PUVLTktFPj5CQUBFSkpMT1FV
-V1dYWlhYWlhfXl5eXmFgX2FmZmZkZmhpaW1sZ2dma2tsbm5sbGhpcHFzb25tbW9u
-b3F2d3h2enx/fHx8e3x9g39/gIKBhIWGh4SHjIqOi42PjY2OkpSSkJGUlZKVk5OV
-lZCXlpeTkpSUk5COj5KNjo6Ni4yNjI2LioeFhoWDg4OCgHt+f319e3t/fX2AgH59
-eXp8fHx4d3VzcnVwbm9zcnF0dHV1dXp4fH58f4GAf358e3p4eXh2d3VxdXFycHNx
-a2tsa2doamlkYF9gYWJfXVpaWVNWVlJQTlFMSUlKSkVIS0pJSElMSkpISUpISUpM
-S0lLTE9MSkpISkpKTEtKTk1NS0hJSktKS0pHR0pPS0lHSU5LSktIR0dNS0pKS0hI
-SEZHSUlGS0hGSEdISkhKRkdJSkpMTUpLSkpMTEtLTk5MS01LS0tLTExKTk5NSklJ
-TU1OTlBPUE5NT1BOTU1MSkpMTElMSkdISUlLS0lMTk1JS0xMTk5MSktOTVBOTk1N
-T09PT01LTExLSUlPTUhISkxJTExNTU1MSkxNT0xLSEtIRERFRERFRENCREA+QT5A
-Qj4/Pz06OTs8OzxAPjs7PT08PD89Q0VFQ0VGRURBQkJBQUJFSEhLTEtMTVNUU1Va
-WVdaW1xcWFxcXVxdYWJjY2NiYmVmaWlqbGxtb29uc3d5e32Bg4mNj5CVlZiWmJma
-l5eUk5WVj4mKiYeFhH14cWpjXVZUUlBOTk9NS0tOTUlLSkpMTE1PT01LTE1MS01Q
-TU5OTk9OTExPUFFQUVJQT05PUE9NUFBRT1BQUFNRVVNWUlBOU1RSUE5OTU1LTkxL
-TU9VfrLF0Njc4OPl6Ojp6khGR0pISkZJSkpLSkxHR0lGQ0ZDQkVEQj9BR0Q/Ojs8
-Oz1AOzg5Oj08OT44ODs6Ozw7PDs5ODk3Nzg3ODk+Njk4Ojs6Ozo3NDQ6OzY4OTc4
-ODo7Nzs5Ojc4ODk4Oz88Pz48QEA9Pj5AQUE/PUE+PT9BQzs+PT8+PD07Ozo8PUBD
-PTw/PT87Ojc4OTg8Ozw/Pz5AOjs6PD08PD4+QEBCP0JDOjs9Q0dGRERFRUdGR0ZK
-TUxISUVGRUdJSUZJSEtLRkZERkVDQkFBQUI/QT0/QkJBQUVEQz0/QUA8PDxAREZH
-REI/QUVDQ0NDQkFCQkJBRkRCQ0ZJS0xNSkhGRUQ+Q0FBQDk0NTQ0NjU1NDQ0MjMz
-MjU0MjE1Njc1NTk4MjM2OTU2NjY0NDQ1Nzc0Nzc3ODYzMjg2NTY0Njo0NTpGRzo4
-ODk5OTc1NTg3Nzk5OTw6NTQ1NTQ2Nzg6Nzo6Ojk6ODk6Ozw6PUA6Ozs9Pz09PD4+
-PD48PTs9PTxBQkdJRklLS0xGSEhKS01GSURISkxNTUxLSktJTFBSUldWUE5QU1JP
-TU1QUVNSUE9TUlFNUE1OS0xNT0lMTU9LTlNPTU1KRUhKTE5NS05KR05OUE5OU1RS
-TElIRUVDQD4+QD87Ojw5ODo6Ozk8Ozw/PTs9Ozw9Pz4+PDk8Ojk7OTg5OTg7Ozk6
-PT47PD4/RUpQT0tFQ0RDRUZNUk9RU1dWVlZZWVtcXl1hXl5iYWBiYWFqZl9eXmBm
-amllZGVra2xubWtmZ2hvcm1sbG1qb3Nzc3N4dHd6d3p5eHh5eHt8f4GDgYOAg4OB
-hoiMjIuMj46Njo2PkZSRlZKRkpSTkpKUlJSXl5KRk5SRj42Nj46Nj46Ojo6Ri4mG
-hISEg4SAgoGCgHl4d3Z2d3h8e3p5e3t7d3Z1dXZzc3NycHBtb29xcG9ydXZ2dXd7
-f318fH9/fXt6end1dXVzdHNwb21tbGpoamtnZGVlZWRfYGFgXl9dWVdXV1RSVFFP
-T0xKSklKR0lISkxLSU1KT0tFSEhISUtLSktLS0tJSEhITUtLS0xNS0tLTExJSktG
-SUpJR0lJR09JTEtLS05LSElHSklJSEhGR0dGRkdKS0pLSEZISEdJSElKSEpMTExM
-TExKSUxOTExMSEpLTU5MTEpKTElISkpKS0tNTk5OS01PTElMTk1MTE1MTUxMSUxO
-S0pLTlBNTk9MSk1NT05JSExNTE1MTE9NTU9PTktKTExKTE1LS05JTUxOTUtOUElI
-RkhJSkhGQkFJS0dFRURCQUBCP0BDQ0RCQj49PDs9PDs9QD48Ojs7PT1BQD5BQ0BG
-Q0dHRUNBREVGRkVGRkhOS0tNWV5VV1hZW1xcYWNhX15fYGBfXWFjZGNnaWZlaWhr
-bm9ycHF1d3p8foKGi4+TlpiVlZeUk5aamJeVl5aRkI2NiYeHhYF9eXRuZV5ZVFBP
-TkxKTUxLTkxOTUxOS01OTU1OT09OT1FNS09QT1BRTk1PT1BQTk9QU1NRT09QUVBS
-UVFPTk9SVFRQTlFRU1JOUFFSUVBOTk9RUFSAs8TP2Nvg4+Xn6OnqSUxPSkpLSElJ
-S05KSENBREVCQEBCQ0hDQD1DREJBODc8PDg4Nzo6PTs5ODo6Ozk2Njs7OTg6PDk7
-Ojk3Njg7Ojc6PTo2Ojg1NTQ2Njc4NzY4OTo6Pzs4ODc7Ojs6Ojw9PUBAP0E9PD5A
-Pz49PUBCQkRAQT0+OT1CPj1APkFBQkA/Oj04OTs6OTk4Ojg6PDw8PDw/PDs6PD0+
-QT49P0FAP0FCREFDREhGR0dGR0hGRUZMTkZEQ0FDRklEQ0ZJR0NBQT9BQUFDQD9B
-Pz46QD9CQD1APT0+Pj07PDs8PT0/P0BAQEJAQkJCQEFCQERBQ0JEREJCREhOTUhI
-SEZEQUBAREdAOzYzMjMzMjMyMTUzNDMzNjQzMjIxMzY1Njc2NTY2NzY0NDUyNTQ0
-NTQ2Nzs4NjU3NTg5Ozc2NTU4NTc5NTY5Nzk5NzU1NDc5NzY3PTo6ODY4NzY0MzQ2
-Ojg2NDg2OTs5OT88PT8/Qj0+PD0+Qj47Ojo4PT48PT1AQUVHSUpKR0hIREhHRkZK
-RklLTE5PUExMSU1QUVRRVFRVUlFQUk9OTlFOUFJYWFdTUE1NS01MT09RUFFOUE9M
-TktOTlFJSEpJTE5OTUtCSU9RT1FSU1BOSkpHR0ZDQUBAPz0+PDo2ODk7PDw9PD4+
-QD09Ozs7PTo5Ojs7OzY4ODg5OTg4OTs8Oz9BP0NHTFRSTEZFQ0ZISUxQVFRUVVlX
-VVdaW1xdXl5hYF9fXF9gY2VjY15eYmNnbWZiYmltbGttbWxqb2tpaGxtbWxtbnJ0
-c3Btcndzdnp6enl5fX59f4J/f4CFhoaFi4eIi4uQj42Oj4+SkpOVk5CPkpSUlpWW
-lpiVlZWTlJKSkZOUjo6MjYmMj42PiYaIiIOAgYKBf3+BfHt5eXd3eHx7eXp7eXh3
-dHNzcXJzcHBwbW1scHFxcXB1dXh2dnt9fn59e3t9f3x7d3NzcW9tbW1sbGhkYmZo
-Z2diYGFgYmBfW11eW1paWVhXVVRUUk1OTE1LTExKSEpLSUZMTEtKS05KR0ZHSkpJ
-R0pKS05PTUlJTUpITEtMTU5MTU5JSUZHSUxKSEtJS0tKSklLTElHSEhGR0dJR0pI
-RUdFSEtIR0pISEhDR0dISUdKSktMTE9MSEdISUxMT0xNTExMTkpGS09NS0pLSktL
-Sk1MSE1MS0xLTk5QT09LS0lJTU5LSktKSkpJSUpNUE5OTUtNTU1MTlJOTlFNTExN
-UE9NT01IS05OTExLSUxNS0tNT01NTUxIR0lIR0RDRUVERUZDREJBQUNAQTw+QEFC
-PTo7PT5BPDs+Pj09PDs/Pj9DQj9CQ0NGRkNFS0VEREVFSEpKS1BNT05TU1RXWFlX
-W1pcXWFhYGNiX2FiY2VmZWNlaG9rb29yc3R1dXZ5e4CEiY2Nk5aZmJeWlpWVl5eW
-lJWVk5OTkZCPi4qGiIaCfnx1bWZgV1BOTVBOTk5NTU5OUE5OSktOTk1NTk5NUE9L
-UE5KTE1OTlFNUFFQT05PUlBPT05QUFJSU1FSVFNUUlNSUU9OT1BOUE1OUExOUVJV
-Wnu1w8/X3ODi5efo6elISEhMTEpJRENFSEdGTkpDQ0NHRkRBQjxBRD0/Pzo4ODg5
-Nzo6OT03Ojs+Pjs7OTk4OTg6OTc4OTg1NTU4NTcyNTc4Pzs4PkA6OTc1Njc1NDQ1
-NTc3PDk6Ojg7Pzg6PTs+P0E+PkA7Oz1AQkBAP0A6Pj8/PDw9Pj0/PT09P0BBQTw8
-Ojg4O0NAOjc5Nzc6Ojs+PkA6PD1AQD0/QURFQUA/P0NGRURDRUVGREZIRUZFREdH
-RENFP0JDQ0ZFQkFBPj5AP0BAPz89Ojg4QkBDPj47Ojo7OTo9PTs9Pjs+PT8+Ozw7
-OTk/PzpAP0FEQkFAQEFCQ0ZGSUxLSEZHRUZCQUBGRkRAOTMyMjE1MTQ5MzY0NDUy
-MjY1MjM0MzU1NjY2NjI1MzUzMzM1Njc3NjUzNjo3NjU2ODc6OTQ1NDQzNDIzNTI2
-PDc3NTM1NTQ3NTQ5QjY3ODg2Nzc2NzU5Ozk3ODw7Ojo7ODo/Q0A/QEA8PkA9QD49
-PTw8PEJBQT4/Q0RISkpIQ0NERUVDRUdKTExOTFBSUlBMTU9SU1NUU1VOTk9PUE9R
-Uk5QT1VYWlVRTk1LS0pNT1JRVFFRTEtJS01QUE9NSExPTk1JSk1ITE5PUlBOT05L
-SUZDQ0VBPzw9QEE+Ozg5PT4+PUA9PTk7OTk5Ojw4Pj49Ozk5OTg3Njg4OTs6Ozo+
-PkE/QEdMUlBMSEZHR0tOUVNUU1ZYW1pZWVtbXFxZWFpbXGBeXmFfYWJlYF5cXmRq
-ZmFhaGllY2dqa2ptbWdlamlsbnFxcnBrbmtwdnd2d3V0enp9foB+f39+gIKFhIWG
-iYqNkI+PkJGRk5GUkZOQkZGSk5eYm5iWmpqZmJaRkZGRkpGNkYqLio6MiomGhYKD
-gYB+fX98en18fX17eXZ2ent5eXt4dXJvc3NwcXZxcG1pa2xsam5yb3J1eHl7eHp7
-fHt4d3d3dXRyb25sbmtrbGZjZmVhYmJgYWBgX1teX1xaWVpbW1haWFdUVFBKTElJ
-SklLTUxISUpLTUhLSkpKSk5MTExLSEpLS0lLTEtJSkpLSUxIS0lJSk1KSklJSkpK
-S0hISUlJSElISEpKSUpHSUhHSklISElFRUZHSkdGRkZGR05IRkZISE1JRkhOTktL
-S0pLTExOTU1NTUtMUExIS0xMSktLTE5OS0xLS01KS05NTEpKS05LSkpLTU1MS0lL
-TlBNTExRUU1KSk5PUVBLS01MTk9OUk1OT1BNS01KS01KSUxOTk1OUFBQT0xJSUhH
-REJBQUZFQkNDQkI/PkQ/PTo+PT09QEA8PDw9Pj4+Oj48PT89Ojs+QEFAPUNEQ0NF
-RUlHREFCQ0VGSE1QVVRaV1hXV1dZW15cX15eYGNhYWRjYWJjZWVmZmlrbWxrcXJx
-c3V6e31/hYeLjZGWmJmWlZGOk5SUlJKTkZKRkZCPkI+Ni4qJiYaGhIF6cmleV1JP
-T01QUE5OS05PTk5LTE1OT05OT01MTU1MS01KS01NTk9OTVBQT05QU1FQU1JRU1NT
-VFRTUVJRU1VPTU9PTk5MUVNVT05RUE9UfbbD0Nfd3+Pk5+fq60lJSEpFQ0dKSkFE
-RUhERENDREVFRkJBOz5BQEJCQD48OjY4O0A/PDo8Ojg3NTk7OjY3Nzo7NzY1NjY1
-NjE1Nzc1Njc5Ojg6OTo5NTQ0NTQ1NDM2NTc4Ojk4OTg5PUJVPTw6Ozo8PUNCPjs+
-QUA9PT9AQEBAOzo8PT06OjtCPzw6Oz04Njg6OTo5OTY3Nzk4OTo7PD9AQD8/PkBC
-RUpEQj9CQEVERUVJR0VDR0VJRURHRUVKR0RAQUdAQEFAPDxAPT09Ojs6Nzg9Ojw8
-PT4+OjY2OTw5Ozk6ODc2NTg4OTg5Nzo8PTo6Nzg5PEFBQz49QDw8QkZGRUhKSENF
-Qzw9P0NCREA3NTUxMTE6NjY1NTY0MDEwNjY3NjQxMjU1NDM0NTUwMzIzMzM2NjUz
-NTM1NDU4Ojo3NTQ0NDY1MzMzNDY0MzE2NjE0NTU3NjY1NjQ2ODY8ODg3Nzk3NzU4
-Ojg4Ozg5OTo8Oz88QUE9PTs/QkRHQ0E/Pz1AQEFBPUJBRUNDRkZHQ0JCQkJDRkdJ
-SU5KS05OTk5OTk5PUVJSU1BNT1BNTk5RT01OTlRZV1VUT05NSk1OTk9RUU5ISUtM
-S01PTEpJTVJUT0pGSkxJTUxPTElHQ0dIR0ZERz8/QD9BQEI9ODg7PDs9Ozo8PTk6
-ODo4Nzo6Ozo4NzM2NDY2Njk6OTk8Ojo6QUBAR01QUE1MS0lNT1FQUlRWVldUWV5c
-WVlbXFpZWVlbW2FjXmFhZGZmZ2JhZGdjZmRka2VjY2doampuaW1wbGptb3Fub29r
-bWtydXVycnJ3eH1+fXx+fXx+hIKEg4KGjI+Nj46QkJCRk5KSkZKSlZeVlJWYlpOT
-lZiWkZCQj4yMio2Mio2NiYmJhYWFhIJ9eXx+fHx7eXd3eXh3dXR0cnNxdXZ2cnFv
-c29tbnJwa2lnampma25vcnd2eXV5eXl5eHR1d3Zwb2xpbG1ta2lqZ2JiYV5eXF1c
-XFpaWVhYWVpcWVhWVVdUU1NTVFBKTUtLSktMS0lKSk1NS0dMS0pLTE1MS0hIR0hI
-S0xMSEpLTUxKSklHSUpKSUhKTE5KSEtLTExMSEdGR0hLR0hKSEhHSEdISEhHSUtH
-SUhGSEdISElKSkdFSEhISktJSFFOS1FPTExMS0tNSklHSlBOTUtKSkpJS0pMSkpM
-S01JTU1MSktKTEpKSUhIR0pLTUhLS01NTk1OS0tOS0xQTk9LSUpJTE9NTU1JS01M
-S0tNTE1KSE1MT1FNT09PT05MSEtFSEZIQ0NEQ0RFQD9CREVCQkNCQT5APjg6OTo+
-PTs7PEA9Ozw+Pjs8PUFDRUdCRENERUVGRUZJS0ZGSkxOUFNUUlNUVlhYWlpbXl9d
-XWFhX2BeYGFjYWNjZWhoamxtcHBxdHR1d3t/gIaLjpGUlJWVk5GRjpCPkZGPko6O
-jo+Pj4+Pj5CPjY+KiIiJiIR8dXBlXFZQTU1SUlBOS05PT01NTUxOS0tMTU1MS0tM
-TU1MTE5RUk5NTU9PTU5OUVFTUU9QUVFUU1NSTlNTVFJOTVBTUVJRUVJUUlBQVVR9
-t8TO19vg4+Xo5+npTktEQ0RHRkdKSkNFREFFRUNESEhDRkRFP0FBP0A9OzpAOTpA
-Pzw9PTk2NTc3ODk6Nzo6ODk4PDs3OTY1NDY1NTY4NTU1Ojw8Ozg4ODU0Mzc5ODY2
-ODo3Ozc8PDk9U2dFPT4+PkJEREJEQj0+PTw9PD09QUI+PT08PTs+Oz0/PT07Ozk3
-Nzg4ODc4ODc3ODc4PT09Pz09Pj4+QEFDREQ/PUFAQ0dIRUZHS0lJSElGR0ZGRkhI
-RkdBPkFDPz87Ojw8Ozs8Ojk4ODk6Ozg3ODk4NzU1Nzk4OjY3NjU2NDg2NDc1Nzs5
-NTc9ODg7Ojw7O0M+QD09QkE/RElJRUE9PTtDQEFEQDc2MjUzMzM1Njg1NjQ2MjU2
-OTk4NjMyNDY3ODc2NDk2ODQxMjI1MzU2NDE0NTY2NDU1MjI2ODY0NDIyNTU2MDE1
-NTU0Mzk2NTM0NjM1OTs6Pzc2ODc2Njg1Nzo5Ozk4ODw9PEE8Pj0/Pj9FSUZBREI+
-QERBQkFAPz9BPT5EREdHR0dDREpLSkhJSEZHS0hHR0lMT1FPUVBNS0lJUVBQUFNV
-UEpKT1NWVVRST05LS1BQUk9OTEhLTlFRU1FRS0xOVVpaUkdGSkpLS0pJSENBRERF
-RURDQEFCP0E/PT07Ojk8QUA8Pjs7PDk6PD06ODw5ODk5OjY3NjU3Nzc4Ojo8Pj4/
-QUNJS1FRTU5NTVBSUE9SWVhYWVlcWVtcXVtcW1lbW1tgX2JhZ2ZjZW1pamlnZ2Rl
-ZGZnaWRfXWJlaGpqbWhoaWxqamxpam9wb3Nzb3BwbnN4eXt6fHx/f3+AgH+EhouL
-i4+Oj4ySko+RkpOTlJSVmJeUkpGSko+RkZGTj42LiomNjYyIiomHioiFhoKBf36B
-fXx7fHl1d3V0c3Z5dXFyeXNzc3Jxb21raGppa2xqZmdlamlmaW1vcXR1dnh4d3Rx
-cnFxcWtqaGZoaGhmZGFdXltcWVhXVlhXWFhYWVxYWFtZV1lXV1dTUlFRUE5LSklM
-SUpKS0pMTUlMS0pJTEpISkpJSUhKSUdJSkxMTEtPS0lISkhKTEhKSEhJSEtKSklI
-SkxKSkhJSEpMSEdIRkhHRUZGSUlJSUlGRUZHSElJS0hKSkpLSkhIS0xJTExLSkhK
-S0xGR0pNTUxNTE1PTUtHTVFOTExNSk1LSkxPT01NSklLSUpPS0pJS0pLSUpMTUtN
-T0xMTkxKSkhKS0pLSktNTE5MTUtLTUtNSkxNSkpISkxSUU9PSUhKSUlJS0lESERE
-QUFAPUBDQEBDQkRCRERBPD49QDw8PD1APTc4Oz4/Ozs+QD1BQ0A+QEY/QEJCRUVF
-R0lLTkxNTk5SVFdbVVVcWlxcWFlaXmJhYmJhX2RmYmJlZGdnaWhpbW9vcnFzdXh5
-f4SGio6RkpKSkY+QkI+OjpGPk5aTkpKQkZOPkZKSkIyMjIqJioqHh4V+fHZtYVlU
-UFBSU1BQTkxNT09NTExQTk9NTk1MTkxMTU1PUE1NTk5OS0xNTUxNUFJSTlFRT09R
-U1JVU1FSUVJOUE5QUFBPTlJVUlJUVoC5xc/W3ODi5efo6upHTUlKRUVHSEZGSEVC
-QEFCREVGTElHQUlKRj8/PEI7QDs/Pj0+PDw8Ozk7Ojo7PDw7ODY2NDk6Ozo2NTc4
-NDUxMzk2Njk4Nzg5Ojs+Ozk1Nzc3Ojo5Ojs7Pjs8OjpEZkk8P0A/Pz1BQkJCRT8+
-PT88PUFAP0VEPT07PT09PTw8Ojs4ODk5Nzg2PTg5NzY5Ozs7PTw7PTw8PkBBQUBE
-Qz8+QERCRUhKSEdJS0pKRkNBQ0ZCQ0RCREJFQj8/QDg7Ozk7NzY4Ojg4NDY4Nzc1
-ODY8NTg3NjU4NzQ2ODY2NTU1Njs5OTk2NzY4ODY4OEE7OTg7OT0+PzxAQURDRUE/
-QD1ARUI+ODMxMzUzNTQ3NjQ0MzMzMzQ3NjU2NTY2NTY1NDQ0MzM0NTY1MjI2NDM0
-MjMzNTY2NTQ0NTM1NDYzMjU2NzY2NTY1NjY2NDU3OTc2NTU1NDU3Njs8Ozg4Nzc2
-Njk7OTk6OTk7PTw5QDo+QUZERkE8Ozs8QURDQEJDQUFDQkJCRkhISkpKS0xLR0lJ
-RkZERUhFR0pLS01NTEpKTU1PUlJSUE5OS0xLT1JQUlRTUE5QTlBSUk9NSk5OUlNP
-S05NTExSUlNPTlJNS0tHSkZIRkRHRUVCQT9BRERDPz89Ozs6ODg8Oz09Pj8+PDo8
-Ojw6Pzw6Ozw8Ojk3Nzk3Njg8OT08QUBCRElMUFVWVFJNTFNVU1VVWFhbV1dZVldb
-W1lZXVpdXl9dYGNjYmNiaGRhZWZoY2BiYmJkZWhkZWZobGxpZmZoZ2xpbW5sbW5w
-b3Nwam1scnZ3fX1+fnp9f4GCgYWHiYeJj5OSlY+NlpGQkY+OkZWUlpWTkJKSkJCQ
-j4+QjYqKiIaKjYuJiYmIioeDf317fHp5d3Z1dHh2cm9zcnBubW9rZm1vbWttamdm
-Z2ZnaGhmZmZnaGZobHBtcHR0dHBwcXRwcG5ybWlqamZmY2JfXl1dW1hXVVZXVVVU
-VVZYW1hXWFhXWVdYU1BPTlBRTUtLSUtISEpLS0dJTElKS0pKSkhJSEdKSUhISUZL
-Sk5OSktLTExNSElHSUdISEhKSUlIRkpLSklISUdJSklLRUhISEdIR0pKR0pGRkdI
-SUhFREVHRkZISElLTUlIS0pLS01LTUtOTUpLTEpJTk5MTEpNT1BQT05MS0pMTUtO
-T05MSktLS0lITE1MS0xKS0pKTk5MTU1OTExMSUxMS0tMS05PTUtLTEpNTU5LSktL
-TEtKS01MS0xLT0tJSUlHRkZHSEdHRUVGQ0NDQ0I/PkE/PUJCQkFAPT0+P0A+PUBB
-Pjo6Pz49Pzs/PT1AQEJAQURBQURDREdGSElMTVBUVVdXWFZaXFlbXVxfYV1gYmNi
-Y2BgY2NjZGhnZmhobG1tbnJzdHV4en6ChYeNkZWTk5SOjo6Pj5WRj46RkpKVlZOS
-k5KSjo6OioeHjIqIi4qHiIeCfXVrZVxUU05OTk5OT09PTlJRUE5QTk1MTk5OTUxM
-TU9RUE5OUFJRUVFOT01OUVVSUlBTU1FTU1JSU1JUWFRPTk5NSk5PU1NQUFNQfbjG
-0Njb3+Pl5ujo6U9OTUlGSEdIQ0BFQEJBREdKSEpKR0VLRUJDRD06OUA8Ozk7PDw7
-PTo5Njs8PDs7Ozo4OTo4Nzk6Ojg2OTgzMjQ0MzQ0Njo5Nz06ODk4NzQ3PT06Njg5
-Pjs6PT1BPT9BPT48PDs/Qj9AQ0JBQD08PT5CQEE+PkBAPz09PTw8PD08OTk9OTo7
-PDg4Ozs6Ozw9Pzs8Pj89Pj1AP0FBQkBCREJAREdHR0lJSktLSEZCP0FEQ0NAQUA9
-PkA/QD84Ozk5NTg5PDo4ODU1NTY2NzY2ODg1NTU2ODY3Njg0NjU0NDY5ODk3ODc0
-NTY2NDY4OTo9PDk7Ozo7PkBBPkFEQ0BBQEA+QEJAOjQ0MzU3OzY1NDUzNTQzNzo2
-NjQzNzY2MzIxMzU2Njg0NTM2NTc0NDc1NDc0NTY2MzQ1Nzk1Mzc4NzQ3NTc5NjU2
-NTU0NDo0NTY0NTQzMjM1NjY3OTc4NTg7PD44Njk7Pjs8PDo5Pj5DQkI/Ozo7Pz5C
-Q0NAQ0RERURHRkdISUpLSUhKSktHR0RFQkFFR0hMTU1JR0tKTEpNUFBPUFBMTk9L
-TE1TUlNRUFFOTk9NTVBTUE5JS05QS09LTU1NS0pLTFBKT05RSkxKS0tJSUhGRkI/
-P0NHSEZDPj4+Pjw7Ojk9ODw8Q0RBOj49PDc6PDo5PT09Ozk6Nzg7Pjo9P0JERUJI
-SU5WW1tZWFBPVFVYW15cXF1fXFdbXFtcWlhbYF5dWV1kYmNgYWFiYmFkY2ZkYl9d
-YGBkaWllaGlqamhpZmVnaWxscG9ubG5ycHFxdXZydXZ4eXt8e3l8goGCg4eJhoWJ
-jZGSkpCRjpCQkI6PkJKSlJOOkJGPj4+OjI2NjIyJh4uLiYiHhIKDgX5+end5d3Ry
-c3N1dXNybGptb25rbnFpam9tbGtpaGdqZ2NlZmZkYGJnZ2Rnbm5scnBwc3JvcXFt
-a2praWhmY2FgXF1dWllVVlNTVVRQU1RZWFhWV1dWVlhYVlNTUFBPUk9NTE1MSEtM
-S0tHRkpISEpKSEhLSEhIR0lIR0pJSkpLTEpKS05NSkdJS0lGRkdGSEhHRUlKR0lJ
-SUlKTElJSUpJSEdISUpKSkpKSUlJRkZISktJR0dISElJSEhKSklKSUZGSUxNSkhM
-UU9PS0pLTU5NTU9OT09NTk5PTExMTE1PS0tNS0xLSEhKSklKTE1JTE1NTUxMSk1O
-S01LTU1PUExOTUxOTkxLS0ZMSUlKS0tISU5OTEpOTEpJR0hHSUhHREhFRU1BPT9C
-RkhEQUBDQT0/QEE+Pjw7PEBCQ0FBPDxAQjw8QUE+QkBAPT1DRUZFSEZBREdHSElK
-TU5QU1dYV1RXVVZYXV1dXl5hYV5fYGNkZWRiZGJjaWxvamxtbW9xc3R5en1+gIOI
-jZCTlZGQkY+Ojo+RkJCPkZWSk5WTlZWXlZWPjYuKhoaFh4yKiYeKioeIgHtuZl1T
-Tk9NT0tOTUxLTU5MTE5QUE1MTUxOS01OUFJQUU1OT1BRT1JQTk1RUFJSU1VUVFRT
-UVBSUVJSU1FMT09QT05OTUxPVFd8t8fP1tvf4uXm6OnqSUpIR0hPTEtHS0hFSEZF
-RUlGQkNDQ0VHQ0A9QkQ+Oz47Ojk9PDs5Oj09Oz1BPjs8Ojs6ODk5NjQ1NTY4OTg1
-NzU0NTY4Ojc2Njk3NTU2OTo4ODo6Ojs9P0FAPTs/QT4+PT0+P0FCPj4/QkBCQzw8
-PT08QEFBPj07Ojo/PDo9PD89OTo6Nzg5ODg6PTw9OTs5Ozs9QDs+PEFDQUBCQUFC
-QkNHRkpKSEpJTEtHRUA9PkJBQD07OTs7PD07ODc2NzY2Njg7OTU0PUY1Nzc5OjY3
-ODY1NzU3OTo7OTg3ODc1OTY4NTg3MzQyMzU2NTY2OTc3PDo5Ojo5OTxBQUBDQT07
-PT1CRUI8Njc2NTc5NjU3NDc8ODYyNTQzNDMyLy8yNDM0NDU0ODYxMjIyMzU1NjY1
-NzU1NzU1Njc1NTQ1NjY4ODQ0NjczMzM0NDQ0ODU0NDk1NTY3OTc0NDc2NzY1NjU2
-OTo4PDs7Oj47OTtBQT0+P0JAPTo9QkFFSEJFR0lHRUlJSktNT05JSElHS0lIRUFD
-RUhFR0tLSkhDRkZKSU5NUE5QT1BRT09NTE9ST1BOSktQTkxKSEtNTUtKSktMTEpL
-Tk5LTEhGSE9QTk1LTUxKS0tMTEhFQEBDQkVISkhDPz06PDk8Ozw7PEA+Pz89Pz06
-Oz07Pj09Pzw7PT0+Pz1AQkNBQ0NGSktOUVZaXFVXWVdYW1xYWVxbXFxbXVtfX2BZ
-VVlcXl5gXWBlYWFgYmNgY2FjY2NkYWViYGRlZ2ZkZmlqa2ZgZGVoaWhsbWlqa2tr
-bXF1d3Z3dHJ1eXh5en1+foWCg4aHh4mLi4uOk5GQkI+Pjo6Mjo2PkpCNjpKQjIyJ
-i4mKiYiIh4eHg4KCfX16enh3eHd1c3Nwb3BvbGxpZ2VpaWtta21uampnZ2doZmNh
-YWJhY2VhY2VmZWRma21raWlrbWxrbW1qaWZnZmJhYGBeXVpbWVZUUVJRUlNWV1dY
-V1lZV1VZWVhUVFNVT0tLTU5JSUhISEhISUlPSUdFSEhIR0lKS0lIRkZIR0pJSUpK
-TUlIRkhKSUlISEVFSEhITEtMSEtLSkpJSUhGSEhJSEVFRUpMTkxJR0ZKR0hISEhH
-SEdHSUlKR0ZHRkdKSElIR0hFSUxJSUpLTU1LSkpKSktOTUxPT05LS01NTExMSktO
-TEtOTE1NS0hJSUZKS0tLT1FOS0xOTExOUE1PUlJOUEtMT0tKSkpMTEtOS0dJTkpL
-TE5OT01MTUpJSElISEdFREFBQERGRUJCRUBAQUE/PjtAQUFCQT49PT0+Pjw+PT9A
-PTk7Ojw6PUBAQkNEQ0NCR0dFREZHTE9PUlNVU1dYWlpXV1dcXF1fXlxfX19eYF9i
-ZGVmZWhra2psb3NycHJ2eHl7f3+EiY6Qk5SWlJCRkI+OkJWUl5aWlpeWlpiXlZiX
-lJSQjYyIhoeGioqJiomJiIiJg3xyZlxRUE5QTk1OT01OTVBPTk9OTU5RTU9QTUxL
-TU9OTU9RUFFQUFJTUlBQUVBPT1BSUVNSU1FTUFFQU1JPTlFPTk1MTk9RVIS5x87T
-29/i5ebn6epOTklKTEpLT09HRkZITUZGS0lCS0tDQkJBPkFCQ0FCOz1BQTo7Oz48
-OkJAQT48OTg5OTk2MzQ1OTg1NDM0MzYzMjQ2ODc3OTc3Njc5ODg3OTg4Njs8PT1A
-PTw8Pz09PkA9PkFDQkE+PD9APz1APT89PTw+Qz4+Pj1APD07Ozo5Ojo6Oz09Ojs5
-Ozk6Ojo9PDw8PD09PTw9QEJEREVFR0NERkRHRUdFSElHR0RBQUA9PD49QDw6Ojo8
-Ozk7OzgzMzM7Ojc2NjY6PTk7OjxDPTo7QEM8P0NFQ0A9PDw9Ojo5ODg2NDQ0Njc0
-MzU0NTM0Njc5ODY3NzU6PTk5Qjs/PDk/QUFCQTw3NDQzNTU0NDY4ODo7NzMwMTQ2
-NzU0MjM1NjY0MzY0NDYxMTEyMzU3NDc0NzY3NTc0NTQ3NzQ2NjQ0NTM2NjM2NTg1
-NzYzNzo6ODQ1NDY1NDU1NTc3Nzk1NjU2NzY6OTg6OTk7Pz5DQUBEQENBPTs9QktL
-RUZFRENCSElMTUxMTUdIRkVJR0hDPkJFRkVHS0tMR0dGRkpNT1NVU1JOUFJXU09S
-Vk9NTUtLSkdNSUtNTEpKTU1NUFBMSUtNTk5OTUxHS09NTUlNSUVGSUtNTEtEQT5F
-R0RHR0REQkNAQD05PkA/PTw8PkA6PkJAQz88Pj49PkI/QD9CRUNDRk5MT01MT1dd
-WlhYWFpdXF9bWVZVWFtaXl1cWllZW1pXWVpaWV5gW1tfYWFjZWRiYmFmY2FiZGVl
-YWBjZ2ZkZWVkYmNeXGRoaWdoaWhjZmhpb29xcnRzdXV1d3h1e3t9gYSEhoWGioqL
-iIuNkZGRj5CQioqMi4mLiouNjYyIiIiIhIWDhIGBgYV/f3x7e3p0dHVzcnBwcmxr
-a2pnZ2dkY2VlZ2hpZ2ZoZmJiYV9dW19dYF5hYl5fYGNkY2JmZmZmZ2lkZmZoaGln
-Z2RmZGJjX15dW1pZV1VWV1NTVFZYVllZWVhXWllXWFdSUE5OTUpKSklGR0ZIRkZG
-Rk5MSEZGR0dJSUhMSkhJRkdNSEdJSklHSUlJRkhISUlIRklKS0hHR0REREFFREZH
-SEZGR0hHRkZHR0VGSEhJS0tLSklJSUhJS0hGSEdGRkdHS0pKS0xJSUpJS0pMS0tL
-S0pKRkdMUFBLUE5OUUtMSUxMS0pKS09NTklKSUhFQ0ZHSklITUxMT05OS01OTE1O
-UlFQT01OT05NTUtLS0tMS0dKTEtJSktOTEtOUU9NTkdHSUZEQkI/QEFBQ0VHRENE
-P0FDPTw8Pj09QUA+PD07Ozw8PUc8Pz08QT5APj5CPEFBQ0RGRERGREVJSUpMT09Q
-VFtZWVpZWFdZW1xbXF5gXV5gYF9fYWRmZmhsamptbm9vcXJzdXZ3eH2Bg4aKj5KV
-mJaXlJSWlZaTlpiZmp2bm5qWmZeUlJeVkZGUjoyMiYeHhYmOjYqLj4yLiIByZFhT
-Tk5SUVBPTlBMTktLTE5NTU1MTE9PTkxNTE5NTU5NTk5OT1FQT1FQUE9OUVFTUVRQ
-UVJRT1BUVFBQTlBOS01LTE9YfrfFz9fc4OPk5ufo6UpHSElIRERKSkxNS0lIRUZG
-R0dERENGREZDOzxDQj47PDo9QD8+Pjw9PDw/Pzo7OTs4PDw6NTY5ODs4OTI1NDI0
-NDY4OTg2NzQ1ODY4Ozg5OTg8Ojk7OTw+PD09QT5APD8/QD09PkE+PT9EQ0BAPj46
-PD1CPT08PD07Ozo5Ozo5PDo7Oz08Oj1BQDs8QD09Oj07Ozo8PD1AQ0VFRUZERUdJ
-SElJSUhGRUdFRD88Ozw7PTw7Ozk4OjQ2NjU1Nzc4ODk2OTk7PD1CRURERkVFRUhL
-SURFR0pJSUdFREVCREE8Ojk4Nzc3ODg0NDM1NjQ3NTU1NTQ1NTM1Nzg3OTs7Pj1A
-PkFDQD84NjMzNDU2NjMzNDI1NDEwNTg2MjU2NTY3OTU1NjQzNTYxMTU1NDQ1NTQ1
-NDY1NDY3NDY2NDIyNTM0NzM2OTk2NTg4ODU3PDg0MzY2NDI0MTU3NDU2Njc2NTY1
-NjQ5Oj1BPj0/QkREQUJCQT49Pz9DR0VERUVDREZFSElLTU5JRkhFRUZHSEdDQ0VH
-R0NJTE5MS0tKSkxRUU5QUlNTVFNWWVVSUlNNSkpHR0tKSk1NT1BPUE5LTE1ITExQ
-UFJQUFJMSklNS05IRkRGR0xJREFAREVFRERHQ0RFQ0BAPDs9P0NCPz9BPUBBQkBD
-QUBCQ0JBRkdIR0lLSUhKTlBSVFZYV1teW1xaXVteXF5gXVpbWVxeYFteXV1YWlhb
-W1laXmNiWVxiYmJmZGViYV5kYV9kYWJhYmZnZ2VhYV9hY2FiZGZiZmZnZ2Nmamxv
-bHBwcXFxcHBycnN1e4CChIOEg4SJiIiJiYmJioyNjYuOi4mKiIeFhomHhYODgn+B
-gYB8fH17f3x4dnR1c29xbm1tbGhoZmRna2RfYWRhY2NkZ2dkZWFfXWFhXFtaV1hZ
-WFtaXFhbYGJkYF5gYmNjYWNmY2VmY2NlZWVjY2JhY19cWllbVVRUV1ZVVVlaXFxf
-W11aWldVVVRPUExISkpHR0dIR0lJR0ZGRUlMR0dJTElHRklJR0ZISUZHSEZJTUtI
-RUlMSktKTUtKR0ZDR0VFSUhISUhHRkhJSklLSklHR0hFR0ZFS0pIR0hNSUlMS0lK
-SEdISElGR0tJSUZGSElMTEpMSUdMSkdJSEdJSUpMTU1NTU9PT0tMTUtLSkpKSElJ
-SUhHSEdKR0lKSEpOS05OT05PTEtKSk5RTUxNTk5NTEtNTExPTEtPTUtJS0tMSUpM
-T09MS0pJS0dDQ0NBQEE+PUFCREJCQ0FCQ0FBPj9BPT88OD47Oz9CPDw9Oj09PTw+
-Pj88Pjw9QUJBQkRHSUdLTk5RVFJRVFZZWllaWFpbWVpcXF1gYGFhYF5iY2VnZ2dm
-aG1qa25ycHJxcXFxc3V3fYGEiY6SlJSXmpydmpiZmZiYmpqam56dm5eal5aVlZSS
-j5COjY+LiomKhoqMkI6Pjo+MhX5zZ1tSTUxMS05PTk1RUk9OTk5OT01OUE9QT05O
-TFJPUlJSUVBPT1BQT1BTUlJUUVJPUFJSUVFRUFJRUFFRUVVQTVBSUFZ+tcTP1tzf
-4uXn5+npSEdIS0pNSkxLTEpLSUdHSEdEREJAPkNDQj47PkFBPT89Oz48QT45ODw5
-OUpHOjs8Ojo4Ozw6NzU3Nzs4NDQ1MzU2Ojc4Nzo1NDI0NjY4OTg4OkA7Ojs7PEE+
-Pj0+Pz9BQEE8PUBBP0BBPj5CP0A8Oz05OTo9Pj09PDo8Pjo5OTY3PD0/QD06PTs7
-Pj4/Pj46Ozw8Oj1BQ0NJSUhHRkdHREdFRUdHREA/RENAPDw9Oz0+ODo4OTk2NDc3
-Ojo8Ozs4Oj08QEhCQkJFSEZHSUpLS0tKSUtNS0pNTk5MRUFERENBPT07Ozw4NjYz
-MjQ1MTM2MjU3Njc0NTU3NTY1OTs8QEBBR0REPTo1Nzc3ODY2NTMyMjM0ODc0NTY0
-NDI0MzcyMzIxNDY0NDIyMDI0MjM0ODc3Njc1NzU0MzQ1NDQzMTQ0NzQ0NDQ0ODU2
-NjQ2NjUzMzIzNjU5NTUyNTk3ODg2NzU3Ojo8P0JAQkdERUZAQkFEQEBBQkZJRkVB
-QUhEREpKSEpIR0hISEhHSkpFRUZJRkdEQ0ZJTk1PTEtJSUtLSk9PU1RWVFNSU1RU
-U1JPS0lLSElLT05RTk9PTk5NTEpKS1JUVVFPT09NTElHSERFQkNGSEhIQEBBQ0hH
-RkZGRUVDQz4/PUA+P0NAPDo9P0FFRERGRkdDR0lLUlBPT1VXWFRVWFlYWlxcW1ha
-XFpZXFteX2BeX19gZGJfXF9gXVpXWVtaW1xdXFxcXVxdWl1fX15dXl1hYl5jZWBg
-Y2VpZmZiYWNkZGNgX2JiY2ZnZWRqa2xqa2xvbWtvcHFxdHl4eHx/gIKDg4OBgoaH
-hoeIi4qHiomJhYOEg4WEgXx+fXt7fXx/e3t7e3p3dHZ0c3NubWxnaWVkZWVhXF5f
-XV5fX19dXF5hYGFhXVlZWVxcWFdXVldUVlJUVlhZWl1dXFxdXl1qYmRlYWBiZGdn
-ZmdmZ2ZkYl5dXV1ZWV1aXFpZWVtbXmBgXl1YVVRSUk9NTEpLSEZFR0lISEhJSUxG
-SUpLSUdJR0dHSEhJS0lKR0dHR0hMSUtJSElJS0lIR0ZIR0RISEhISEdHRkVHRUdG
-RkdHTEpKSUdLRUZJS0pIS0pJSkpISUhISUhGSElISkxISUlITU1KS0pIRkhKSEZH
-R0tJSUtLTk5OTkpNTk5KS0tLSEpHSEtHR0VGR0lKTEtLS01OTExNTUtOTk5NSkxN
-S0xNTE5NTE1LSUhLSktMUU9JSUpNTU5LTElKSEpIRkRDPTs/PDxBQD8+QEFBQEFC
-PUA+Ozo9PT4/Pz05P0NCOzo9PTs5Oj05Pz0/QkBAP0FBQENJSk1NT09SU1NUVlda
-WlpYWFlaXVpdX2FgYGJkYmNmZ2hnaGloa2xsbm9zcXJxcnN3eHyChYeJjJSVlJic
-npydnJubnJycm5uZnJycnJmXlpaUlJSSkZGPjIuIiYiIh4yRkZGQkIyKg3xzZVlS
-T01NS0tKSktQT05OTk1MT05OTE5PUE5NUE9PTU5PT09RUVJSVFRWU1VTU1JRUlNW
-VlBPUE9OUE9OT05NT0xPWH61xc7W2+Di5Ofo6elITk1JS1BPSUhISURERUdISEdC
-Q0REQURBQ0dCQ0I+QEQ7Pj47OTg5Nzk6PD48PTo6QDw8OjU5NTc6ODo3OTY4NTQ1
-NzY2NzY5OTczODc4Nzo8Ozw9Pz89PD4+QUFAPDg7Pj9BQEJBPD09Pj08PT08Pzw4
-OD0+Pjo9PT08Ojg1NDU5O0NBQD48Pj08PTw9Pz4+PT0+PkFDRktJR0NGR0ZCQkFD
-RENAQz8/QEFAPTo6PDw7NjQ1ODs6NTg4OUFAQ0RDREZHTEZGR0VGSEdKS0hMSkVG
-R0ZGR0dMUEdCRERDR0JAQT1DREVBOzg4Nzc4NzY0MzY2NTUyNDU1Mzc4Ojw9QEFC
-QD9CPDc4NzU1NTc0NDM1MDMzNDU1NDUzNTQ1MjU2NjUzMjEyMTEyMTI1NTc4NTU2
-NTI0NDI1NDQ0MjMyMjExNDE1NjY2OTo4ODQ5NTYzMjQ1Nzo4NTc1NjU1Nzw4ODo4
-OTw+PT89QUNDQ0RERUZEPj5DRkhISktGSURHSktLSkVFQ0dKSEpJRkdERkdHSUdL
-SktKS05QS0dGSEtLSktPVFZVU1FRUFFSVE5KSk5MSUpLTU5OUE9PTk9LSUlJTVNV
-VVVST01LS0xOTEhIRUdIRkpEREFDR0hFR0VDPz8+QEA/QEBAQT0+P0FBQUA+QUNH
-SUpKTlJUWFlZWVdYWlpeYWJfXV1dXF5ZXFxeWV5iYF1cWFxaW1tdYF1eXF1jXVxe
-XFhbW19aYFtcW1xZWV1bXWJiXl1iYmRmZGJiYV9hY2VnZFtcXl1dYWRkZWZnampp
-ZmltaWlqcHFydHZ3fH6AgYCAgICEh4iKh4iHhYWDhIGAgIOChYR9d3h5e3l6dnh1
-dnV2c3FzcW9ra2pqZmNkY2BdXFtdWFZVWV5cWllcW1pYWlZXWFdUVVdVVVZTUVRR
-UlBRUlNTVVVWV1lZWmFfYWRiZWlmaWhraGdnaWhpZmJhZGRgXV9cXFtcXlxcXWJg
-YFdTUlFQTk5OTklFRklHSUhHR0pJR0dGSEtKSUhHSEZGSEdJSElLSEdHRUdKTE1M
-SkhIS0hKSUlJSkZISUlKSUlISERDRUJEREVISklJRkVJSUpMTEpKRUpJSEpISkxM
-TEtGR0tKSUxLSUpLSkhJTEtISEhJSEhJSklJSkpNTUtJSkxLSUpMTExIRUZHREVF
-RkZHSEZHSkpNTk5OTE9NTVBQTktMS05QTUxNTU1NTEtMT1BOS0pMSklKSkpMSkhI
-SEdJQ0RER0NBQD49QEBAQj09QUJCPkA9Pj4+Pjo9Pj48O0E/PT09P0JBOz47PEA8
-PkI+QURCQ0RFSEZOTk1QU1dZWFpbV1lZWlpcWllbXF9gZGJiZ2ZmZGZoaWlqa2xt
-cXBydXR0dXV3eHp9gYWKi4yPlJebmpubm52bmZmbnZydnJ6eoJ+cnZeWlJOVk5KV
-kY6PkIqFhYaHiY+TmZqTj42Lg3htYVZSTkxLTE9OUE9NTk9PTkxOT05OTk9PTU5O
-T0xOTE5QT05QT1JUU1NRUlNTVVZSUE9RUVBPUVJQT0xNTU1PTU9XhrfFz9fc4ePl
-5ujo6lBQVlFNTU1MSEZGR0NCRUZFQ0ZFRkNEQkE9PEE8PT09QD08PUE7OTk5OTo6
-PDs5Ojo5Ozk6Ozo5Nzs5Ojs1OTg2NTY0NTc4Ojk3OTg1ODg6Ojs8PD05PD49Ozo+
-Pj4/Ojs+Pj4/Pz48Pjw8PD88Ozo7Ozw8PDk6O0A9Ozw9OjY4Njg9PTo9Pj07QD07
-PkA/QEBAQEBCQ0VHSUlIRUJCQ0JBQkZFQkFDQ0E/PD06NzY8Nzg5PTs6Ozw8QENE
-R0lKSklJSEhKSUdHQkBBQkVISk1GQkA+Pz0/QENHQkA/QT8/PUE+QkNHSUlDP0E4
-NTY2NDM0NDQzNDUyMTU3NDU4Nzc6P0E/QUE+Ozk2ODIxNDMyMzI0MzU1MjQ1NzQ1
-NjI1NzY4ODg3NTYxMjIyNDc0NTY3MzExNTk4NTY3MzU0MjU2MDExNDU0NTY0NjUz
-NTQzNTUyNDM2NTU4NzU4NDc4NTg5OTk5NTc7Ojw/Pj4+QkNBQkVIQ0RJSkhHSUpF
-RkhFR0pIR0dFRUdKTUpKRERGSE5OTE1KSEVJUE1ORkhIS05NT09UU1FVU1JPUVNR
-TU5MTUtNS0tMUFFOTUxPTkhJRUdLTlNWVlRPSUZHSk5JSElHQ0dEREJCQUNFSERD
-QkRAP0BCQD5AQkNDQUE/QUBCQkFDSEtMTlBRVlldYF1fX19iZGZpZWdgX19cXV1f
-YV9fYGJkZ19eX15cXF5eYV9eXl1cXllYWFhZWV5aWVpcXFpdXVtcX2VeXGBjZWVl
-YmVkYV5lYWJhX15eXV9hX2BhX2JkZGZnaWdnZmxtbW9ydXV1dnt8fHt6f4ODhYSF
-hIWFhYWBf39/f39/e3x+dnhzc3Z1c3BwdW1ta21tbGllZWFiY2FfWldXV1VTVVVW
-WFdVV1ZVUlJTVldUUFBQT1JRUFBOT01QTk1PT09OUVBQU1ZVVFlcYmZmZ2lsbm5w
-cnJvbGxraGZjY2ZjZWJhY2FgY2FfYF9eWFNRUE1NS0tMSUpJSkZJSklHSEdHSUhH
-R0hJSkxLSUdISUZIR0dIS0ZGRkdISEtKS0hHRUdJSUhJSUlJRERHR0hHR0RHSERH
-RUdLSkdGRkVGRkhLSkhJRUVHSUlNS0pJSktKSUlJSEpMTEtJSUtNS0dGR0hLTEtL
-SUpKSkxNTk1MS0tNTUhHSklJSklFSUtGR0dJRkpMTktNS0tNS0xOS0xMS0tOTk9R
-TkxKSktMTVNOTUxMSUdMS0tLSkpKS0dIR0RDQ0dGPz4/PT0/QEE+Pj5APkFBPzw8
-PUA6Ojw6OjpBPTw+Pj4+PT46OTs6OUFAQUVEQUVERkhKSUtUUlNYV1daWlhWWFpa
-V1pcXVteX11fYmNiY2ZmZmdpbm1xcXF1dHNzdXd3dnh7foKEhoyOjpKWmJufnp+e
-n52cmJubnJufop+elpualpaVlZiUlZSTkI2NioeBgYeIiZCWlZOUko6JgHVrXlRP
-UVFPTU1LTU5NTE1QTUxOTUxLTE1NTk9OTk5NTlBQUVBPVFVVUVJTUVNUU1BOUFBQ
-UU5OUFJQUlBNTk5PTVZ9uMfQ19zh4+Xn6OnpT1JNTUpOUUtNSEdMS0lFQ0NGR0pG
-RUVAPz48Pz0+Ozw8Pj08PDs8PTs8NzY6Ozg4ODs6ODc7PTc3Nzg5ODY5PDg0NjI2
-OTo3NDU2ODg4OTo3Oz89Ojw6O0A7PTw8PDw9PDxAPz09O0A/Pj0+Pj09QDo/PT47
-PDk7PD49PTw6PTw8Ojo8Ozo7Oj07Pjw8P0A8PUJCRUVGREdIRkZHQUJCRUNEQ0BA
-PDw9PUFKOjc3ODo6Pz09P0NBQ0hJS0xNTVBOSEhHRkZHSEhHRUNEQURGRUI8Ozo7
-Ojk3OTo5Ojw8O0A8Pzk9P0FCREZAPz06ODYzMjI0NDQ0NjU2MjMzMjQ3Nzw/P0JE
-QD89NjM0NTg4NDQwMTM3OTUyMzU2NDQ0NDMzMzc4NDMzNTQzMjEyNjc3NzQ0MzIy
-MzY1NDUyMzM2NTM2NDU5NTU1MjM3NzczMzMyNDg2Nzc3Nzg2NzY3NzQ2Nzc5OTY7
-ODo7Oz1AQEBBQkFDQUBFQkZIRkdHSExITUhGSEdHSUZHSktNTUxIQkFFSUtLTUlK
-SklPTk5KSU5MS0lLTEtMTUxOUVRWU01MTktOSEdJTE9QTlBOUExNS0hFRElPUFFU
-Tk5LUExJSklJSkhGREZCPD9BQkNCRERFQ0BAQUJAPz08QUFAQj5DQ0NERU5OUlJR
-U1ZYW1xfYGFjZWhoaWlqaGZjX15hX2NjX19hYmVoaWVkYV5eX1lbX15fXVtZWVpb
-XFtbXVpaVlleX2NhXV9fY2BcWmBiYGFkYmVfXV1dXF5dYF5gYV5ZWVteYGFgY2Vl
-Y2Foamprbmxzc3N1dnt7enp+f4CAgYKBhIKAfnp+fnt9fHh3d3Nxb29vcnBtamhn
-amlmZ2VlY2RhYV5eXVhXVVJUU1BQU1NSTkxOUFFPWFJSUE1PS05JSU1MS0hGRktM
-SklKS0xLTU5PU1ZbW19mamlpb3Bzc3Jxd3VzcHFwb2ppaWlpZ2lpZ2RjY2RiXl1Z
-V1NQT0xLSklGRkdHRkdJSUVFRUdKS0VHREVCRkdISklHR0VFR0dFRkZGR0dIR0NG
-RUhHRUZFR0dHR0VERUZHRUdISElIR0dHRkZIR0ZFR0NCRENGSElHSEhHSUlJSkpK
-SUhIR0hJSktJSklLTk5NSkdGSUlJSEpJS0tMSUhKSUtNT0xLTE9LR0tIR0ZGRkZI
-SkpNTU1OTU5NTUpJSU1NTk1RTEtPUE5NTEpLTE5LTEtNSUlHSEtKS0xJS0hKSUdH
-REJDPj5CQDs9Pjw9QDw/QkE+PDw8PDo5ODs9Pjw5Oj47Ozw8PkREQEFBOzs9QUNC
-Q0JGRUhISkxQUFJXWFhaWldZWllXWFtZWVtaXV1fXWBhYWVmamxtbWtwdHNycnN0
-dHZ4e3t6eXx+g4iMjpKUl5WZnp+hoJ6foJycmpqcnp+enJ2enZmal5eXl5qZlpOQ
-i4eGhYOBg4mKjJCUlJGRj4yEfXJmV1BNTVBPT09MTVBNTU5MSkpMS0xKTU9MTkxM
-T05QU1JRUFJTUlNRUVJPUE5PUFJTUVFOUlBQT1JQT09MTU5NVn+3xtDY3ODj5ebp
-6epOUktLT0hJSExMS01ERUVFRURJR0lGQ0NFQj5AOjw7Pj46Pjs7OTo9PDo/Ozo5
-Pj07OTs7PDk3NTc4NTM5OTg3NDc2ODY3ODU2Nzg1Nzk5Oz04Ozo5PDw7PTs8PDs8
-QDw7PDw9OTw9Pj5APzxAOz48OTo5PDs8Pj4+Ozo4Ozk6Ozw6OTs5Oj87PT89Pj89
-PD0+QUJFRkhISEhGSkhFREJCQ0A/PTs9QT8/QUtER0BAPT09RElJRUZKS0tMS01L
-S0lGRUZEQENFQ0RCPj0+QT88OjY0NDM3OTU1ODk5PDo2OTw5Nzo9Oz1DQ0RERT48
-NzYzMzc1NDI0NDUzNDIzNTQ1Ojo9QT8/PDs4NzU3OzhNNjIyMzU3NDUzNDAyNDQ2
-MjEyMzU0NDMyNDQ2ODUzMTM4NDU1MjQzNTM0NDIzMC8zMjQzNzU1NDQzMzY1ODQ0
-NjQ1NTQyNzU7ODU0Njc1MjQ1Njg4OjY4OTs+Pz5CPkBCQEE/Pz1DREZGR0ZJS0pP
-TElHQ0VHR0lFTFBTU0pFQkJIS0tPTktLT1NST0tPT1BKR0tKSk1LRUlOU1ZVUVJR
-TFFOSUdMT09OTU1PS0pKSklISUxPTE1OT05PU0tLSUxOSUdISUVCQkJEQkBEREZG
-RT9CQUNDQEFCQ0NEQUNESUlKTFBRUlZYWl1cYV9iY2JlaGdnZ2VlYWFfXmJlamJh
-ZWZkYGJkZGRkXl9gYF5gX19cWlxbWFZZXV5bW1hXV1tgXmFdXV1eYmBeXF5fX1xa
-XVxeXFtZWFlaXF9dWFdXWV1dXl1fY2RiXmJlaWxrbXJydHR1dHd4fH97fYCAhIGB
-f397fHx5dXR3dXBvbG5taWhqa2ZnZmVjZGFhXlxcX1peWVdXU1RRU1FPUU5KS1BO
-TUxOS0lNSk1MSEpERUhHRkdGRkZGRElIRUZJSElLTVBTV1ldZmtsbnJzdnp4eHd7
-eHZzdHNwcm5wbGppbWtsaWZkYl9dWlhUU1BOT0xKS0dJRUhISEhHR0lGSEpGRERG
-RklISUxHRUZGREZHR0ZGR0ZJSkhJSklHRktJSEZFRkZIRkVJR0hKSUhIRkNFR0ZJ
-SUlGR0dHR0JJRkZJR0hHRklHSklJSEtKSklGR0lLTElKT05IS0tPTVBLSkhKS0pL
-S0pKSUlLS0tMTUtKSkZGSkpHSEVHR0lLSkxKTEtMTU9NT09PTk5RUVJQTk5QUE1I
-S0xJR0pOS0tLS0lISUlKS01NSUhDRURGRj9DQD48PzxAPz4+QD9AQEFBPTo8PDw+
-Oz4/PTw7PDw7PDs8PT9EQj4/P0FBRERGRUhKSk1QUVJWUlVXV1hcYF1dWF1dW1xc
-XF9fXWBkY2RjZmprbW1ub29ucXNzdHZ5fH+Bfn5/f4GIio+PkZWZm56hoaCfoJyb
-m5ybnJuanZ6dm6OhnZmanJyZmJeTkI+Mh4WDhIGChIqOjpKWlpGNjIl/dWdYTk9O
-T09PTk1LTUxKSUtJTU1LS0xNT0pKTExQT05NTVBRU1FQUlJTVVBMTVBOUFFTUlJO
-Tk9OT09OTk5QTEtTfLjI0Njc4uTl5+np6klPUUxLR0tMRUZHQ0RERURDQkhHRUJC
-QUFCRERDQj47PDo3PEA/Pzs8Pz87ODk7Ojs8Pjs5OTg6Njo5Njo5NjY4Nzc1Njk5
-Nzc3OD04ODo8Ojk3Njk5ODg+Pz47PTw9PTw6Ojs6PDw7Pz1DPjw/PDs8Ozw6Ojo9
-PT07Ojk4Oz86ODk7Pjw+P0Y8PD09QUBAPkI/Q0VGS0hJSklDQkFCP0BCPjo7QD49
-QEA9QkZDQ0NDRUlKSkZFQ0hIRUhGRklNRUZGQ0M+PkFAPDo7PD86OTc0MjQ0MzI1
-MzczMzIxMzY4OTc4Pzk6Oz9FREZDREE7PDg2MzMxMjQyNTMxNDc1MTQ3Ojg6PkFB
-PTs2Nzo7UEs4NTQ1NzY1ODU1NzU2NjQzMjEyMTc1MzI2NDU3OTY0MzMzNDczMzQz
-NDU1NTIxNjUzMzc2ODYzMzU2NTU1ODU0NzQ0NTMvNTQ2Njg0NDQyNDI2Ojk1NjU1
-ODs+P0E+QD0/Ozo+Pj48QUZIRkhNTlBLSkpHSEhGR0VHTVFUTEVERElHSktPUU1N
-TE1ST1BPUE9IR0xLSkdISE5PUFNTUlBOT05PTU5QUU5RTE1KTUZISUtMS0tNSkpN
-T0pPTUxLTUxNS0hGR0dEQUVGRUZJSUdHSEVFRENEQkRCQ0NFR0tMT1BOU1ZYVlpc
-Xl9jaGlnZmZsa2hqZ2hlYmFhY2VkY2VlYGBgYGNkaWZkYl1fYF9cXltdW1tZVlVV
-WVlcWlhXWFxbXF1dW1hZXl5cXFhcXlpbXV9dWFhYV1dZXVhUWFlYWFpbWltfY2Nm
-ZGNhYmFka25ycm1vcXJ2d3p6e36AhYJ+e3p4eXVzcG9ubGpoaWhmZmRhYl5eX15Z
-WllZV1ZXV1RUUk9PUE9PTEpKSkpHSklHRkhISEdKSEhEREZDSUNCQUVFREVFRElF
-RkdHSEpOUVdcYGNobnBzdHd6eXt8e3p6eXZ2dHR2c3Jzcm9ub2tpZWRhX1xaWVZR
-UE9QT0tISUlLTElGSEZISklHSEhHR0ZJSEhLSEVFRkhHSEdJSEZISURHRkdJSEZH
-SEhJSEdFREVERkRGR0dIRkhHRUVFRkVESUdCRkdFRUdGR0hHRktLSUlKS0xLSkxH
-R0ZJR0hKSkpJS0xNUExLT05MT01LSUtKSklLS05LSk1NSklHSUlIR0ZER0lMSkpK
-S0pMTUxLS01LTU5MT1FOT1BOTk1NS0lKSkpMS01NSkpOTUpKTE1LSkxKSURDQ0JA
-QUNAPUBCQT48Pz0+Ojk9QUE6Oz09PD06Oj48Ozc6PDk5Pz4+P0FHRUZDQkNCRUVJ
-TFNOUlNSV1NUU1JTWVpeXVtaXFxeXV1eYGFiYmZlZGZoaGtubWpvb29wdXV2enp+
-gIGDg4OChYmMkJKRlZuenJ6fnZycnZ6cnpqbnZyamZycnZybm5uempmXlZOSkIyJ
-hoKCg4OFio6OkZOTk42OiYN5bV9WUE5PTk9OTUtKTUtLTExJSU1MTU1LSU1LTU1N
-TVBPTE9OT05RUVFQT01MUFBPTU9QUVROTlBTUE1OTUtQTFB2ucjR2N7h4ubn6Onq
-UE5QTktLSUpOTk5ORUJCQ0NCRkNFREBBQ0RDRkQ8QT46PDc4ODs9PD07Pjo5Nzo2
-Njo4Ojs+PTo7OTk2OTc6NTg5OTk4NDc3NjQ1Ojk4OTg3Nzk6ODk6Ozw+QkJBPz1A
-PTw3ODk6PDs7PT08PDo8PDs4OTo3OTk5Ojg3Ojk4OT85Nzs7QENJQTs8Oj89QUNF
-Q0FCRkZIR0hIRUVEQ0FCQDw9OTk5PEI+PkJERktLSkhJSkxJSUZERkREREREREVB
-REBAPD07Oj0+Ozo5ODk4MzMzMTU2NjE1NTQyMjQ1NzM2OTg4ODU3PEBBQkNEREA9
-OjYyMjMyNTQzMzIyNTU1Mzk4NTY7Pz8+PDk6OTk6NjUzNDQ1NTg3ODU4NzMyMjQ2
-NDQzNDI0Nzk4NzY4NTQ0NDIzNDMzMzM1MjMyMzIzNTU0MjQ1NTU0NzY2Ojk0NTQz
-NjYyMjMzNDY1NjQ0MjI2OTY3ODk3Nzc0NTk+QkFAPDw8O0E/PTxAQkJFR0tNTEhJ
-SUtKS0hIRUdLTU9KSERHSEtNT1BOS0pLTU9OUlFUU01MT09KR0xVT1JSUFJSUU5S
-UE9PTk5QTk1LS0pLSUhJTUxNTkpHRUpOTUlMS0hKSktNSUhFRURFRUhGR0lJR0dG
-R0hHSkpJTUhJS1BQUVJVV1VTV1tdXV9jZmdpaGhpZWtsa2pnZWNhYmZnYl9nZ2Zk
-ZV5eY2VlY2VkYWBeXV1cXV5ZVlZWVlFTVlpaWlZWWFpYWVlbWllbXlpZWlhWV1pb
-WlpZWFdXWFpaW1xcW1tZXVlWVlpfZWZiYmBeX2FjZGpwcG1ucXN1d3p7fHx+fnh6
-d3d4dG5ua2hpZ2VjYmBeXl9eXFlZWF1XV1dUUlRST09OUlFOTU5KSkdISEhGREhI
-RUNFQkJCQ0JHRklNQD49QUJCREFAQ0hHSEtOTU1RWVxiZWxxcXZ2eXx7foB+foB8
-fHl2dnRzc3Nxc3Bsa2prZGNeX1tXVFZSUE1OS0hISUZLSUdHR0pJSEhHRkdHRUVE
-SEdGSUZGSUVHRkhISkhHR0hISUlJSEhGSEpIR0dGR0ZFQ0RGRkZHR0hHRkVERkdF
-SERBREZIRkdHR0ZISUhHSEVIR0dHSElKS0lHRkZKSUtMTU1OTU9KSkxNTU1OTElJ
-TEpJS0pJS0pKSkhISUdFRURHSktLTUxJS05OS0tLT0xKTE1PTlFOT1BPUE1LTExL
-TUxNTUtMTE1MS0xMSkxJRUlGRUNAQEA/QEBAP0FCQEE/PTw8PD07PD45Ojw9Pj48
-Ozg5Ojk6OTs9QUBDREJDQUJFR0lIS01VVVBRVFRUV1JTVVhWV1xcWllZW15hYGBf
-YGFjY2NnaGlqb3BwcW1udHV2dXp8f36BgoSDhYSJipGQkpeZmpudn5ycmpyenZ+c
-mZqamZeYmpiYmZqYmZqYmZSVkI+LiYaFh4WDgoSGjYyTk5KRk5GNh35yY1dQTk1K
-TE9NT0xOTEtLSkpNTE1NSExMS0xLTE1OT09OT01QU1FRUlFQUlBRT09QTU5OUU1N
-UFBPTUtKSlBRVHm3yNDY3eHj5efo6ulJTkxMSUpLT01LS0pLR0RCRUtFQ0NEQ0RB
-RENAQEQ9PDg4Ozc2Nzg7PDk8Oz09PDo2NjM3Ozo8ODg5Ojk4OTM5Njc3PDg4OTc2
-Njk5Ojg2Ojk6OD05ODg8QEFERUVCQj0/PT08Pj4/QD89PT5APTw+Pjo1Nzc5OTk7
-Ojo5OTg4Nzk6OTk5OD46Pj9AQEJCSEREREdHRklIR0lGQkJBSDs7PDg8Ozs7QkRE
-R0dIRUdFS0pJR0dHRkdERUVCPz8+QUI9Qj0+PTo6Ojk/Ozk3OTY2NTQ1NjQ1NTc1
-ODc1NDM1MzU1MzM0Nzg4PEFDQkBAPj05OTg1NDUzMzMzMzEzNTYyMzY5Nzo+Pjk8
-Ozg3OTU4OTc1NTUxMzU4ODczMzc5MzM1MjEvMTM0NjYzNzQ0MzI0NDU0MjU1MjAx
-MjUzNTIyMzY0MTM1MzM0NjY1MzI0MTQ3NTM0NTYzNTc4NTM1Mzc4ODc2ODc2Nzk4
-NztAPz06Nzk6PTs+QEFEQkVGSUpLSUpLSkxIS0pISElKSEVEQkRGR0lMTU1ISkdK
-S05QTlBPUVBQTUtJWFpRUFJUV1RQSkxPTk5RT0xOS0lKSUpJTk1PTkxJR0dGTU9R
-S0dLSUpLSEhJR0dFRkdJSUZHSEdGRkhJTExNUFJNT09TVVdZXVxaXlxZW11gZmZk
-YWJiaWhrZmhsaGdjYmVkZmVjZWNgYmRoY2BjZmRmY2NjYWFhYFpYWFRPUVVVWFRV
-UVJUV1VYWVZWWVlbWVhbVlVUU1NUVFRWWFdYWFRWW1dWVldXWFlaWFVWV1hdX11g
-YWNjYmRiZWloaWtvcnR2d3p4eXd1c3V2dXFtamtmZWZhYmNfY1tdXlpYWFVTVlRQ
-TlFQT05NTE1MTEtIR0hKR0hHREdGQkJBQUFCQD5ARUNCQUBDQkE/PkFBQ0VFR0pM
-TVBTUlhcZGlrbXJ1eHuAgYJ/gYOAgIKCg316d3VydnVycG9pZ2ZkYF1cWVNQTE5N
-UE1MTUtJSklMSUlGSUlGRkhJSElLSUdHSEdHSkhIR0dGRkhKR0dGSEtISUpJSUdH
-SEVEQkVHRkVGREhJSUhJRUNFQkRHR0tIQkJHSUZGRkVHRUdGR0VFR0hIR0hFR0ZK
-SURERkpLRklMTk1MTk1LSUpJTExNT0tNSktKSUpKS0pHSEdGRkZER0lMS01LTE9M
-S01PTUtMTExPUE9NUk9OT09NTk5LTE9MS0xLTE5MTk9LTktLSEdIR0tIREBAPDo/
-QEE8PkI/QD48Ozs7OTg6Pj9AQEJAOz49PTw9PztDQT08QEJAQENFRkZISkxNUFlT
-UVBQU1NXVFVVVlhYWVlYXF9gYmJmaGRkZGVnZ2hoaWptb29ycnF0dXd6fH1+gYGE
-hoaJjY+OkJGVl5qXmpmXmZ2dnJucnZqal5ibmJeYmJaZmZeZmJeYlpOTj42PioiH
-hIKAgoeLjI6OjY+Rj4iDfXFlV1NQTU5MS0xOUk5NSkpMTUxMTk1NSUpMS05OTk5O
-UFBOT1BQUVBSU1FRUk5QUE1PTE9NT0xOTk5NTU5QUFFTfbPJ0Njd4ePk5+jq6kpP
-UkxMTE1KRkVJSUdGRUZEQ0NIRkNFREZEQUBBPkA9Ojs4OD47Ojs5Njo7Pj46ODw7
-OTo5ODk4ODk6OkA5Pjk7Njc3OTk6NjU0NTg4ODU4PTs8Ojo9Pj08PD1DRENBPkBA
-PTw9PkRBQj5CQz47PTs8Ozs5Ojo5ODg5ODo6Ozs6Ojs6ODg6Ozs+QUFCQURFRUND
-Q0FFRUNERkRCPj09QT45Oj1BPkFGTUtPTE9HQkFCRkVFR0hHSERFRkZCQjw/RDw5
-OTc6Ozo7OTw4NzU0NDY1NDM1NTY1MzU0NjI0MjIzNDQ0MzU4Nzg6Ojo6Oj88O0A+
-PTY2NjU0NjQ0NDQ1NDU0NjY1PTg8Ozo8NjU3ODg2NjY4NTQ0NzMzNzc4OTk0NDYz
-MjEyMTM1MzI0NTUzNDQ1NTQ1MjMzMTEyNDUyMzU2OTc3NjU2NTQ1NTM1MzA0NTU0
-MzU1NDI3NTc2NDU2MjU3Ozo3NTg2Njg6OTo9Ojg3OTw9PkNDQkJESElGR0dIS0pN
-UEtITElJSElHRkhGR0dJTU5NT0xNSUhISktMTVBRTktKSVBNSVBQU1VUTk5MSkpN
-TU9NSUpLSklJTU9SVFFLRkZGR0pNUUxKR0hIREdJTUtJSkpHR0lJSEhIS0tISUpN
-T05RV1NTVVdaXFxdW11fXV1eX2RjY15fYWJiZmZmZ2VkZWRlZWRlZGRoZmNpZmdl
-Y2NkZWZiYF5bYWBcW1dWVlJTVFVVUVZSUVJUV1dYVFFSWlhXVlpUVlRRTVJWVlNW
-WFJUUFFVU1FQUE9PVFZTUlNXVlNVWlpeYGFhXmJmZ2poam9yc3JxcnV1dHZ0c3Bt
-bm1rZGBhY2NeXFtaW1tZVllXWFVST1BRUFBQTUxHSkhHSEpERkdGR0NFQkBAQEFA
-RERAPj1APj0/PT09PD9CQEFDRkdJTFFTVVpfXmNobG9zeHt8foCChYaFhoKCgIGA
-fXl3eHRydXJub3BsZ2NhXlhVVFJSUE5NT05NTEpJSEhKS0hHSElHSElKS0hJR0VI
-SEdHRkVJSkpHSEhHR0ZGREhJTkpHRkRERUdHRUVGR0dGRUdJSEVFRUVHSUdHRUhK
-RUNGSkZFSEdIRkVERkVGRkRGQERGSEhHRkZHSk1MTk5LTE1NTElKSUZJSktMS01Q
-S0pJR0lKR0pIR0dFRUZITEhKS0xOTU5NS05PTk5PT1BOTktMTk1MTE9OUE9NTE9R
-TUpKTk1MTUtLTk1LSUtJQ0FBRUJBPz5AQkFAQT49PTk5PDw8PDo6OzxNQkE/Pz4+
-QEBAPz87PkBAREVERklISkxLTk9TUlNUUVNSUVRXWFhZWlpcXF9gY2JhY2NnZ2Vn
-ZmdpaWpubG1tb3F1dXV2e32BgYSGh4qLjIqMjI6Ok5ecmpeXlpSXnJ2dmpiZmZiX
-l5aVkpOTlZiZmJWVk5CRko+Mjo2Kh4WEgYOEg4mLi4yPjIyKiYJ4b2dbV05OTEpN
-TUxLS0lLTU1LTExOTlFOS0pOT09OUU5QTlBQT1BRUFBOTVBRVFNTUE9NTU1MS0hL
-TVZQTU1OTE58tsfR2d3g4+bp6OrrTEtOTEpNT0xKSUZAQkNDREJEREJFRENHRUNB
-QTs8Pzc8Pj4/QTw6Ojo5PD5MPDs5OTs8Pzo4ODY2Ojw5Ozg6NzY2MzIzNjc6OjY6
-NjY7Njc3PDw3PzxCPD05OkA/QENCPTo6Ozs8PT5BRD88PDs7Pj08OjxAQj07OTo6
-Ojo9Ojo7Ozw/Pzg6PD9CQ0FCRkZHQ0FDREFEREBAQUA9PD08PD9AREFGSU9ST0pI
-SkhHQUBDQ0RBQkBAPkFARj09Nzs/Ojo6Oj47PDs4NTg2NzY1NzY2NjMyMzI2OTQ1
-MjI1MzM0NDs2NDIzMjQ1NzY4OTxBQEJCPzg0MzU3NDMzNTU1NzUyNTc0NDk7PD48
-ODM0NTs4NzY2Nzc0NTY3ODc4ODY4Nzc0NTI0OzQxMjM0NDQyMjQzMTI0NDQ2NTMz
-NDU0NTs1NjU2NzM6MzQ2NTc2NDc1NjU4Ozo5NzY2ODc3MzQ0MjQ1NTY5NzY2Njg8
-Ozw8Ojs5PEBAP0JBQ0RISUdIR0ZLS1BPSkpLSkhGSEdHSElJRkpOUU9OUE5JSkdE
-RUlNUE5MTUpJREdNUlRVVFFQTElJR0tLTU1NUE5NTUpNTlFUTkVHRUdKTE5NTExL
-SUhIS01NTEtISkpLS05NTk1OTUtNTVFTU1RUWFlaW15fYWRgYWNfXmFkYmJfXV9j
-Y2VkZGNnaGViYWFiZWVmYmNjYWRmZmZmZ2VnZWBdXF5cW1tbX1pZVVZZWFVVVVJT
-VFNSVVZWUVFUVFJRVVNPUVJQT1BTUVBUUlJSUE9QUVNQTk1NUlhXVFdWVlRXV1lb
-WlpgX2JoamhqaGhsbGxvcG9xcHBwbGtsaGZiY2NiYV5dXFxbWVlYVlFRUE5OTk1O
-UFBOTUtHSUlJR0lHQ0VDQD4+PT9BPj09QUA7Ojo7PTxBPTs+PkJHREVHSk5RVVhZ
-XGVmaGxydXZ6fH9+goOFhoaBf4B9fX57eXV3dXR3dnJxbWZnZ2JcWlRSUE5PUEtL
-SUlISElJSEdJSklKSkpJSkpJSUhIRkdJSUZGRkRERklIRkRFRUNERkZJTUdKR0RF
-RUZGR0pJR0dJSUdJSUVGS0xFQkNER0hIRkZJSEZFR0hGREhISUlJRkdGR0ZFSEtK
-SktNSUpOTUtMTk1OS0tJSUdJSElJS0pKSElHSEhJSEZGR0lLTUpMSklKTU5NT01L
-TE1OTE5NTk1OTUxLS0xMS0xLSk1NS0tMTExNSUtLTktMSkpJRkRDQT4/Pj5BQD9C
-Q0BAPj87PDk9PD09PD48QD46PDs6PkFCPTw/QUNEQERFSkhLT1BMTE5QUE1OUVVW
-V1dXVlhbXFxcXV1dX2BiZGFkYmZnZ2hnaGttb3BzcHFxdXZ1dnh8foWGiIeGh4qI
-iYuOj5GUlJiYl5iVm52ZmJial5mZl5STkpKTkZKVlpWTlZaXkIyOjYqKiYaFgoGD
-gYSHh4iKjIyIiomFgn1xZFlRTUxMS0pLSU1NTEtJSUtMTEpNTk1MTUxLTk9MUUxL
-T1NQT01PT09PT1FOT1BOT05NSkxMTk1TT0pIRkdNUHi0xtDY3eDj5ufo6epRTU5K
-SEhISkpFQ0NEQ0VAQUdGSERFRENBQUNCQ0A8P0JBPEE8PDs9Pz46OUNAOzs5OTk2
-OTg1Nzc3Ojk7Ojc1NDY2NzU1OTo3NTY2Njk3Njc5Ojo6Ojs7Ojw8O0FAQENEPj47
-OTo8PDs9PTw/PT5DPj46OkBKOTs8Ozw6OjxAOjw8PDw+Ozk9P0FDQ0JFRkU/RUZH
-QUE9QD8+Pj49Ozw9QkZHSElOTU9KSEZHSUY+P0BBQEE8PDw9Ojc6OTs6Ozs7PDo4
-NTY2ODc1NjY2MzM0MjY6NDQ0NTU0ODM1ODg8NjY1MjU0MjIzMzIyNjk7PD1DQ0RB
-NzU0NDU1NDQ0NDc3NDg0Njk4ODw+PDo4NjY1Njg2NjY4Nzc2MzQ3ODU3NTU2ODQz
-MzY4NTY0NzUzMzEyMzIyMDI0NjUzMDIxNTY1NTY1ODY1Njg3NjQ1NjU3NDQwNTU0
-Ojk8NjU0NDg1NDU1NTUzODc4Njg4Njc4Ojo6Ozk6Oz5BQT9ARUhJR0hCQ0hJS0pG
-SklISEZJSUlNTU1KTFFRVlBOTUlLR0lGRklNUVNRTkhER0lTV1hTT0xJSEpNTE5O
-TU5QUVFPUE5NUk9LRkhISUxMTUtNT0xLSkpISkpLS0xKS01MTUtNTU1OUFJTV1VV
-VlpfX19lZGJiZGVhYV5eX11hYV9iYWJhZGRlZGJhX2BhY2JjY2JgZGVgYmNlZ2dl
-Z2VfYV1cXFpZWFlXV1RXVlpXV1NSUlNOT09OUVBRTUxSUUxPUk9RUU5NTVFQUVFP
-T05MTE9OTExNTkxLTU9RUlFPUlNYV1hbWVlYV19lYmNlY2lpbW9vbWxsb21ubmtl
-ZGJgYF5hXVpZWFZXV1VTUU5OS0xOS01NSkhGR0VESUZGRUdHRUE7Ojs8Pj9CPTs+
-Qj0+Ojs+PT0+QEJARUVHSk5RU1ZZX2FmaW1vcHZ6fX1/gICCg4eEgIKCg4B+fX99
-fHl1dnZycWxqamhjX1xYVVFQTUxNTUxJSUhMS0hKSkpKSUdHS0lLSEdKSkdKSkhK
-SkhER0hHSUZIRkdFRUdGR0hISEZKR0hJRUhISElJR0RERUdKR0ZFSEhEREVER0hK
-R0ZJSUdFRkNDRUdJSkhKSUlHSExJSkhIS0pLSk1MSklISktOSUtHSElIRkZIS0hH
-RkZER0RESUhJSkxPTElKSElITUtLTE5OUExNSkxLTE1LTEtKSklKSktLSk5LTElK
-TE5MS01MS0tMTExIQEJAQEFDP0FCQEFAPz0+QUA7PDs8OTo5QUE7PT8+Ojs9PT1B
-QD5FREJGR0dHTExOT1FUV1ZTUlFQTlRWW1hYVlVbX1xaX2BeYGNhY2RmZmhoaW1s
-bW5scHFyc3J1d3d6e3yAgoSHhouHiIyNi46QkZKXlZmanJuYl5aWlpWTkpSSkpKQ
-j46OkpKTkI6OkJCOi4qLjIuIg4KBfnx+gIGDhoiJioqGiIN/enBjWFBMTUxLSUtL
-S01MSkxJSktKTEtLTUtNUE5NTk5PUlBPT09PT09RUlJPT1BRTlJQT0xMSUxNS05O
-S01KR0pRd7PGz9fd4ePm6Onp605MSU1LSkhKSUtISERFS01GRUVIQ0I/REVDQ0M9
-RENFRENDQUA7ODc2OTo+Rjs9PDk6OTs8PDc8ODQ2Nzg6Njo4Ojc2NjY5ODk5ODg4
-Ojw8Ojc5Ojo8PDw9OT07P0A/P0A+Ozo7OTw7Ojo8Ozo5QEM8Ozk4OTo7PDk6PDk6
-OTw+P0A+PkA7P0E/P0JGRURGQkRCRkhAPUA9Qz5AQDw9QEFGR0hJTUpMS0hFRUVE
-REBAPj88Ojo5Ojk3OTg2ODc+NjY1MjQzNjk6ODo5Ozc3NDUzNjU1NDU5OjU0NDc5
-NTU0MzM0MjU0MjQ2MjI1Njg7Ojs8PkA9PDYzNDQ0NTQ0NTUzMzA0NTU2PTw+Ojo4
-OTcyNDQ1NTU2OTY2NDM0NTU2NzU1NjM0NDM1NjMyMC8yNjUyMzAyMzUyMTUzMTYz
-MzIzNjc2Njk1NzU3Njg3NzY1NDQyNjI0NDU3NjI1Nzo3NzUzNTc4Nzk3Nzg1ODY4
-Oz08PTs4PUJAQj9DRkVISUJDRkdKS0lIRklISEpHTE5NTUtJTU9QTklLSUpLS0tL
-TlFVU1NPSURHTVBQUVFNTklGS0tLT1FQUFJQUE9QT01OTElNSEtKTExKTE5KTU1J
-SktLSUxNSktPTUxNTE1PUFVUVFZTUFhZWl9kYmRkYWJlZV9dX19fX15gYmVmY2Rj
-Y2FhY2BeX2JiYVxgYWFhaF9iZmZlamlnZmVhW1pYWVdXVVRSU1JTV1dTVFRRT09P
-T05PUE5OTlFUT1BUVE5NS0tOTlBLTExLTE9OTkxMTE1OTUpJTExOT1BQT1FUVldX
-VlRVWF5gXmFkaGhpbWxtamlqam5raGViX15dW1xcWVZVWVdUT1NPUU1KSk1KSkhJ
-SUdGRkVFRkVCQUFAQUA9PDw8QEE+Q0M+QUA8PT1CQkJDQ0ZER0hMUlVZXmBkaGtu
-cXN3dnyAgIKChIWCgYKCgoSBgoCAfoB9fnp2b3Bvb2lnZWBfXFhWVVFNTElJR0lK
-SktJSUdGRkdISUlISUtKR0dIR0VISEhFSEpKS0hGR0dHR0dHR0dHSEhHSEhIR0tK
-RUVGR0dHSElISEhMSklKSEZHRUhHSEpIR0dKS0lJRkRER0hISUdHRkVGSUlLTEpK
-S0xMS0tISUhHTUpKS0pISEpIR0dISUpJR0VGSkhISExNTE5KR0xITEpKTU1RTkxL
-TEtMSktLS0pISUtMS01LSEtOTFBPTU5KTE9NTk1JTEtOS0lHQ0JBQUA/Pj9AQT8+
-Pjs8PkRDQUJCPj09QT88Q0NFQkFDREJFR0VIS0hMTUpMT1RTT1JSUFNWVVNRVVZV
-V1hYWFtdW1xeYGNjZWVlaGdoa25qamttbnBxc3Nyc3V4eHh7e3x+g4GGiIiIiomP
-j5STlJeXmpibmpWTk5OVk5GPkZKQjoyJhouMjYuNjYqGi4yRiYiIh4OCgYSCfH5+
-gYOEg4eKioSBgXx4cGRYT09QT01MS0lLSkxNTlBJTEpMSktLTlBPTk1NS05PT09N
-T1NTUlJUUVBST1FRT1BQTktKS05PTE1LTE1LVlV7tcfQ2Nzg5OXn6OrqTEtOTU5L
-S0lLTEtHTEtJTUlGQ0VFQ0NCQ0JBPj9CRkBCREE9Ojs2Ojs8OTU4OTo5Oj08Ozo5
-OTg2NTU1Njc2Nzk4NjQ2ODg6Ojg4Njo4OTo6ODo9PT08Oz07Ojo8Pjw8PT08OTs6
-Ojs8PTs6ODk6PDw5Nzg2ODo5OTpAQDk5OzxAQENFRERGQUNBRERBP0BBQ0VCQ0FB
-QkE+Pjw9QkJBRkdJS0xLTE5KR0RDQj0+Ozo5Njg6ODw5Nzc5ODU1OTY2NTk2NTg4
-ODo6Nzc7MzQ0Njc3Njk7PTo5Njg7OTc4Nzg4NjU1NDM1NzU2NTY3Njg6OztCQ0M8
-ODYzMzU1MjQ2NDMzMzUzNDc6PDs7OTc4ODk1NTM1NDQyNTYzMzQ2NTczMzMxMzU0
-MjEwMTAxMjMzMjQzNDY4NTU5NjIzNDQzNDM0NTM4NjY2OTc2NDU6Ojk2NDY3NDI2
-NjY1NTY3NzU2NjQ1NTU5OTg5OTk5ODg7Oz8/PDw9Pj0/QD9CRklJRUZHSElFSEdK
-TEpGSUhLUk9LSElHSU9KSkdGRUZJTExNTE5RUVFNSUdNTVFQTktMTEhKSUpRU1BP
-TU1NTE5QTEpKSklLSkpKSUpMSEdISEpISElKSkxOSkxPUk9NS01SVlZVV1hVWV1e
-YGNiZmdiYmZnYmRiY2BfYGNjZWVjY2ZjY2JhY2BeYWJfV1thYWNmZWVnZmlqbWxo
-Z2FdWllZWFdXVVFTUU9OUlFRT0pLS1BQUFFOS0xKS01RUlJQUVJSTkxNT0pMTkxM
-S0xPTUtJSEtKS1BNSk5RTk9RT1FSTlJSVFZYVlxfXl9jZWZoZ2llZGVoaGhmYmBe
-WVpYW1hWUlFVVlBQT1BPUVFOS0hHRERJSEZERUNEQkZCPT5AQj89Pjo4Pzs8QT9A
-Q0NEQ0NCRUVIRklMT1BUWV9jY2lucnR1dnl7foGAgYGBgoGDgoGBgICAf356fHl6
-d3BycG1samZhX1tWVlRSUE1MTUhKR0dKS0pLSkpKR0ZISEhKSUVHRUhISUdKSUpI
-R0ZJSUhHR0lIRkpFRURDRUVBRElJSUhERUlJREZIR0ZGSUtLS01NS0hISkpMR0dH
-R0dJR0lHR0hJSUlHRUdHSUtJSklNS0tJS0xKR0ZKSkpKSkpQS01NTkdHSEdFR0hI
-RURERUZJS0lHSkdKS0pKSkpNTk5OTkpJS01MSkxLSkpKS0xNTE1OTklLTk9QT05O
-TU5PTk1NTFBLS0pDRUA/QUE8Pj06PT4+Pj4/Q0E+Pj9AQkI/QT8/Pz9BRUdHSEZK
-SklMTk5PT01RU1RXVVJRUFpXVlZWVlVZWl5fWlxdXWBiZGRlaGhoaGpqbXBrbWxv
-cnByc3JycHV4enp6eXt/foOEhoiKjIyRlJaUk5OXlpWVlJWUkZSYlY6NjoyLh4SH
-iI2NjIuMi4uKi4uKioqGg4KDgn1+fn6BgoGBf4CBgnx6e3VtYldPTktNTUxOTUpI
-TEtMT09MTUxLS01PT09MS0tNTk1NS05RTlFRU1BRUk9RUlFOUE1NSE1NTUxLS01M
-TUtOU4O4xtDY3ODj5ufn6elLTUtKSUlHSEpNUUxFR0hKSEdGQ0JHSUdCQkRBQkVC
-QUBAQEA+PT88OTg9Ojc3NTU2NDk5ODc4ODg0Mjc5NjU0NTU4Njo3Njg1Njs5Ojo4
-OTs9PUJDOzs8PT87Oz0+P0A9Ozo7OTs8Ozo6PDk5Ozs6OTo8PTo4ODs+PDs7Ozw7
-Ojo6P0RGSEdJSEhEQ0JBQUJDRENCQkFCPkA+PUBCQUZGRkdKS0lJS0pLSD8/QDs/
-OTc6ODlBNTY5Ozs6ODc1ODc7Ojs3OT86PDw5OTc5Nzg8Ojo9QEJGRkNGQkNCQEJB
-QD48Ozo2ODg4ODo4NzY4Oz47PUBAQUA8OjYzMzQ4NDQ0MzQ1MzQ1ODc6PDs4OTU5
-Ozo1NDY2NTg0NTQzNTc1NjY0NDU2NDM0MjMyMzMxNDAzNDM3OjU4Njs3NDMwMDE1
-NTU0NTQ3NTU3NDQ1OTU2NTc7PzY0NTQ3NjU1Nzc2ODg0Nzc0OTY3ODc0NTU3ODs8
-ODY5Ozs9PkA/QUBERERFR0lMS0lJSktLR0hHRkpKSUpJSExQTklKSkhLR0ZIS0hI
-RklNS0xLSktMTk5NTExOTUxLSklNUE1OT0xNTk5PS0pKSUpLTEpJSUpLS0pJSktI
-SUhJSUxNT05NT0pLTVBTU1VWW1pcYGJjZmVjZGJjZGRmY2FgY2JhZWVlZ2ZraGZj
-YV9dYF9hYVxZXGNhY2RlaGlpbm1qa2ZjX1xZWVVUVFFSU1BOTVBQUE9OUUtJSk1P
-T09NUExLTE1PUU1PUVNQTUtOTk1LS0tLS0pJSEdISk5NTU5MTU9OTkpNUFBQUVNR
-UVJTVVhbXl5iYmNiYmNiYGFhYWFfXlpZWFlXVldWU1NTVFFRTk1OT0xLSERDSEdF
-R0hEQkNHREVDQ0RERUVFQz08QENDRkhERUZHSUhMUExMUE9TVVxjZ2ZrbG90eXx5
-fXuDgICAgX+Af4GEgHyBgH19fHh5eXh0c3JtbWxnY2JdW1lVUVFPS01JRUdKSEhH
-TEpHSUdKVk1ISUdHSk1JRkVJSUlJSENERkZGSEhIRkZITEhKR0dGRkhJSUhJRkVD
-RUZJSkdIRUlHRUlMSktLS0tJSUhHSEpISUhHRkdHSUdGRUZFRklJS0pJS0tNSkdK
-SUhGSEhHR0ZKSUhKSUtMSUlHRkZFSkhHRUhFR0lMS01NTU9NTExKSkxOTE5NTkpJ
-S05NSUtLTU5LS05MSkxLSEtNTUtJS01NTE9QT01OTk1LS0lFQTs8P0FAP0JAPjw/
-QkQ/PT09Pjw9QUFDRkI/RUZGRkdISkhMT1FWU1JQUVFVVVNSVVVVUVZXVVZYWVhY
-WFtdXl5fYV9iY2VnampoaWlsbG1tbGtqbW1uc3R1c3V4fHp8e31+gIOFio2NkJOV
-lpmXmJSUlpaUkpSSkpKPjIqLiYeFgoWGiIeJh4aHiYmJioiGhYJ+gYJ+fnx9fHt+
-fX18fnx9e3VzcGpiVk9MS0tMTEtKS0pITEpLVE1MTE1LSktMTExLTVBNTFBOTE9Q
-TVFNTk9RUVNQT05PT09QT01MSklNTk9OT1FRf7nG0Nfb4ePl5+fo6UxKSktJSURE
-RkhJS0lFRUtLREU/QEdFSUJCQUVEQ0U8OjxAPT48Pzs7OzpAPDk1NTg5Njk2ODs8
-OTQ1Nzc3NTY2Njk6ODc1OTY3Ojs7ODk1ODs6Oz4+Pj87PTw7PDs+QD49Ojo6OTs7
-Ojg5ODs8ODY4OT06ODg5OTs7Ojo9Ojw9PD9BREZHRklLSEZCQkJAQ0JEQ0FAQUBE
-REJERUVIRUZHR0pJTUtIRkVCPjw7Pj07PDo1ODg4ODc5ODo4NzU2Ojk5NzU5Nzs6
-Ozw6OTo9PUA+Q0VJSktISUZIRUhKRUZHRkREQT1APz5AOz1AQD89QEJJQkRDQ0E6
-Nzg0NDU1NDM1NDQ0NDU1NTU7PTYzNjQ3NjQ2NTEzNDM0MjY2Nzc0NDQ1MzM0MjU0
-Njc4NzU1NjY2Nzg1MjQ0ODU1Njc4NjY1MjQ1NDIxNzg3NzM0ODU1NTY1NjY2Njc3
-Ojs5OTo8OTo5OD05ODs6ODg4ODg4Nzo5OTs9PDs8P0JCQUVHRENHSUlJR0xLS09L
-TU5KSEZHSEVNTE1PSEpJSUhGRkhIRkVGRkdISEdJSElKTUxLSklJSklJS0xMTE9J
-SktLTE9OS0tJS0pISUhHR0hIRklHSElJSUhKT01OS0lLSk1QV1hSU1tcXV1cXmJj
-Y2NlZF9hZmJfYGVnaWRpZ2ZnaWdmZWVlZF9gXmFjW1pbX2RjYGJjY2drbGlmZ2Nk
-XlxZVU9RUFBPTEtLSUpMSEtNTkxJRkhLT01OTk9MTE1LSUtKS01LS0lKTEpJSktI
-R0RGREVGR0ZJSkxLSEVGSUhLT01PUlJQS01RU1pbW19dYGthY2JeXV9bW1xbV1VX
-VVRWVldXVVRTU1JSUU9NTElJSEtGQUVFQ0JDRERDQ0JGRkNDRURESExLSEpJS0tL
-TkxQVFFQVFRWWFxdX2VobG5xdXV4fHt8f4SBgIGBgoGBg4F+e3x/fH1+enx9fHlz
-cW5qaGloY11bW1dTUE5NTUpGRERFRkVHSUpLTE1LSUdGR0ZKT0tJSEdLSElFR0dG
-R0VISEZLRkpJR0dHSEhISkpKTUtIRkRJS0pKSUdJR0VER0pKSU5JSklHRkVJSEdF
-RkdHR0pKR0FCSUdGR0lGSEhLTUtLTUlIS0xMSkhJRklHSUhMS0tLR0dGSElHRUZH
-R0ZFSEhHSkxOTEpLT0tMTU1MSUlLTUlOTk5OT1BPTkxJS0xNSUpJSEtOTklKTE1K
-TFBMTU9NSkxLSEdFR0M8Pz9APT88PkA/Q0JBPTw/QUFAQkJFRkNHRklKSlFQTU1P
-UlJSVFRYV1VUVVdUV1VXV1hYWFdYWllWWFpcXl5hY2VkZmhpaWhlZmhoaGhnaGtn
-aGxvcXF0eHV1dnt3d3p+f4OHiIuMkZGUlJeUk5GUkJCPjpCSkI6LjIuJhISHiYaE
-hIiGhoaHjoqFhISDf318fn99enl4dnd2d3l3enl4d3VuaF9WT09NTExLS0tISktK
-TEtMS0hJTEtJSkpLTU1PTUpKSVBPT05RT09PUFNPT05PUE5PT1BOTk9MTEtNT09P
-TFR/tsbQ19zg4+bm6enpS0pJS0dHR0hKTU5PTUdIT0tFRERFR01PRkRAQUJFRUpC
-PD5CQT89PDs6OUE2NTI3NTU3Oj09Ozw7NjY3PDk4NjYzNTk7Ozc5OTk8Ozo8PT05
-ODk+RUM8OTs7Ozw5Oj8+PTs5ODc2Ozs7Oj04Ojo9Ozw8Nzo8Ojc5OTU7PDo9PkI9
-QEJFSUdHRkZFRERBQkFAPT08PEBAPD9CR0VKTkxGR0dHR0tMTElHQkA+PDw7OTc7
-OTo2NjU2MjY5NzY3ODc3ODo7OTU4PTc6PDo8Nzw/QkdISktKSEdHTkpMTUpKSUhG
-RUVBQ0REPjw8QUVHSEdERkdHRUlGRD49PDo2Nzk1NTcyNDQ0MzE1NjpFNzU1NjQ1
-ODM1NTI1MzU4OTo5NzUzNTY3OjczMjM3NTU1NjU5MzI1NDY4OTg2OTg4ODY2ODc3
-NjY3NDQ0NjQ1Nzc0NDU2ODU3Nzc6ODs7OTs9Ozw9Oz0+PTk7Oj48PT06OTw9Ozs8
-PT09QD8/Q0JGR0hCRERERkdHSEpOTkxVTklIRkZFRklKTEpLTElISUZFRERHSEZJ
-SEZHSUdITEpJSUhIR0hGSkpLSUpNTU1KS0xKSkhITEpHRkdHSk1NR0dHR0JFSUlJ
-SkpPUk1NSUpOT1FUVlVVXF9eXmBiZGJjY2JkYWJjYWRnZ2VnaWhoZWdpZ2VkZWZj
-X1xeZGVbWVxeY2JhY2RkZGlqZmNiYl1aXFlVU09PUU5MS0pISEdHRkpJTEpJSUpL
-TElLTUlMTExKR0ZJSEdHSEpISElISUlHSEZGR0dJSEVFS0hGSElFREVGSUpMT01O
-TlBRWFhXV1lYXFpaX2NdW1lYV1dWVlRVVVVUV1xbV1ZXVFlSUU9NSkxLSEVERUVE
-RUdGRkVFR0dJSklJSUtLTE5PT09UVVJQUVRWWFZWVlxdXmFmZmxtcnd4ent/fn6A
-f36Afnx+gIB/fn58e3t5enl5eXp4c25uamhkZWRfWlhSUlFRTUxJRkdHSkpLSEdJ
-TEhJR0dJRkdHR0hGRkdIR0ZGREdESElJS0hISEhKRkVFRkhISUhHSktKSUhKSUZG
-SUlKR0dFRkZHRkhHR0lJR0hLSEhIR0hJSUtKSEZHSkpJS0hIS0hJSUhHR0xMTEpJ
-TUpKS0tIR0dIR0hIRUVGRkVGSkpGRkhHSE1JSkxMTE1QT09LS0pLS01QTEpMSk1P
-Tk9PUE9NTU5KSkpLT0xJSUpNTUtLTU1OTUxMTE5OTU5JRkRFQ0RAQEFBP0BAQT9G
-QkJAP0BCPkFDREVJSUtMTU9TU1JSTlNUVFVVVVRWVFdYVldVW1lXWFpZV1dbW1pc
-Xl9iYV9hYmFiZGZkaGZlZWJjZGNlZmtraWprbnBxcnJzeHZ2dXt7f4SHio2QlJOT
-k5KRjo6Nj46Oj4yNi4iKi4mGh4WGhYOChIODg4GDhYGDgoOAfX19e3l2dXNxcHFz
-dXh3dXl4dW9pX1BMT1BMTUpISUtOS0pNTExMTU1MTkxJSUtNTk1OUk9OTU9QUVBR
-UlNPT05OT1FSU1FRT09MTk5PTU5QTk1NU4G2x9HY3eDj5ujo6upLR0RHSkhNTEdO
-TkxLSUxLSkhKS0lFQkBHSkZEQ0dFRUVEQkI/PD8+Pj84ODk4ODg4PDk3OTk6OTw7
-Njk6ODc4OjY3Oj09OTk5OTk4Nzk3QEA6PDlBOzo5O0A8PT49PD1FPz5APzw3OTw8
-OTU3ODs8PTo3OTk3ODo7Ozs7PD8/QENFSUlKS0ZFRUVDQUhFQEE9Pzs/PDo7PD1C
-RUlISktHR0dGSElHRkZBOzs9QTo5Njk2OTU1OTg5ODQ1Njc3Nzc4Nzk7Ozw3ODo9
-Pz8/Pz9HS0xOT1BLSUhISUtRUE1KTkpDR0RBQkVFQD5BREdJRkZISUxNTEhHREQ/
-Pjs5ODs9PDs4NzMzNjY4OTo4NzQ4ODk0NTU1NTQ0NDY3OTo5ODc2RDg3Ojc1NDQ3
-NzY0MjQ0Nzk6OTc5ODg7PTs7PDk4OTc6ODY1Njg0Njc3ODg3ODk5Ojk7PTw9QEBA
-QD9AQkNEP0E9Pj5AQUFCQkBBQEFDPkJAP0FBQEE/QkJERURBRUVFRUdJSUhISk1K
-TEpFRkZIRkdFR0hIR0ZGREVIR0ZIR0ZJSEdFQkVIRkdISEhGRkVHSEhISk1NSktK
-TkpJTExMSUdJRkpLSUlHR0ZGS0tJRklIS0tMUE9NTU5WVlpRUl1gYmNlYmFgX11i
-Y2FgYWFlZWFkY2JpamdmaGZlZWRjZWJdW1xdYF5bW1tfYF5gZGFjYmZmY2JcWllX
-V1NTUlBQT05NS0xJR0ZHRkVKTUlFSklMTElKTUpKSkpIRkdJSktNRURFR0NFR0ZF
-SEhGS0ZFRkNCRklISUpKRkZKTEtISUxISkxMT1JTVlZYWVpZW1xbWFdXWFZZV1RW
-VlZZWVlXV1dWVVZVU1FOTE9KR0hIRkZISUlKSlBNSktNTUxMT1BTVlVWV1lZWldX
-WVtaXF1iX2BgYmdqbG5zdXV4eHp8e35+gXx+gH9/fXt9fHh3eHp4dHJ0dnVzbWtq
-aWljX1xaWlRRUEtLSEdFSElITEpISEhHSEdGSEdHRUdIRkZEREJDRUZERUlFRUhK
-SElJSklJSUVHR0hKS0lESEhISkZERkZFSElIRUZHR0ZJRkdFR0dFRkhKRkdJSUpJ
-S0pIR0tKS0tJTUtISktLSUtKR0lLS0hIS0tMSUlIS0pNSExGRURDRkdGRkdHSUlI
-SEhLTUlJSUxKT01NT0xMTE5XT09OSkpNUE9QTkxLT01KS01OTk5KSEtKTExKSUpL
-TUxLS0tMTExIREJBQEBEQ0E/QD5BQUFDRz9DR0FCR0ZFQ0lJTk9PT1BUU1FTUVJS
-VFRXVFNWWlZXW1pZWlhaXVlaWllZXF9dXl9dXF1dX2FjY2RkZWJjZGVmZ2dkZmhp
-Zmhrbm1tbXFvb3Fzdnt+goWHio2SkZKSkpCOjIqMjIyKi4eHh4eEhYeGhYKBgoKE
-hoKCgYCCgoKCgYB+fHp3cXJxbm5tbG9xdXFwcXFta2RZT0pJTEpITE5NTUtLSk5N
-TE9MS0tNTU9LS0xPTk1QVFNSUVFRUVBSUVFQUE5QT1JTUFFPTUpMTE5LS0pOTVJT
-e7TGz9fd4OPl5+jp6kpHSUhNTk5JSklMTEtMUElLRkhHQ0VGRUJHQT5CQ0FCRENI
-Pzs+PDw7ODg7OT06Ojg3Nzg2ODc3OTw4ODc5ODo6Nzg0NDs/Ojs6OTs5Ojo+PD5A
-Ozo7Ojo5OT1BPDk9QT48PjxAPT09Ozg5Ojo7OT05Ojg4OD08Ojw6Ojw8Pj9CRElL
-T0tMSEVEREBATkk+Pj9AOj89Pz89P0NITExPSkdFSEVDP0NBQjw7PDo4Ojg5NjU3
-Nzg1NTY2NTg3Nzo5Njg0OTk5OTs5Ozw+OjxDRlBOUU9UT01IQ0JESExOTEpJR0ZG
-R0dFRERDQ0JAREhLSEhJTEtLS0tJR0dDRURCQUFAPTw9ODYzNTc5OTg1MzQ2NjY4
-Nzk3NzY2NjUyNTY2NTY3NTc2NTU0NDM4OTY2Njs6OT0+P0I8QEA8Pj8/PT4/PT07
-PD08OTg6PD08Pj0+QEFCQEJAQURFRUhGSkVGSUZERUZBQUVFRENBREZERkVEQUND
-Q0BEQ0JEREZHR0VFREdFQ0VFRkVESExKSElJSUdGRURER0VLSEVIR0hGSEZISkZH
-SUdFRERFRkNERkhHR0dKSktNS0tIRkZLTEpHSEZGRkRIR0ZHSElHSElGTEhHR0lL
-TE1MUE1PUltdVldWXV1eYWVjZF9gXl1gYWFjZWRiYGJiZGdnZmZmZGdjYmNfX2Fd
-WlxeXltdYF5hYF9fYl9gYGJfWldXV1VUUVFQUU5PS0hKTU5LSUZCRkRJTEpISUpK
-SkpLTUxLSUhFR0hJSEtHR0hDREhGSUhDREZDQ0NCQ0RKSEhKS0xLS0xIRkdISkxL
-SklLTU9QWFZXWVlWWFdYV1taWFlZWFVWVlZXWFpZWlxeXF1aVVRWVlFRTU1PTFBP
-T05QUlFSUE9PU1RUVlhaW1xeX11eYF5fXV9jZGJfYmVla2xuc3Jyc3V3e3x+fXx9
-fH17e3x5enp3dnh3dnl7enhydHNvbGppZGJdWlVTU1FRTUtKSEZGSEdJSUhIRkdH
-R0hGRkdHSEhFRkZCRUdCRUZKSEVER0dISEdGSk1LRkhHR0VGSktJSEZHSUlJSEhE
-Q0RHRkdIR0dJSkhHR0dERUVJSElKSEhISklISkhJTEpISklGR0tJR0pKSUlISElG
-R0dGR0tLSkhHREdJSkZHRkZISEtISktISktOS0pLTE9PTUpNTUpMSUtLTktLTE1M
-TEtNS0xMS09PUk9QTkxJS0pKSUdHSUtMTUlJSkxNSkpGRERGREJGRUVDQkNERkZD
-QkJFSkpJSk1QTk1NUFBRT05PTVFTUU9UVFdXWlhaXVlYWltUVVdZWVZYW2BdWFpa
-WllZXF5fY2JkZ2ZpZWlqa2praWhoampmZGdpa2psbmtucHByd3h8goiIio2Pj46P
-j4yIiYeEiImGhYKBhIKDhYSFgYGCgYCDgYKCf35/gX+DgX98eXNxcnBramhmam1y
-cHBvcGxnX1ZNTUtKSUdISEdKTE1NS0xKSkxPTk5MTEpLTElMS0tMT05PUE1PUVFS
-Tk1NT1BQTE5RUlFPTkxOTEtKTEpLWld3tcbQ19vg4ubn6enqREFESEhKSUlKRUdM
-S0dHSUtJSUVIRkZIQ0JEQjs+QkRGRENDOzs8OTk5NzY5OkA5PDo6Nzs6Nzc3NzY2
-OTw7PDo5Nzo9Ojk4Ojo8PDo5Oj08Ojg4Ozw6PTw9QD06Njc8ODo8Oz0+PTw8Ojo+
-Ozk7PDw9Ozs7PDo5OTo8Oz09P0NHSEtJSEVHSEJCQT5APj1AQD88PT5AQ0VGSEpN
-Tk5NTENCQkFBPTo9Ozs5Ojs5ODo/Ojo4NDY3Ozc3NjY5OTc2Pzk3ODo8Pjw9Pzw/
-Q0dHSktOTE9RTkxHRkdFR0pJRkdJSUhISURDQ0NBREZKTEpKR0dLS01LS0tJSkpH
-R0dFREJEQj48OT08PDk1ODg3NTc3NDQ1NjY2NTY2NjU1NTU2Nzc4ODg1NTY4NDQ2
-OTY4Pjw7Oz9BQT9EQ0NCQkFFRERGQ0BAPkFBQkVDQUJEREREQ0VGRUZGRklJSEhK
-SkZISEtHSEZGRUdHRUlHSUtIR0NERUNDQT9ERERGRUVISUpKR0ZGRUdKRkhKSktJ
-SUZGSUlJSkpGSUhHSkdHR0lGREdFRkVFR0REREdIR0dGRkdIRkZKSUtKSElISkdM
-SklGRkRFREVERUVGR0hIRklGSklMT0xMTUtKS1JWVVZaXFpbXl9gYGJiYF9fYWNm
-ZmhkZGJkY2JnY2dnZ2ViYGJhYF5cX15aV1ldW1lcXF1eXV1dW1pdXV1bVFRTUVBP
-Tk9QUU5LTUlJSUtKR0dFREZLTEhISEdISk5NS0dIRkZHRkBESUhGR0lHRUZIR0VE
-REVFRENDQ0VGRUNDSEZAR0VHSEtHSk5OS0pKTlFSUlNXVVhYVldVVlZZWVhXVVZa
-WFhXWlxeXWBcW1xeXlxcWlhXVVdUVVdZWVhXWFxbWVpaWVpZXF5hZGNgXmFiYWBg
-YWFjYGFiZGdpbG1xc3NydHJ3e4B8e3p5d3p6enh3dnR1dXd1c3VzdnNwcG9uZ2Nf
-XF1aVlJTUU5MSUhHSUZEQkNGRURERUdJS0lJSUdKTERERUdFREhKR0ZISkZER0dG
-SUlHSEpKSklISkdMSkVDQ0RETE1LSEtHSUdKSEZHR0lJSkdFR0REREhHSUtLSEtI
-SUdLR0dISUdHSUlKS0hISUpISUhGSktISUlHSEhGQ0VIR0REREZGR0lISkhGR0pK
-S0tMSkpLTU5LTExOS0tLS01NSkxMS0pLSU1MTU1JS1BNTk1OT0pJS0lJSkpIS0xL
-S0lHSUpKSUpEREZGRkVHR0ZIR0dGR0pHSUpLSUpNUFNVT09RT01QTlFOTlBQT1Za
-WVlZWFpZWFpZVlpWVFVaWFhZW1tWW1lbXVpaYGJiZGdmaGxtbG5tbm9ycm9ra2ll
-Zmlsa2tsbWtubG90d3p7foSHiIiIiIaGiYaKiYeGhISEgoODhIOCg4KCgH+AgIKA
-f3+BfH1/f3x/gX56dnNwbGdkZmRkam9ubW9wbmhfVU9PTUpLSUdJTUlJSUtNTUtN
-TU1PTExOSk1OTU9NTk9PT01QUVBRUE1JSUxPT1FMTlFQUVBNTE9NTE1LTUhOUHS1
-xtDY3OHj5ejp6upJRUVHSElISkZGSEpLRUBFSEhISEZHRURDRkRBRj8+Q0VBQkBA
-QEJAOz00Ozg4ODo8Ojo7NzU2Nzg7QTk1NjU5Pj04Nzk5NTc3OD05O0A+PT47OTo7
-PT49PD09PT88Nzo6Ojw6Ojo3ODw8Pjs7Ozs7Ojo6OTo6PD9APD49QERERUpKSEdD
-REFAPz0/P0A+QD08Ozw7QEZHSElKT09OSUpHSEZFQUJAPTs7ODg6OTc6Ojo6OTs6
-Nzo8Ozs5NzY6ODc5ODlAQkE/QkJCQUVITEhHS0lMSktNTUhHREVFRUVGRUZJSUVD
-REZFQ0dLSU1NS0hISkpLTUxKTEtKR0VGRURDREZDPkBCQENDQDo3Ozg4NDU4NzY2
-NTQ0NTM1NzQ2MzU3ODc3NTQ3ODc1NDY2PD09P0FBRkNDQkNEQ0VHR0pKRkZDRERH
-R0RJSkZEQ0BDRURISEhJSUpKSkpLSkxKSklJRUZMSkdJRklISUdGSklJS0dERUVE
-RUZHSkpJS0pKR0hGR0dISEhIS0lLS0pKS0pLSEZHR0dIR0xISEtKSkhERUdLR0hH
-SUZGSEZFR0dHR0tHR0ZCRUpKSUlKRkVGR0hGSEZIRUNHSUhJSEhJRURISElJRkhI
-S01UWlZXWV1fYGBgYmFkZmZiY2hnZ2RhZGdnYl5eX2JkZWZkY2NhYWFfWFpbVlha
-XFtfWVdYWFZaW1hXWFdbW1ZSUFJSUU1NTExOTUtKR0lJR0pLRkZJR0tKSEVGRklJ
-S0lHRkNGRUNEREVFSERJSkdFQkREQ0ZFQ0FCRENCRENDQUBBQUU/QERJRERDSkpL
-TE5MTlFQUVRVVlhXVlhXV1hWWFhcW1pbW1paXmRiY2RkY2JjYl9hYF1cYGBgYGBf
-YmBgXl5iX15fYWFhZGRiZGNgYGNiY2NjYmJiZGNmaWlqa2xtcXBxc3R3ent7eHl4
-dnl5dXV1dHZ2dXt1dnV5cnJvb2xnZWBeXFpZVVNRTklIRkVHSEVMR0VCR0hKSUhF
-RURFR1JMR0RERUdDRURDRUdERUNGRUdGR0ZHS0lISEZHRkRFRENERkZFRklGR0dJ
-SEZHRkZFREhGR0ZFRkRGSElLSklHSEZJRUlKR0dDRkhKSkhISUpKSU5ISktJSElK
-SEhIR0lFSEdISkdLSEhGR0dGRUdHSUpKTUxKSklKS0xNTExMTEtLTVFNTEtKS0xO
-TUxLTU1MSk5MT09QSktKSkhITUtKTk5LS0xMS0pKS0dHSEdESElHRUZFRkpKS0tL
-TEtMTExNTU9OTU1PUFFQT1JNT1FTVVZXWFhZWVZYV1tcWllaWVhaW1hbVVRUU1RX
-VVlgYWNmamtvcHN1dnd3enp1cWtsa2loaGpra2pqamxvbnBydHd7fX6Ag4eEgoWG
-h4WEhYWEg4KBgICCgoODgYODgX99f4J+fX9/fX9/fn9+fHl2c29qZWJiY2FkaWpq
-bWtnYFlUUUxJSk5GR0hJSUlJTEpJS01LS01MT1NOTElKS01OT1FQUE5RUVNOTFBQ
-TE9NTFFQTk5PTkxQTU1OT01STk5UgLfG0Nfc4ePk5+jq6k9LS0xJR09OSkhITUlH
-RD5FQ0JGSUdFRUNIRENCRkRDQUE+OTo7OTc2Nzs3PDk6NzhAPkE4OTg3ODY5OTs6
-Ojo5OTk9Ojc3Nzc5Nzc6PDw+PTw7ODo7PT08Ojw6OTs6PT47OTs7Ojg4Nzc6PDo7
-Ojs6OTY6ODo/Pz89P0FEREdKR0hFRkZDQ0E9QEA+PTw7OTk6PDs+RUpMSUxPT05K
-R0ZEQ0REQDo7Ojs7OTg8Ojg7PTo+OkA7Ojk7OTg4Nzs9QD0+QkVGR0RBQD9DSExQ
-UE9MSkxMSUhJRURBQ0VDRUZFRklJRklJSkZGRkxNTUtKSUhJSklIR0hKSEdGRUZF
-R0dKR0RERUVDQ0JDQD07PTg4NzU3NjQ3OTUzNjU2ODY6NjY3ODc3ODk5Nzs4Ozg5
-Pjw9QEFFRkZNSUdHSU1OSEtHRUVFSElFRkVGREJBQ0dHR0lJTE5LSElISUdIRUZH
-RkZJSUdHSkhISEZLTEtHSEhLSEdHRkxHRERISEdMS0xJS0hJR0dGRkdJSUlJSklL
-SEdJSkdGSElJSUdGSklHR0dISUhGR0hIRUNFRkVFRUNHSEZERktIQ0hKSEdEQ0RG
-SUlGRkNERERGSUdHRktGREFGTElLSUpOT1VdXl1hXV5gY19dXl5gZmhoa2lkYF5c
-YmRjZWJiZGVmZmRjX11dXVtYVlZWVFVWWFtYWFpWVVRZWllXV1paV1hRUFVRT0tK
-SUhJSEVHREZJSExJRkVFR0pIRkVGSEhJR0dKR0dEQkNCQUBFRkVBREZGRENERkM/
-QkFBRD9AQT49QUFEQkNCRUdJRklLS0tMTUtKT1NUVVhZWFZaWVdYWFhbXGBgXV9h
-YV9fXmFjamlmZ2hkZGZoZ2dkaGZkZWVnaGpoZ2ZlZmRjY2NlZ2ZlamlkYmNlaWdk
-YmFlZWhpaWpram5vc3JzcHBydXZ2eXRvdHd5d3V4dXZyc3d0cnRyb3BsaWtjY1xc
-V1JUU1BOTUlGSEdJRUVEQ0NFR0hGR0dGRkFGR0dFREZKU0ZBRkVIQ0dERkdIR0dH
-RUVGRkRGRkdFR0VEREdIR0RER0RER0ZFRUdHR0lKR0lIR0ZGSEdGSkhERkdFRUlI
-T0xKS0tLR0ZGRkhJSkxLSkpISUlKSklISEhGR0hFRkZGSUdISktJSktLSElISEdN
-TE1LS0lKTUxNTUtLSUxNTk1MSkpNTE1NUk9OTEtKSk9NTU1PS0xLS0hISUhJSkpN
-Tk5LS0tMUE5KSEZHSkhFSEhJSEhKSkZISUpLS01MTlFOTk5OTlBPT1BPVFdUVVZV
-V1dWWlpYV1paW1tYVVhUUVBNTE9QU1JXWl5jZGVpbG1wdHV5e3x6e3lzb29tamZn
-Z2pqbWlqbW9tbWxyd3x8fn+DgIGEg4GEhYaGioeJgIGAgIB/gH+Cf4B9e3t8fn17
-foB+f4CAfX55cm5rZmVjYGJkZmlsbmtpaGReVVFQTEtLTElGSklLSk1LS0pMS0tN
-TUxNTk5MTE5NS0xPUlJTUVBQUVBRVFJNTU9NTU5NSk1OTU1KTlJRTFBOUleAuMfQ
-193g5Obo6enpTlBNTExJTkdGR0lNR0JEQ0FHP0FCQ0FBRD5ARENERUM/QD1BOjo/
-Nzk2Ozk6Nzk7Ozk8PDo4OT09OUA+PDc3ODc3NzQ2Njg3OjY7QDw4OTo8Ojk5PDo7
-OTk3ODg5Nzg7PTw4PTo7OTc4Ozw9Ojw+PTw6PDY4OTk5OT9EREhMTk1KSERCQUFC
-QUFBPEE9PTs5ODo7QEFFR0hISUxKSkpKRkRERUM9Pzw5ODo5ODo6OkZEQD06Ojw6
-PT0+QkJDRkZFRkRHSU5NSkZEREZMT1JTTkpJSUhDQ0JERERBQD5CQUNERURFRkhK
-SU5NUFBPTElLS0lGSEdERUFCQ0RFQkFESEhIR0hGQ0RER0hIPjw9Ozg3Nzc2Njk6
-Ojg3Ozw4ODg6ODQ0Nzc4NjY2Njg4Oj05OjxBQ0dJRkVDRUZJS0pKSEpIR0ZGRUVD
-RkNFRkRGSEdMSkpISktFR0dHSEhJSEZGRUdGRUlHSUhISEtPTUlGR0lLR0hJSUdG
-R0lJSUlKSUlKSUlISUhJSkRHSUlJR0pHSUhISUhJS0lIR0lLS0ZLS0xKSUZFRklH
-SUhFRkdDQ0JARURCQ0VERERFRERIRkVGRkRER0FBREJFRklGRkVEQ0dJSUxNUFFS
-Vl1hXl9eXl5kYV1fYWJlaWllY2NeXl9eYWNnZWBgYGJhYmFdW1dUVlVUVlRWVFhX
-WFhUVFRTVlVUUlFQU1ZPT09RUE1KSUxISENCQ0JAREJDRkhHRUREREBCQ0VFSEhJ
-SUhKSEVFQ0REREFBQT08QkRFRURDQkBBQD8+PD49Pz4+QkJERkZJSEVJR0hJS09O
-TUxOUVVVV1pdWlpYW15cWlteXl9hYWNlZGNmaGprampoaWdna2xta2hqcGpsa2pt
-bGxqaWlqaWlta2pnZ2ppaGlqaGNjZWZkZmZpaGhoamtsa25xcXBwcnJycXN2eHV1
-c3Z0dXZ2dXZ1c3Rwb3FvbWlnaGdhXFtZV1JQTkxLSkhHR0ZFQ0RGRkdGRkdHSEdG
-RUhGRkdGRkhHQ0VDQ0RDRkVGRkRGSUZFQ0JGR0hJRklQT0RJSEhIR0ZGSUdIRkpH
-SktHRkdIRkdIRkdLSEdIR0dFRUVGRkhFR0dGRkhIS0pJR0lJSUlIUE1LSktISkhI
-SUdJRERFRUhJRkdKS0xLSUhJSEpLTUxKSkpKTUpKR0hMS0tKSUlOTUpJS01NTU5P
-TkxMTE1MTkxISktOSklLSUlKSkVGSk5NTUxNS01NSElIR0ZJSEZISUlMS0xHSEdJ
-SUtKSk1PUFBNS0tOTk5QVFJVUlFTVFZUU1VVVllUVVZVV1lXWFNQTExNSUtNUFVZ
-X2JjZWdqbnF1eXd5fX16eHdvamlnY2JkZWZmamtqaGlub3B0eHp6g3+EhYN/gIOC
-g4OFgoGDg4J+fYKBgIKBf3t6e3x8e3p8fH1+foF9endybmliX19hZWdrb3JvbGdj
-XVlQS0xOS0xLTUpJSkhKSUhKTEtMS01KUE1PU05LS05OT1JPT09RT09OTk5PUFBO
-Tk1PTUxNTEtKSU1TTEpMTFdUXHu2x9DY3eDj5efo6upRVVFLR0ZJSElHRkVFRUNE
-REdIR0ZEQkBCQkNGSUlESEhEQkM9Ozs7ODg7PjY2Ojw8OTo5PTo5ODs+OTw9PDw4
-ODk4Ozc5OTk5PDo9OTg3ODo4ODs7Ojo6Ojk5Njg3ODg4ODY6Ozo5Nzo+Ozw8PDw6
-OTg8Ozo5OT5CREVKUFJSUExIREFBQD4+PkA+QUA7OTg4QDo9Q0VJSEdKS0hKTEhH
-RUJAQUA8ODs9PT47OTg8PD1CQUA8PUBAQkRDRklMS01OTkpJSUZIR0dGSEtPUE5P
-SkhFREJBQkFBQkFCREJGRERGRUdGRUVISk1NT0xJS0hLS0lGRURCRUZFSUZGR0RE
-SUhLS0VDQURHRkdHPzw8PTk7NjY4NzY4OTk0ODg2NTc3NzQ2Njc4PDc5Ojc5Ozs9
-QkJERkdGRUNHSUlJSkhHSEhKRUNGRURERkVERUZHTUxLSUpJSUlIRkZHR0hNRkZG
-SERFREJHS0hHS0dISktNSUlHRUdGSUpISktMSUhIR0ZGRUdFREVFRURFSEhHRUdK
-SElHSEVFSUZIR0lHSEpJSElJSEpHRkpJRkZESkdERENDRUJFQ0FCRURERUZFRERC
-REFDRUNDQ0JFSEhIQ0JEQUZFSkxNUldbXmBhY2BiZmNfZWFgX2JkZmJkYmFiYF9i
-YmdkZV9eYF9fXltdWllYV1VUUU5QUVRUU1NTU1ZXVVFQUE1NT0xMSUpNTEhJR0VH
-RUNCQ0REREFERkVMRUBEQ0JBQUVFRkdJRkRFREVDRkhEQEJCQ0A/PkFCQD5AQkNC
-QEBDQUNFQUFBQkNFREJDR0NFREVNT0xMTFFTVVhZW11gX2BfYWBfYGJjZGZoaGdp
-bGttbW9xcHBsa2xtcHRxb3BxcnJvb3JwcG5tbW1tbGtraWloamppamhmZ2VlZGVk
-ZWdoaWdqa2tqa25tb25tb3FvcHBxcnFycnN1dXR0c3JydHJ0cG9tamlmY15bXFtY
-VlJNTEhHR0dGREVKRkdIR0VHR0hKSUdIQkRDQ0RGRkdGR0ZFRERERUVEQ0ZFRUVE
-QkZGS0pJSUZKSUlJSElJRkZJSUdNSEdHSUlGSUdGR0lIRUdHSEdHR0ZHSUZER0VG
-S0tISElJSUdKSEtLSElMTEtKR0lKR0VIR0ZIRkRGRkhKS0tIR0lKTEpLS0dGSUtO
-TExOTUlLSUtLSEpMSkpKS0pKTExLTkxNT05OTUxKS0pLS01NSklJSktHSEhLTUtL
-SUtKSktJSUpHREZJR0dHSUhJSkpKSkxKSklMTk5OU1RTT1BPT09RU1BPUFBRUlFS
-U1FVVFhVVFZVUlRVVE5NSUpKSk5TVVhbX2JhZmpsbnR1eXl3d3d1dXNwaGtjYWFm
-am1ta2lmZ25tb3B2e318fYOCf3+Af35/gIB9foCAgH17foB+f4B+fnt6ent9enh4
-eHh8fnp2cm5nZGVgYWZqamxvcG1saGNaU05NSklKSExPTEtNSkVGR0lLSUpKS0lK
-TEpNTk9PUFFNTVFRT09TUFBRT09TUk9NTkxOT01LS0xNUVJNS0pNT01eerTH0Njd
-4eTm5+nq6k5aUU1JSUhJSENFQ0VFREZJRUhKSEZHREVFRUNHTEZIRUVCPz89Ojo5
-NTY5Nzk7PTo8PjY2ODY4PTo6Nzg6ODk2ODg4ODk5OTs5Ozk6NzY5Nzs4ODc4ODk4
-NzY3OTo4ODc4OTk5Ojk5OTo8Ojs4OTo7OT49PTw6PEFCQ0hMUFFSTUZCQkFCPz0/
-PkE+Pzo4OTg7Oz5ARUlMSkpHSEpMTExIRkNBQj9BQEJAQUFJPjxAQkFDRkNEQkNJ
-SUhKTFBRUlBOSkpHRUVISUpMTk1PUE5JR0dERENJRUBAQUA/PkJFREVHSUNGR0pM
-TE9NTUhGSElLR0dDQkBBRkVFREREQ0ZJTE1MS0hGRkdHR0dCPDk2ODo4Ozk5OTY1
-NDU2ODc3OTY0NDQ1Ojs7PD07QD47PDtBQkVFRUVJSEhISUtKR0dHSEZGREZEQkVH
-Q0NCR0tMSEdISUpKSkhFR0tKSEdFRERFREZIUFVKSEhJSEpLSElKSUhGSEdGRkdI
-R0pJRUhHREZFRkRHRUVGRkhFRENDREZGR0hISUdHSEdFSEhJS0tJSEhJR0hIR0hE
-RUFCQkJFREJFQkJERUNGRUNDQ0NERERAQT5EREJCQUNHQ0E9QkJBQkZJTFFUV1ti
-YGJhXF9pZGFgYmFgYGNjZmdfYGFhX2FhYGNkYl9eW1tdWFlaVldXVVBPUVFSUVZV
-UlJSVFZSTk5RUVNTUExMTkhJTEhKRkRERURDSEhIS0ZDQkJFQ0FDRENCRENDREVF
-RUJCRENCRURLUEE+Pz5DQ0NBQkFAREJBQUFCREZFREJFR0RDQUNCP0ZGRktKTU9P
-UFJTWF1cYmZlY2ZmZGZlZ2psampramxta3Nzb3Z1c3FzcnFxb3B0c3R2d3V0c3Nz
-c3FycnBxbmxsbGppa2lmZ2tsaWtmZWZkZmZpa2lpa2traWtsb2tramxta2tubnFy
-dHN0dHF1cnNwcXNwbm5nZGVjX2JhXVpVUExKS0tGSERHRkZGRENFRkdHR0dGRUhE
-Q0RFREZIR0VCQ0ZGRUVFREJBRENFREVMR0hGSEtJR0ZFQ0VEREdKSUlMS0tKRkVE
-RkdGSEhFRUhHRUVDR0dIR0ZFRkVFRkhISEhHSUlHSEpHRkhJSUlISUdJR0ZHRkNG
-R0dJSEhGR0hJSktNTUtMTEpLSkhJS0pJTEtPUVFMR0lKS0xOT05LTExJSkxNTE5O
-TU1MTElISElLR0hLTUdIS0tMSktOS0hKSElJSUpLS0lJR0dGSkpJRkdLTktLSUpL
-TU1OUU9OUFNNT1BTUk9SUE9PUVBQUVFSU1hVV1NTVlRUVVJUTkxIRklJTVBWWllb
-XmJjZ2ptbnBvcHR0dHNxb2xmYmBhYGFlamplYmRlZ2hrbnJ2en1+fnx8fYB9fX2A
-gH16e3p5d3V5eXl6eXl4d3l6eHl5eXZ0dHZ3d3dybGVjXmBmam1tbmpqaWdhW1VT
-TUtGR0pNSktMTVFLSEhISUlKSkpLSkpKT05PTE5OT09OTk9OT1JSUFJPUFBSUU9O
-T0lKTUxMTUxMS09MTUxMS1WNt8fR2N3g5Obo6OrpSUtIS0tNSkdFQ0FGRUVJSEhI
-SEVFSEZERENBREJDREBCREFBPz08OTk4NzY6OTo6Ozs1NDg4PDc5PDk5OTg2NzU2
-NTY5Ozk4Ozk5PT08OTc2ODc6PDg6OTs4Nzo2ODk4Nzg4Nzw7Ojo5OTg3Nzs2Oj88
-Ozw+P0E8QUFBQ0lLTEpJRkRCQT8+Oz5AQUA7OUA9Ozs+QkFCRElIR0lMTE1MSkdF
-R0RBREREQEFBQT9FQkFEQUJGSUpISkpLS0lNUU5MS0xMTUxHRkpISUpISUlLTEpG
-SEVDQUVFRERBQkZBQUVDREVGSUpISk1MSUhHRkdGR0ZGRURDQz5CQkE/QENITk5Q
-Tk1LS0hFRkhKTEVAPTo7PTg2NzY5NTU5OTk7ODg6ODQ2Njc6QENBQ0BBPEE9OztA
-QkNFR0pLSklKS0pHRURCREZHSUdKSEdFREdJSkpJSExISUpLRkdISUlGRkdHREZF
-RUpJR0hJSEdJS0tLTElFR0dISEVGSUlGRkZKREZERUVGRUZFRkZHRUNFQ0NEREhH
-SEVGR0ZGRkZKTE1MTEtKSUdJSEZHRkRDRUVFRkNBQEdERUJBQ0JFQkJDQ0VFQkA9
-R0VCQkJGRENCQ0dHRUNBQkVMU1NYXF5haGNhYmdnY2JiZWJhYmFlY2BiY2NhXVtg
-YF1dXFxcWldWVVVTUlJQUlFQUFFTUVBOUVJQUExLTU9QUlRRUU9MTUlJSEhGRURG
-Q0NCRkRIR0RFQUFISEVDQkVCQ0JFRERGRkVEQT1BQUZDREJCQD0+Pz5CP0BAQ0A+
-QENAQT49QENFRDxAQUNEQkdHSUhOUlRUVFdYWl9jZWdmZmhoaW9rbG9vbW5xc3V3
-dXZ2d3d3d3h5eHh4eHp6d3Z4dnd6eHd2cnNzdXFycW1vbWxrbG1tbGxsamtsZmln
-aGpqamhnaWdpaWpqa2dmaGlrbW5wb3BydndzdHd1cG9vbWxtbWtoZGFgWlhYVlNQ
-TEpLSUpGSEhGRkZHSEVJSUhISEdFRkZERUVEREJFQURFRERERkNCQUVGRklHRUZE
-SERFRERERUZFRENERkVISUpKR0VHRkdHR0hIRkRJR0hIR0pHRUZHR0dFRURCRkpJ
-SEdMSklIWkxHR0pJSEhKRkVJR0dGR0ZCRENHR0hHS0hISEpLSUhKSkxKS0tMS0xQ
-TU5MTE1LTU1NTlBOTE9PTk1MSklJSU1OTk1NTUpKTUpLSklITElIT0dHRUZKSklH
-SUhHSkpJSUhISUtNSUlHSUZISEdHSEpLTU1NTkxNUE5PTFFPTlBTUU9QUFBWVlZW
-V1ZeXFtZVFRSUVFLTUlGRUhQUlNYVlZaXV9jZmlqbGdrbG9ycXFwa2lhX2BhYWJj
-ZGJkZWVlaG1wc3V4e3d2en1/fn56enp9e3d3eHVzcnVxb3V0dXR0dHN2dnVzcXBy
-c25ubmtkYV1eYmhtcGxqZmRgYF1aVExMS01MSktKSU1MT0tJSklISktLSkhHSkxL
-S01LTE1LTU5LS0xRUlFQU1JSVFBPTlFST01NTExLSUlJSUpLSkhMU4+1x9HZ3uLk
-5ujo6elGRktJSkVHSUtEQUJFRkhISEVEQkhFSUpHSEhFSURCQ0E+PT9APjw+Pzc6
-NDo5NjQ6NzY2NT06Ojg3Njg4NjY1Njg4ODU1Nzg4Nzo7Pjs5OTc6OTs8OTc2PDs8
-Ozw9Ozo6ODg8Pj5APDo3ODc3Nzg5Oj06Oj0+PkJDQUJCR0dGQ0NGREJBQUJAPT8+
-PTo6OTs7PkBAQUJCRktLTExMTkxKSEdHSUdGRURHQ0JBQ0BAQ0VER0tLT05LSktL
-TEtGSEhKS0pJSkhJS0xNS0tIR0hISEdGREFBQkRHRkVFR0lIQkNERURHSEhISkxL
-R0RFRkVDRkZFRUQ/QUBBPz09REhJTE5UUE1LTlFJSUpJSEZCQzs6Njk4NjI1OTg4
-OTc4Nzk5ODo8PD4+QUFCQEA9Pj07Oz9AQ0ZHSkpKSkhIRkdFQ0VFRklKS0pHSEpI
-S0lIS0pKSkxKRkhIR0VFSEZJSEVFRUJDRkhHRkZGSEpISk1NTU5JRURGR0ZGSUpG
-SUtHRkZFREJDREZHRUNBQkNHQUFDSElJSEZGREVISEhKSk1KSEdHSEdHR0hGREVE
-REdKRkJEQ0REQUFAREJFRUFDRUNERUJBQ0M+P0BDQ0FARENBQkFDRUhPU1lbXWJn
-b2poZWVmYmFjZWRhX11eYGFhXl1cXFxcW1tZV1hXVFNUU1RTUlBPT1BRTk1OTk9S
-UUxLTFJQT01OTk5KTlJRTUxHSUpISkdEREVDQ0RDREVCQUVDREVEQ0VCQUJFRUVF
-REdEQEJBQUFFQz4/Pzw8Oz1DQ0M/PT4/QDs6O0BBPT9AQUJCRkZDQURFSkhOVFdZ
-WVxgYGJlbGlqbGtub29scnV0cnR2d3V4dHV8e3x8fHl6ent8fn55fHl7fHx6eXh4
-dnR0cnJvcG9zcWxsbm9tbW5tZmprbWxpaWhnZmloa2praWZmZ2ZkZ2pucG9tbm1y
-cnFzdHNycW9qZ2VoZ2ZlYF9bWlZTUU5MSEZISEtISElGSUlKSkdBQUNDREZEQ0NF
-Q0JFREJBRENFRUVCR0RGRUNFRkRIREJEREZFQ0ZFRUZHRUVGR0ZEREVGR0ZER0lH
-RERDRUVFRkdIR0hHREdKSEdFRkhGR0dISkdGSUhNS0lJSklJR0dGQ0JEQ0JERUdG
-SEZFR0ZGSEhJSklKS0lIS0dKSEtNTExLS0pKTE5LS0pLTlFQTElMTVBPTU1PTElL
-TElISkxKSUpLSUxJTEtLSEdJSElJRUVFSEpHSEtISkxNTElKSUpKSUdHSElLS0xM
-S05OS05OUVRSUlJUUFBRUlVUUVZXV1hYWVpdW1lbV1dUUUxISERDREdPUFNYV1lY
-XGJkZWdmbmxramxta2pnY2BfXlxdYmNjZGJmZmZoamtwc3R2dnh5enl7fX17eXd1
-c3NydXRwbGpja2ttb3FwcXN1cnNsbG1va2llYV9bXV5gZGhrbGViYmFaVlJSS0lP
-S0pJSUpLS0lKS0lJSElJSEtMTE5NSUZFSUxLTk9NTk1LSU5PT1BQVFFQUVBOTk5P
-T09OS0pKSUxKS0tMTU5YgLTG0dnd4eTm6Ojp6kxOS0ZHRkVFQkNDREhISUlFRERI
-RkZJRkdDR0VDQ0BBQTw7Ojw5ODc8OTg2ODc3QEEyNTc1NTU2NTc1NDk3NjQyNDU0
-Njc5OTk5OTk7Ozo6ODc8Oj1APDs6PDo4PTo5Ojw5Ojg7Ojk7OTk3ODg3OTk6PT08
-PT07QUBBQUJCREJCQEFCQkBAOzs4Ojs6Ojk6PD9AQEBCQkZKSkpLTlBQT0tJSk1K
-SkdHRUdIQ0JCQkRCQ0RHTE1PTUtNTUtHRkhGQ0dHR0lKS0xNSlBNTElJUUREREVH
-R0RDREdJSUtFSEdEQ0RFQ0VISkhFSEhERUVERkVFRkZGQD4/Pz49P0NGSEtPT1JR
-UU9OTUtISUxMTkdDPzw6Njk6ODo5Ozo3Pjw5Oj9CQz5CQ0RDQ0BAPDo6OzxAQUJG
-SElKSEtMTEdHRkZBQkJISktPUE9NSkhGR0lNSklMTElHRkZISEREREhJSEdGRkRE
-REhGREdJSUtMTUtNTEdGQ0NFRkVISEhGRktHREFCQUJCRUVDRkVCRUZFRUVHR0hD
-RERHSUpNS0hIS0dFSEhISkdEREZEQkRDRUdGRkVFRkVDQkFAREZFR0RCQUBBQkNB
-QUJCQEBBRkFBQkVGREZFSU9TWFhdYWVoaGRkZWdlYWBiYmNhYWFfX11aWltZWV5e
-WFhWU1RSUVFRT09RUVFQTktMS0pNT1FPSUlLUVJTUE1LSUpNTE1LSktKS0dJSEVF
-Q0JDRUJCRENGR0ZEQUNEQUJCQkNGQUNCQ0JCQUA/QD5APUM8PkI+Pz4/PDs+PkA+
-QEBAQUFDQUFEP0JISEpISEhJSU5ZWVhbX2JmZ2xqb25xcnJ3dXN0d3h6eHl6d3h6
-fnx8f318f3x8fXt6fHt8f359e3d3dnZzdnZ3dXJydXd0cnBubW5qbGxramxpaWlm
-ZWZlZWdoaGpqZmFhYmFiZGdoa2xqbm9vcXNwbW5sa2pqbWhkZ2JhXl1aV1RQTUlH
-RkZJS0xKS0dHSklHR0ZDREZEREVDSERFR0VER0RERERER0RERElKR0ZERkRDQ0NE
-Q0ZFRUZDRUZDQ0NFSEhGREVGRkpISU1JSUlEREVGR0ZISElISUhIR0hLS0tGREZH
-SklHSkpKR0VHSEhHR0VDREJFREZJSEZHRkZHSEdHR0hKTEpJSElJSUtNSEpLSklJ
-S01LTExJS01NTUxOT0xPTkxMS01MTEpISElMSElGSEhHRkhISUZFRkhJRkhIRUdH
-SUxKSEtMTExISktHRklJSEdISkpKS0pLTU5OUFFTVFZTVVRRT1BTVFRaWFpcW1td
-W1xeXVpbV1ROSUZDQkNGSEpQUVNUVlhaXV5iZGZqaWlpa2hpZ2RhX19bXl9fYWJh
-ZGVkZGVpa2pvdHJ2eHp3eHt7fHh1c3Fwb21ra2psaWlnZmdjZ2lmZ2lsbmtmZmZl
-Yl1YWVZVWVxjZGRiYVpaV1RRTUtKSUtMSklISExJTFFWSklLSklKSUtLTEtLR0dH
-SElKS05OUU5JS01PUFBQUFBSUU9PTUxMTUxLS0xKSkhJTUtLTVx+scbQ2N3i5Ofn
-6OrpRUhKSEhFQ0lCRUVFRkVFRUVKTElHP0FEQkNERkFCQUJBPztBPDw5Ojc7Ojw7
-ODc9ODY2Njc5NjU2NTY5NjY2Nzc1NTw7PDk5OTk3Oz08PDo7Ojc8PTw5Nzg8QDo5
-Ojs3Nzo4Njg3OTs3ODs5OTY4OTc7PD9AQEI9QEJCRENDR0Q/PD08Ozw7ODk4PDs3
-OT0/QEREQ0FHS0tLTE1QT09NTExISEhGSEdERUVDRkZFRENGSEtLTkxMR09LSEZF
-RUVBQkdKR0pOT0xOTk5NSUtIRkZERkVEQ0NFSUlIS0tKSEZISElJSEtKSEhGR0dF
-RUlIRkVERkVCPTw/PT9GSEpKTU5NTU1NT09MTkpKSUtNTUpEQDs4Nzs8Pjs/Ojg3
-QT8+P0NFREVKSEZEQzw7Ozg7Oj4/QkRFSUZLSkxLSERDRUFBQ0tMTE1PTlBOS0pJ
-SUdJSlBJR0hFRUdFQkdHR0hLSUZKRkZERkdJR0VMTk1NSUlIRkdHRkVHRkRFR0ZC
-QkRDQD48QEJDQ0NGRUZDQ0ZISURGREJFR0hFQ0JFRklMSEZGSUlHSEZHREBGQkVA
-Q0JGRkVHRERCRUJDRkZEQ0JAQUBEQUFAQEREQEBARENGQj9FREVIS1JYXmFiZGVp
-ZWBfYmNgYGNgYWNfW1teXFxZWltcXFpWVlJTVFJUVlBOUlJUUk1MSUpMSkpJSUhK
-S09OS0tNTUlISUtIR0pHR0pJREdISUFCQkJCQkFARkJAQUJCQ0NCQUNDP0BBQD5A
-QD9AQ0E/QUVDQj07Ojg6P0FCQkA+QD9AQURAQEVERENEQERHSkpJSU9PT1VXXV1e
-ZGhpa250dnV2eXx9e3h6fXx8fHp5en58fnt8fn5/f39/gH58enx8e3t7e3l5dnZ4
-eXh2dnR1c3N0dnBub29rbGxoa2poa2dkZGNnZmVkZ2RlYWFfYWJiYmZpaGdpaWpu
-bWlqa2lpaWlmZ2djY19eWVVUUVBOTUlFQkhJSUlIRkZFR0lJSEVGRUVHSEdGQkND
-RUREREZCREJAREVDQkVHQ0JGSENDRERFQ0NCQ0ZCQUNERENGRkVCRUdJRUlJSEhM
-TEpGR0lKSUpJSEdIS0lHRUVFRkVFRkhHR0dISkpJS0hFR0lGRUdFRUVFRUdIRUdJ
-SEhGR0ZFRUZERklLSEdIS0xLTE1JSUpKTEpMS0xLTEtNTElNTE1MTU5KTUtJS0pJ
-SUhKSUlKS0tJSEdJSElISEZISUpJSUdIS0pKTExLSklHSklIRkNFR0dJS0xKTU9R
-UU9QUFJSUlRRUlBTU1NVWVVWWVlZWlxdXV9gXVlWUktGQj0+Q0VJTU1PVFVYWVlb
-XWBiX19kZmNjY2JkYV5eXlpcXF1eXlxcXV9gYGdpa21wcXR3e3p3eHd2cW9ubGpq
-Z2hmZGNiYWBgYWBdXFxdX2RjXl5eXVpZVVJRT01VWVxfYF5bW1lUUUxOS1BPTEpJ
-SUtKSk1MSkpJSEdKSElMS0lISEhLTEpNS0tKSkxNTk5LTExOTlBRTU1PTUtNTE1O
-TU1PUFFMSURJTU5OUIGyyNHX3eDk5efp6elFRkdKTUdERUhGR0JCR0ZFRklIRUZC
-REFCRUhHRD5APD89OTs9Ozo7PDg8PDs8PTw5ODc6OTk7Ojg5PDk6PDc1NTY1Njc3
-ODk5Ojs6PD44OTk4ODo4ODg5Ojg4ODk3Ojg2Nzk5Njk6OTg4Ojg6OTk4ODo9QUA/
-QUBCQkREQ0NDQj08PDo5Ojg7OTg3Nzk7QEVER0NGRUdJSktJSktKS0xJRUZGRkZH
-R0hGQ0dKR0hISEdJSEpKR0pMSElKRkZFRUVERUhISktOUk5NTE5LRkhHS0ZESEdF
-R0ZJSEZGSUlKSkxNTk1LS0lJSEhHRUZHRkhISEZBQUJAPT4/QUVFSUhLS0xNUE1L
-S0pJTExNT09RS0ZCOzs3Ozo7Ozs9Ozw9PUNEQ0NISktNSkZBOzk6Ojo8PkFDRERH
-SUpLSkhISERDQUNFR0xOUFJQUVBNTEpHS0dGSklKS0ZGRUJCQUNCREVISkZJR0dD
-SEhKS0tKSUZGR0hHSklHRUZGRkRDRkhGQ0JAP0NDQUNBQkVEREVCRkZEREREREND
-Q0RHRkZBQ0VJRkNFR0ZGR0VFRkBARUNCSUFBQENEQkNBQ0JCQ0NDREFAQUNFQkFA
-QEBCQUJBQkE9QkFCQ0ZKUFZaXmFiXmJeXVpeX2BfY11gYV1aWVZXWFVTV1lZWFVS
-U1BRUlNTUFBPT09PTktISEhHSkhHSklKSk5PS0lKS0VGRUtISERDRUdFQkRGQ0FD
-R0RFQ0NCREQ/RERDQ0NDQ0JBPjw8QkFCQz5APT9CRENCQ0A8Ozk9RENGREE+Oz4/
-P0JBQUhJQ0A/QENIS0tJTE9RVFZdX2Rna3BydHd6e399fX1+fXx8fYB+foB9foGA
-fn5/gYB/fYGBgoCBfn19fHx7e3l6eXV2dnZ5eXh3d3VycXJwcG9vampvbGpoZ2Zk
-YmVjY2NkY2BhX2BjYmJgYGJiZmloaGloa2tpaGVjZWdnZ2ZkYl9ZVlFSUE5LSUlH
-QENHREhIRUhGSEhLR0ZFR0dHR0dHRkVGRUNCQz9CQUZBRERCQkFDQ0RERERFREJC
-RURFREJCQUFCQ0VER0tHSUhHSUdFSElITElMRkdISkxOSkVGSkhIS0hIR0tKRkRH
-SEpKSEZISEZFRUhIRURDRUZIRkdISEpHSEdDREZFQ0RISEhHR0ZHSUpJSUZGSEpL
-SkhKTE1MS09OS0xMSUpNS0tKSUlLSkZKTkpLS0xLSUtKR0hHSElKSUpLTEtNSUlL
-SklJS0lIRUdJSkpKSUpHR0lNUE9MTE9QUFFPT1FUVFJUV1VWWFhWVVRUVVZcXV1f
-XlxcW1ZPSkVCQj8/R0hKT1JSVFZYU1NWWV1gYGJiYGJfXVpZWFdVWVpbW1tcXFpZ
-XF9eYGVnaGxub3B0dXRycW1sbWpoZWJgYF5eW1laWFVWWVdUVldbW1xcWFZXVVBR
-UE9PUlJXWFlaWFhZVVVTUE9NTEpJSEpGSUtJSUtMSkhGRkhISklISElLSk1OTElJ
-SkpKTU1OS0pLTU5LTE9OTU5OTk5KSktLTk1OTkpNS0tLS1FTebDG0Nfb4OPm5+jp
-6UZHSEZJSU1IRkVDQkREQ0NHRUNFQ0tJREVGRkNDREJAQUE+OTg4Ojg8OTk3Ozs6
-Njo6Nzg4Ozs6PT08PDs8PTg8Pjw8PDo9PDw9Pjs+Ojk2ODo6ODczNzo6ODg1PTg4
-ODs3Njc3ODg7PTw6OTc6Ozo5Oj1AP0JEQkFBQkJBREA9PT09Ojo4PDo4OTU2Oj1C
-REVFRklHSEdDREZHRkZISElFRkhJR0ZJSkpJS0lMTElGRkdJR0lISUlGSElIRUVE
-R0dLSklKS0pNS0lIRkNHR0dER0VGR0dGSElHSEhJSktLTE9RT0xKTEtIS0hGREVD
-Q0VJRkRCQj47PEBDRkpJTUxOTk1QT01KRkVFRklNT1BPS0Q/Ojc3NjY2Nzo7QD9D
-REVFSExNTUpHRUE/Ozs6PT8/QERGRElISEdJR0lJRUpNRUVGTUtPUU5PUk5PSkpH
-SEdKRkdISERGQ0JBP0FCQ0dJRkdJSktLSEtPTElIRkVHSEdFR0lISEZEREVGRURD
-RUJDQkNDQkBAQkRDQkRDREZFRUdCQUNDRUVERUdEQkRFQ0RERkZHR0VFQkU/QUZG
-SkhCQT9CQkJAQkFFP0FDQkFDQT9CQEFDREFCQkNBQkE9QUI+P0lLT1ZaXFtcXl5d
-XlxcXl5cXF9fXVlWV1RVV1ZUVVRSUVFSUlJQTk1OT05NTExMT0tHR0ZJR0dJRURH
-S0tOSEtOS0hIRk1KREZERkZGRElHQ0RDQUJERkJAQEI+QEJBQkJBQEFAP0NAREVA
-QT5CQ0JBPz5CQD08Ojs+P0JBQD5DPTw5Oj4/RERFREI/QUFISkpOUlRUV1xiZWlq
-cXJ3en1/foCAgoOBgH9+gIGChIOBg4GAgH9/gYGCg4aGhYOCgoCAgH9+fXx7fHZ1
-eXp5eHh5dXRzdHJxc3BtbWtqamlkaWZjYF1aXV5eYF5cXlxeX19fYGJjZmRiYmNm
-aW5xaWZpZ2dmZWJgXFhXVkxNTEdFRUZJRERGRkdHRkdHR0ZGRkZGSEZDRUhISEhF
-RUdDQkBBQ0VFRkRDQURBQ0ZGRUZIR0VDQ0JBQkFAQ0RDQkFDRENDRkhIREZFRkhJ
-SElIR0lLSUhLSEZGSElKSkdJS0dFR0hKSEZGRkhLRUZEREZGSUpHSkZGRUZHSkhH
-SUpISEhGR0lGR0tIR0VGSkhISkhMSklKTUtLTk1NTUpMTk5PT0tNTUpJSkZGRkdJ
-SktLSkhHSUhHRkhJSUpJSUpNS0xLSElKSkxKSktJR0tKSk1MSUpKSkpLSkxNT1BR
-UVBQUFVST1FQU1NTVFRVUVRVV1tcYGBeW1hWU01IQj89QUJDRktMTlBRUlNTUlZU
-WF1fXV5cWVdXWVZVU1RUU1RVVVVXV1laW15gYWJlaWttcW9ubm9sb2plZFxdWVlX
-VldWVVJQUE1RTE5OUFJUVVZRTk9RUE9RUE9QUlBSVFhWVFVSVFRQTEpNSkZIR0dG
-[base64-encoded binary data removed by this diff; contents elided]
-enZzcXJybmxqZmRfXFxYWVlYUU1MT0tMR0dIRkZHRkNGR0dFRUhMS1FXY292bXSE
-joZ7dXBxb2ZaXmp9jZKKgoSLlqCcjnllX2RyhZSYkIR9dnx9fXx1d32DkpqXkH5u
-ZmdyfYWHgYGBhIyMiH90YVJOS0hBPT48Ozw5NzY8QkFBQkRPWWJjZ2loZmdjY2BZ
-VVNWVE5LS0dNTEtKSEpIRUdLRUdERUdGREVFR0NFRkhISUZIRUlMUE1RUlBQUFNT
-VlZZVlxbW19hX2FkZGNiYmFkZGJjZGFZU1dpeX11ZmFqgJGUinhyeouTi3hjXGuC
-iIJxbnuMgmRQYGFPSVV3kJWOfnZ9h4BuZl9PREJBQEBAPT07PTo8PkJAQUBDRkZI
-Sk9OT1JWXF1dYmNcXFxcWVxhZGJfX2BdXV9dXVtUUE5NTUlJR0ZDP0BBQD0+OzxB
-QD8/QD07PD49QD48QDw+Pzo5Ozw/Ozw8QT4+Pj09P0A8Pj49PUBAPj0+QUFFREZI
-SUZGQ0FCSEhFRERBQkI7Pjw8QEFDQkNEQUJCQUVAPkJBREVEREVFRUZHRURFQkZF
-RkVGSUhFRkZIRUVHR0dHREVHSEhGRUVERkZIRkZLSEhISk1NTUtMTExMSkpLS0pJ
-SEhESElGSEdJR0hIR0ZHRklJSnC2xtDX3ODj5efo6utVVlFPSUhISEpKS0xKRENJ
-RENDRkpKQ0JBP0JFSEdFSUdGREJCPjxBQT1AOzs8QEA+PDg/PDk6PDo6Ojw+QEE8
-Ozs9Pj07Ojk2Nzs5NztBQUJAPT08PEBAPDw9PDo3NTc6Ojk8Oj4+PDw9Pjw9PTo5
-PDw8OTg3Njc4Nzg7Ojo8PT9AQD87Oj4/QkNCREJDQkNBQTw8Ozw4NjY3Njo5PD0/
-PTw4ODk6Ojw+PkBBPDg6Ojw8OTk6Oz48Pjo6Pj07PTs+QEFDQUJBQUBAQEFAQEBB
-RUVDQD1AQ0FDPzs6ODg4ODc5O0A9Pj07PDw/QTw/QUFCQUFCQkFDRENDQDxAP0RE
-RUhISEZFRUZGRklERUZIRUtFQ0M/Ozs8Ozs6Oj05Ojo4Oz09QEJCQEI9Oz07PDo5
-P0NCPz8/QDs6Ojc6OTo9PDg3OTg6Ojs7PDw9PDs9PD4+Ozo9PDw9PEA/PT8+PEA/
-Ojg5Ozk5Nzc8Ojo5OTg6Ozw6ODk3PTw7OT0/Pjw7Ozk4Ojs7Ozo6OTk5ODw6OTc6
-ODk3ODU4Ozo8OTk4Ozg3NDY4ODc3NjUzNDg4PDs6Ozo5Nzg5Nzo3Ozs5Njk5OTs2
-NzY5Nzs5Ojg9Oz05ODo6NTc4Nzo8Ozw6ODc6OTk4OTQzNDU1NzU1NTc4Ojs2Njo3
-Njc6Njs7Ozc5OTc7ODU2Njc5ODczNDY1NTY2NjY0NDM2NDAzNTU3Oj04NTUzMzY2
-NjY1Nzg6ODY2NTUzNTc3MzY0NDc2ODg3OjY3ODg2NTY4NDY3OTk6Ojk4OTtAPzs4
-Ozg5Njo4Nzg6Ojo6ODY7ODk6Ojo5PDs6Oj06Ozo7OTs6PT9AQ0RJTVJZYWVnam5v
-cnZ3eXl6eHp6fHt7fHx8enh5eXt5eXh2dXJycG5saGhlYmJeWlZVUlBNT0tJR0ZH
-Q0RFRkVEQkJAQ0hJSUtRVl1ueXp6e4yNfGlgZm9zcm1weYiTm5mQjIuTmpOEa19h
-b4GOk5GFdWhobXqJkY6OjY6PkYyDdWRdYG+Ej5CNg313dHFnXFZLSU1LRD49O0BD
-Q0I7OkJFREVLWmFpbG9xc3NwcXBubGdnZWdhXVlTUVBNS0lKSUhJR0hHRUZIR0ZG
-RkZGQ0ZFREdHSUlLTk9PT1BRU1RXXF5cXl9hYmhmZmZpaWdnZmdoZWdnZ2ZmYFhZ
-Y3eEg3RhXW2EjIt7c3eFlZuSf29xgI2NfGBecoqOdlVVXlZPWniSmJF+bG58gHBk
-XE1DPz4/QDo7PDo3Ozc4OkJAQ0VGR0dGSEtLTVJUWlpZWVdSV1dXW15fYV9fXFtb
-W11bXFpWUVNRTktLR0RBQURDQ0RDQkZGQUNGQUJCPUBAQT8/RD86OTc5Pj09Ozo7
-ODc5PDs8PD07QD1BQ0A9PD09PD9AQ0NDQUBBQ0FDQkNCQkJDQT0/PUFDREdHQ0FC
-QURIREFBQkREQ0VIRUJDQ0JBQENGREVGRUdJRUdHRkhGRENDSUlGQkVEREZGSEdL
-SUpLSkpMTUxOTE5OTExMSUpLTEtLS0lIRUlHRUNDRkNEQ0VGSExNS01MdrjFz9fc
-4OPm5ufq6lFXVFFST0lLSkpKSUtFRUhCREVDQkJDRUJDQUFERkdDQ0JDQUJFRERB
-QzlCOzs8Ojo6OzY2Nzg6PD88Ojo7OT07PTo4Ojo6ODk6Pz09PD9AQUA9PTs7Ojs9
-Pjs4NjY4Nzk8OTk8PT5APTs8PEBBQD05PDk4ODk0Mzc3Mzg6Ozo7PTs/QENCQD5A
-PD4+QEBAPz09Pjw7PTo4NjY2OD0/PUA8PTw5Ojk4Oj47PDo9Ojo5OTo7Ojg7Oz07
-Oz07Ojs8QDs9P0BCQ0FAQUFAQD8/QUNDRENER0ZAQj07Oj03ODY3ODs6OTs9PT07
-PDw+PT0/PT5AREVFRUFEREFBQkFGRkVGREZFQ0VGSElHR0dGQT9AQEJCQD5APT09
-PDo8PT07Oz1COkBBPT9APz48OTc6PDo/P0FBPTo9PT49OTw5Nzg6Ojo7Ozo5PTk6
-Ojo5OTw8PDs+PTs7Ojg9PTw9Ozw8PDs9Ozc3ODo5ODg7PTo4Ozk4ODo8PD06Ojk6
-OTw6Ojo5ODk7Ozc3OTg3OTg3ODc1Nzo6Nzk5OTo7PTg5Ozw5Ojc1NDYyNjg2NDc4
-OT07PDs5PDo6NzY4OTg4OTk6OTs8Ozg6OTg7PDs3Ozs5OTo9Ojk5NzU1OTk9PT05
-OTU0OTc3NTQzNzQ2Nzo5PTk3NzY1NTg7Ojs9ODc2PD08OTk2ODc3Njg5NzU2Nzc3
-NzczMjM4NDIzNDM0NTU2Ojg2NDQ2Njc3ODY0NTQ1NDo4NjU2ODg4NTQ3NjU3ODc1
-NjY3Njc5NDU2Ojk4Nzk4OTY5Ojo5Ozw7Ojo3Njs6PDk4OTs9Ojc4Ojg4OTk4ODU3
-OTs4Ojw+PT49QUNGTlBVWVthY2tvdHR1d3p8fHp7e3x9f319gIF8eXt8enp7eXV0
-cnJva2ZmY2JeWVhWU09NS0tKSklGRkRBQ0NEQ0FCQ0VLSU1NT1NXZHR8d3R8i4dx
-X1xugo2OiISIkZufnIt+dXeAgXhybHN8hYyMh35vZm16i5mel5KOiH+CfHZwY2Bq
-eISLi4R3aV1WTk1KQkRHTUtGPDtBR0xMRUFERUhPVVxobnFycnV1eHl4d3t5dndz
-bm5taWReWVBPTUpJSUpJSUlFRUZGSEZFREREREFBRUdITE5PUlJSU1VbXGBlaWts
-bmxtbW1qbG9wbWlmaGhmZmVlam9waWx9kZeNeWNke4uMgGhebISWl4t7eYGPk454
-YV1wgIRyV1BcYVlgb4eQi3ZeWGZ0al1cTD09P0I+Pz45Ojg4OTo7PD1AQUFBQ0FD
-RUdITFFQT1NSVFBPUFRVW11cW1laWVpbW1pYWldUVVJSUlVSTk1MSkpKSk1KSUlL
-S0tHRUVGRUNBPz48PDw+PDxDQjw/Pzg9PDw6PD08PTw7PEBCPT48OzpDPzw9QUNC
-QkJEQUFBQkRFREVGRUI/Q0RIRUFDQkRBREREREVFQ0RIRkVER0REQkNCREVHRkhF
-R0hGRkhFRkRFSUdGRUdERklHSUhHSEhHR0lLS0lKTkxNTEtMSUZJTUpMS0pKTEpL
-R0VHRUdEQ0VCREZGRkhLT1B2tsbP19vg4uXn6OrqTUxNTkxPSkhISktLSEhFRkBA
-RkZGPTo7QERBQEBEQ0NFQEJCQUJCQD88QDk7ODc5Ojw9OUA+QD06OD07PTo5Nzg8
-PDs4Ojo8PTk5PD08Pz89PT8+Qjw6OTk2ODo8Ozo5OTg3OTs7Oj8+Ozk+PD9BPjs6
-ODc4NTU2NjU3Njg8Oj5AP0FCQ0E9PDs9Pj1AQD0+PT47Ojs9OTg4ODg6PUE+Pjw6
-OTg1NTk5Ozo7OTs8Ozk6ODhBPj07OTk4PDw7PDw5Ojo8QD1AQkNBQEFDPkBAQEFA
-QUJCQT88Ozs6OTg5Qzo4Ojo+PDw/QUJEQD4+QEBDP0FGRkdCQUFDQkJFQkBAQkRG
-RERDQ0hJSkpFR0JGQDw/QD4+PD09PD07PDw4Ojo6PDw8PT1APz9AOzk5PDtBQT09
-Pj8+PD08Ojo4NTs4OTo7OTk5ODk6Oj07OjtFPjs9Ojs8PTw9PTk8PDs9Ojw9Pz05
-OTo3Njg5OTo5Ojw6PDo7Pzw9Oj07ODk2OTg1Njk8PDk4Ozo6ODw7PD05Ojk7OTk7
-Ojk6PT08PTo7Ojk3NTQzNDY4NDs3NTg7Ozw7ODo3PDw4NzM5OTs4Ojo3ODo6PDk6
-Ozg7Ozo7OTc4Ojk4ODY1Njg6ODw6Oj44ODo2NjY2Nzg4NzQ1NTc0Nzk4Ozk4Njg5
-Ozs8ODc2Oz07NzU4NzczNTY3NDU0Nzc1NDQ2OTU0NTU4OjYyNDY4Ojo8Ojg2NTg2
-NjY1NDM2MzI1NjY3NTk4Ojg2NzQ0NTQ2Ozo3Nzc6Ozs4Nzc3OTk7Ojo5Oj07PDw9
-PTk4ODk9Ozs5PDw9QDk7ODk4OTk7Ozc5ODk6Ozs9PEBEREpNVVhaYGRkbHFyeIB+
-en19fH9/gX1/f36BgX18fX57dnd2dnNxb2xoaWRkXVhbVFJQSkpKSUVFRkdFRUVG
-Q0JCRkVDR0pKT1FTVlticHl6cHJ7e3BqcYOWnJySjIyPlJSNfWxkZ3SBhIiFf3p1
-dnyCgoF+gImVnp+ZjntsZWx2eHd2cXJ0enx6b11PR0dGRD8+PkVMTUZBPkJOUlFI
-R09TVVxka3Bwc3d4e3l5fH1+f4KFgH18e3t3cmxnYVxWUE1NSklGREhGR0RFRUdF
-R0VFQ0ZJTU1OUFNZWlteaGRmampvc3R0dXR0dnRzdXFubWtqamdmY2dveXRwe42b
-nJJ9b3aKkIl0Y11sg4yKd290hJaWjn1udIKEe2hTTFhra2JkcoKGe2BVXG9qXl1P
-PDc+RUZDQkE8ODg6OTtAQUA+QEE+QEBDRkZGSUpJTE9PUE9OUFJUV1dVVFVYWVZX
-V1ZXWFZVU1JVU1JSUFBRUlJSU1FQU1RTUVNQTk5LTEhHQD9ARUNCQD8+Pj9CQ0FA
-Ozo7Nzk3OTtHSDw8Pjw8Qj8/PkRDQ0RGQ0JBQEFEREVGR0VIR0hISEVGR0ZCREVG
-RkRHSEdHR0dGRkZESEZHR0NFRkVIRkdGSEhHR0hESElHRUVEQUVEREVISkpIRklH
-TE5NTElLSUdKSktKS0tGSE5MSEpJSUpJRUhGRklJSEhBR0dGSUdKTm61x8/Y3N/j
-5ebo6ulPR0dLSktLSE9MSEdIRkdEQ0FBQkhDQj09RkNFREFFQT4/REA+O0BAQUNA
-Oz08Pjo3Nzg6P0M/PTw5Nzo2Ojs/NzQ4NzY4Ojc6ODo+PTw8Ojw+Ojg6Ozo5Ozw5
-ODw5Ozg6Ojg5OzxBPUBAPTo6Ojc6PDo5Nzc3NzY1NTc1NTk9PD1APkFDQD4+PTs+
-P0FDPzs6Ojo+PTk2NDY3Nzc8PkA7Ozk6ODk6ODk5PDs7OTo7OTo7Ozo6OTg4PDw7
-PTs8PD08PTw8QEI/QEBBQENGREJDQkFBQT8+PDo8Ozo7PDg8PDY5PT08Ojs8QUFA
-P0FBQj0+QUNGRkZDQkNGQ0VFRUFAQEFBQ0VGRUhISEREREM+QT8/Pz48PDo7PDs9
-OTo5Oj05OT1BQUBBPTo7PT89Pj49PT0+PTs8PTk5Ojk4ODw7Ojs6OTk5OTk6OTg5
-Ojk5OTo7Ozs9PDs6Oz0+PT07PDw9QTk3ODo+PTs8ODk4Ozs8PDo6OTs6ODo9Ojg1
-NjY6Ojo5ODc4Nzc3Nzk8PT87PDk7Ojo5Ozs5Ojk3OTc3Ozg4Njg0Nzg4NTg5ODc5
-OTk5Nzs7OTg6Njg6PDk5Ozg4OTg6OTk5Nzc4NzU2Ozo1ODg4ODo5OTs6PT48PEA+
-Ozk4ODQ1Nzc4Ozs5NjM1Nzc3Ozk4Njk6Ozk1NzU1OTg6NjY3ODs2NjU2NzY3Njg9
-PTk0NDU3ODY2NDY0Njc5ODg1NTY1NTc0NDU1NTQ3ODc4Nzc4ODs6Ozg2NDU0ODc5
-Ozs8QD48Ojs6OTs7Ojo/PTo4Ojo5OjxBPTk8ODg7Pjw9Pj48PDs4Njk4Ozg6Ozw7
-Ojs5Oz89PkJIS09VW19kam1xdHd6e3uBgoGBfoB/fn99gX99enx/fX56dnNxcW9r
-ZWFhXl5bV1JQTUpJR0hFQ0REQ0RDREREREJER0pLTExPUlVYXWNsd3ZubHJ2d3yE
-j5iamI19dHN8gn91ZWNpeoqTkYl4Z19nfIqUlI2KjI+Sj4p7aV5aYXWBhYN6cmhk
-YV1VTUlFRkVDQD1AQ0VIQUA8QE5ZWFJTWWNramxwc3d1eH18fn+Bf4GDhIeIiIaB
-gYF/e3ZwZ2BbVVBPTEpKRkhHSElFRERBSEhISktPU1dWWl9hZWlqbG9zcnF1eXZ7
-eHd0dHN0c29tbGZnZ2Zma3BxZmV4j5qXin5+iJeXjXZpaXmGiXpnYW6Kk5GEdXKC
-kY5+XktGU29yY1ZWcH9+b11icXVkWUw+OTo+QT9COzg2OTw7PT8/PkFCQEFCQUNE
-REJHSUpIR0lKSkxOUFBVVVZSUVZSVVNSVlhWU1VVVFRWU1JSUlRXV1pcXlxaXVxh
-YFpYV1pYVVdVTUtHR0hFQkJAP0A9Pj07OTs8Pj06OkhDO0FAQUFAQkNEQ0JDREBF
-RURER0hISElMTkpITExNSU1MTk5KTElKSUhJR0VFRkZER0VER0dGRURJSUZGREdG
-SEdERUZGQ0ZGREVFQ0dHRkhMSUhHSEtKR0hITElGR0lMS0xNTkpKSUpGSUpHSkdJ
-SUlHR0lHSEhHSUlKSEpOebXG0Njc4ePm5+nq6lVOSUtHRUpMSkhFS01KRUFGRUpI
-RkZCQ0NDRkVFRj9BSENDQj07Pj49PDxCQT87PDo4ODo3Nzw5Oz87Ojk7Njg5OTs9
-ODo4Njk3Ojw9Ozs7Ojo7Nzg4Ojw4Pjw6PD08Ojg5ODk6PDxBQUJEQTw3OTg3OTc2
-NTc1ODY4NTc6OD0/Pj5CPz48PDo+Oz09QUA+Pjs6Ozo4ODc3ODg7PDw9Ojk4Njo6
-Ozo5Ojs8QDw5ODo6Pjs3OTk5Ozs4ODs8PDs7PD08PDw9PDw9Pjw/QD9EP0I+QURD
-QT0/Pjo6ODo5Nzg4Ojk4Ojw6PD09QEFCPD08PkBDRERHRUZFRkZCQ0NDREA/QURD
-RUZERURCQEA/QEFDQz48PUA+Pz88Ojs5Oz89QT07PTw/Qj49Ozs5Pj09Pzs/PDw+
-Ozo5Ozw6PT05ODg5NDU5Ojk5OTk2ODk5Ojs7PD8+Ozo9Ozg5PDs6Ozs6Oz07ODc5
-Ozs4Nzo3ODs7ODo5OT08ODs7Ozk4OTw4OTc8Ozg4ODw4Nzc2ODg6Oz04Ojg9ODo4
-Oj09Ojo4OzY4ODo6OjY0Nzg3Ojc0Nzg3OTo5NjU2ODk5NzY2OTo4ODg6OTk5Nzg3
-Nzc4NjU3Njk4Njg3Nzg3NzQ4OT04Nzs6Nzg4OTY1MzQ0Njg7NzU1Ozg2NTQ2NTg3
-OTk5OTk2NjU3OTc2ODU0Mzc1NTU1OTo3OTc0Njg5NzM0NjU0NzM0MzIzNDU2NTk3
-NjY1Mzg4NDU3NjU3NjY5NDMzNTQ7ODg5Ojg4OTk4OTg6OTQ3OTg4Nzc1Njk6Ojw7
-PDc6Ojk7PEA7O0BAPTk4Nzc6Nzc2ODs9Oz9DPz1CRUtOUFNYYWdudnV4e3t7foGE
-hX9+g4GEgYB/fn5+fn18eHt7dm9sbWdmZF1aV1NQUFJKTEhIRkRBR0RDQj9DQ0ZE
-REdJTkxOTFFSVlpgam1xbGlsdX+KjIuPkpKNfGliZHF4e4F/en2Gj5OMemZeY3OK
-m56dlYl+eHl9fnRmYGNrdYGBem1cUUxLSkhGREVISEQ/PD1CSEVAQD9IU1heX2Zr
-cHJydnV5fHt7fH6DgoCChIKDh4iMjI6PiYqFgX57dG1lXVdWUlROTEpKSklKSE1M
-SkpNUldYXmNobW9xdnh3eXl5eXd5eHV2dHd3c3VybGttaWRlZ2hxcWZbXnWIjoN3
-d4GPm5iKenl+jJGKc2Fjd4uPgmxkcYeTj3xhT0pPa3ZgUE9gc36CcnB4f29aSTw9
-PDs+PTw9Pjw5Ojk3ODw9QT5APUBAP0FCRUVERkVCQkZGSklNUFJUVFJQT05SVFRT
-U1VTVFRTUVdXUlBUV1hdYGBgYWBiY2NlYWNiYF1bXVtZVlVSTkpKR0VEREE/Pj07
-Pz1BQT49PTk3PEJBQUBCREVGRUdERkhJSklLS0xOTk9RVFRQVFNRUVNRUlBOTUtJ
-RUZGSUVEQ0JEREhEQkVGRkRHSUhGR0dIREZGSURER0VHR0ZHSUlGSUdKSUZKSkpG
-R0dHR0hJS0tKT1JSTk1JSkpHRklISUhEREpISUtIRklKS0pISk5pscfQ2Nzh5OXn
-6OnrVFBLR0lJSU1JRktHS0lDQ0VGQkpHSkZHREVEQ0A9QDo+QERBP0RBPT0+PD48
-Pjs4Ojw9PTk8Oj9AOD06OTs8Ozg5ODs5Ozk5Ojw5Ozs6OTk7Pjw9Ozw5Oz47PT06
-Ojc5ODg4Oj1APTs8Q0A7Ojw7OTo5ODY3Nzc5ODg5Ozo7PkBCQUNAQT09QEBAPz09
-Pjw7Ozo7OTk4ODo4Ozo8PT07Ozc9Ozk7PD05Oz09PD05ODg4PDs8Pjg5Nzk5Ojo9
-PDs8Pzw6PTw9Ozs9Pz4/Pz0/R0I+Pz49Ojs8Ozg9OTk4Nzs4Oz08PDo8Pz89QD86
-P0JDQUBAQkNFRkdCRENDQ0NCQEBAR0hGRkZCQUA/QD5AQkFAPTs8Ozw+Pzo4Nzk7
-Ojo5Ozw9P0BAQD47PDw8PDo8PT49PDw5Oj04Nzc7OTg3Ojo5Ojg7PDs3Ojc2Oj08
-Ozo8OTo8Ojk6PDo5Ojs+OzY3OTs6ODg1OTk4ODo8Ojg5OTo9Oj89PTk5OTg4PDw8
-Ojk7Oz07PTg3ODo7Ozo7Njc4OTY5Njg5OTg5OTo4Ojg7Ojk4Ozo2Njc1NTU2Njg6
-ODk6Ozk5Nzg3OTw9OT07Ozw7OTc2Nzg4Njg2ODg7ODc2ODc6NzY5ODc4Oz06OTs9
-NjQ0NTQ1MzU2OTc2NTQ1NjUzMDE1MzM4NzQzMzQ2Nzg3NDY2NjQ3ODY0Njc5NDg2
-ODg2NjQ3NTQ1NTQ2NTMyMTMzNDc4NTU3NjY4NzY1MjMzNjc2NTU5NzU1ODY2Njg2
-NDk4OTY3OTU3Njg4Nzk6Nzo6Ojk4ODk9Pjw6Ozc3OkE8PT88PDg6Ozk6OTk2OTs/
-QkJBQUVHSlFVWl5ia3F1fHyAgYCFhYaFhYSDgX+Dg4OCfoB8e3p5eHRxbWdoZWBd
-V1VUUk1MSUZIRkM/Q0VFRUVFRUVHSUpISUxQVVFRT1FWXWducGpkaHiBiZSQg3yB
-hYF2Z19mc4KNlJSPjIaFhHlxbG94gZGanJmLdWRdYm53d3BqbXNvbmtjU0pDQkNF
-QT8+RkxOSkRBQ0lOSkNDSlJcYWZrb3F0dXl4eXl9fYB9f36FiomKiIiEiIeJjI2O
-jYqMh4WCe3VxZV9eWllZVVVUUk9PUFFTWFtaYWdtb29zdnl5fIF/f3x6eHd0dHZ1
-dXN0dHFtaGpqZ2RodX99a2FmfIeCdWNmeI2Xk4V4fY+am4x0aW+Dj4t5YF1thI2G
-d2liWE5caV5QTVl2iI2FeHV+dF5KQTo/PT08PD9BPTo8Ozo3OkA8PD9AQkI+OjxA
-PkFDRkJCQUBHRkxOT09QUVBOUE9SVVRRUlNRUVRUVVNUWFNTVVpiYWFhYmNkZGZn
-YmFjYWBfX15dXFtWUFFQTkhERkQ/PT8+QD48PTw+Oz8+P0JFQUNDSElHSklJSUpM
-Sk1OT1FPUFNWWlZXXF5cWVdYU1FQTEtJR0hGRkVGRkJDRkVERERERERCREVFQ0VG
-SEZFR0hJRklJSkVGSkpHSElISktJSElJRkZHRkxKR0pLTU5NT0dKSEdISEZJTEhH
-R0VFRUhGRkhHSkVJUmqtyNDY3eDj5ejo6upTU05LR0dRUEhISEdFSUhIR0hDQ0VJ
-Q0NGRkJCPkBHOz1CQkJMSkJAQUNFREE+PUA+Pj48Ozw7PDs9PTw6PDg7OT02ODg2
-ODc6PDk4PTo6Ozs9Pjw+Pjs9PDs6ODk5PTg5OjxDPj5AQD9AQEA8PTs6ODg2Njc4
-Nzo7Ojw+PTo9Ozw+Pz0/Pz49Oz48PDs9PDw7Ojc5NzU2Njw6Ozs8PD06OD06OjtA
-Pjs7Ozw+Pj05Nzg6Ojo4OTc3OEA4OUA7Ozo6PD47Ozw7PDo8PUA8P0A+REI8PD09
-Ozg8Ojg6ODc4Ojs9PT08PD89Pj48Oz89Qj89P0BDRERCQUFFRERCQT4/QUdGRkVE
-QkNAPkBAQ0NDPj9APzs7PTo6ODs5Ojk7Ojs3Oj1CPkBCQD9APTs7QT1APTw5Ojw4
-Ojo7Ojo7Ozw6OTs6OTg4ODw+Pjk3Nj47PEBAPDo+PDs5Nzo9OTs6OTg5ODtAOzs4
-Nzg4NTU4ODc6PD06OTk8PjY4ODo7Ojo9Ozk6ODk8ODs4Nzg4ODY7OTc6Nzs5Ojc3
-Nzk6Ojs7OTo8PDs5Ozg1OTU4Ojk4ODk4NTc7OTo6ODc5Ojk6Ojk5Ojs7OjY3NzY4
-NjY3ODk3NjY3ODc6OTc3Njc2PDs2NTg3MzMyNTc4OjY3NjY1NTc1Nzc4OTIwNjQ4
-OzY1MjE2Nzc4NTU0Nzc2NzYzMzMyNjc2NDY4NzU1NDg0NDM4NTQ1NTY0NjMyMzc1
-NDM1NjQ2Nzc3Njg3Njg5ODc3Nzc2NDk6Nzc1NjU3Nzc4NjY5ODg5Ojs8Ojg4OTw7
-Pj1APz44Oj4/Pz06PDo6Nzg4OTo8QEJEQ0VHRkxOUlZbYGVudHh8fn6AgYOEg4WD
-hn+BhoSEgX5/fHt4d3Rwb2xnZWJfWlhTUE5LRkdKR0dDQkJASEZGRUdJSklJTVFO
-UE9RU1RTU1VbZ3FxaGNneYuMj39rZWt3fHpyb3aElJ+dm5KAb2lyd3uDhYeIi5OS
-h31pWFdZaXmBfm9pY1pWUElIREA/QkE7OzpDSElGQkRKVVRPTVFZYWpsbnBvcXJ1
-eXh6fH5/gYCEh4iGiImMjImLiYmMjI6OjY+OjYuHg358dm1lX19eXltcWldYXWNl
-aG9ucHV5fH2Af31+f4OCfn17e3t6d3d0dXVycG1qaGxnaHuKjoh6cXiIjH5nYWR6
-ioyBdHGBlZ+ah3x9h5SZjXRiYnOChHZoanBvW1RhYFVUYHiOk4RvYmxzXkpBOzw5
-Njg6P0NBPDs5PDtEOz4+Ojw9Pzs6PDo8QkA9Q0RGRkZHSUpMS0tNSklKTE9QUVBP
-VFFRVFNTVFNRU1RXWFxeXmFgYmNjZGVlZGJhY2JgYF9dXVpXV1JSTU1OSkpHR0VD
-REFDQ0NBQUFBQkRERkpJS01LTkxNT01OTlJWVFdXWFtaW11fYmBfW1pbWlZQSENE
-R0ZGRUZEQ0RERUdEQkJGRURDRUhIRUNKRkRFSERFRkZFRUVHRkdHRkRERUlJSElG
-RktLTUxJRkZJTExKTUlHRkhKSUhGRUVDREpGSEdGSEdHREdQdrPG0Nfd4uPn6Onr
-6k9SVVJMSE1MSUtMT0dGQ0VJREBFSEtHR0REREA/QkREREM/RERBREFHQkRCQDw+
-PUBAPTo9OTc8OTs7Oj08Ozs4Pjs4ODc2OzU2OTo6Oz06Ojw+Pj0/Pz89Ozw7Ozs6
-Ojw8PTw/Oz5BQTw/QT04ODc2NDk3NjY4Nzg9Oz4/OTk8Pzw+QD4+PDw/PT07Ojg5
-Ozk6Ojk4OTo6Nzk2OTg6Ozg6OTw4Ojk5Ozk5PD0/Ozk7Ozs7Nzw9Ojo4OTk3Nzg5
-Oz4/Pj09PTs8PDw7Oj0+PD9AQD45OTg3Nzc5ODk5NTU5Oj05PD08QD0+QD9BQD9A
-QkBBQERFQkJAP0BDQkJCQENCREVDQkJCQkBBQkFDQ0JBPz08OTc6Nzc3Ojs7ODo4
-Ojw9PD9AP0BAQT88OTk9Qj49ODg/PDs9Ozk5Nzg5OjY5Ojs4Ozo7OTk3Oj5DOzk+
-PT07Ozw8Ozg5Ojk5ODg7OTs8Ojo7Oj08ODc5Nzc8Ojw6OTo3OTw4ODs5PDo7PDs7
-OTo6Ojg5ODQ1NTQ1ODg4NzY3ODc3OTc5Ojw7Ozg5Ozo7ODUzNTg3ODUzNjc6ODY3
-Nzc4OTk3NzY6PTw4ODg3Nzo6Nzo4Ojg4OTo4Nzg1NTg3NTc3ODY1ODg4Ozg3NTI3
-NTY1NDY1MjU2NTQ2Nzg3Nzg5NjY2NzY2ODc0NDY5NzU0NDU0Nzg3NDc1NDQ0NjU0
-Mzk0NTUwMzI3NzU2NTk3NjU2NDY0NDg1Njc1Nzc5OTg2NTQ0NzY4Ojk4NjQ1NzY1
-Nzc4ODk6PDo5OTg5OTc3OTo6PDs5OTw8PTs/Oj03PDo8Ozs6OTo2Njc4Ojo+QERD
-RkpKTVFWWmFmaW11eXx+fX2AhYKDgYKDgIOFg4B9e3x5dnZxbGllZGNgW1lYVVNN
-S0ZLSEVCREJARURFR0dISk1OTU9QT1FMUVVYVlRWWlxocHBrZ2t9jJGGc2FfZneF
-jYyJhYiRmJiSgGhfX3GFk5SMhXt1dHZyb2lbWF9qd3hvYFJLSUlJRkE/Pj9DRTw6
-O0FGREJCRk5dZWBfaHF0dHN1dXd2c3d6gH+AgIKDhISFh4uKiY+QjoyJjo6Ojo2N
-jY6SkJGOjIuHf3RubWpoZ2djZGVrcHF3eHd3e3+Cg4GDhYOCgIB/gH57e3x5dHVy
-cXJvamhnY2Vsg5OThn2BjpmYh3BjboGHhHJjZ3qMlJGGen6OmJqRfHN7iImBa1xf
-cnxjUFdiYV5iboGHfmZUXGhZSEE9PDw8PT49PD47ODs+PUA8Ozw9Pz07PD0+PDs7
-P0JEREdGRkRERUdKSkpKSUdIS0pMTkxLUk5LUF5bV1FPUVNXWF1fYWFgXWFhYWRj
-YmBjZWJhYmFgWllZV1daWlNQTkxMTEhIR0dKRUhGQUNDRUhKS05SUFNSUVBTUVNU
-VFZdYGFjYGFjZGVkZWplX11cVU5HREZEREVERURCRENCQ0NHRUVHSEdJR0dFRERG
-SUhIQ0VGREdGRUhMSEVDR0ZHSUZFSEhJR0hJSkhJSkdISUdHSUhJSU5JR0ZDQ0hJ
-R0dJRUFBRUVGTlJ0s8bP193h4+Xn6OrqUFBNTElMSUZHTkhKSklER0ZJR0NDREM+
-QEA9QUNCRkFCRENDQUNDQ0REQD0+QkA8Oj0+Pzs3PDxCOjo7ODs9PTo4NzU3Ojg3
-OTo7PDk5Oj06PUE9PD1APjs6OTk6OD08Ozo4OTo7PT06PD08Ozo6OTg3Ojk4NjU3
-Nzo6ODs9PD87PT0+Pz8/QD0+PD47Ozk2OTo4Nzc4NTg7Ojg3PUA7Ozs7Ozo5Ojg3
-ODk6Ojo8Ozo6PDg7QD89PTw5OTc4Oj06OTk6ODs9PDw5Nzk5PDs9Pj1AQTo5ODk4
-NjY4MzQ2OTg7Ozk7P0JAQT9BQUA8Oz1ARUREQUNCRUBBPUBCQ0JDQUJCQEBAQEND
-QkJBRUNBQD5AQj09OTk5OTc5PDs7PTtAPT1APj0/QUBAPz84Ojw8PTk9P0Q+PD1A
-P0A7Ojg5OTo8Ojo6ODo7Oz47PD47QDo6Ojw7PTw7PDg6Nzk6OTo8O0Q5Ojw6PTw8
-ODo5OTo/PTg6PDw8Pjk6Ojs6QTo3NUM5Nzk4ODg2NTg5Nzg2Ojk4Njg3NjY3ODk4
-OTo7Njc4NTU6Ojc2NzYzNjc3Njc5Njo8Ojc2NjY8OTg3Ojo4OTk4NTc5ODc4ODg3
-Nzk4NzU0Njk4ODYzNDY5ODc5OjY1NjU1NjEzNDQ0NDU4Ojk5ODs5NTU3NjM2NTo6
-Ozo0MjY3NjY0NTM3NTg3MjQ1NDMzNTQ5NDc0NjczNDY2Njg6NzU2NjU1NDg2NzY3
-Njc1ODk5Ojg2NTUzOTc3NzU3NzY1Nzc2Nzk4Njc4ODk3NzY1NDc4Nzg3Ojo6Ojk6
-PTo9Pz47PDw+PTs8Ojs6ODw6Oj1BQkRISkpNUFVdYmZobnZ9gYCBgoaCg36AgIOC
-gIB8fH17eHh1cm9rZWJhXllYU09NTUhIR0ZFREdFRkZHR0dKSUxNTlNTU1NQUFJT
-VlhWVVdcYmZubm5vcYKQloZwYWN0iZWXlY+GgX+HhX5yZGVvfYyamYx5ZFxeaWtv
-cGxlZmVlYFdRRkFDR0ZDQDxBSkpIQENDR0lGQUFJVmJoam9yc3d0dHd6e3p7en6A
-f4CCg4ODhYeHioqJiomMjpCOjo6Pjo+RkZCNj5GTkY6Mh3t0cG5tbW1zc3N4fn5/
-f3+Bg4KBhYiFgYKAgH5+f4F+fHl3dnJycW5qZ2NaVGeAioh7eIOZoJ2Me3h/jZGG
-cWRme4uMf25oc4uUl42AfIyYmo56ZmF0eWVMT2ZyaF5daHuCb1hTZFpFPj8+Oz5B
-QD47OTk7Ojk5OTk4OTw/Pz08Ojk7Pj5AQkNAREVDRUdEQ0RERkdJSEZISElJSUdK
-TE1NTkxOUU5MTlJXWl1eW1taXF5gYmRkZWVkYmJiYl9fWlpbWlxdXVtZVlNVUkxJ
-SUlKTU1LSEVISklPUVVWV1hVVldWV1hYWl9jZWRkZWdqa2pra2plYFtWUUpFRUVG
-REdFRUZGR0dGQ0ZGSEdGREZFRkRIR0hERklGRkZEREdISU1NR0RFR0pKSkhER0dH
-RkdHSEhISEhKSUVHRkZIRkhIR0U/QkRCQkdFREZHQ0RFTXCyxc/X3eLj5ujo6utW
-T0xMTk9LR0pJRUlISEpCREJHQ0ZBPkFBP0FBQkQ9QENDPkRGRD49QUFBPUBGQj07
-Ozk9Ojk2Ozg7Ojk1Nzo5Nzg8OTg7Ozo5Ojw5ODk7PD89PkA7PDw8Pj46Ojs6PD0/
-Pjk6Oj06QT08PTs6PTs3Ojg2NzY2NDQ3OTc3ODs+Pz09P0A+PDw/QD03Nzs6ODY4
-Ozg2NjY3NTY5OkE8Ozo9PT0+OTw3Oz09OTs6Ozs8OTs8PD08PTs7Ojk5PDY5Pjw6
-Ojo6OTg6Nzg4Nzs7Pj07Oz07Pjo+ODU0NDE0NTY4Ozo7PD9CQkJCQ0BBPj87O0FD
-REhEQkNCQkA/P0BERUlGRUNDQD1AQkZFQj49PDw9Ozw7O0A+PDw5PTs5Ojo6Oj09
-PEE+P0BAQj86PDw9Pjo7Oj4+PT09Oj08Pjw6Ozg4Ojs8Ojo7Ojk6Ozk6OTw9Ozw9
-PkY9Pj49Pj44PDpBQDs4Njg3OzY5ODs6Ozo8Ojs5PDo4Oks8Pj8/Ojg3NjY5Ojs4
-NjQ4ODk4Njo4ODk6Ozk1NzU4ODc3ODk8PD46Njg3PDo5Ozk1Njk6Ozo3OTk6Ojo7
-Nzc4ODk6Nzc4OTw5ODc6OTg2Ojc7ODs1NzY1NDQ3NDU2NTI2Njo7Ozk2ODg3NjMz
-MjMzMzI1NTM3NjQ1Njk6Njc4NTM2NTY3NzU0Ozg1NDU0NjUzMzU1NTQzMjExNDMz
-MjE0ODc3NDc3Ojo4NDM3NTQ0MzYzMjAzNTQ0NDQ4OTU0NTYzMzY3NjU2NjY4NzM4
-Mzc5OTg0NDc2ODY5Nzo5NzY1PEI7PT47Ozc7Pj0+PTw6Oz86OTo5Ojw9PUFBREhN
-Tk1OVFxfZmhuc3d7fYKFhYaEgoKChYOAfXp3endzcG1saWJhXllXWFVSUU1MR0RD
-RERHR0dJTE1PTkxQUVBNUFhTUlVUVVVUV1VWWVxgZGRnbnN/jZORg3l5g42Um52X
-jHZqZnKAf395d3qDj5iRg2pcVFZkb3R2cGRaT05OS0M/QUJEQT87OkRLT01HREhN
-U1ROUVtla3JxcnV4eXt4en17fH6AgYKCg4GAhISGhoiGh4qKh4qMj5KQkZGRj5CR
-j5COjI6PkpCOioB8dXFxdnZ1e3+BgYSEgoKDgIGEhoSEhIR/fX6Bhn15enZycnBs
-amplW1JUaYCHfGtqf5ablYh/gY+bnZB8c3iJkYt3YGFwhIyCdm51i5eYjn94eXt3
-YEdIZnlvWk9ZcH15aF5jW0c9PEA4Ozo+Pjw7Nzg5ODg3Ozs6PDw8PT48Oz1BQj5A
-QkI/RUdEQEBAQ0JAP0JHR0pISEdHSUZHSkpMTExPUExOUFVVV1hZWl1cXl1gYl9g
-YmJjZGRjY2BeYF1fX11aXFpZWlVUVFRRTkxLSUtKSklMTlBTWFtaWltYWlxcXVxi
-ZGdpaWdqa21ubnJub2xkWlVQSUREQ0ZHREVFRUZGRkZISEZGQ0ZISEVDRERGR0lG
-RkdHRkRFR0lISFhKRkZHSUpKR0pNUE1NR0hKSUlISUhJRkhGRUhHR0lGSUhDRElK
-SkhERERDQ0RKaq7Ez9jd4ePm6Ofq61NRT01MS0lHS0VFSEZIQUNDQkNEQEBAREpG
-QD5CRUNFRUE+Q0RBQkI+Oz1AQEBBQUM+PT4/RDo2Ozs5OUA8Ozk8Pjs2NzU4Ojo5
-OTs6Ozs8Ojw8QUI/Ojk6Ozs7PD88QDs9Ozo5Oz88Ozw4OTg4Ozk2NTQxNzY0ODk4
-ODg7Ojo7PD5AQD08PD4/PD05Nzo5PDo5NjY2Nzc4Ojk7Ozw8Oz49PDs6Oj87Ozs5
-OTk3OD47ODo8Qzk9Ojk5PUI6Ojo6Ojk4OTo6Ojs9Ozo5Ojw8Ozw7OTw8Pjo4Nzg6
-NjQ0Nzo8Pj09QUNFQT5BREA9Pz89QUJERERAQkJBQkFCQUVFSEtIQUA9Pj4/Q0VC
-QT47PDw6Pj07OT4/PUE+QUA6Pjs7Pz8/PT5APz1BPz06Ojs6PD0/PTw7PDs6PTk7
-ODU5OTpLOztAPj46Ojo7ODc6Ozs6OTw8PTtAQEE+PT06Ojo8PDg5OTw9PD08PDo7
-PTs3NDc7PT86OjlBQDw7Nzc4Ojk3Njk3Nzc2OTk6ODk4OTs7ODo5Ozc4OTg5Ojo5
-Ozs4MjI0Nzg5PDg3Nzg7Ojk7OTo2ODc6OTs+OTk4ODg8PD86OTc5Ojo4OTc7OTg2
-Njg2NzU2NzY3OTs5Njg5OjY4ODo2MzI0NTQ0NTU0NTc1NTU0MjU1NTQ3NTU1NzU3
-NzU2NDEzNzgzMzIwNDQ4NDMxMTQ2NTM0NDMzNTQ1NDY3NjU2NzU2Njc1MTg3OTY0
-MzIyMjQ3ODcyNDU0NTQ2NTg2NjU2NjU4NjY1NTY1OTc2ODg4NjY4NzY1NTo/PD83
-Ojc4Ozw8PTs+Pjs6Oj5APj08QENGSEpOT1RYXl9laGxwdnV6fH6BhoF+fXx8fYB8
-eXV0cXJza2ZiXV1YVFJRS01MTEhDREpHSEpJTUxQTk9RVVVWVVVRUVRVVlVWVVNS
-VVVYXWRhXmJrfZCbmYt/g4eIjI2Rj4V2Zl1gcoaQlpOPgn9/hIF1ZVZXX2p0c25j
-UkhCREhCPDs+QkVEPjo4O0dKR0ZHTFdeYmNlbG9xdXp2dXl6fHt6fH+Af4CDhoeH
-hoiIhouKi4mLiYyQjIyQj4+Pj5GPjo6PkJGVkIyNkI2Mi4mFfHt6eH19fn+Bgn6A
-gISDgYGCgoF+f4B/fn58e3h3eHdwbW1sZmVbWGN5iYl3ZGd9kJKHfHuFkp6aj4OC
-jZmblX9vcoCIhnpnY2+EjIZ8dnyJinlaSUxccGlUS1Bngod+b29lRkE8Ojw7O0NB
-Qjs7Ojc7PD09Ozo6OTs7Ozs+Q0I/Qj4+Pz4+PT4/Pz9AQUJAQUNFRkRGR0VDQkNG
-REhHRklMUE9SVFJTV1dYWVlcXlxdX2BeXmFgYWNhX19fYmJcV1hcYFtbXVxeXVtX
-UUtJSU1NTU5RVVNXWlpcXV1dXF5iZmVmbWtqbG1ubXBvc3NuamReU0xHQkJKREdH
-QUNERUVEREVGRkRERUVFR0dERUBERkdGR0RERUlLR0NHXEpLSUlJSEVHRkpLS0hL
-SklIS0xKSUpJSEdERUpHRkZHRkVCRkRGR0RBQkZFQ0hwr8bQ19zg5Obn6OrqVFRU
-TUhLS09IRUhPTEVFRUJDQ0VEQkJFQz4+Q0NFRUNDRkNBPj1IQD4/QT9DQkNCPzlC
-Pz09PDk2Ojw3Oz06OTs4Nzk5Ojs4OjY3Ojo7OjY2OTs9Pj45OTs6OTo7OztBPTs8
-Ozg6Oj1APjg3OTg3NDc3Nzo4NjU2Nzo4OTo5Ozs8QD88PT08Ojw9PD06Ozs7ODc3
-ODY2Njk6OTs7PTs6Ojk5OTo5Ojo7PDs4Njg5Njo7ODw7Ojg5PD87Ojs8Ojk6Oz06
-Ojg4ODg5PDo3ODg3OTo8Ojo7Ojg4OTg0Nzc5Ozw6OztAPUA/P0A/PD0+QEJCQ0RD
-RkZAQkNCQT5AQUVIRkZLQUVCQ0JEQkA8Ozw5Ojs9PDo8Oz08Ozw7PD08QUA+QD49
-Ozw7Pjw8Ojs7OTo9PDw/Pjs6Ozo6PDw9Pjs6Oz45PDk5ODk6Ojo4Nzw2Ojg5OTo3
-Ojs6PDs6Pj09P0A/QT0/Ojo7PD06OTk7PDk4Ojk7PDo4Ozs6PDs6OD04ODY3ODY5
-Ojs3Ojk4Njk5Pjc3NzY3Njg4Ozs3Ozo6ODg4NzU1ODY3OTo5Nzc6OTs5OTU2Nzg5
-Ojg9Ozs4ODk2ODg5Nzg6Nzc3OTo4ODk1OTk6NjY3Nzg4ODo2Njk9Ojc2NTI0MzY2
-NTUzMzE1MzQ1MzY0NTU1NTI1NzY0MzI2NjYzMjI4Nzg5NTUzMjM1MzQ0NDU0MzY4
-NjU0MDM0NTY3NTQ1NjY2NTY1NzU2ODMzNDEzNDY1NzQ1NTIyMjg4Nzc3Nzg4Njc7
-ODg3ODU1Nzc3Nzc2ODk6Ozk4Nzk5OT46OTg5PD0+Pj47PD0+Pj9APjs9QkZJT05R
-VVtaYGZpam5ydXl+gX6AfXl8fnp6d3V1cm9vbmtkYl5cVVBOTUxMRkhHSktKREhJ
-Sk5OT05QUlRVV1laWFdXV1ZWWFNQUFRWWFxkY11eYGuBlZ+ekHyCioqHgX5+eHJq
-aG97i5eem4x7bWZscW5nZWZlZGZeUUpEQEFARENAPz9FRkc9PzxARkRCQENQW2dr
-b29wb3NzeXh5e39/foJ+f36Ag4OEiYmGiYiHiImKioyMj4yMjYuMjo+Qj5GTkZWS
-kpGPjo6RkYyMjImEfnl9gIF/f36BgH+EgX99fn5/gYF/gn99d3d4dXN3c25ubWtt
-cGtufpCUj35sboKNiXhoanySmJKFfIKVnJqTgX+HkZaQg3V3gY+PgmpfbYaRg15G
-RFNmaFRNVm6FjYp2bGNKPTw8PTk6PkA8PT4+OTk5PDs3OTg7PD07PDs6PTw8PD0+
-QD88QDw9PUA+P0FAQkBBRD9BQ0RDQUFERUVHSEhPTUxOTlBSU1JSVVZYWlpdYF5e
-XlxdX2BhYWBiZmVcWVlbXGNgX2BiXlpVUE1NTVBNUVZWU1heW1tbXF5gYWhoaGls
-cG1wcnFubG5scG1sZVxRSUdJRkNFRERGRkVGR0RFRkVHRERFR0hESEhERkdFUEdF
-R0RHR0pDRUVZUk9JSklJR0VJR0dLSEdGRUdGSEdGR0hJS0hHSkhFREdEREhERkZI
-RkRERUpISGirxc/X3ODk5ufo6upQT09OS0ZHR0VERERHRkhHQEBBRURJQ0NBRUBA
-Qz5EQkFFRkA8Pj5BQTs7PUNDQkM8Q0FAPj5BOjg3Ojs6NzxAOjo5ODw7ODk7Ozs6
-PDs5PEA6Ojs7Ojs6ODo7OTk7ODg3OT88Ojc2Nzw7ODk7OTQ2NDQ1NjU4ODw/OTs8
-Oj0+PDs9Ozs7Ojo8PTw9Ojo7ODY3PT46OTc7PDg8Pzs5ODo6ODo7Ozw3ODk6Ozo4
-NjQ3OTk6Ozs6Ozc3Ojo2OTk5Ojo5ODU4ODc6Ojo6Ozo3OTg6Ojk6OTk4Ojg3NjAx
-MjQ3OTk8PDw/PT08Oj1AQkJBRERGRUJEQEJEQ0RDQj5ERURERkhFREFCQEFBQj47
-Ojo5Ojs7Ozs7OT05Ojo8NzpAQD9BPTw7Ojw8Ozs7PT48QDs5PD06Oj05OT09Ozo6
-Nzk6Ojo3ODo2Nzg5Nzc7ODg7PTo6PTw8Ozo8PD89QEA7Pz8/Ojg2ODg9PT86Ojg2
-O0E6Ojg4OEFgOjk7PDo8ODY2OTg4PTU3ODk1OTk3OTk1NTg2NjI0NTc5OTo3Nzg6
-Ojs4OTc3NTg3ODc6Njc4Nzc3NTU5Ozs4OTk4Nzk3ODk4OTg5NjU3NTU4OTg4Ozc4
-ODg4NjY3NTc1NDc2NTU6OTY3NTY3NjYzNDU0MzQyMzI0NDg0Njk3Nzc2NTc1NTI0
-NDM0NzIzNjM1NjQvLy8wMTI0NjYzNDU1NDc4MzM1NzQ3NjMxMjc4NDg0MzI0NTMy
-Njg2NTQ1NTc3Nzk2NDQ1Njc3ODY7NzU3NjQ3Njc3NTY4Njc4OTs7Ojs8Nzc6OTo7
-Nzk6O0A/PTk6PDs9PT88PDtBR0pOUFZXWF5cYGZpbHJycnd7fHt2d3l7eXh2cm5w
-amhoY19aWFdRTkpLSUhGRkpLTU5PSktPT1FSVFVWV1lZVlhZWlpYVVNQUFNRU1Va
-YGhoZ2Nkc4SRnZyJfIOJhnlnYmp1foKCgYGIkpSMfGZZVl1pcHJwZl1SSkhJQDg5
-OzxAQ0I9Qk1UTUZBQkRJTUhIUWBnbWxwcXN3eHd2dnp7en2Ag4J+fn+BgoWHhoSG
-hoyMiYqKi4uNjoyIioyNi46Oj5GUkZGQkZGSko+SkY+OjIiBfHh8fXt+foGHhYKC
-gX5+f31+e318f3p5d3h2cnBwbG1rcHx5c3OIlpiSh32DkJKJdWhsgZGPhXBmboSR
-jn9vcX6Nj41/d36OlpaGcmVtfYV8Zk1ITVtoYFdba4CPindhV0o9OTs6Ozo7QD9B
-QkFBPDw7Ozs4OTs7OTg+PDw7PkA6OUA8PT89Ozw+PjxBQEBBQD8/RERDREVFSUVD
-RkRHS05LTUtMS0tOTlBUVFJTV1hbW1pbXFxdXmFfYWJmYlxaV1pdYmNlYV9fW1hW
-T05NUFBRVVlcWFlbW11hYmRsbW1sbWtucXFzc3Jzb2trbWhjWk9KR0JGR0dGREZG
-RUdHRkVGR0hIRUREREJDR0ZKTExNTEVAQ0NFSEhCQ0VMVEhFRERJSEhKTEpJSEZD
-QkNKSkpFRkdKREVHSEhLTU5GRkdIRkRGRUlMTUlGbK/Fz9fc3+Pm5+jq6k9PT09K
-R0dGSElIRENFSUdESUNDQ0NFRUNCRUNCQURBPEFBQD1AQD1BP0E+QkRBQEY/PD1B
-QT49Ozg8Oj09Ozw5Ojo6PENBPTs7Ozo7Ozw+PD09Ozs9Pjw5ODk3Nzc4OTs7Oj07
-Ojw7Ozs7Ozg4NTYzMzY0NjY4Nzg4Ozs4Oz89PDw7PDs5P0I+Ojk4ODg5NjU3Nzg8
-PTs5Nzo7PDg2Ozw5Ojs+PDs7Ojc2Nzg4Ozs8OTg7OjY5ODc6OTo5ODo2ODk4Ojg4
-Ojk5PDw7OTs5ODk5OTk5Ojs5OTg4NzM2NTk3Nzo+Pkg/PT1CQ0NBRURCQkVEQj1C
-PUREQUVBQUFHRURGRUBAQURCQUA+Pj09PD1APjo9PDs7Ojk+OTg9PD48QEBAP0FA
-PDw7Ojo6Pjw8PDs8Pjs6ODo6PD48PTs9PDw6Nzc5ODc5OTs6OTc3OTk4Ozk6OTk8
-Ojo/QEFBPT9APjk5Njg7Pjw7Ozs4NTg5OTo7ODo5PEk7Nzo5Ozo6Ozk8Ozo7OTY5
-Ozg5NjY1NTU1Nzg3PDU2ODQ3Njg3Nzc7Ojs5NzY2Ojc4Njg6Ozg3NDc8Njc4Ojw6
-Nzs7ODc3OTg7Ojk6Njc5ODk9OzY0OjY2NzU2NDU4ODY1Nzg5PDg4NjY5Njc2MzQ2
-Nzc3NzY0MzUwNDQ1OTY3NTY2Njc2MzIzNTY1OTQyMzY3NzMzMzMyNjQ0NDYzNDI0
-NjY1NTMyNjc0MzM0MzY2MjQ1ODY1NDQ3NjU4NTQ4NjY4NTU1NDc1NTY4PDg3NTg2
-NDM0Nzc3NTc3NTQ0Nzk4Ozk7OTY3SEI5OTc4PD47Pzw7ODk6PEA+PD9CR0xPUlRV
-W15gYWVmam9xdHh0dnd2dXV3c3JybmtoYl5cWFNTUlBNSUtHRkhLSk1OTktKT1VU
-VVpaW1lbXFxbW1lXV1pYV1JRU1NUVl1la2hpb3J+io6SkISBiox/aGBfb4SQj4iD
-goGBf3dwXFBTXWp0c2VVSUNCREg+Ojk6QENBQT47R1BORkFCTFhaX2FlZmlqbnJ1
-d3d6eXZzd36Afnx9gYB+f4GBg4GDhYWIiYyKjIqLjY2Ni4qMi4mJjIyMjY6OkZGP
-k5WUkI2RkIyLi4eEfXl6eXt6f4KBg3+DgH59e3t5ent7e3t0cXJzdXNtaWp1fHNo
-bYSTloyBf5GYmJSCdnyNl5ODcWx6jZeSgXd4goyNf2xmcIONjYB5eH1/d3BlVUhJ
-U2NtZ15icn+CdFpOSDw4Nzc6Nzo8Ojo+REM+Ozg4Nzg5Ojo7PD4/OTk7Ozs8Ojo5
-Pjw+QUBAPjs+PDo8QkJCQENESEpKRkZGRkVFR0lLS0tNTkxPT05NUFJUWFpWWFdW
-WFldW1xeYGBhW1tZWV1fY2hoZWFaWFZVVFFRVFZWVVlcWlpfYmRjZmdrbG1ubm5u
-bnBycW9ubG5raWFVTUZCQ0NDRERDQ0RDR0dHSUlIR0dDREZJRUZFREVMSUhKSEVD
-QUBCQ0JCQkdISEZGRkdJR0dGRUlIS0RGR0hISUhGSUlHR0ZERktJR0hISEhLR0NB
-SklHQkdss8TP19zf4+Xn6OrpT01OSk5QTkxMSkRCRkZLSUhMSkZJTkVGRUJFRURC
-QkBDQkVDREE8Pj1EQkNAPzw9RUU9Ojo4OjU3Ojo9PUA8Ojs4PDg+PD0+Ozs6PD4/
-Oz08QEA/PkI8Pz47ODs7Ozs6Ozw/QUI7PTs6Ojo5Ojk5OTg2Njc2NjY4ODg5Oj07
-OTw7PD47Ojw/QD48Ojg4ODk5ODc2Njg4Ojk+Ozs6Ojo4OTg4ODk7OT05Ojg6OT07
-Ojg4OTk3ODc3NzU1ODo6Nzc2Nzo6OTY5ODc5Ozo9Ozs5OTc3Nzk3Nzc3Nzs8Njc1
-Ojs9PDw8PT8/QEJAQ0FAP0BAP0A+QURCSkVCREJDQkNERUNCQURCQUE9PD49Oj49
-PDs9Oz1APDo8Ojk8ODk5PD0+QUE/Pjs7QD0/Pj08Pzw9QDw+PDo5Ozo5Ojs7Oz1E
-PTg3NTc4ODg5ODo6Ojg7OTs9Ozo6OTo4OTw+P0A/PTo4PDo4PDs8Ojs7Ojk3ODg3
-Ojo8ODY0NDY4Njc6OTg6OT09ODk9OTg4OTs4NzQ0OjY6NjY2NzU1NTU6OTY2OTo5
-NzY3ODg2NTk4OTg5OTk2ODw1NDc2OT09Oz08PTs9PDg4ODo6Ojs6OjY3ODg4Njg3
-NzU3NjU1Nzk6Njk4OTY2ODw9ODk4NTc7Njc2NTY3NjU0NTQ0MjIyNDY1ODY2NDQ1
-ODg2MzM2NDM1NTY0MzU1MzY3NTYzNjY2ODk2NzYzNTMyNDc1NTQ0NTY3NjY0MjUz
-NDQ6NzU0NDg1NjY3Nzk5Nzk4ODg4Njc6NTY0Njk2ODc4Njc4NDY6OTYzNTc9Ojo4
-Njk8Ozw8Ojo7Ozo6OzxBPkBFR0tMT1FSVFlhYWJnbGxucHNzdXVxc3JvbGpnaGFb
-WllTUU5OTk1JR0tMTU9PTk9RUVNVVVlcXV1cW1tZW1pbWllWV1dQT1BPT05WXGRl
-ZW12g42Qj4p/gouVmYZsZ3F+jZmVinxxZmNnamVbV1xjaGZdUklCPkdJR0VCO0BG
-SEY/PDxGSElGREtXYmhqaWtrZ2tub3R1d3t5d3R2e3x+fX1/gICDgoaDgoKEhYOF
-iIqMiYiLjIyMjIqIiIiIiYqMjY6PkJCTk5GTkpOTkpCOiomGfHh2d3l4e319gYKA
-gHx9fnx5eHd5enp3c3N3c21qbHV2ZFtjeomIfnV6i5qak4V+hJahn45+fImSkYR3
-eoONk5CCdnGAiYl/cG15hoh4YVtcTUhKZXRuW1Vbbnt4ZlBJQj47PTo6Ojo+PD1A
-REQ8Nzk5ODg4OTg4PDo3NzY4Nzo8PEFAOj0+Pj09PDo8PTs+QEJAQEVGRkZFSkhD
-Q0dJSEdHSElISkpKTE5OUVNSVVVTVFNUVVdXWFhfX11bWllZW2JkZmJfW1VTUVRV
-VVZXWVlbWltaXVxiZGdnampsbm9wcXFwb29tbWtoaWdjXlJJRklFR0RHRkVDQkZG
-QkVGSUhGQ0dGSUVCQ0VISU5DQ0ZEQ0NGRUZFR0FERERFTUhKSklGSEZEREZNTElH
-RkdGRkpJS0lJR0dGQ0ZCRUZGRUtRTElDSktJSnGzxNDY2+Di5ufn6ulQUE9VT09K
-SUZKQkRIS0pLSE1MSUtMR0ZER0NERENEQEA+O0E+Q0A9Pz9EQUFDQUFAQTk4Oj43
-Njo9Ozs5Ozk4Ojw+PTk4PDs7PDs7Ojw6Oz1BPz9APj09PTo5PTw5OTw7OztAPT88
-Ozo5OTk3Njk2NzY0NjQ0NDY7ODk7PTw+PD05PUA/QUJBQUA+OTgyNjY2Njg6Njc6
-ODk3Ojk4ODg5Ojk0Njs6Ojo5ODs8Ojs5Ozw7Nzg5Njo4NjU4NzY0Nzk6Ojk5ODg3
-Ojs5Ojo8Ozg2NzY4ODg1ODc3NTY5ODs5Ozw6OTw8Qj1AQj5AQUFAQ0E9PEJHRUVG
-QT8/QkBERERHR0M/QD07PT09Pj8+Pj07PT08PEA+PDs9OTs7Ozs8PTw/P0E/Pzw9
-Oz0+Pzs7PD07PDk7PTo4OTc3Ozo5PDk3ODc3ODc4Nzc3OTg2Ojo4OTo9Pjw6ODk7
-Ojw5OTs6Ojk6ODs5ODc4OTs8PT04ODk5RUI6Njc6Ozg4Ojg4OjY5Nzs9Ozo5Nzc+
-Pzs2Nzc5NjU2Nzc5OTQ2Njo3Ozk3OTY4NjY3ODg6ODc2NTY2Nzg0NTU1NTc4OTo5
-OTg7PDs6Ojo4Ojg5NzUzNTY3ODU0Nzg3NTc3NjY1ODk5Nzk5PDk7OD88NjU7NTQ2
-NTQ0NDQ1NzQzNDQ1NDYzNDg4Ojo1OE08NDM0MzQ0MS8yMzQ1MzM0NTc2NTc4ODY1
-NzY0NjMzMzQ1ODQ2ODs4ODg1NTQzNTQzNTQ2ODU2NTUyNjs4ODk3Ojc4OTo4NTY1
-Njc2Ozs6NzU1NDg6Ojc2ODUzNDc2ODg2Ojo5Ojo7Oj09PDg4Nz1AQEdKSkpMUFFV
-WFhcXmBlZ2lqa2xtcG5wbmtmZWFcXFdUVFFPTkxSUktLSk1OT09SU1VYWVxcXmFd
-XlxeWltcXVlWVFVUUFFQUE5MT1heZGBhcISUnZ+YhXR9kp+hj3x8gIKLkY+DZVZP
-UF1maWxnYVZST0pGQj06R0xIRD4/SU5STkZBREpQSkZESltqb25pZWZpa2xtcXV5
-eXx9ent7e3yAfX+Cg4GEhYSEhYSHiIeFhouKiYiLi4h/hYqNiomKjIyOjo2MkJCP
-kpOUk5ORkI2NioaAenR0dXZ4eHZ5fH9/f36BgHp4d3R4dXRxbW9tampyeXNjWmp+
-g4BwZm6Ajo6Dc3B5jpqUg3Z9iY2FdmhsfoqKgnh0fpCWjHpjWm+HjHxfU15WSktk
-d3hjTkpbcXlxYE9FPTg5Ozs7Ozg4PEFCPjs7OD47ODk6PDo8OTk7ODY2OkI7OTc4
-PDw6Oz09Ojk7PTw+QD9DQEBDREVCQUNCQkZGREdER0ZFRUhKTUxNUE9SUVFST09Q
-U1VUVVhZWFdVWFhaXmNjX1lUUE1OTVJTVldXWllZXF5dYF5hZGRnamdpaWxtbW9t
-amtrZ2RhYl5XTUlFRERBQUVEQ0VERERHSEdFRkZEQ0dHRUdEREVJRkdDQUFEREdH
-SUZHSUlHRUVFRURGRkVESUZHSkdKSEdGSUdJSktJRERERUxIQUJGR0pWVUdCR0VH
-SUxNe7XFz9fc4OPn5+nr6VJSUFFOTElHSUlCSktGSU1OSEhHRkRDQ0E/PUFCQkFC
-RkRAQjw8QEJDQz49Pj5CQDk9Ojo6QUA8Ozo6ODk4Ozg5OTs8PTo6PT8+Pjs7PDs/
-PkFBPkE8OTg6OTk5PDo4Ojk8Oj09ODg4Nzk4Ozg2ODY4NTY1NTM3Nzo8PTk7PDs6
-PDw9QD09PT89Pj84ODc3OTc3Njg4Ojk5ODg3ODo6PDo7PTw6ODk7Ojg4Nzc4Njc3
-Ojo5NjY1NDU4OTY2Nzg7PDs6Nzk3NDY5OTg3Nzc3NTQ5Ozk3NTA0NDU2NDc4Nzc6
-Ojc7PDpCP0JCPT8+P0JCQEBCQEJAQkFBQz5ARENBSElHRUBAQT07PD48PD88Ojs8
-OTs+Pj0/Pj08PDg6PT8/PD08Ojo+QEFAPUA9PD88Pjs9PDo6ODw6Ojs7PDo6Ozo6
-Nzk4Njg2NzY5Ozw7Nzc5Pzw7ODg4OTc5PTs7OTk7Ozk6OTg5OTk4Ojk5OTk2Nzg6
-OTo4OTg3Nzg4OTo5OjU3Ojo4OTk1Nzg4Nzc5OTg5NzY1Njs8OTY3ODc4Njk1NTQz
-NjQ1NTo5ODM2NTQ3NDc1Njc2NDU3ODg3ODc4Ozs7Nzs6Ojc2NTY1ODc5Nzg1Nzg5
-Nzc1Nzk2ODg6Nzo5OTc0NTQzMzMzNjU1MjQ5NjU3NzU0MjY2NTM0NDg1NTQ2QDY1
-MzU1NDQ0MjI1NTc1NTY3Nzc1Ojc3NjMzODc3MjQ0NzQ1NTU3ODk3NzcyNTc1NTg0
-NTQ0NjY2NjU3MzQ1MzQ0OTs5Nzk3Nzk3Njc2Njc3OTo1NTc3OTQ6ODc2NjY2ODg9
-PDs5OT06PDk5ODg4PT5BQEBERUhNUFFTVFdWWltfYWRiZmlramdkZGRiXVpXVlNR
-UFBOTExKUE9SVFFQVVZZW15fXF1cXV9dXFpbXFpXVFNUVFNTVFJRTk5VXV5eWFln
-gpqmpZmEcn2SoJ+QgX9+fHh6dm1cTEtPXmx0eGtcTUJHR0Q+OkBCRkVBQEJLVVZV
-TEtSXV5WTExXZG5vb2hmZ2Roa21ydXd4fHt9fHt5e3x9f4KFg4GDhoiLiouJh4iJ
-jIuKh4qNioOFiIuOjIuOjIuMjIuNjpOTj5CSko+PjI+PjYiGgnt2dHZ4e3x7e3t9
-fX18eXZ3dnl6d3RvbWlncYCHfWpoeYiMinRkboKHfG5iY3WDjIl7eYaUlol+eHuF
-iYRwZmV6jJeQgXJrc4GCeWZeY2FOTlp1eGBMSVJvg4Z4XUk+Ojg4OTw5ODo8QD09
-Ozs5Ojg5Njg8PDk8Pj08Pjk5Ojs6Ojk5Ojw8PDw+PDs6Pjs8Pjw+Pz5AQUJBPT9B
-PkFAQERDQkFDRUdHSUpNTExMT09LS01QT09UV1dUVFZUV1tYWllZU09NTE1QUlRV
-VVVVWFlaW1pcXV5fYWVmZGRmZ2VlZ2dlZWZkYF5aV05FQ0VER0RBQ0ZGSUZBQkVF
-SEdGRUVFRkZFQkNGRENERkVIRkNGSElGRkdHRkRHRkhEQ0VGR1BER0dLREhISEZE
-SUdKR0hGRUlISEZFQ0dRYEw+RkVERkRGR0t7ssXP19zg4+Tn6enqT1BOT1BJUEpJ
-SktKSkpJSElHSEZHR0ZHRUZHREJEQD1ARUQ9QEJBQT46PD88QEFAQj09QD46Ozk7
-OTo4Ojk4ODg6Ozo4PUBBPzw7OztBQT8+Pz04ODs8Ozk4OTg5OTk4Ozo6OTo6OTc4
-Nzg2Nzc3NDQ1NDY2NDU4OTk5PTs7PEA9Pjw7Pz06ODs5ODk6Ojc4NjU2Nzw+Ozo8
-Pzc4OTg3Nzc2Njc0Njk3NzY5Nzk5Nzc5ODg2NDc3NTU1Njc5OTo6OTk3Njc1NjY4
-NzY4Njc8Ojg4NjM0Nzc0NTQ2Ozk4ODk3OTo8OT5APTtBPzs9QEJBQUBBP0I/PUBE
-RURFSUpJSkVCQ0JCQDw9PDw6PUE6Pjw/QD87QEA+PTw6OTo6Ozs+Ozo8PD9CQEFB
-Pjw7PT8+QT5BPD8+Ozs8PT49PDk6Ojg4ODc3OTY4OTc2Njo9Oz45Ozw7Ozs7Ojo7
-PT07Ojk6OTo7PD05OzU6Nzg7PTs4NzQ3ODk5ODc5ODc3NTs5ODs3Nj48PTs2MzI1
-Nzc1NjY2OTk3OTY1OTg3NTc1Nzc2NDQ1NzQ5Ozo3NTQ0Njg4Nzc2Ozg0NTc2OTo3
-Ojg5ODk4ODc4ODg7ODg2Njg5ODg3Nzg5OTk4NzUzNTc5OTk4ODk3NzU2MjQzNTM0
-NjQ0MjQ0NzY1NDMzMjQzNDM2NDQ0NzY3Nzc0MjEyMzQ0NjMxMTc2Nzk2Njg3NDU2
-Njk5NjY3NTQ1MjQ2NjM0MTYzNDQ4NjY2MjMzNjY1NDMzNzU3Njc5ODo3NjM2NTU2
-ODk7Ojg2NjY2Nzg5OTg3Ojo4ODc5NjY3Njc6Ojo5Ozg2Njo7PTxBQkFHTExPTk9S
-VFZTVFdZXWNjZmJfYWFhXlpaV1dTTkxNTkxNTVBRUVZZWltdXV1hYGNfXlxdYV5c
-WVpaVlZVU1NRUFVXWFNQVWFmZ1tUV2mDlp6im4l9iJWcm42BgHtqW15jZFxXWV9m
-bG5pX1BIRklPRjw7QEZHS0JCQENOVlJKTlpucmtjZWpydHRsZ2VlYWNocHV4enh6
-gn16eXt6fHyAgoKGiIqJjY6LjIyLjImJg4uLjIyNiYuNj42Ni4yOkIiKjIuNjI2N
-j46NjZCRjIuNjYuJhX53dHN3eXh4fHt8fXt4eHt3c3N1b3BqanCBkpeKe3yLmpuV
-g3d/i5CGcm5yg4yMhX2CjpSSiHh5iJOUiXdqbXuGioN2c3yCfnVra21uZE9NUmVv
-ZFFMVGuBioRrUEI9Ojg4ODk5PDo4Ozs4OT1AOjg3Ojs4NzhAPDk5OTc4OTk6Njk6
-Ozs4OTs6Ojs8Ozc6PD09PD09PT5AQEBBPj48Pz89QT8/QUdGR0lJSkhKTEpLTEpL
-S09RUVFPTk5YXFJTU1FMTUtNTlFRVFNVV1dYWlZZWFRUWF1gYWFhXV5dX2FfYF9g
-Y15cWVRPSkdER0dGRkZFRURDRERDQkZFRUZIQkJDQUFDQkJCQURERUdJSUlER0VG
-QkRGSUdJSUdGREVFTElGR0lIRkdGSEhGR0dIRUVGRkdGRkpWWVNFQkJDREZISEdL
-S3muxs/Y3N/i5Ofn6upPTExOT01LSENISERJS0xKSkpISUhFQkRDRkhGQT9BPUFC
-RENAQDw+PTo+PT08QENBQkFAQD83ODU2Oj07Ozc3Ozg5ODM2Nzw8Ozo6PT5CPzs7
-Ojs7PDw5OTk4OTg5Ozo5Njk4OTg6ODg3Njc2NjY2NDY4Nzk4Nzc5OTc4ODk5PDw6
-OT48PTo7Oz45Nzc6ODY5OTk6Ozo7PTw7ODY3ODY4Ojg5ODw4ODg3Njk7Ozk4NTc1
-Njg4OTk2OTg3NTY3Ozo7ODw5Njc4OTg5OTc5Ozg6ODg2Nzk1NTY2OTU0OD45OTg5
-Ozk9PTk7PD8/QT9EREE9QD88QEFAQENEQ0VISUlHQ0FDQUI+PkA/Pzw9Ozs9Pz89
-QUBCQEE+Ozs8Pj8/PDw8PD9AP0A+Pjw9Ozk9PT49REE+Ojs9Pjs8PDk4PDw6ODs4
-Nzg6ODo5Nzc2ODw8PTw9PDk5OT89PDw8PD47Nzs6ODk6Oz07ODc5Ojg3OTg6ODg6
-Ozk5Ozk4NzU0Nzc5Ozo4Ojo7PDo2NDU3ODg0ODc1NjU2Nzo4NzY3ODY1Nzs4Ojc4
-NjY3Nzg3ODs3NjY0NTU3Ojg2Njk1Ozo4Nzg4Nzk6NjU3Ojc5OTY2NTM1Nzg5Nzg2
-OTk3OTY1NTU5ODc3NDU3NzAzMjQ1ODc4NjU0NTU1NTU4NDY3NTU0MjM4NjM2MzY3
-NDQzMzQvNDMwMjI1NzY1Njc5ODk3NDQzNTg2NzU1MjM0NTMzNDM0Njc3NTY3NTI3
-NTczMzY3NjIzMzQzNDU1ODU0NDY3NDE2NTY1NzY1Nzk3ODk7OjY4OzY6Ozo5ODU5
-NTc6OTY4Nzc7Ozc7PEFARklGSEpMT1FRUFRSVlVVW1tbWlpdW1tYV1RTUEpJSEpL
-S1BQVVhXW19iYWFeX2JiZGJhXl1gWlxbWFVVVVNRTVBRVFpZU1dicnhvYlxldoyV
-mZqPjI+Vl5OHfn2Ed19TVF9qa2dkYFxXV1ZXTklSWVdUT0ZJTVBPRUJESVVZU0dF
-UmZvcHFxd3p8d2tjZWJiZW1zeHt8gIB/fHp5e36AgYGChYmLjIyMi46QkpCOiouL
-iYuNjIqMjo+Pj46PjoyLiIqMjYuMj46PkIyQkZOSko6MiIiHgn91cXJ2dnh6eHd1
-dXRzc3Nwb25wbWlqfI2XlYh/hpWin5eKiJGcm5SHgIWRl41/fYaPjIFxZ21+i46F
-e3Z/iYmEcWJpgo2CalhfdXVYR0hLV2VoXVVbZ3uHg29TRkJBPTg4ODs9Pz06Njg5
-O0A8Pjo5Ozs7NTk7PD06Ozs9Ojk5OTg4OTk2NDU6Oj09PDs8PD07PDs9PT49QTw8
-QkFAPkE+PUJBQUNGSUpJQz9ESEdFREZKTExNUE5QUk1SUE9NTlBNS0xPT09OUlJU
-UVVZWVdYVFJXW19bW1pYVVVWWFtZWVtcWVBPUEtJSUhIRERCQkNCQUVCR0lERURE
-RkVDQUNER0hFRERFRkZFRkdJSUlJR0dJR0VGR0dHR0lEQ0NHSEpJSUVHRUJFQUVF
-SEdERkRFRUhHRU9HPz9DQkNER0ZEREVJfLLF0Nfd4d/e5+jq6k5QTElJSkxMTExJ
-SkdKSk5LS0hJSUlAQkNCQT1AQz8/QEFAQD46Oj8+Ozw9PkA9Pj0+QURBPzo2NjU3
-Ozk1MzY2NzQ3OTs8OTk7ODk7QUA8Nzc5Pj0/Pjo9PTs6Pj0+Pjk1Nzk3ODg8Nzc5
-ODc2MzQ5Ozs6NTg7OD04ODk5OTo4OT1APDs6OTk+ODk7OTg3OTc7Nzo5Oz09PDo8
-Ozc4ODY4PDs5ODo7ODg6OTk4Nzg2Nzc8ODk2ODc3ODk2NTY3Ozw5Ojg5Ojk4Nzo9
-Ojg2NTY3NDQ2ODg2Nzk3NzY3ODc2Nzg7Oz8+Ojs6PT5DQ0NDQUA8Pzw+PkRBQUBC
-RkdJQ0VCR0RBQ0U+PT8/Q0JAOz0+QEA/QT5BQT9AOzw/PTg9Pj8/Pj4/P0I+Ojs9
-Pz0/PTw7Ozs7PD86Oz06ODc3OTw7Nzc3ODk6ODc7OT05OTg5PD48OTk3OT1AOzk8
-PD08ODs6PDo5Ojo4Njc4OTo4PTw7OjY3OTc2ODU3Nzg1NjY3OT06OTk4OTw8PTY3
-NDg5NzU0NjU2NzY3Nzg3ODY3OTo5ODk4NjU1NjU2NzY2NTg5Nzg5ODc0NTY3NTQ0
-PDg3ODk4NTY2Njg1Mzc3NjU2NDY3NDQ4NzY0OTY1NjU3NjY4NDc2MzU0MzMxNTc3
-Njg5NzQ4NzQ0NjY2NjU0NDU2OTU1NDEyNzc1MzM0MzU0NjY2MjQ0NDU4NDY1Mzc2
-ODU2NTExMTQ0MjQ2NTU2NjQ1MzMxMzI0NTQyNjY2MzQ3NDY0NDY0Njo6ODUyNjU3
-NTY5PTk8OTk6Njk5OjY1NzY5Ojk8Njc3Nzg6OTo4Ojo8Rzk8Pj88PEFCRUNHS01P
-TlBQUVJSVVVWV1VTUVNRU05NS0tKS0tMUVRXWl1gYWRmZmVkZmVhX2FhXVtdWllW
-U1NRTk9RUlBTU1JUXm99gHhsaXeHkZSSioWMm6Kfj3pugo6IallbZ21wbF9UT01M
-VEtIS1hmZVpVVltnbmhaVVhibnBlUEtSYm5wdXp8fXxyZl5gYWVqcXd6fYB/fXt8
-e398goSGiYyKiYqOj42Oj5CPjY6Iio6LiIuKjYuLi4+Pj46PjI2Tk5KPjYyMjIuM
-j5CUkJOUlI+Mh4iJiYR5cnBydnVzc3V1eHJzdHNtbm5sZGd5jJKNenN/k5yYi36C
-kJqbi398hI6MgICKlJmThHJvcn+DfXFscoaXkotvW2B6iYZtU1hxeFxHSEpNYW5x
-Zl5gbHt6bFVHRURDPTo7Ozw/PDw6OTk6ODY1NTg5ODo5ODs8QTs7OkA8QkI8OTg4
-OTc1NTs5Ojo5Ojw6Ojk8PD4/PT08P0BAQz48QEBAQkNBQEFBQ0VCRENFREREQ0hJ
-S01NTE1PUk9LSUpMTE5QUEtJSktLTE9RUVRTVFFPUFFSU1RSUk9PTk1PVFJRT09M
-S0dHRkVISUNCQkRCRURDQUNGREJDQ0JDRUNCPUJDRENEQ0NERUhFRkZHRURCRERI
-RkFHR0dHSUdHR0dGSERIRklIR0RGRElLSUdGRUZERERDRkRDQ0VDRERCRkhJRkyA
-s8XQ193h4+Xm6OnpT0xMTUlERktMS01LSUlGQ0VGSUlHRUVAQENEQj8/QEM/QEBF
-QkNBQT5CQ0JCQT48OztBQkJCOTY1NDY5NTU3Nzg3NTU5OTs6OTo6PT5BPkA8Ojs8
-PDw5ODo4OTs3Nzo4ODg2PD05ODg3NTQ1NTc3NzY1Pj83Ozo4ODk6ODg7Ozg8PTw9
-PTs6ODY6Ojo3Nzg3Njs6PDo6ODk7PT09Ojo6Ojk4Ozs6ODk3Njk4OTg0MjE0Nzg4
-OTc3OTo5Ozo3Ojc3Ojg5Ozg4Nzo5Ojk3NjM4NTY3NDU2NjY2OTc1NzU4Nzk4Nzk6
-Ojw/PkFCQkFBQkJBQkBBQUBAQUBAQ0ZKRkdGREREQEJBQkNAPj8+PT0/Pj09P0JB
-Pz07O0A/PDw7Pjs8PD0+Oj0/PDo8Nzs8Oj06OTw9PDs9TTs7Pjs5NzU5ODg3NTs8
-Ozk4ODg4Ozo4Ojo7OTk5Ojk/PT8/PTs/PD07PD08PT0+Ojo9PDw6ODc9PDc3Njg4
-OTg3Ozo7Ojg2Nzg6Ojo6NzQ2ODk5OTc4NjU2Nzc5ODg3NTQ1ODs3ODY0Nzk3Nzc1
-NjY5NDU2Nzk6ODY3Nzo4ODk4Njc4NjU3ODg2ODY2NTY4ODg3OTg2NzU0NTM2ODc0
-Mzc0NjU2NzU6ODc2NTY1NDg1NDQ0MTQ3Njc3Njg4NTU3ODc2NTYzMTI1NDIzNDY1
-NjU1NTExMzEzNDU1MjM0NTY1NTU2NTo1NjMyNDUzMzI2NTU1MzU2NTY2NTg2NDEz
-NTM2NDM4OTg2ODY0NjUyLzQ1Nzo3NjU5Nzk6Ojk4Nzg4ODk3NDU2Njc5NTg3Njo4
-Ojo5ODg3OjxAOzk6PUA9PT9BQ0FCQ0dLS0hLTE1PUlNRS01MSU1OTU1KTUpOT1FV
-WV1fYmNnaWhoamlmY2FdXVpZWFhWU1JPUFBOT09TVlZPUldmdn58dXB6i5qdlo19
-doaao6KSeG96kJSBaWRmY2FiW1JMTE1TRUFGWWhpX1daZ3d+eWdlanqBfnRkXWJq
-cnJ2f4OAeW9gXV9janB1eHx9f319end6e3+ChImKiYyLiouOkY6MkJKQjY2Li4qJ
-jIyNjY2Qj5CRjI6OkZORjo+SkJKQlJKRkpGVlJKRjo6PjoqMiod5b2xvcnBxdXN2
-cW9tbnBsYl1TWnKDiH5oYm+Ei4d3anKIkox6bnODiYJ2fIyUmZWFe32EiYl9Z2Fs
-gIyQiXVoa3R7e2xgYG1yX0VGRkZZdH1yXlZUYGxvY1BOSUU/Ojc5PDo7ODg8PTo5
-OTg4Ozk6Nzg3ODk6Ojs7Qj8/PT09OTQ1Nzk3ODg4Ojo4OTk5Nzo9PTs+Pj5BPjw+
-Pz0/QEJBPUA9RENAQj9DRkVDRkdDQUVGSUxOS01NSUpKSU1QVE9MSkhHSEpKTU9N
-TVFQT0xNTEtLTExISUlER0hJSUdGQ0pIREVFRkVFRklJREdFRUNAQEJCRURDRkNE
-RkRIQUJDQ0lFRERDRUZGRUVHQ0BAQkVHRURERURGSUtLSklFRkdJSUlOTEhIRUZI
-SEhJSklHRklHRkZFRERERkNEQ0NJTXi0xdDX3ODj5ejp6utNS0lJSURGREVFRktG
-R0lHSkdHSUlIR0RAQUJAQjw/PUI9OztCQT47Oj9APj9DQ0I9ODw+QkE4OTc2MjQy
-NTg3NTY4OTo7PEU+Ozs8PD4/Pz47Ojc6Ozs6Ozk6Ozk4Nzg7OTk3OTc2Njc3NTc1
-NDgyMzc1Ojk5OTk4Ojs5PUA+PTo8PDw7PDo3ODc6PDs3NjY3Ozo8Nzo6PD4/PTw6
-PDs6NTc4Ojk2OTc1Njk5OTk4NjQ0NzY0ODg5OTo6Ojo6Ozo5Nzg4Nzc5Ojc3Nzc2
-ODY3Njk6ODc2Nzc0Nzk2NzY6OTc5PD07Ojs+Pz4+P0FCQUE+Q0A7QEFCQENCQ0ZG
-R0dHRUNAQ0I/QEBAPT0/PTs8P0A9PT09Pz8/PT0+PT1AQkA+PDxDOzs+Pj0+Ozs6
-ODo6PTk4OTo+ODpBNzY3ODY5Nzc6Ojo3Nzk7Ozg6Ozo4Nzg2OD08Ozs6OjxAQDw8
-QEA/QDo5PDo6Nzo7PD43Ojo5OD48Ozs6Ozk4N0A6Ojg1Nzc5Ojc4ODQ3OTg5Ozg2
-NTc2Njg4ODc2NTY2ODg2Njc4ODo6Nzo2NDY4NzY4Nzs5Pjw5OTg5OTY2NjY3OTc6
-PDk0NTc4Njk3ODg0NDU2Nzc4ODc5NTczNzY1ODU0Njg5NzY2NjU0Nzg4NDMzMjM1
-NTk1NjQ4NjU0NjU2NDQ2NDEzMzE0OjgyMTU3NjY0NTQ1MjY5NzY4Njg3NjYyNDg1
-NTQxMjEvMjM1NDMzMzU1NDIzODo0MDQzNzU1NTQ3Nzg2ODUzNDQ2MjM0NDU1ODk5
-PDo3Nzg3NzY1ODY0NjY1NDQ5Ojg6OTg6Ozo7ODg5Ojo3NTc6OTk7PUBBQUJCREVG
-S0pIR0hIS0xMS0tMTExMSktOS01RWFdcYGNmZ2dpam1qZWNhYl9cWllZUlBOTU1O
-TExPVF9fWU9QXm55eHVsdYCWoaCaiHh0hJiino95coSVmolwYVdVXGJdVVZZW1RH
-RUxZY2dfUlVneYB0ZWBneIB+c2dmb3R8f3t8f314Z1xZXWNtc3d7e36BgHx8fX2A
-gIGChoaHiI2Ni4+OjIyMjoyGhoaGi42OjI+PjpCRk4yNjImMkJCPkI+QkpOSkpGO
-kJCRkpCMjIuKioiJh4J7cGtrbW9vbm9va2xvbWpmWVNgfYuLe2dkdIaIfnFteYyS
-jX13f4eBcnF/jJGLfG9zgo6VjH9vaXB/goN2anF8eXRoZmx3dGpeTkRGR1RvfHZe
-S0hPX21tXk1LRDs7OTo5ODw7PT1BOzU1Njo6Njc4ODc4ODg7Ojs8PDw5PDw7Ozo5
-OTw6ODg5Ozo5Ojo9Pjs8PTxBPDs+QDs9Qz5AP0FCQz4/QD5AP0JCQ0NBP0JCQUVG
-SE9MSUdKS0xOUVVTTktHR0ZIS0hJSkdOT01NTUtIR0dHRERCREFDRkZERUFAQ0dE
-Q0VIRUNESEZDQ0JHR0VDQ0RFRkdDQkVFRUNBQERGRUZGRkRGRUVGRkVERkZGREZH
-REJGRklKSk1GQ0ZISExGSEdHREdISUlFRkdHRURFQ0dHRUNERUVFR0lHSEhIg7fF
-z9fc4OPl5+np6k5TTUxJTE1LRUtFRUdEQ0RGSUlHRkZHREVCQUFCQUNAPjxBQT8+
-QkJCPjtEQkRBPj1APD89PjY0NTg1NDQ1OjY2NzY3NTg2Ozw8Oj09PTw7PD07Ojg3
-Ojk8OTs7Ojk6OTo4Nzk4NzY3Njc2NjQ2ODg4NjY2Nzc6Ojo7Ojo+PEA8QDw7Ojk6
-ODc3OTo5Ozk2Nzc5OTs5Ozk6OTg6OT06PDk7ODc4OjY3OTg2Nzk4NDU3Ojw2NjY3
-ODg4ODc2ODo7NTU5OTg4ODc1NzY5OTg6NTg3NTc2ODc1NTY2Njc4Ozk2Njk7PD06
-Ozs6PD0/Pj9BPT8/PT49P0BFREREREVFRkRCQ0M/PUBBPT89Pzk7QT1APz45Ozw7
-QEI8Pj07QEA/QEI8Pz08QD07PD86OTk6PDo5Ozo7ODg7Ojk5ODo3Njc1NTk4NzQ4
-Ozk7ODo7Ozo7Nzc6OTk6OjtAPDs7Oz9APjs8PDo6Ojg5Ojs5OTo5OTc2ODk6OTg3
-Ojg5Ozg2ODY3OTg6OTY4ODk/PTk4Ozg4ODo6NzY2Nzc2NzY1ODg4Nzc0Nzk1OEM7
-NDc3ODc5Nzc5Njg4Njg3NTU4Ojk5ODk4OTg1NzM1NzI3NjU2OTc0Nzo5ODg4ODUz
-Njg4Njg6Nzc3NDM0MjM1NTc2NDY0MzMzMzQ2NjU0NDc5NDQ1NDY0NTQzNDQ0NDMz
-NDEzMzczMzM3NTY1NTU0NTUzNTQzNDQ1NjY0NTM1ODc2NDQzMzk1MzU1NTQ0MjEz
-MTY3ODc3NjY2NDQ2NzYzNjc1NTU1OTU2NjY2Njk5NjM1ODk5NTc3NDM2Nzg6Ojc2
-NTc5NjY1Njc3ODg9Ojg6PDk6Pz49PkJEQ0RDQkZJSUhKS0lHSUpKTlBUVVZaXmFj
-ZGVpa2ttbGhnZF9cXFtXVVRSTk1NSklHRk1daWlfWV1rfIB5al5mfZGdnZSJf4SL
-lJmViYKAipSWhXBfVVhfYFlaZXFxZFNOVWBlZVZNTmFwb2VTU11yfHdpaG14fICC
-hYJ/em9iWV1hZ3F3e3p+hIGCfHl+foWFhoeFiISChoqMio2Mjo2MiYmHi42Oj4yN
-j5GOj5GSlpKOkJSTkJGNjY+QkpGPkZCQkJCPi5CNiYyLiYaEgoF7cGttamtubW1t
-am1scm9kYXKOmpWIeXqMkpWQhoSNmZ+WiYWNi4B+g5KXlYFpY26CjpOJfXZ3gYmD
-dmRjdoeFcVlXboF/aVlVSUtLUWh2c2JNRUlab3p0YE5FPzw8Ozk5ODtBRUI+Ozc4
-OTc2Nzg5OTg4OTo3OTs6Ozs5Ozs9Ojs6Ozk5ODk4PDw8QUA7Ojk4ODc4ODo+Pj1A
-QD49QD1CREA9QD9BPjo+QUJBQkRESE5LSUtHR0tLS1BPTk1NSEtISEZIRkZISEdC
-Q0ZGRUJCQ0I/PkJAQkE9PTw8PURHSEZERkdFREJDQkJDREVEQ0JCQURGSURERUNB
-QEJDQ0ZFRUZER0dGSEdFREVJSEZGREZHRkZISEpHQ0lMSUpLRURCREhGRElJSUhP
-SUdHR0RERUNGRkVFREVGRUdDSUx9t8XO19vg4+bn5+rqTUxOSkpISkhFS0tER0VL
-SERDRURDQkRGRUJCR0VESkI9Qj1CPjxBQ0VEQUNEQEI5Ozo6ODY2Ozk2OTs7Nzs6
-Ojk3NTc4OTc5PDw5Pz88PTw7PD48PDs6PDw4Ojg3ODg3ODk4ODk5Ojo2NzY3ODU2
-NzY2NzY3ODg7PDg5OTw9Oj08Ozw9Ojg2ODg3Nzg4ODk2ODY3ODs9Nzg4Njg5Ojs7
-Ojo5Nzo3Ozg4ODg3ODg5OzQ0Mzc4Nzc4Ozk7OzY1NDY1NDkzNDY1NjY3ODg2NjY1
-NjY0OzUzNDU1ODc2Mzc6OTg1ODk8PDg3Ojs+QUA8QkE/O0I9QT8/P0BCRENGRkJF
-RERLQ0JBPzw7Ozo8PTg+Pz08Ozs8Ozw9PDw7PTw9PTw9Oz09PTw7PT07Ojk9Ozk6
-PDo5PTg3PDw6OTs7Ozo5ODo5ODU2Ojo6ODo5Ozw7Ojs8Ojo3Nzk6Oj85PDs7Ojo8
-Pjw+Ozg4OTk3ODk6PTw6OTY1NTc4Ozo6OTY6Nzo2OTc3Ozs6ODg5ODg6ODc1Nzk5
-Ojg5ODc3NzY3NjU1NTg3Nzg2ODg5OT43NjY1Njc3NTQyMzU1NTQ2NDU/Ojg4ODk3
-Ozo2NTY4Ojc3ODk3ODg3ODY2NzQ1Nzc1NTY3NzY3ODc1NjUyMzE3NjMzNDUzMTI2
-MzI0NDg1NTQ1MzQ0NDM1ODY2OTY3NzczNTg2MjIzMzY0NjQ0NDQ2MzQ2NzYzNDQ2
-NTY6OTc2Njc2NjQ2ODg3NTU4NjUyMzI0Nzg3NjczNjU0NzU1NjY1NDQ2NjY1NjMz
-ODU3Njg4Njc2NzY1Njc3ODg4NzY5ODg3NDY5ODU1ODg4OTo6Nzs6Ozs6Ojs8PEBB
-QD48PkJGSEpOUE5LTVJSVFhbXF5jY2lpbGpraGtraWZjYF1ZV1dUUU5LSElIRUZH
-TF9sbWdteIOKh3hgV2R4hY6OioeLlJ6cj4WCh5CTkIiBfHZvamJYWmF1hINzZFxk
-cHFrVUtUZnBvYVFMWGt2c2pye4KDhoiKg3t0Z1tZXWZwcnl8fn9+gYCAgoCDhoSE
-g4WEgoWFh4yPkZSRkY2KiYmMkY+QkZORj4mQj5GSj5OWlZOUkY+QkZKTj5CSkpCO
-jo+Qj4uJjY2JiIWDhIR7cGpoaGtqaW1pbXd/f3R2hpmdnJSJi5iZm5OIiJSamJKM
-jIiBd4CSnqCag29rc3yIh3pwd4OOko5zW1xyhod2W1VqgIFqVldRS09PWGdrZFBG
-SlpzgoR2W0pBQURBOjc4OT1FQkA8Pzo7Ozs6OTo+PzY8Ojk5ODtAPTw7PD47PDk4
-ODk3Nzc4Ozw/Pjs/Pjw+Ojg6Ozo7PD1CQDs5OTs9PD08PTw9Pzo9Pz0/QD9ERkRG
-SEpLS0pMTUpMSEVARERCQT4/QEJDQEJEQkBAQjw+PkFDQUFAP0JCQD8+Q0RFREVF
-RENGRkdDQ0VGQkRBQUFERkRFSUVCQ0JBQkZEQ0NEQ0ZFRURGRUNDQ0VDREhERkdK
-SElFRURHRUdMS0hGR0ZGSUhFSEhJSkZFSUlJSUdERUVER0RCQkdGSEhJS3a4xs/W
-29/j5ufo6elNTUxMT05LRUNBQ0E/RkhHRUpCREJDQURFQ0RGSUdEQ0RAQkBBQUA9
-QUJEPDxAPzs8Nzs6ODk4OTg3OTk8PT83Nzc2OTo6PTs7PTo9PD89Pj8+PUE8Oz0/
-OTc3NkI6ODg4ODs6Ojg5ODg1NzY8ODU0MzY1Nzk4NzY6OTo8Pzs8PT06QDw5ODo4
-Nzc3ODc4Nzc7Ojk6OTo6OTg5Ozw9PTk6Ozo6OTc5Nzk4OTc3OTo7ODg1NDU4ODg4
-Nzc5OTg0MzU2NDQ4NjY4Njg1NTY2NTQ0MzM5NTQ0NTY2NTc4Nzg3OTg6Ojg3ODk3
-OTw9P0BBQj48Ojk4Ojw9QUJDR0ZGSENDQEBAQ0A7Ojs6QTs9PD49Pj08Pzw4Ozg5
-Ozk6Ozk7OjxAPD1APUJAQEJCPz09Ojo6OTk2NzU3OTc3OTk5ODc4Ojg1NTc5Njo7
-Ozk3Nzc2ODg3ODg4Nzk5Ojc4OT5AOzo9Pjw9Ozk6Ojo9PDo8Ozk6OjU2ODg5Nzg5
-OTo7Ojo3Njc5OTo7PDs3ODg5ODk7ODc1NDs4Nzs5Ojk5ODc3NDQ1Njk2NjY6OTo5
-Njc2Njc2Nzc0NDU2ODU1Nzo6Ojo2NTY3ODY4ODU1ODk4OTk3Ojs7ODU1NjQ0NDY2
-Njc5Nzc3NTM2NTo3Nzg4ODk3NTc3NTY0NDU2MzQ1NDU6NTY2NjQ2Nzc5NzU3NDMz
-MzM0MjY2Nzg3NDUzNTY4NTUzMjQ2NDY0NDg3OjU2NTg0NTU1NzgzNDU2Nzg2OTg2
-NTY2NzMzNDU0MzY1NTY2OTk2NzY2NjQ3ODo5NzU2ODU3OTY3ODU2NjY4NDY4OTY1
-Nzg6Nzc0Njk7Nzk5Nzo8OTk7QDo7PD5DQEJBREZIS0tNTlBTVFVbWlxgYmRnam1q
-aGhnZ2lmZWJdW1hWUlBOTU1LSUdHRUZKWmRkaXmPlpOKdmNib3+Hf3p5g5Oip6CM
-dneLm5qSgHyGi4d2YlJRYHeDgnZtbnqEg3dmXWV3fHdkVVFgbnd0d3uBgYaLiouF
-fnBjV1lha3R4eHp9foGDg4WHh4WFhYOAf39/gIWHiY2RkI+MjI6MjIyPkpCRlJGP
-jo2Qj4+QkY+Pj5GPjpCUkZaOkZGRkJCPkpGRjI6NjYyKhoWEhH93b2loZGhmZ2l2
-hId+dHqImZ6WjISLlZSPhHV7hoeIio6Jfm5qeY+YlY+EenmBh4R6aV9qf4+ShXNj
-ZHN+fHBiX2x2eGVUWVdQVk9JVWRoX1NTWWt6gHxiTEZFSERAOjk7PDs5Oj09QkI/
-Pzw4Njo5OTc4Nzc2Ojw8PD8+Pjw7ODU1OD47Nzk6OTo8Ozs2Ojs7OTg6OTw+PD08
-Ojk9PDw9PD09PDo5Ozw+P0A8QkFAQkRFRERFR0dHRkJERUE/QD1APj0+QUA/PTw7
-PkA/Ozw/QD1BPEJBPz1AQD5CQ0JGRENDRUVDRUZGSkdJSElERUZFRUlEQEBCQkNC
-REZFRURFRERGRUZFQkNFQ0VGRURDREdIRkpIRkVKRkhGR0xLSEdISUdFSUVHR0VE
-R0VHSEhHR0dHSEhCRUhFSEtMd7nG0dfc4ePl5+jq6VBRS0tKSEpDPkA8Pj0+QUJC
-RkZDQ0NCQ0JBQEBDQkFCR0VBQUJAPj1AQEM9PDs9PDw9QD0/Nz07Ojk4NDQ5Ozo2
-Nzc1NDg7Pjw/PD06PTs7PTo5Ojc4OTk6Ojg5OTg4NjU3Ojw6Ozc2OTg2NTc4Njs3
-Njg4Ozg7PDw7PDo8PD9BPD85Ojg4OTo7Nzg5Ozo7OD02Nzs9Nzo6OTs7PDk3ODg4
-PDo3Ozk6NzY0Njc1Nzc1NDY3NDU3NjU3NjQ1ODc2Nzg4NzU1Nzg5NzU0NjY3NjU1
-NDU0ODc2NTQ1Njo4OTg3NzY2OTY4Ojk7PD48PDs9PTs6ODg8PD8/QERCREVGSEhF
-RD8/PT89PD0/O0A8Ozw9QD07ODo5Njc2NjY5OTk6ODw7Oz0+P0RAQUFAPz48Ojo3
-Nzc2NzU2Nzc3NDY7ODY1ODU1Njw4ODs4OTk1NTQ1Njc2OTs4OTg4Ozo5ODk6PDs7
-Ozg6PD46OTg4Ojw7Ozk3Nzc4OTs6PTs6Ojo5Nzg5Njc1Nzo+Ozg6OTs3OT05PTo6
-NTg4Njo6OTg5OTs4NTU0NTY4ODc3NTk2Njg3NjY3NDQ0NDM1Nzs2OTg4OTk6OTk3
-ODg1NjQ1NjY3NjY2NTU3OTc4Nzg2NTU0NTU5NzY1NDQ1NDY0NDU2NTczMzY2MzMz
-NDU3MzIzMzU0NjU0NDY3OTY3NzU1NTQzNjY0NDY0MzEyNDMzMzM3ODQyNTcwMzY3
-NjUzNDMxMjE2ODk4ODk2ODk5Oj07OjUyNTU0MzkzNTQ1MzM0Mzg6OTI2OTU5Ozo4
-OTo5OjY4QDo5ODo8ODU1ODc6OTU3ODo4PTo4Oz02Njw9PTs4OTs7Ozw5Ozg4PDw7
-P0RFR0pOTVJRVlhYW11fZWNmZmZpampqa2toZmdhW1hUUVFRT1BOT1BPSklEQkdQ
-VFlmfpSem46Adnh/g4R8bGp9mKWqoIt1c4ian5J6domYkn1hUk9ecXp6b2tyg42N
-g3VwfIeKh3psaXN+fHh7goWHiouKiYN9bl5YXWlydXh6en6Bg4OGiouNiomHgX9/
-fXyAhIaKjI6NjY+Rko+Pjo+OkZCPkJKSk5SSlI6Oj5CQk46SkpGVlpmYlpSTkZGT
-k5COj5GQjY2KhYN9f310bGtpZ2VfZneEh3dqb4WQkYd1c4GMjoFvaXV+g4uSlpJ7
-aGt6jJCKfnl7gpCTiXVkXmt6hoV4bG1zeHNrY2Vuc29oXFpfWlFUT0JJW2xzZlta
-XWx5cl9OSEhFQUE+Pz47Oj05Ozs/QD8/Ojg3OTk6Nzg0Njo5Ojc5PD48PDw6ODY5
-Ozk5OTs8PD49Ojs6Ozs7OTg7Ozw7OTk8QD4+PD48Oz0+PUA8PTo7PD89QEFDQkND
-QkJEQkJEQ0FCQT09PT8+PUNAQD09PDs6PT5CQUFAPj1EQUJBPz8+PTxDR0ZIRUVF
-R0RCQ0NFREdHRUdGR0NETkRAQEBCQ0RCQkJCQ0JDRERHSUhERUVJSEdEQ0RCRElK
-SkhISEhIRklHRUVISEZGR0dKSkdISEVKSEdHR0VJSEpIR0ZDRUdITU1yuMfR193g
-4+bo5+nqS0tNTEtIRkFCPkI/Pz9EQUJEQ0ZJREVGQ0BCQEBCQT9BR0VERkNCQEBC
-QkA9QT48Pjw8PDo+QD07Nzc5Nzc1ODo3ODc5ODo5Oz48Ozw8Pzo5Ojc4Ozs4NzU3
-Ojo6Ozw9ODk5Njg3NzY2NTQ1ODU4ODg5Njg4Oj07PDw3PD09PDxCQDs5Ozo4OTc5
-Ojk5Ojk5Rzk2ODc5ODY2ODk6Nzc5Ozs5Oj06Nzc4NzY1NjY2NDU3MzMyNTU2ODY0
-NTo1ODY2Ozs3Njg1ODY2NzMyNDc2NzEvNDo2Nzc0MzU4ODo1NzU2NTg3OTk9Ozo+
-Ojk6OTk6PDo9Ojc6PEBCP0BFRENEQ0NBQUI+Pj09PD0/PT49PUBIP0A/PDg0Nzo8
-OTo4Nzo9Pjs5Ojw+Pj0+PD8+PTw6NzU4NjQ3Ozg4Nzc3PDg4OzY1NjU6ODc5ODk4
-ODc1NDc6Pjg2Njc6OTo5NjU5OkVBOTo6OTg4OTc6OTY5OTk4Ojg2NTk6PD87Ojo4
-ODg1NTg4Ozw6Ojo6Ozs7OTk5OTs7ODs7ODY2Mzc4ODU3OTs5ODY2NTc2ODg3Njg1
-NDY4Nzg5ODQ1NDU1NTg7ODk3OTY4NTc5OTk2NDM0NTU2NjY1Njc3Njg3NzM0NTc5
-ODc3NjY2Nzg0ODQ0MzIzNDc2NTQyMzMyNDQ2NjI1NjU2Njc4Njw4NzY1NDQzNDMy
-Njg4Njg3NTIzNDc2Nzg4Nzc1OTk0NTY1NjM0NDM1NzU1Njk3ODc3Ojk9OjY1Nzk1
-NTY3NTU0NDEwNjU3NjY3Ojk4Nzo4Njc3NzY2Nzg3MzY5OTw9Nzk7OTk6ODg2Njc5
-ODk4ODc1Ojk6PDo2Nzs8Ozg4OTc5PUBERkZJTlBWWVtcXGJiYGNpaWpoaGlpaGdn
-ZGNfX11XU1JTT05SUFFSV1RQTEhEREtRVWeDkZWRhYCGjpSQhHVma4KUnKCYi397
-hZGXjHx7jpyWgmhZV2d2e2xcXWt+iYp/cnOEkJKNe3VyeX57f4KGiY+PkY2Kg3xt
-YmNpdHh5fIB/gYWFhIeIjouKiIB9enl7gIKEiYqNjY+OkJCSlJOTjo2RkJCRkpGR
-kZCQjpCQj4+PkpORk5KSlJWVkpOUkpOSjYyLjo+Li4qHfn57enlxZ2JfW1Vdd4F+
-bF9ofomLfWptf4mJgnl3fYKDj5mdk4N0eIeNi3xrZ3OLlZaJem9xe4J+c2ZicYCC
-dWRaZ3h9bVhRXWhZS1FNQ0ZZcn52ZVxSU19nY1dISExDPkE/Pj08Ojk4ODw+QDs8
-ODg4Ojk1Njg5OEA6ODo6Ojo5PDs6Ojc7Ozo5OTk6OTo5PDo8O0Q+Ojs6Ojo7Oj08
-QD0+PDo4OTk6PDs4Ojw5Pj4+PkA+Pj0+QT8/QT09Pz1AQDs7QUA+QEA9Pj89PTs9
-PEBAQkA8PT1BQz47PUJBQkNHRUVDREVCREhGRkNERURDREZFSERHRUJDQUJDREFF
-RENDQENERUlNQkNGR0ZERUZEQ0RESElGRkVERklISEhISEhHS0hHRkdHSEhIR0dI
-RkRGSEdHQ0NGSUdFQ0NGS3W6yNDY3ODj5ufo6elKSU9OS0xKRDw9OkJERkJFQ0I7
-QEBAP0JCQkZDRkVEQkJDREJEQj49PkA+PTw9PTw5PkE9PTw5Qzs3NTc5OTc3NzU1
-NTU1ODw9Pjw5PTo4Ojk4PDw8PTk4NzY7PDk4OTw5ODg2NzY2ODg4NzU0Nzc3NjY3
-OTg2PDo6PUI6OTo8PUBBPj08ODg5Ojo3Ozs4OTg6PDg6ODo6OTo8ODc8Ojo8Ojk1
-ODg3ODo5Nzg2Nzc4NjU2ODw3NDY4Njc7Nzc4ODg4NjY3ODc5OTk1NjU1NDM2Njc1
-NzQ1NzQ0NDk4Njk3OjU1OTY4ODo5PDs7Pjs8OTg5Nzg2OTs9P0BBQkNCRUVFPz0/
-QkE8QD89PT5BQEA7PD89RT07Ojk3ODk5Ojs7Ozs7Ozw8PUBAPT06PDs9PDk3ODs6
-Oj4+Ozk2Nzg4NzY5NjY5Nzg7Nzc4Nzc4N0I4OT07PDk3Nzc2Ozo3Njg3O0A5Ojc5
-ODk6OTo6Ojg3ODY3Nzk6PTk4OTk6PDs6Ojk4Ozc6PTs+Ojk6Nzg5Njc4Njk1NjU2
-OTk6OTY1Ojk4Qz03Nzc3NDU1NjY3NjU1MzQ3Njc4Nzc2NzU2OTc4OTg3NzY2Nzg7
-ODc3Nzc3NTU3OTk2Nzg4ODg2NzY0NjY5OTg5Ojo4NTc0NTg2NjcyNTQ1NjYzNDQz
-ODY1MzEzNTU2NjY1Njc3Njc2NDQ2NzU0NDU2Nzc1NTY1Nzc4NjY2OTk3OTg1ODMz
-NTU3NjUzNDc2NTo4OTw/PDw6Ojk5OTY1Njc2NzMzMTI1NjUzNjc4Ojs5PjY4NDU2
-OTY4Ojg1NDc4NzY2Nzg5OTg4ODc2NTg6Ojk3ODc7PDo5Oj0+Ojk4Ojo5Pj1AQkdJ
-TVBSV11iYmJnZ2hoZmZpaWdlaGVkZmRjYV1aWVZTU1BTVVZZWFlbW1dSSkRDSUxW
-b4WNjIB2fYuYm5uOe3F4ipSUk4uIiYqNkpCDgIWQmpODdW9weYB8aldTZnl+fG5i
-bH2MjIR5dHR2f4OFh4uPlJCPi4aDfG9pa3J5fX1+hYmIhYaIiYqKkYqFf3p3eHt+
-hYaIiYeJjI+PkJGTlpWTk5OSjY+Nko2Ljo2OjI6Qj5OUlJCTkZGTlZSSlJaVk5GT
-kY+NioyKh4iCgn19e3NrY1pRUWBzf3RjYW6Bi4x8cnyNmZWOh4R7dnuFjYuAeX6O
-l5OGbmBhdoaOjX52eYSLjYFtW1xyhoR0WVZogIFxVktecV9JS1FNSVVvfXtqU0lL
-UmFoY1FJT01BPUJBPzo6Ojg4Ojk6Nzg7PTk4Oj48Ojc4Oz04P0Q3OTo7Ojk7PDs5
-Ojs7Oj08OTg3ODo8Rz05PTw7PDw5Ozo8PTs7Nzs6Oz04OTg7PT09Pj08QT9BPz48
-OTs6ODo6PT49PTs9QEA+QD1CQEBAPj1AQEFCP0JBQD5BQ0JAQUFDREVIREVFQEVG
-RkRBRkZCREREQT9GSEVERUJBQUFCSEFEQ0NCRERBQ0ZGQ0JDREhBQ0RCREVHSENG
-SEpHRkVFR0ZHSklMR0lIRkpGRkZHRkRFSklGREZIRENFSUdGP0FJdrjI0Njc4ePl
-6Onp60xOTEtJSkhEPz9CSEdJR0NEQT89QEREQUM+QENCOjo8P0JBQT88QD1BQUBA
-PTpCQUBBPjw7Oj48NTUzNTY4ODk5NTc3NjU1NzU3OTw3ODY1NTU5PTs6Ojw9ODY5
-Oj04OTo7OTk2Ozw4NDg2NjU1MzU2ODc6Ojo5OTo6Ojo7OTs+PD07Ojs6OTo1Nzc1
-ODw6OTs6OTo5Ojs6ODc6Pjs7OTo4Mzc1Nzk5ODc5Nzc2Nzo0NDU5Ojg3NTQ2ODs7
-ODg3NzY8Njc2OTc2Nzk2NjY2NDM4NjMyMjU0NjQ1ODQ0NDg5Ojg5Ozg4ODw7Ozo/
-PTo7Nzg2OTo7ODtAQj9CQkA+Q0JBPj47PT4+Pjw+Pjs9PD08Ojo7Ozw/QDo7PDs4
-Nzg3Pj88OT5BPj09PT47Ozw6PTo4OTk4Nzk6OTY6ODw5Ojc2Nzc6Nzc1NTg2NDY7
-Ojk4ODw5ODo5Ojs4ODs8Ozo3Nzc3OTg3Ojo4ODs6PDY4Njk5OTk8Ojo3Njk5OTg4
-PTo5Ozs6OD09PDo5NTQ2ODY0NTU2Ojs5ODg4NjY0NTM2Nzg2Nzk2NzY1NDc2NjUz
-NTU3NDY2Njg2Nzg6OTg3NTc6OjY6OTk4ODc3NjY3OTg1Njc3ODU5ODc2Nzc2NTY5
-ODk7ODY2NzY3Nzc4OTY0NjMxMzMyNjY3ODc4NDY0NDU1NTQ1NzU2NTU4OTUzMzU1
-NzY2ODU0NDU5ODg3OTs+PD44OTo5ODQzNTg4Njg2OTk6Ozs6Oz48Ozs+PTs5ODk5
-NDU0NDYxMjM0Nzc1NTY5Nzg4ODg3ODo5NzU2NTc2NDU1Ozg4Ojo6Ojo4PTg4Nzc9
-PDs5ODg4PDk5OTc1ODk7Ozo8P0JHR05TVVpcYWRpamtsbGxpaWppbGloZWJjY2Fd
-WlZUVVNTUlRWXF5iZmtrZl1RSUZMUV1yhId/bmx8kJedmo2BgI2Zm4+Ce4CMmZ+T
-gIGMl5uWh4B+gomPjodwXV5ufIN4YFhjeISDdm1wd36Ch4aLkJCRkpGMhIF8dnZ6
-e36ChIiHiIiJi4qNjpOQioB6d3V2gYeIiYiMiY6QjpCSk5eVmZeVk4+KiYiGh46N
-jI+Pjo6PjY+Uk5KQkpSRlpSQkpKSkJOTko2LiYmJiYiCfHZybGVcUk5WaXyEfG9w
-hJWdloiCiZOZmZaLfnmAiYqGemttfpGbmYd0aGt8g4B0ZmNyipaSh3JfYnB6fHJg
-Xmx3eGtZU15tXUhIVlZQVWRyc2ZUSEZLXGtzZ1ZNUkpDRkU+Ozg2OTk6ODY5PD47
-PDc5PDg5OTg3Nzk4OTg7Ozs7PDk7Ojo6Ozs+PTw9Ojc5OD1BOz07Ojs8OTs8Pz49
-RUZAQjs6PT5BQDo+Qj0/PD1BQj87OzY5Ojw+Pz48Pzs6PDpAPDs6ODo8OzxBQEBC
-PT0+SEY9PT1BREM/Q0NISEVDRUE+QUJDQUVFQUJDREZHQ0FHRUREQ0RBQkNBQ0JE
-REREQ0RDRUVDQ0JERUFCREVGRURGREBCREZHRkdIS0VFRkxLSUdISElIR0dFRUVF
-RkRFRUZFRkVFQkFBRUlytsfR2N3h4+bn6erpS0xLS0tJTEdEQUVLRkRHPD1CRURG
-R0VDQ0hAPj07OzxAPUNDQD88PkBAOzw9QEE/P0NCQD1CPDs2NTc0NDdGOjU2Njk3
-MzQ0Njc6Njg5NTU2Njg9Ozc3ODg9PDk5Nzk9Ojk4Nzc3ODk4NzY0NTM4NjQ4OTc4
-ODk5Nzc5ODg6Ozk7PDo9Ozk6ODc6Oj05Ojo6Oz04Ozg3Nzk6Ojg3NjU3Ozo4Oj44
-NDc2NjU2NjY1NTc4Nzc0OTo5ODk4Ozs4PTo2PDg6NzY2Ojg2NjU2NjY9NDIzMjUz
-NTMyNjk2NTQ1Njc5OTc3ODo6OTo8Pjo9Nzg5Ojw6PDo7Pj8+PTw+QT9BQ0JAPj49
-QD08PDtAQT4/QUI9Ojo9Pzw9OztAPj08PTs5PEE/Pz8+Q0JAOzo6Oz44Ozk6Ojk5
-PDc5Ozo7Nzk8Ozg4ODY3NTY5OTc4ODo4Ozg7Ozs5OTo6NzY3ODo8Ozg6OTg4NjY3
-PDk4Njc3OTk6OTk8Ojo7Ozo7ODo6PDw4OTo1OTo4Njg3OTk6OjtANTc3Njg7Ozg2
-NjY4O0E2NjU1NTU3NTc1NjY2ODo4Nzg2ODo3Nzc4ODc1NzQ1Njc3Nzk5Ojc6OTo2
-NjY2OTg4Nzc2OzU3NzQ2OTg2MzQ1NDg6PDs8Ojc1NDMxMjM2NTU1NDY1MjU3Nzo8
-Nzk6NjY2NTM2ODc2NTQ1Njc5NzU5Nzk2NjYyNTY0MzY3ODk7RkxMRkU9Nzc3ODk3
-Nzg6Oj05PTs9PT8/PT5BPj9APTw5NjQ0NTY3NjU3NTU4ODc4NzY3NDU3Nzg2Nzc4
-NzY1NjY0OjU0NDY3ODc3Ojo8Ozg6PTw/OTk5Njc1Nzk3Njc1OTg7PkFDR0lMTVde
-X2NnaG1vb29vbG1tbGlqZGNhYWJeXFxXVlVVVlZWWV9ma2xxc3BsZFhMR0pTZHuG
-hXRocIKNk5WLgIGRnqKgjn1yeo6en41+fpWgn5B+foiQl6CikXxucIKKiXRfWGl+
-hYN0cnqAhYmPk5KUkpGRj4qFf3x2e4GBg4aIi4qJjIyOj46SkoqGe3Z5foGBhoiO
-jY2PkJCOk5OWmJiWko+KiYqHg4CDhouOj5GOj5CUlZSRkpaVl5eTk5CUkY6OjY+P
-joqKiIOEg311cGxkYmRkYm2Aj5CHe4CNnqKYi4aFi5SZlouAhY6WkoJxY2d5hpCS
-h358ho6MgW5cXm+GkYx/bmluc21nYmdudHVoXl1dY2ZaRUJPWllTVF5naF5OR0hZ
-b3l2XktMUktEQz48Oz8+PTc4Ojs+PT5APDo6ODc4Nzg2NjY4OTg3Ojs9PTs5OT8/
-PTo5Oj09PT0+Qkc8OTk2Njk9PkBBPT9DQz5AQUA+QEBCQT44ODk8PDo6Ojk7PjxB
-PD1APDw4PD49P0s9QD5CPDxCRD9CQEE/Pz0/Pz08Ozw+QEI9QUJGQkFHQ0NARkZF
-RkRGQ0REQ0RJRUVFRkZDQkNAQUBBQUJDQkBDQ0JBREZDRkVGR0VFRERESEdIRERG
-RkhIRkVIRkZHSkhHSEpJSUlHQ0ZIR0dFREREQkhMSkVHSUZGRma1x9HX3eDi5efp
-6upMS0tKS09SSkxLS0VDQkNAQ0JCSENCRUNBP0BAPUA/Pz1BQkBBPT06Oj5BQT8+
-Pj07PD48Njk3Njk4NTE0Nz47OjUxMTY0NDc4NTg5Ozc1NzQ4NzQ4Ojc2NzY3ODg6
-Ojo7OTg6NDU6ODczMzQxMzIzMTQ0ODk4ODk5PDg1NjlLPTk4Njo7Ozo5Ozo8Ojg1
-OTo7Ojg3OTk4ODg4OTY3NjY4OTs3NTg3Nzc3NTYzNjc2OTc1OTo5OTg5Pjo3Ozc2
-Ojk2Nzo1NTU2Ojk2Nzc3Njk2MzAyMTU1NDk2Nzk6NjU6Ojo5NDc3OTk6Ojc4O0E6
-OTk9Ojo4Ojk5ODs7Oj0+Pz5AQEA6PDw9OTs5Ozk8PT0/QT9APz5AQDw9PUA8PD5B
-QD88Pj1BPT8/PkA8Ojo4Nzg5OTk4Ojs5NzY1Nzg8OTc4OTc5ODQ2ODc3OTk5Nzg8
-OzY3Nzo7Ozs/Ozc2Njk4Nzc4ODc3NzU4ODc4Nzg3OTk4Njg8Ojk7ODo5Nzk4ODk6
-ODc2OTk3Nzg2Nzk7OTg2NDc7ODg4ODQ1Njc6Ojg3NzU3ODY2MzY2Njg4ODg3NTc0
[... deleted lines elided: base64-encoded binary payload (not human-readable), part of a file removed by this diff ...]
-z9fd4uTl5+jp60NHRkRGRUVKSkdISERAPj5BPUA/QD48PUJBPkE8Ozw5PTo8PUFB
-Pjo9QT88Pjo8Oj4+Ozo8Ozw8ODg7OjU5Ozo1Njk8ODM2NjU2OTUzMzQ3NTQ3Nz04
-OTg1ODk3Njc2Nzc1MzQ0NTY5ODc3Ozc1Mzc0MjU1ODUzNDU2NDU2NDY2PTc3NjIy
-NDc2NjU0NTQ1MjI0Nzg4Njk3NDU3NjQ5NDM4Nzc4Nzc3NDQ3Njk1NTU1NjQ2NzY5
-NTU1MjA0MzY2Nzk7ODc1Ojo4NjU1NjMzNTc0MzMxNjM0NTI4ODU2NTY3PDY1NDQ1
-ODY2NTc4NzIzNDMyMjM0ODc3NjQxNDU1NDQ0NDMyMzYzNjg4OTk4ODY1NTU4Njk3
-Nzg5NjM1Njg0Nzk1MzM2Nzs4Nzc2NzU1MTM1NDY1Nzg4OTc1NDQ1ODc1OTY5NzU0
-Njc4ODk3Nzc7Nzk5ODc5OTg4OTo4OjU0NTk0NTY0NDQ3NjY2Nzg6ODc2NzU1NDY0
-Nzc6ODY3NjU0NDU1NTU0NTQ3NTg0NjU9OTc2NDY2NjU2Nzc0Nzc2NzY4NjU3NzQy
-MTQ2NzU1NTEyMzQ4OTo3NTQ1ODg4NTY2NzU1NTU0NTQ0MzIxNjQ1MjU0NTY1NDQ1
-Nzk3OTZKNDczMzQ1NTQ2ODtBPT4+Ozk4NzYyMzU0ODQzNjQzMzI2MjExNDU0NzY3
-NDg3Nzg5Ojk6NjQ2NzY4Ozw6NTc5Pz9HTlRXVGBxfHJiUkxMTEhFRktNSUlISktM
-RkdGQEA9PTs1MTM0MjI1OTg7O0BBQURFREVCQURERUVEPj5AQElXZ3N+hYmOhnxy
-ZGRjbHV7gYGHiYqDdGpvbm9xdXl+g4uOkpKSjYqHgHlvYFFJSExYX2lydHp+gYaJ
-iYeHhoKIiISBfHZ1cWxvcnl+gX5+gIGAgYKDgn97dW5pY1lRRj04OTg6Oz9CRUdE
-Pjk8Pz48ODc2NTY3ODg2OEpjc3VwcXZ1cXBjWVxsgoyMfV9LQkI/RkdGSFFOTk9M
-TUhISUlISk5TVVZZXF1hYmRkZGNfXV9hYWNgXllWVVpgY2loZ2JcV1hZWFphaW52
-e3yCg4J+e3RvbGdpc3t7eG5gXFtdW1xjYV9bV1hYV1dYW15kdX+FhXpydn+HiIB2
-fIiQjYFzdYiVi3dqaXaDh4J3dX6Hh35zbnd/fnduZmtwdHRtYVhcaXFvXEtIUmRv
-ZlNHUGFpY09ER1VaUUhJTEdEQkRFQTs9Pzo3Oj5JUVBHQD9BSUxLRT1BR1VgYFdL
-RE1RVVVJPj5DTVRUSkpQZ2xdV15kanBqZW53eGxbTlNjcWxkSUQ9QEBDQD1AQEBE
-QEFCPz88PTw9Pj9BPUE8QDw9Ojk4PT5BQj5BQEFBRT09OzxCP0RBPj0+Qj1BQz8/
-PUFBQkBAPj4/QkJCRUE+QEBBQEJDQkNDQT0+QkJEQ0VFPz4/PkdDPj4+PDxDQ0FE
-RUNGREI/QEBCRUNBQEZEQ0RCQkBFQz5CQUNCQkJFRUVERENAQEFFSEdEQkBAQkND
-QUNDRUJMSD1BPz9CPj9BQkRERVGbscTQ1dzh5ebo6OnqRURJR0RJRkZKSEpIRD9C
-RERERUJBP0A/PT48Pj8+PTs9QDs9Ozs9PD0/QEE7Ozk7OTs8Ojk6PDc4NjY6Ojo8
-Nzc4NTU2Njk3Nzc4NzQ0NTQ3Njk7ODg1Mzc5NjY0NzY3NTU0ODg0NTo2Nzk6OTc0
-MzEzNzQ1NjMzNTY0NDY3NzU5ODY0NDU2MzU0NzY2NDU0NDQ2NzU0Nzc2OTs4NTc1
-NDc2NjczMzY2NzY0NzQ2NTUzNDUzNjYzNDQ1Nzc1NTc3ODc0NzQ1NDU3NDU4Ojk1
-NTMzNzUzMjU0NTY4NzQ0NzY3NjQ2Nzk3NTU1OTUzNTg3NjI0MzU2NTY4NDQ4NzQ0
-NDQ0MzI0MzM2Nzk4NjU2NDg4Njg3NTU4Njg1NjQzNDY3Njc3OTk4NTQ2NDU1NjQ3
-Nzg3NDI0NjU2NzQ1OTczNTQ1NjY1NTU4Nzc9OzY3ODs8OTU4Ojo4ODk4NTg3NjQy
-MjU2NTQ0NjU3NjY1Nzc3NjU0NTQ0MjU0NTU4NjU1MzQ0NTYzMzU1Njo3ODk3OTs5
-ODo3Njc2Nzg2NzQ1NzgzNzo6OTY1NDg3NjU0NTU4NTM1MzM1OTU3NTY1Ojg2NDY3
-NTc7NjM0MjA0MzU1NjM3NDY4NTY2Nzc6NzY2NjU3NjQyNDI1NTU7O0BDQkI8Ozk2
-NDIyNDEzNDYzMjQzMjM2NzY1NTQyMjA4OTc4Ojg3NzY1ODc4ODc3ODk5ODk9PkZS
-WlpYWmx1cWpYT0pGRUVER0dJSktMTUxLSUZEQDs7NTI0NDExNzg2Njg6PkBBPkRE
-R0hGRkRHQ0FCQUFKV2Jsdn6DiYV/cmJfYGFpdH2DhIiKgnhva25xc3V1eHyEiY+P
-kY6MiIN+c2ZYS0FGUVpjanJ5fYGBhIWIhoqNjo2KhX56dXRzdnh6fn+Dg4aFhIKD
-gYCAgXp6eXRwZVxPRj04NTY7PD5ERkU9PkJDQjw4ODQ5Ozo3NDU5R2Z3eG9lZ2lu
-aWFaX2l0g4d+aFFFQUJHRkZJR0tNSkpIR0dMSUhKS0xQVVhaXmNiZGNlY2BhZmZm
-ZmFgXV9fXmJpaWppZmFaVVRaYGVxcHh6fYGEhIOCfHp7dnJwdnl2bmRgYGFlZmZl
-ZGFbVVNTVU5KSk5XYmlramt1gYeEeWlzhZKQgnR2ipCNe25xfIB6c3F8io6HdWdt
-f4iDdGNidH94alxfZmtpaGFbU1JaZWhgTEdPXWVeS0JHWWBOR01QRUBFSEQ9Pjw9
-ODY3PUVUVUtBPjtFSk9RTkpKUFthW05BP0VKUkxFQEJQXl9ZUFJcaWJVUV1kb3Zu
-X1xia2xhVl13fm1QQ0E+PkA9PD89PDw/QD5DQUA9PkBBQkA/QkA6PT07PD9BQkA9
-Ozo7PUBCPDg5QDs+PTw9Pjs9Pj5CQENDQUA+QEBBQD48QEFEQkVEQUJDRkNBQEFC
-QT5DQ0FBREVEP0BAQUJDQUJEQkRAQURDRkVDQURCQkRDQT49P0I/QENEQkFCRUFF
-REZEQ0VERkhGREJCRURHRURFQUBBP0BESEJBQ0hAPkE/Pj5CQkRERUNER3WuxM/Y
-3eHk5+jo6upKR0dIS0pFQ0dISklLQj1DREdFPj5EQkI/Qj5APT0+PUBBQT89Ozw7
-QT47QD09Ozk6Ozk6ODo+Ozg2Njs7Ojc2OTc5OTY3NTY6OTY4NzIyODY1NzY4NDU2
-Njo2NTU0MzY2NjU0ODU3NDY5NjQ3NDM0OTY0MzEzNTM0NTUzMzU1NTQ1NDIzODY1
-NDI3NTQ0NjY5QDg1MzY2NTY3Nzk3NTU4NDc2NzU0MzQ0NDM1NDYzNjU1NTM2ODc1
-ODk6ODg1ODc5NDo2OTY5ODU3Mjg9ODMyNDU1NDQ0NTM0NTc2NTU4Nzc4ODg3NjU2
-Mjk2NTc5OTY2NjQ1NDU3NjY4NzM3NjY1NTY2MzUzNDc2NjU4Njc1NzY3NzQ0NTk5
-Ojk3MzMzMjc7OTk8ODk3NTc3NDMzMzU2NTUzMjQ2NjU2OTU4NDg2MzQ0NTM0NzY2
-NjY4ODg5OTY3Nzk3OTc2NDQ0NDQ1MjY0NDM0NjY1Njg0NTQ1NzY0Nzc2NDI2NTQ2
-NDQ0NTU2NDQ0Njg1NTc3NzY6Nzg6Ojw4NTc1NDY2MjQ1Nzg1NjY2Ojk2ODg3OTs3
-NTU0Nzc3NTU4OTQ1NTY1Nzg2NDI2NTQ1Njk2MzM2MzM0Njg4NzU3Njc2Njc1NjU1
-NTU0MzI1NDU1NjczMzc7QDw/P0E6NTQzMjIxMjQyNjU1MzM1MjM0NTYzNTM0NjM1
-NzU2OTk0MjEzNjg6OTs8Nzo3ODtAQ01bXl1YYnV4bFtNSEVEREVHRktLSUxKSkdF
-RUM8OTY0NDQyMzc2NTY1Nzg4O0FCREVFRUVHTElDQUJBQ0lbZm94eX6CfHBlYV5h
-Z251eoCFiIZ8bWltb3Fzdnt8fIOKio2PjImEgXdsXVFCQUVQWWJqc3p/gYKDhYaI
-i5CQj4mDfHZwcnd7f36AhYaJh4WFhYWCgIB+gHx5d3FqY15USkE6OTY4QERDPzw9
-Q0E+PDg0Njg4ODk2NzxFZHZ8cGReX2dscnFvcXFyd3ZoVEVBQERERUZERkdFRUZE
-RktLR0hLTk5TV1ldYGJgY2JjZGdmZWViYF9fYGBkaGhrbmpmY1xdXV1haXF6eHt8
-fn+EhoKEgoF/f3hxamZlY2RiZmtqbGtnYFhUUlBKR0NEQkVJTlFRVWN0fHhuZW1/
-i46CdHWAgn1zdYCHhHZnaXuMkYh0Ymh8iYh5ZGd4gHlgUFlpb2dZU1RcZGRhXFJM
-TllbWlBJRk1aWVBJTlVLQkVJQD1EQjo7Ozs7QUxTS0E9Oj5JVFxaTktMUltbUUNB
-QUhTVE5EQ05eZV9VTlBbYlZMUV5fZWlkVVFfbHNzanF/dVlDPkA+Pz8+QD5BQD5B
-QkFAQ0I+Pj0+Qj05Ojg7PD9BPDw8PTw7Ojo8Pj9AP0NAPzs9Pz88PTw9PkE+QkE8
-PkE9P0E9Oz0+RENCQz89P0JAP0BCQkJCSE5NSkdFR0VCQ0JDPkBAQUBDREZCQUJC
-PkFFQ0NCQkREREZCRUFCR0dDQ0JDQkJDREVFRUVDRktAQkRERkZFQUVFRUE/QUA/
-PUFCQEBAQEE9QUVEQUNHQ0RNbKXEz9fd4eTm6Ojp6kxMRkVFRUNEP0JEREZFRERD
-RUdCPTs+QUA+Qj9APTw8OTk6Ojg4Nzw/QkFBQj48Njo7OT0+Pjg3NzQ2NjY2NzY2
-Nzk2NTY4NDYzMzU1Nzc1MzY2Njg0MzI0NjY2Njc1NDQyNjQ2NTY4Nzs5Nzg0NTQ1
-Mzg2NzU0NTQzMzIzNDU2MzQ2NDQ0NTU1NDMyMjIyMTU1Nzg5NjY2Mzc2NTg2NzY8
-Nzc2NDQzNzU4Njg3NDc2NDU2OTg2OT01NjY3NjY5NTM4NDY2NDc4Nj43NTM2NDQ1
-NTQ0NDMzNTYzMjMyMjM5ODU2NjYzMzM0NTg1NDIvMjM4NzU2Mzc3ODgzMTQ1NjM1
-Njc4NDk2NTU3Njg3NjU0NTc3NzU4Nzc6OTU0NDU0NzU3OTU1NTQ1NzUyMDQ0NTU0
-NTU1NjQzNDM4NzY3NzU+NDU1OTY7OTk4Njk7Ojg2NDY2Nzg3ODc2NTU0NTY2NzY0
-NTU2NDU2NjQ0MjY1ODg3NTMzMzIyMTUzNTg1NDY3ODo6Ojg1NTUzNDU3OTk7PDg2
-NjgzNDg5NzY3NTU0NzU1Nzc3ODY3NjUzNTQ0NDo0NTY1NTc1NTU3NjMyNTU6NjYz
-NjQ0NDYzMjY6NDQ1NjY0ODc2Nzk3NDU2NjQ4NzMyNTI1Ojg3Nzk5PD1BQDw7NTc2
-MzM2MzM1NjMzNDU2NTUyNTY0NzY4NDU5ODg4Nzc1NTk0NTY4Ojw3OTs5PT5BS1pf
-XFVabXxzZVJIRUZHR0hLS0xMSkZFQUJEPzw5NzU2MTIwMjQ0NDQ1NjY6QUFFQkdH
-SkZISUdJREFETFhgbHR5e3duZl9iaGlvdnuBh4qHfXFqa2tucnV4fH6EiIyLjomI
-iYB5cmdZS0E7QU1YZGttd3p9hIaGhouQj5CKioF8c3BzeX6CgoODg4SHiYmIg4KC
-goSCf3p3dXFuYl1RRj86NzY3Oz08Ojw/PTs3Nj05Oz07Ojk1M0BYbndwXlRWYXB6
-gYF5cWJfZ2hcSkNEQT8/REVAPTs9QkhOTUtJS09RVFRVWVldYWJfXmJlam1sa2Re
-WFhfYmNnaGppaWlmXV1gZm1weHZ7foCFh4eEiIeIhISDgnduYV1fZGZscXFwcGti
-WVVRTUlHRURAPkNDQ0RHT1hdX11fbXqBe3Z1foF6b257i5SLemVpeYyQh3hrcX6A
-fXNrcnx8c11TWWRtZlJKUWNubFxOSVBdYVpRSkpPVFZRSUZQVEtCREdAPUNDOjY4
-Nz0+R0xJQzs7P0tcYmNaSkVFTlZSRzw+SFheWk5GSVRhYVRHQkpXVUtJUltZWl5i
-WVVhc398cXV3W0U/PkJDQD89P0BAQ0E8PD09PkA/Pjw5O0I8QTw8QT5APz4/QUBC
-Qjw7QUJFREFAPT8+Oz4+QUI+PT0+Ozw+QUBBQkE+QT8+PT9BQEI9QUJFRUVBQVFN
-REFJSUhJR0JDRj4/REFDQj1CQkNCQUJDREM/QURJUE1HQj9BQ0NERERCQkJDREVC
-QkJCQUBMSkVCQkNEREVEQ0dFQ0NAPkJGQ0NCQkFCREJBQ0BDQUVFSFVkocHP1tzg
-4+bn6OnqRUNCSUlHQkNEQkNFREdEQj9CQT09PT1BQUJAQT89PDo8PTo7PT06OTk8
-PT9APTk8OTs9Ozw6NjY4OTo4ODc0NjY5OTY4ODc3NTMzNjUzNjU2NDQ5Nzc3NzY0
-MzQzMTY1NTU4NTU4OTg3ODo5NTQ0NDI2Njc2NTQzMzMzNDQzNTg3NTc3NjIyMzU0
-NzIxMDIyNTY4NTQ0NTY1OTU5ODY2O0M0MzU2NDMyMTEzNTQ2Mzc1NDY3OTM0MzAy
-NTM2Njc1NTY2MjU3ODY3MzU4NzY4NDU1NDI1NTU0MzUzMzM0NDQ0NzczMzQ2NzY0
-NDMzMzI4MzY6ODY1NDY1NTQ4Njc2NDU0OTg4NzM2NjY2NjY4NTY1NDQ2Njg3NzU0
-ODg3NTg2Nzc2NjU1OTYzNDMzMzk1NTU1Nzk3NjQ1NjY5NTYzNz41MTU5OjU0NDY4
-Nzc4OTg2ODg2OTc3NzUyNTUzNTY2NDU1NDY0NTc0NDUzOjk3NzY1OjUyNjY2NDQ1
-NzY1NzU1NDM0Nzc5Mzc2Njg6NjY3ODU0NTU0Mzg2NzQ1NTUzMzI0NTU1NjY3NjY1
-MjU2Njk3NjU4OTc3Nzg0NDU1NTY3NjU4ODg1MzM4OTYzMzU2MzU2NDc1NTc3MzQ0
-MzQ1Nzc0NDY2OTg2ODc7PD08Pjc3NzgzMjEzMjU1Nzc2MzUyNDQwMDEvMDQzMzY3
-ODs6Ozk4NzIzMzY5PTo+OTs7PDxFU1xbVFNjdHJlWU1ISUlJSktKTEtHRUZDQkRB
-PTo5ODQzMzMyMDEzMjM1Nz08QEFIRkhFR0lGR0VCQUBIUVxmbnd1c2dbWmRsbHN4
-fYaIhYB0a2xoaW1zdXV4gYaJi42Mh4N/fHZuX1RIQD5FUl5ia3B2fH+Ch4qOjoyN
-jIiCfXRvcHiCgH+ChIOGiYmJiIeGg4SFg4OAfXl3cm5oYVlPQz86OTk7Ojk5PD06
-ODc3OTo8PDk2OTw9PEpbZ2RaUk5bbX6Nj4R0YlVga2RNREBFREY+QT1AQUBGTU5Q
-UlFSVFZWVFhYWFlZXGBmZ2twc3JwamBWVFlhZGVmZGhmZGBcX2FnbHJ0eX18gYOG
-gYWKi4eFiImFfnRsY2FnaG50dnVya2RaWVNQT0lFQUVFQUJBQz9ERkhLTU9gbHJu
-bHaFhnxoZnmMk5CAcG97hoZ9eHiBgn1waG50dW1hXVpdYV9XTkdRYmtpVkdHU2Ro
-WkhDTVhfVkhHTk9MRUVJSkA/QUA5OTk7PT4+QkZGRD9CTVdgYVpKQUNHTVNPSEdN
-WGVoXE9JTVhYUkI9Q09XU0xTYmdeXmRvbGhveX95amlcSUI9QD8+QDw7PD47Oz08
-PD09PT1CPzw8QEJAPEA+PT0+QEA+PkBAQD4+QEJEQ0RDQj87OjtBQj9DQT5APTw+
-QkJCP0FBPz5CQUNFP0RAPj88PEBDQjw8PT47PD1BPz4/QUNCPz9AQURARUNDQUFE
-RUVCREtHRkNDQkJBPT4/Q0dGTEZGQ0NCQUE/QUlGRENFQ0JDQkJEREJAP0JBQUNC
-PT5GQENFQkVCQUA/QT9BRmeivMzX29/i5efo6ulFR0ZGRURDPUBBRUdFRERBQkNA
-RkU9PD48ODg8Ozw/PDo8PUA7Pjw7OTg6Ozk8Ojs/Ojk3OTk7PT8/Ozs3MzQ1Njs6
-Ojg1ODk7NzU0NzcyNDEyNDY3NTU4ODUzNDQ1NDIxNDQ1Njc2Nz41Ojg3NjY1Nzg1
-NjY4NjYzNDQ3NjQ1MzU1ODg3NTU0NjcyOjg2ODk6Nzg4NjYzNDQ3NzUzNTMzODc4
-NTQ1NDQ2NjUzMjI0NTU0NTU3MzEzMzIzMzQ1Njc4NzQzNDM2NDU3Njo2MzQ2Njoz
-NDQ1MTM3NjY2NDE1OTc0MjE1NzY2NzY1NDU2NTU1NDc2NjUxMzU1NjY1Nzk2NTU0
-NDQ0NjQ2Nzc4ODY6NTQ3MzM3Nzw1NDU2NzM0NzY3ODc7NDU1NzQzNDgyNDY1MzY2
-ODM1NjY0NjU4Njc5Pjc1ODc1MzY2Nzc1ODY2ODs3NTg1NjU1MjY2NTM1ODY1NTY4
-NzY6ODYyNDg1OTUzNjg4NTY0NTMzNjY2ODc4NjY1MjU3Nzc5Njg1NjU1Nzc7NTk3
-NTQ3Nzc2NTY3NDczNDQ0NzQ0MTU2NTo0NDI1Nzo0NDY1NDU0NTc3ODk5Ozg3MzU2
-MzY1NTY1ODU0NjQ3NjQ1NTc2NDg1NjIzNDU1NDg1MTQ1Nzc7PDs8PT47OzYyMjU0
-MzU2NTU1MzU0MjY5MzY2NDQ0NTc2ODk3OTo3NTU1NTU1Njc2Nzg2NDY8PT1HVl1W
-UVxrdW5cTU5LSEtKT01PTEpKSENBQkI7OjU0MzMzNDAyMTI0NzY6OzxCQkVGRERE
-R0ZCREE9P0JKUV1qcW1pYFhYYGhrc3d8gIOBdmhjYmNmbnR0dnx9gouKiYmHgH12
-bWNYS0ZAQUhTXWVrcXZ7fIKHjpCOj42KhoF5dXJ0fICGhoWIiIeGiImFgYWHhoF+
-f39+fnh0c25mXlhMQDw7PUE7PDs7PDk4NjY0ODo7Njk4Oj1BR1hhX1ZPUVZneYWH
-gnZmXGBoYlJGREBBREtHRkJBQkRMT01PVFhcWFdWWFhZV1lgZWZqbnZ2d3VwZ1xU
-VVtgZGRgYmRhYVtfZGpvcnZ8e3yChIWFiYmJi4yHiImEe3JnY2Rsb3Z2dnNoYl9g
-ZGNeVU5ISEFHRz9AQUE+Q0JAQktUW15kdYWMfm9nc4GIh311fYGFgHJvfYeLgHJm
-cn5zYllcZWJZUk9QUVhcYFxNQUhYZ2pZR0RMW2FTR0hVVUlFSE1EQENARTo5OTg5
-PDw9R05UR0VJTlVYVU1AO0BJVVxYUE5VYmhhT0M/SFBUUEdFT1xgWFprcmloZWx2
-dW5la3VrXlhIP0A+Pjw8PDs9Pjs8PDs/Pz8/PTo+Pz9ARkBAPz1AOjw+Ojs+P0BD
-REJDP0E/Pz9BQUFAPjw/QT5APz09Pj5AQD48PUA/P0I/PkFFQENBQUE9PT4+RkFC
-PkM/PT0+QUI/QUBAQ0M8PT1AQUJDQkJDP0NBQ0VDQ0NHQ0JEQz9CQkZCQkNDRENC
-QENEQ0JFQ0NCQkNDRUZDQkFCPkRCQEA+PkA/QUFBQkFARUNCRUJJa6K7zNTb4OPk
-5+fo6UVFR0hKRkJBREVER0ZEREdBQEVCQjw8O0E9Ozw+Oj06Oz4/PTo6PTw/Pzk4
-Njo9OkA9Ozw9Oj07PDo5Njc4NTU1PD8+Ojo4Njg2NzY1Njg4NTU0NDQ1NTY2Njc1
-MjY0NjU0Ojg1Nzc2OTo7NTczMjM2OzU3NTQyNTY2NzU3OTU0NTczNjc4NzY0OTU1
-ODk2NjY2NjY1NzQzMjM2NTU2NTU3Njc3Nzc1Nzk3NzIyMjUzNTg3ODI0MzY0NDM2
-NTQ1ODY2NDMzNTIyNjc1NDQ2MzIzNDMzMjUzMzQ2OTU0NjY4NTQ4NDQ1Nzg2NTY3
-NzQ3OUFGNzc2MjY0MTAyNDQ1NTY0NDM0NDQ3NDQ2Mzc3NDU1MzQ4NzU2OjU2ODYz
-MzU4Njc2NTczNDMzMzY2Nzs1NTU2Nzg2NTc4NTc1MjI1NTg3MzQxMTY0Mzc0MjQz
-NDY2OTc5NjMwMTQ0NDM0NDQ1NTQ1Njg4Ojo7Njc4Njc2Nzg4ODk3NDU0MzU2OTo4
-NjM2Nzg4NzY1ODg3Njc3NTU1OTg4NDQzNDUzNTY2NTc2NTQ1MjIyMzQ1MjQ0MzQ2
-OTU3NTQ2NzQ0NDY1NjU3OTY8OTY0NTQ2MzY1NDY1NTg2NTc3Njc0NDQzNjc1ODg6
-NTU2NDQ2MzY4Njg6Ojs8Pjk2NTM0NDQ2NDMxNDAwMTY0ODQ1NTQzNjg4Njk4NDQ4
-OTQ0NjUzNTI2Nzg4NTY2NDo9QkdSXFpVW2dzb15OSklJSUlLUVBOS0pKSERCPTs8
-ODYyNDUyNTUzNDY3Ojs4PEFCQkRGSUlERUZCPjw4OkNQWmBmZV9WVFVdZ3Bxd32A
-fXZsX1lcXmRmb3F1eX6BhYmJhoaFfnNqX1FEQj1ASE9bZW1xc3uBgoWKjY2Li4eG
-gXx3eHt+goWEgIKGgoWHhoOCh4SCf3+Af4B8fHlza2ViX1NIQTk5OTxAQkVBPDc5
-OTs9Oz08Njg/P0NNWGFkXldYXGNsdHh3dG1mamtiT0pGRURGSU1KSEZCRktOU1hc
-X19dW1pYWFhXW15kaW91d3l7eXZxbWRdXmJmZGZoZWNhZmRrcnh7fH19foOGh4eG
-hoeIioiIiYZ/e21lYmpscnNva2FhZGtxcm5mXFhSTkxKQ0E/QkNDQEFFQkdLUFpu
-gIR/c3J4gH52cHmGjouAbGh6jI6FcGZzfXBdWGZqY1NMUVhaWVRPSkpJT1leXlVO
-R0xVWU5ESFRWR0FJTkRAREI8ODg6PDo8PEJMVFpUSUJCRVFUUUdAQ0pbZGRZTk1X
-YVxVREBDTlhWTUtTZWtlW2RxdmhqaGlyc2RYYm1gWExBPD07PkZAOzw5Ozw9QD9B
-SD0/QkJBQD9BQk49Pzw9PDs9QUI6Pz8/QEI+Pj5BP0A9P0BCPj09Pz8/Pz09PkA+
-PEBCQkFDRUNCRENEP0JAPEBAPUJDQ0RAPT08PT9BQkBCQEREQUI/PkNCQEJBPkBA
-PUBBQ0VCRERGRERIRT9BQkFEQUFDQERAQ0VGRERFPkNBQEJBQ0JFQ0A/Q0RCQUFD
-QT9AQUZFQ0NGRkdIRkt7qbvI09ve4uTn6OnpSEdFSkRHRkZCQEJCQEJESENAPjw9
-PkFEPUE8QDw8Ozw8PD8/QUJEPzw+Oz08Oj07Ozs9PT5AOzo5OTY5ODc6OTk2NTU0
-NDU2ODg2NTg3Njg3OTk0MzI1NjU1NDQ2NTU3Nzc2PDo3NTM2NDQ2NDg2NjQ1NjM2
-NTg2MzY1MzQ1NTU1NTM0NjY1MzU3Nzg3NTQzNTo3NTY1NTY0NDMzMzY1NzQ1Njk1
-NTc1NTQ2NTUwMjU5Njg3NzM1NTY1NzM1NDY1Ojg1NDM0NzUyQVRGNDY5NzUzMzU0
-Njc0NjM0ODY5OTc1MjAwMTM0NzY3NTg5NjZAPjYzNDQxNDUzMzQ0Mi8zNTIzNTY0
-NjY1NTU1MjQ0NDMwMDQ2MzIxNjU1MzU3NTg8NTQ2NDQ1NTY7Njc6NjYxNDY4NzY1
-ODc0NjU0Nzc2Nzg3ODw3NzkyMzU0NDQ2OTY4OTY1NjUxMDM0MzQ1OTU3NTQ1NTY1
-Ojc0NzY5Njg2NzQ5Nzc4NDY1Njc5Ojg2ODg1NTQyNTU1NTU5NTg3NDQ0NTo0MzI0
-NTQ1Nzo2NTU3NzMyNjQxMzQ4NzY1NTY0NjU1OTg3Njg2NTU1Njk5ODY3Nzc1PDQ4
-NjI0NTY4Nzg1NDM0NjY3ODk2NDU1NDY4NzU3NTc3NDQ2NTQ2ODY2NjU2NjU0MzM2
-NDIyMjIxMTIzNTc1MzMxNjQ1MjU3Nzg8Pjc0MzQ0ODY7PDg4ODo7OTw8RFJeX1ZS
-YG5uZFNMS0pGR0lMT0xJR0VFQkA/QUA8ODU0NDYzMzU1Njc2Nzc8PUBERUZGSEZH
-R0hCOjs7Q01VXWFfW05OUl5nbXJ2eXpzal1VVFlbYWVqbXF5gISChYaAfn54dGhe
-T0U+QUFFT1lkaHJ5fH+AiIuLi4qJhoeDe3l6goSFhIeLhIKAg4KChIaEgYKGgoCC
-fnt6d3NuamReW1NGQT0/PkVIR0E5Njc3PDo4Nzs5OTpDSVVhZGhucG5qYmFgZ25y
-dXR0cV5LQkNJTkhKTktJRkdJT1RaXWFlZmZkYFtZWVhaX2Ztc3d4eXp7e3p4dnJt
-bW91c3NwbW5xdnl6fX5/foGAg4SGjYmHiYuOjYqIioeBe3FqaWpsbGhlYmRudX5+
-eHZvZl9YTk1KRUNCQkE+QEBDRUZIUGRweHZ1fYWIgHJlboGTkIFuanqIioBwaXF4
-cl9ca2ZbTU1WYmJWSkZHT1laVFFKSExPU1BJR0ZLVFFHP0tPQz5HQDo7OTo6ODc7
-PUdTWlZKPj8/RE9XU0xJTlpkYllNSEtRX1ZMRUJKWF5ZU1JhcHFhWGRuY1xjYV1j
-aF9UYGVbSUBAQkA9QDs7OTg7PD0+PUBLREFAPjs/P0A/QEJAPzs7Pj8/PUBBP0E+
-Ojw+QT48Pz5CQz5BPjs6PUE9QT0+QT49PDxBQUJCQ0ZFSEU9PT89QEBBQD9CP0FC
-REJAPjtAPj5BRENFQz5BQEA/QkE+QEVDP0FEQ0NDQ0VDQ0JFRUJESENBQUVCQUND
-REdGR0ZGREJCQ0NBQUFDQ0NHRUNBPUJFQkREQ0NFQ0NEREVERG6pvsrU2t7j5Obo
-6epFSUdISklKQ0FEQ0RCQkZCQEFBPT5AREJAPz08Pzs8QT08QEFAPj89PDw4ODo4
-Ojs5OTk3ODw5Ozw6OTk6Ojk4NjIzMzQ0NTM0Ojs3MzUzNjc2NjU8PDc1NTEzNTMz
-NDY2NjY2Nzg2NjY2NTk2Njc3NjU4NDU2NTY4NjY0NDIzNDQ0NDY1NDY2Njc3NTQ0
-NTU0OTY1NzU1NDU2NjczMTI3NTU6NjU2NDQ4Nzc3NTIzNDY1NTc1ODg2NTU1NTYz
-NjYzNDc1NTVANzVQZ1k1NDU2NjQ0MjY3Nzc2NTU1NTU2ODc0ODU1NTQyMzQ1NTc3
-NTU2MzIzNTg1NDQxMDM0MTU1NzQzNzQ0NTMyMTAyNTMzODYzMzc2NTc0Njk1NTg4
-NDU1NTY0NTYyNTY1MjIzNTUyMzM0OTs5NTI1NTQ3OTc4ODg3Nzg6Nzo0Mzc1NjY1
-ODg1Njg4Nzg3Njk2Njc2Njo5OTQ1NjU1MzQ3Njc2NzY2NzY2NjY3Nzg0NTc2NTo4
-ODg6ODg3NzQzMzY1MzY3NjU0OTY0NDU0MzQ1NjU0MzM1ODc3NjQ0NDQ0NTQ0NTc1
-NjU2Njg1ODc0NDY2NzY3OTY5NzQzNjEzNTU3NjY1NTY0NDIwNDYzMzY2NzMzNjYz
-NDU2MjM0MzQ1ODc5Ojk4ODczMjM0NjU0ODU0NTMwMzY4Njg2NDc1NDM1MzU3Njc7
-NjU0MzY1NDY7QTs5ODc7PzxCUV1fWlBaanBnV0xLSkhJSkxNTUhDREREQkE+PDo7
-NTU3NTQ0NzQ1Njg4OTw9PkBCQkNERUZHREE8NzxBSFBVWFtRT01VXGRqcHV1cnFj
-V09RV1thZmtrcHd9hIaDhYWBenNvY1RLQj0+QUtPV2BpbnV5fYKEhYiJi4yIhIB9
-eXiBh4eGiImKiYWHiISCgoWFhYeEgoKDf3l4dXNxamVfVko/PT0+QUxLRT46OTs9
-PDg4Njc5QEVNWGBocXZ7fHZrXVJVXWt4hIR5ZU9HRUhJT1JNSkdFSk1SWFlgYWhq
-aGRfW1dXX2Vna3F2d3p6enp7fXt8fH17fYB/f399fHx+fn+Ag4GAgIGBhYWHi4yK
-jY6RioaIhoWGfnRtbGlnYl5cXmZ0e32BgoB7c2tiWVFJRUJCQEJBQUBDREZJUVtg
-Ym2DkI+FdGVrfYiLgnZ2f4R+cG1wdHNoX2BkZFhRU2FpXk9EREpaZV1PRkRJU1ZN
-RUFHTktKRkNER0pEQ0RDOzg6OThBQkJAQUpWVEs/OztCTVpgWk9JT1VcWU1EQEdW
-XlVLR01cZWNbVFdmaF5NUWBdVFphXVhdZmBebWhSRT47PT89PT07PEA+PTs+QENC
-QEFBPz9BPz0/Ozs7PUA8PkBAQEFCQEBAPUBAREFBQEFFRD9AQUFAQUJCQT89QUM/
-QEBBQ0I/QURDREM+Pz1AQkNCQUNDRURFRUNAPT8/QEE/QkFBQD0+P0FBQ0JFRz8/
-PkNBQUFBRUJEQ0JBQ0FAQUREQ0JDP0BCQ0NDRkhDQkJCREZMRERFRERDQkNCPT5B
-QEBCQkNCQUZHREFIbKe+ytPa3uLk5ujo6UVFRkdGQkhHSEJHRkdBQUE+QkJFPz9C
-PTs6PUA9QUBAPD09PkA6Ozo8Ojw6Ojo6Ojs3ODg4Ozs+OTo3NTU2NjY2MzU3OTY3
-ODU3ODg3Ozk2Nzc0NjY7OTYyMjY2MzY5NjU1NTU1NjY4NDU0NDY2Njc4NjU0MzU2
-NzU1NTY1NTc0MzY0NTYzOTc0NTwzNjUzNDU3NDQ3NzQ2NTQ2NzczNTQzMzU4NzM0
-Nzc0MzQ2OTc3NjMyNDc2Nzg4MzQ1MzEyMTQzNTg0MjMzNVdQNTQ0MzQzNTUzMTY2
-Njg6NjY0MjAyMzQ2NzU1NDYyNTYzNjQzMzIwNTc4NzUzNDI1NDc2MzQ0NDU0NDg+
-MjM0Mzc3Njc6OjYzNDY0ODs3NzY3NzY0NDc1NDc1NDY1NDM2MzM1NzY0NzIzNTg4
-ODU2ODg2NjQ0NTU3NjY4NjU0MTEzNzU1Nzg1Njo6OTY1ODw6OTk4Nzc2NzU1NTM1
-ODc2NDQ1Njc1NDEwMTI3NzQzMzQzNzo4OTg3NTQ0NDM0Mzc3NDQ4Njo6NjMzNTU1
-NDU1NDQ2MzQ2MTE2NTI0MDQ1MTQ1Njc1NTM3NDU4NjY0MzUzNTQ2NDM2NTQ2NTY4
-NTY3OTUyNTU0NDM2MzU0NzY0NTM0MzI1NDQ1MzQ4Nzc3Ozo3NjY1MjIxMzQyNDM0
-NjQ1NDM2NDg5Njc2MzQ0MjUzMzc2OTk+OTU3NzY0Oj48PD47PD5FQkNMV11YUFZn
-cmtaTkdKSkpJTE9JSURAQD5BQUA7NzY1NTU1NTY2MzY1OTc3ODxBQEBBREVEREdF
-REE7O0BGT1FQTktGRU5ZYGhucXFvZltRT1FZYmZpa290e4OHi4eGgnx5bmZcUEQ/
-PT9DSk9ZXWdwcnZ9goOIiYuJiYmEgnt6fX+GiYiJhoWEhYeFhoeFh4iIhIGDgoKA
-fXp1c29qZF9bUkg/PDxDS0xFPjg4O0A8PDc4ODxARUlQWmNtd4KDfXBeUUxTZHaE
-iH1nTkM/SE1NTEtJRT5BTFFXWF5kZW1xaWNfWVtjaGtxdHh4en1+f4F+foGCgIKD
-hYOBgYSFhIGChIaHiYWFhYCHiIeLiYmLi4uOkYyMiIqFgHRsa2pmYGBfY2lye4KH
-iYV9eHFpX1ZPSEVGSkVDRERDQkRKS0tOXXGJi4h6cHV+hIB8fIWIhXZnbHZ8cmNk
-ZmFWVFhbX11VSEVHTldaVUk9Q0tZWkxCQElQSkNBRUhGRUFBRD87Oz07OTtBQj1C
-RUhLS0c9Q0ZQW2RhU0pFSk1ST0Y/RVNhZlxSUFlqaV5RTVRaWEpJVV1WUl5kYWFr
-dnF1alJHQUNBPz1AQ0RCPjo8PT4/Pj5AQD9BPzw7PDo/PEBAQ0FAP0BCQUE/QD1A
-PTxAPz5BQkNCQD9DQkI7PkA/QD5DQkFCQD5AQD5AQkNCRUNDQDtCQD48PT5DQDo8
-Qzs9Oz09PT8+QEFCQEBAPT07Pz88QkE/PUE+Q0pFR0JDQkVBQkRFQUNGR0RCPkND
-RUNHRERDQUVFRkNDRUVFRUJERUZEQkBCPz1AQUJERUhEQ0VopL7L09nd4eTm5+nq
-S0NGSERKREZKQURGRj5APj89P0FAPT0+QT1APkE9Pj46PD08PTw5OTo5PDtBPDk5
-Ojw7PDk5OT46ODk7Oj06NTY5OTo7Nzc4ODU3NTc1Njc3ODc2NzczMzQ0OTc6NzUz
-Njc3MzY3NDU1NTg3NzM2NjY5NTc1NzU2NjQ0NTU1NDQ1NDY1Njc2NTY1NjM3NzUz
-ODc3NDMzNzU1NDU1NTM1NjY0MzM2NzIyNDYyMzU4OTc1NDY1MzQzNDU0NDMyNDMz
-MzM2NzY1NTc2NTMzMzQxNDY1NTY1NDY3ODg1NDMyMDEyMzQ1NTg0NDU2MTM0NTQ2
-NjcxMTI0NTY6NTI2PTY0NjQ3NjczMjM2NjQ2ODg6NjQ2NTMzNDg5ODU2ODQ2Nzg0
-Njg5OTQzMzg5NzczNDU2MzMzNjU3ODYzMjY2NDU0NDQzNDY1NDQ2MzIxMzU1NTc1
-NDg2NzY2NzY1OTg2NzY2Mzc3NzYyNTg0Njc1NjQ0MzIyMzI0NzE0NjQ1MzY4ODk4
-NTc0NjU1NTQ2OTY2Nzc3MzY4OTQ0NTQ0MjM0NjQ1MjY2MTQyMjI1MjEzNDU3Nzg2
-NjQ4NTUzNzY2NDM0NjY1NDE0NTY3Nzk3PTk1NDM2OTU2NTY0NDM0NjY2NDM2NzQ1
-NDEyNDc3Ojs7Ojc1NTMxNjY0MzMzNTUzNjU0MzM1NTQyMTI0NDU4Nzc1Nzk2Nzo5
-ODg5Njc4PT88OTo6Ojo7QEVPWldOUF5oZl1PTEpKSUpJSUREQkBBQ0A/PTg2NDUz
-NDE1MzY4MzY3Nzg2Ozs+QkVDQ0NFRUhHQj1AQERHS0xMR0FER1NfZWdsbmhfVE1L
-VV1lamlucXp+homMioaCeXFlXE9GQD4/QkRITVRbZWxxdnt/hIeLiomJhYJ8fHt+
-gYOHiouKhoSGg4OEg4SEhIaHgoKBf4F9enh0cGxmZF5ZT0U/PT5FRD09Oj88PDw7
-OTs8PEBAQUlPWGNud3h6c2hYUFJhcH+Efm1XSUVFSUxMS0ZGRERGTVJTWWBjanRq
-Yl5bYGVvdHd6e3t+fH5/gX5/gIGFhYODgYOCg4WGhoeGh4qLioiIjoiHhoiMjYqK
-jYyOjoqKiYaGgXp1cmxpaWtsbHN5gIiJiIaDfnVtal1STEZFREREQkJDQ0ZFQ0NL
-WWhzcW51gYmIenF3iJCJcV9ndHtvYmlsYFVYY15WTExLSk1PVU9JQ0BDTVdUSENC
-Sk1FP0BJTkE9PkRBPjs8Ozk3NTpDQkJEQEZMTUZDRk1WYF5WSEE/Rk9QTERHUWJr
-Z1pPVmNnYE1ER1NXT0dRYWVbZG1tbHF6fXptUkVCQEJBOz09QUNAPDw/PTs9PjtA
-QEA/PUBAPj09Pz8/Q0A/QkNDPz9BQkA9PTs8QUBAPkBCQD8+PD49Pz08PkBAPj0/
-QUJCRERBQUJBQUNAPT4+PkA9PUA9Pj48QT5BPz48Pj4+QUE+Oz09QEBBQUZBQ0JB
-QEFBRUJBP0FCRENDRENCQEJFREVBQUNBQEdGRUFDREJCQ0REQ0FDQ0REQ0JBP0BG
-RUFAQUFFSEVDQ2Skv8rT2N7i5Obo6elERUVIRkRFQkNGQkBCQj48PT0/QkA5Ozw7
-PkI9OT5BOjs+Ozw8Ozo5Ojs8Oj06ODs8Oz0+PDg4OTk3Njc4ODc3OTg3NzU6ODk3
-NDc1NTczNTU5ODg0NjUzNjY4OTY3NTMzNjk/OTc3Nzg2NzQ1NTQ1NjU1Ozc1MzM3
-NDY1NzM0NTc0NDQ2NjQ1NTY1ODY2NjU1NDQzNDQ0ODU0MzM0NDQ1NjY0NDYzNTQz
-MzU0MzIzMjM0NDU1Njc3NTczMzc2NjU0NDIzNDQ5OjEwMDAxNTY0MzAwMzY2NTQ1
-MjM2NzY1NDI1NTM2MzQ0NDMzMjI2NzQ1MzIxMzU3NzU0NDQ2MzU3NzM9MzQ1NjQ1
-NDU1Nzc2MzQ0NjcyNTc3ODc2ODU3NTc2NTw3NTc4Mzs5Nzg2NDQ1Mzg5ODg4Qjs3
-NTU2NTU3ODU1NDU1MTI2NzQzMzM0NTQ1NTU2Nzc0NzQzNTU5ODc2NTUyNzY3NDc1
-Njg1MzQ0NTI3ODE6ODc3NjQ2ODY2NTg4Njc3ODg4OTM3OTg2NTc3OTo5NjMzNTY1
-Njg0NTY3NjcyNDc5NzY4OTU2NTg4Njg0NDY1NzY0NDQzNzQ0NDUyNDI2NjU3Njg4
-NzI0ODQ0NjY2NTQ2NjY3Ojc2Nzc1MzQ1Nzc4Nzc2Ozw7ODg4MzAxNTM0NTMzNjY2
-NTU3NjIxNDE1NjY0MzQzNTY4ODk7NjQ2NTIyNjg7PDw7PD05ODk9QUpTU01LUFpc
-V01LSUlISEhHR0VCQ0NAPjw/OTczNDIxMzUzNTY3NjY0NDc0OD1DQ0JCQkRHR0hI
-RT1BQkVKSUc/QUVKUVlhaWxqZltRSkxRXGNmaW50dnyEioqLiH93al9VSEM/Q0NH
-RUZMUlhgaG1yeX5+hoeGh4mHgIB6e4CChoSEg4eFhYaGhYKChIaFhISDhoaEg4R+
-fXh0cGtmYV5US0E8QT88ODk6PkVBPDs9PUBAPD4/P0VNVVthZmtwbmlfW11kbXV2
-bF1PRkRFSElFQ0FDQEFGTlRbW2FmbGtnY2Fmb3N7fHt7fn5+e3+BgIF+gIWHhISE
-hYSEiIeGiYuKiomJjIiKiIeFhouLjo2MiYmLjoyMiYeIhoJ+fHZ2d3Jwb3N6g4WN
-joeFfXZwbGRbUEpFQUNBQkNDQUREQ0RIUVRSVGJygod7amyAi4Z3Yml2em1jamtc
-UltqY1BFSVNXVk9HQ0RDQ0RLTUtGRUNNRT89QElKQTlASkc8OkBAPDs6OkNIQDpA
-RVBUUkhFRkxTVFFKQkJFT1lbUktMXGtuY1ZOUlpYTkVAS1ZUS1FmcWtpdXZ1dHR8
-dGhTQz4/QEFAPD48PD1CPj1CPD0/QEQ9Q0JCQEA+Ojw9QD8+PEA/PD5APj88PTs7
-QD87PT4+PT5CQ0JDQD4/Qj9AQDw9Oz4/PkJCQUNFQT48PT5AQEBAP0JBPj0+PTtB
-QEE9Pz88Pz8+Pz86Pj5APT0/QUM+QEJCQ0RCRkJCQT4+RENCQUVEQUVKR0lDQj5A
-QkBCP0NDQUFFREJDQkJDQEFBPj9EQkNFQUJFQUJAQEFEWaHAzNTa4OLk5+fp6kVG
-RT9DQUhJR0VDREBAQ0E+Q0E/PkJCPDs8PD88P0JAPzo6Pz49OTk5Ojs7ODo4OTw7
-Ozw9OTk5PDk4PDo5NTo7PDw4Nzg5Nzg4OTw3NDU1NzY4Njk7NjU2Nzk3OTg3NDQ1
-Nz06Njc3ODU2NTY6NzY4NzxOPDIzMzQzNjY1NTU4Nzc2NTUzNDY1NDQ0Nzc4NTM1
-NTY3Njc2NzY0NTY1NTg6OTQzMjIzMjM0MzY3NDMxLzIzNj42NDQ3Ojk1Nzk1NzY0
-NzY1NTU0MjM0MTQ1MjExMjU0NTU3MzY2MzE9MjM1NTk0MTMyMjMyNDQ1NDQ1NDM1
-NDY3NDI1NDM1MzUyMzQ5NDg0NTYzNTU2NTMyOTQ0NzU2NzUyNDU1OTU0MzQ1Nzc2
-MzU0NjQ1NTg5PDc4NTk3NzY1NjU6ODc0MzQ1Nzc0NjY2NDU2NTM1NDM0NDU2Nzk4
-NTc5Nzc4NjQ0NTU0NTU0NDc1ODQ1NTY0MzY2Njg2NTk3OTc2Njc1NTQ1NTQ1MjE1
-Nzk2Njc2NjY2NTQ0ODg2Nzo2NDMzNTc3NDQ2ODg5NTY1ODk0NTU3NTU1ODQ2Ozc5
-NjU3NTQ1NDU1MzUzNDU0MzIyNTUzNTc0MjM3NzY3OTc1NDI3NjQ2Mzc5NzM2NDQz
-ODY0OTk3ODk5NjY0ODQyMzMxNTQ1NjQzNDQ0NjgyNzY0NDc0MjM2Nzs6OTk3NzM1
-Nzs1ODk5Ozw9OTU3OUBBR0xQTUhHTFNSUExIRUVHS01MS0dDREE7PDo4NzU0MjEz
-MzY2NjY1NTY1NTY6QUNCQUVHSUtISEdDQzw9QUFCQkA+RU5VXGFpaWlkWEtFSFNe
-YmRpcXh9f4SJi4qHf3JfUklBPj9BQUNFRklQVmBma3B0e4GFh4eHh4aAfXx+hISE
-hoGEiImJioSDh4eFh4aFhX6FiYd+gIB+eXd0bmpjX1dRRkNAPT47Ojg9Pj4+P0JD
-Qz8/Pj06P0tRT01SW2pxdXFrY1lYXWVqaFtRRkNFREE+PkA/PD9FTVhdYmBiZGNn
-a25yeHt5e3x9f4CDgH9/fYCBgYWEgoWHhYKGh4mJh4iJioqKioiEh4aHh4eMi4qN
-joiIjo2KioiHiYWDgoGAfHZwbG53f4iNjIiGgnxxb2deVEpHQ0JBQUNEQ0U/QD1D
-Q0RGSVJkbmxlbH2LhXltb3p4bGNoZ1tWXWVcTEZOXWNXSUNDR0hHR0VDRElMRUQ/
-QEFESEQ+PUBHRUFBQj85Ojo4QERCPT1ET1hYUEQ/QERLS0tCP0VSX2NhU1JWZGhh
-UkZETVZTSENLWmFcXmZ4eWx0eHFybW1oX1JFQUQ/Pz8/Ozo9QERBPjs8PD5BPkA9
-Pj9AQTxBPjw9PT45Oj08Ozo+Pzw8PD9APDw+Pz1BPj9DRkdFQkBAQkBDQUBAPj49
-P0BCQ0NCQEBDQEA/P0FDQEA9QD5AQj89PEBBQj5BQ0JDPDtBQkRCQUI/Q0NERURD
-REREQ0FER0JAQENDQUJEQkNGQkRDRkNGQEFFRERBQUNERkRBQURCQkRCQUZCPz87
-PEBBQkdHP0RVo8PN1tvf4uPm5+roR0hIR0hEQ0VGRkVCPj8/Q0JGQT9IQT1CPzo7
-Pzw6PDo5Ozs/QEA6Oj09Pjk8OzpARz06OjU5ODo4PDs3NTk8Ojk1Nzk6Ozk4Nzg5
-OTc4OjU5NzU2ODY1NDQ1NTk3NTY1Nzg1NTY3NzQyODg2ODg3NjQ2ODc4Njo1NTQz
-NjU0NTs5ODg4NTM0MjM0MjMzMzU2NTRBSDM1MzUyMzQ1NTU0NzY1NTQyNDM1NDM0
-NzQ0MzQyMjI0NjM2NjM2NTc3ODY4NTc2Njc1NDMyNzQ4NzYzNDY2NjUzNDc2Njo1
-NTAxMDEyNDUxMTMzNjY1NDU1Njc2MzM2NzUyNDMzNTg0NjY0MzY3NjM1OTk2NDk2
-NTI0Nzg5NjIxMjM0NTY5NDQ0NTQ0NzczMC8zNjU1ODg2NjY0NTUxLzE0MzM0MzIy
-NDg1NDY1NzQ1NDI1NTU2NTMxMTE0NTUzNDY1Njc3NjQ2NDc4ODYyMjc0NzU1MjI0
-NTU4NTY2MzU3Nzc5Nzk0NTM0NDU0ODQ1NDc4NzY2NjU0NTQzNjc2PDk2NTY6OTcz
-NDc2Nzg3Njg4MzM0MjEwMjQ1NTU2NTY1MjczNzU5NjUzMDQzMzQxMTQzNjY0MjQx
-Njk3Nzk4NjQ1NzQzNzc3NTQ0NDU1NDU1ODc0Nzg7Ojo7ODY0MzM0NDIzNDA1NDU2
-NDU4OjY1NTIzMzM3NjY5PTw7Ojo2NzQyMzc2OTs8PDk4Nzo7PDxBRk1RSUdGSVFT
-TUlIR0ZGSkpISkRCPEI6NjY3NzQ1NjU3MzM2Njk4ODc3NTc8PEJARUhJSUtGR0ZE
-QD09Pz89O0BGTlZdY2VkY1xUSUhJUVlhZ21yeoCFiY6MjIR8a1tLQDs7PkJIRkRG
-Sk1TWWFnbnJ5gIaGhYWCgH98fX5/gYGCfYODhoiHhoaFh4qJh4aGiIeIhYWCg358
-enpza2dgW1VNRkI/Oz06PTxCQ0NFSUxHRUVDQz5CS1dXTUdQX292dHBjUUpNVmZr
-Z1pKQkRBQ0M9PEJCQ0VGT1tmbWdoa3J0d3Z2fH1+fYGBgoF/fYKBg4WEhIaEiYiJ
-h4eHiIqHiIuMjIqLiI2Jh4qKhouLiYiKi4uLj4uNiomHh4aHh4aEf3pxaWlye4aO
-kI6Lh355cmpjWk5GRENEQEJDQ0I/Pjw/QUBAQUNJT1xmdnp8dXh/fHBlZmlmX15i
-XFROT1VeW1FFP0JMT05CPD9FTEtDP0BER0JAQUFAQUE/PT1BPTg3OT09REE9OkJM
-VVhOQzs6P0dQUUpCRk9bZmVXTU9VX19VSUVLWFpTSlNib25jZ3J4amRvZmFoXl9g
-VkVAQD48QEA9Pj1AQ0JCQ0BCQEVCQkBBPTw+P0FAPTw7QD09PDw8Pj9CQ0Q8QDw9
-QD49PT07OTs+Q0A9Q0JBPj9BQEFAQUBBQD49Pj5BQT9FQTw+QURBPkBAQD1AQUFD
-Qjw9QENCPz0+Pz4+PkFCQkJBQ0NDRERFRUNDQkNGRUNBQ0RFQkJEQkRCRkZJQkFD
-QUBAP0BGRUZFRkZDP0FCQ0ZDQkVBP0RBQ0RCRUQ/Q16lws7W3OHj5ejn6epJSUhH
-R0ZBQEE/QEI+PUA/QEJCPkBEPUQ/PTs9PDo7Oz08PD06Oz09PD9DPT8+PDw8OTk5
-PDs6ODo4Oj07Ozc3Nzc7OTY3Ojw5OD03MjQ0NzY1NjQ4Ojg4NDU2NDU1Njk4NzU1
-NzUzNDQzNjc3NTc2NTQ2Njo7NjY0NDQ2NDQyMjM0NDQ0NzY3NjY2NTI1NTg2Mzk6
-NDc3NDEvMTM1NTY3ODg2NTYyNTU1NjY3Ojc2PDk1NDQ1ODU4NTc0MzU2MzMxOzg1
-NzYyMjIzNTM2NTc0NzY2Mzk1NTU4NjU0NjQyMTIyNDc0NTU1NDQ3OzU3Njc4Nzo1
-NjM0NjY1NzY4NTUxNDIzMzc0NzQ2NjIxMzY2NjI2NTUxNDU4NzQ2ODg5NzY2NzUy
-MjQ2NDY3NjEyMS0yMTQ2NDs2ODc4NzE0NTUzNDY3ODU0MzE0Nzk5Njg0NDUzNTk0
-MTc7ODc3NTY2Nzc4NTIzNDUzMDIyNDAxMzQ1NTY5NzY2Nzc3Njc2NzY4NDg1NjM0
-NjY5NzY1NTc0NDQ0NDU2ODg4NDQ0Njg1NDY4Njs2ODU2NDI0MTI1NDU3NjU0NjIz
-MzM1NDIyNTU0NDU3NzY1MjQzNDc0NTU2NTY1MzU3NTM0MzU3ODg2NDYzMzg1NzQ2
-MzM3Ojo5Ojo4ODM2ODMzMy80OzU1MzIzMzEyNDk1MjE0NDQ1Njg8Ojk8Ojc2NDE0
-Nzg6PD47OTo7Ozo6PD9EU1xZUUNIUllXTklKSEZHSEdDRUA/Oz88Ozc5OTI1NTI0
-MDM1NDY1NjU1NTY7PkBCQkZHR0pJSEVDQEE/PzxAQUhOVFxgYWJeVkxGQUdQWGBn
-bHN7foWLjIqIgndoU0FAPz1BREJCRkVIS1FZXmNqbXd+goKDhYWDfX57fX6Ah4iH
-hIiLhISEhIOGiImLiYeHhYSFgIGEhH16d3NsZmFhWlBIRUE8Pj9DRUJCRUlNTEpF
-RkdHQ0JKVltWS01XaHFva11MQUJKVWJlWEg+Pj0+QT9BRUJBQUpSXGlwc3Z1d3h8
-eXl6fHt8fH5+fH99f4J/f4GFh4aHh4iJi4yHh4eIiomKiIiNi4iJiImHiIqLioiI
-hoqMi4aIh4eFh4aFhIN/fXdyaWlwe4ePk5OOiIN9dm5iWE1GQ0JBQT8+QEFBPTxB
-Pj48QENDTVlka2lre4V/bmNubWRiY2ZZUVdfW1RPSUhFR09RRj47P0ZPSEJAR05I
-QDs8P0JBPz47PTw5Nzk3Nzc9RUU/PkZPVlBGPDk/RE5YWU9GSFRfZF1NRUdTXlxQ
-R0pZZmZaVmRydWdcZW9lVV5kWmJdX2diSUE9P0A/PT8+Oz49P0BDQjw6Oz9AP0E9
-PEA+Q0NDPEA/Ozo9Oz1BQkJDQDw9PUFBQD48OTs8OUFAQ0JBPj08P0BDQUZDRkVC
-Q0A+Pz9CPz89PkFCQjo+QUA9Pjw/Oz46PD9EQUM9PT09QUFCPj1BQUFAQEJGQkNA
-PURFRERERURCQENDQUJBRUdFRENDQUJCQD1BRkVHRERCQT08P0BFS0BAQT1BQ0FG
-REJDREJKaarDztfc4ePl5+jp6kdLR0lJR0pFQENCQj86QEBBP0BAOjg7PT08PEA8
-PTw9PT47Ojs7PDo9Ojs8PEA+O0A/PTs7PDk4PDk5OTk5Ozc1Nzk7Ozg1ODk4ODg2
-MzM1Nzg2NjY4ODg2NTU0NjY3Nzc2Nzc0NTY2NzQ3NzQ0NjQ0NDU2NTY1ODY0NDU1
-NDY3NjQ3ODM5NzY1NTQzNDY3ODY0NTY0NjM0NTMyMjM0Njo7ODc2OTg1NTYyNDU0
-NDQ7NjU3NjU1MjY2OTc2NDY5NjU1OjM1NDM2Nzg1NDY3NzU1NjU2NzM1MzI0NDc6
-NTQyMzc3Nzc2NzQ2NDU1Nzg7NzY1MjI3NTM0NDc2NDQ1ODMyNDMyMjQzMjI2Nzg5
-NjYzNDY5Ojc2NjY3Njg3Nzc1NTU0Njc5NzY0MzM1ODc5NzM0Njo6NzgzMzU4ODQ1
-NzkzMjQzNTczNjw6NTU1NjU0NjAzNzQ1Njc3ODQ3NzQyNDU1ODc2NjUxMjU3NzU1
-NDU6Ojc2MzU2NzYxODY2Njg3Nzk4NzY2ODQ0ODU1ODg4ODg2NDg3NjY3NTc3NDQ7
-MzU0Njc4ODc0MzY3OTUyMjQ2Njc0NjQ2NDQzNTUxMzY2NjQ1NjY3MzQ0NTc5NzU5
-NDM1NTY2NzIzNTU1NzU3Nzc2NTU2OTc3NDY3O0A7PDg4NTM0MTA2NTQzMjMzMzMz
-MzQ1MzMzMTQ1NDY2Nzk3OTs3NTQ0NDU4Ojc6Ozs7PUE8ODc8PkJQY2VaQ0VUZGFV
-TUlFQz9DQ0NCQ0E/Ojo4NzQ4NTAyNzMzNDQzNTc2Nzg3Ojo+Pz9CSEtIR0lKRUVC
-QD8/QD4/RlFXWV1fYF1WT0VFSE5YXmdsc3d7gIWIiYZ+cmNOQT9BQkNFQ0FDRUZJ
-UVZeY2pxdn2AgYSFgn18fH6BgoOFiYmGh4aJh4aCh4mKh4qLiouKiISGg4OFf396
-dXBqZWFeV05EQUFCRERDQUNERkhIRkdFSUdBP0RMVllST1BXWl5YUktGQ0FITE9J
-Pzw6OTxAREpLTk1SWmFpcnl6end3eX16eHt6fX16fH5+goSEgYGBgoOEhoaIiIiK
-i4iJi4qIh4iKi4yJi4mJiIqLjI6RioaIiomIh4uLh4eGh4WFg4ODfntzaWlxgoiN
-lJGOhX97cmxnW1FKR0NBP0BBQEA7Ozg6PD1DQkNCSVBTU1twgnxsZW5uYmFqZldT
-YWdbTUdHSUpKRUJCQkJFSERAP0JKTkY9PEVLRUE7PTw7Njg4Nzo7Oj1ESUZCQ0pO
-TEtHQz9GU11fVkpHSVBaWU9DQklaYVVNU19rb2haXmlrXlFRX2JUVWNcXFtfaGNK
-QD4/QD8/PTtAPTo9PDo9Pj09Ojs+Pz4+QEA8PT4+P0E8PDg6Oz0+QEJDRT9AQj85
-Nzs9PkRCPkBBQ0JCQ0JCQkM/P0JBQ0JCQUA8QENCP0BAQEJDPzw7Oz9APkFCOz9C
-PkA/QkFAQT9BP0FAP0NAPz9EQkNAQURFQ0ZCQT9CQkBAQENAQERFQ0BCQ0FBP0FC
-Q0lCQ0JCQkJEREJAPDs8Pz4/Q0NFRUFDQUBFQUZlqsTP19vg4+Xo6OnqR0RESktH
-RUVBQD88PT5BREM/PUE8PDo8PUE+Pj47Ozk/PTs5PEA8PT87Oj07Ojs8PT89PT46
-Nzc4Ozk3ODM2PTQzNDg4NTc2ODk6ODc2NDQ0Njc4OTc5ODg1Nzc0Nzc2Mzk1ODc3
-OTg4MzU2NTQyMzQ0NzU3NTU1NTM0Mzc1NTs5NzUzNDQ1NTY0NDQ3NDU2MzQzNDY0
-NDU2NDY1MTU0NDY5PDc2NjY2NTc0MzU2MzM0NjYzMzg2Nzg2NjMyNzczMjU7ODM0
-NzY1Nzk1MzU4NzY0NTU0NDMxNTU1NDc2Nzg1NTU0MjY1Njg2NTY1NTQ4NzU0MjE0
-NTUyNDY4NzMzMzIxNjQ2ODg3Nzg2ODc0Mzc3Njc0NDQzMTIzNzc1MzU1NzQ3OjY1
-NTU0NTI2NTQ4PDg1OTkzNjc2NjY2NDI1NjU0NTUzNTc4ODY0NjY1OTc4OTc3NTg2
-ODc2OjozNDczMjQ2NTU1NDc2NDM1Nzg1NjY6ODg2ODk7ODY2NTY0NTY0NjY4ODY0
-NDU3NTQ3NzUxMjQ3Nzk3NTQ1OTY3ODY5ODk1NDY5NjU0NTM2NTM2Nzc1Nzc2NDU2
-NDQ2NjY6NTY1MzQ4NDc2NTY1Njc0NzU0NjU0NjY1MzM0MzAzNDY1MzQzNjY3NTU3
-Njo8Ojg5NjQ2NjE0Mjc2Mzg0MDMyMTM1NDcyMjE0Njg3ODo1Nzs2OTg2NTU2NzU0
-NTU4Oj0/Pjo8ODo9Pk1ha2BGPEhdZl1RS0dEQ0RDRUNCPjw9PTc2Njc1NjU0Mjcy
-MzMyNDY3NTQ4NT05Pj5BRERER0hJRUdBP0BAOj1ESlVYW2BcWVJIQ0FHT1dham1y
-en59gYWIhn9zY0xCQENGRkRDQkRFR09XX2Jmb3F2e3+CgH9+fHd4fICCg4aHhIiH
-hoiJiYiFiYiJi4uMi4uJhomHiIiGgnx3c21rZF5aVExGRklNSkZCQUBBPz9AQD9E
-QT09O0FHVVxVTUhISEhPTk5JQz8+Ozo3Nzk+REtSV1phZWhucHN6f316d3h4fHt7
-enp8fX58fHyBgYGBgIKGg4SGiImJh4iIiY6Qi4mKio6NjJCNiYiIiYuKio2Ki4yI
-iYuJh4aJiIaIh4iEgn9+fnxza297gpCTmZWOhn56dXBqXlRKRUNBQEJBQTo4OTw8
-QD88Ojs4QEBFTWZ6fm5la2lhYnFqVVVoZlpLSE5ST0lDQkZISEVEQkBAR0tMRTs9
-R0lDPTk/QDw4NDY3Njk5OkRLSUI7PEZMTExGQ0hRXmJiVEVDRE9XU0dBSFhpaV9U
-XWxwaVpQVmBeUExWZV9WX11laWhtaU5DQ0A+PT8/PkA+PD09PEJCQDw7Ozw9PUFA
-PUI5PkNBREJBPD5DQT8+QD5AQEBDQT5APj4+QD06PD1ARERDRT9CRUVEQTw8QkJB
-Pj8+Pjo9PT89QDxFREE+PUNEREFCQ0RBQUE9Oj09QkVDQUREREJDQkREQ0JAQT1C
-REI+Oz8+PkE/QUNEQUFCQ0JARUNDQ0JCRkRCQ0BERUNFQ0FAQEFAPj9AQkJDQkVC
-QURERmywxc/X3N/j5ujo6utFQ0JERUdGRkJDQkBBPzs8PT88PT07PEFAQz49Pjo5
-Ojo4PDs4Pj07Ozw5Ojk9PTk7Ozs4PTo6Oj1BPDY6Ozc3ODk3NTY3ODc4Njc7Ozo6
-Nzk7ODg5ODc7ODg3ODg6OTg4NTc4NzU3NjQ2NTY3MTQ0NTUyNTQ0NDM0MjczNjc0
-NTU1Nzo1NDI0NDUzMzY2NTQ2Njc1MzI0NTY2NjY2NjM1NDY4ODY2NTY0NTMxNjY3
-MzUzNTQ2NjU4OTY4NzY0Mjc4MzEzNDM0NjU2NzQyMzU1MzQ1NTUxMjU0NTQzMzc6
-ODczNDY1NTY2NTU2ODYzNDM1NDMzMjIyMjExMzU2NDM0NjU0MzI2NzY3NjM2ODc2
-NDU2NDg3ODYyNjU3MjEzNDU1NTY1NjM4OTY1NTQ1Njc0NTk2MzY0NDY4NjQ0NTQ1
-NDY4NzUzNDU1NjY3NjM2NDQyMzY0Nzg4ODY0NTM0ODU0NzU3NTIxMzYzNDQ1NTU4
-NjM1NTQ2NzY0NTY1MjMzNDY6OTo3NDQ4NTU2NjU2NzY5NTQ0NjY1NTQ4ODw5NzU2
-Ojk5ODk1NDM1NDM0MzY4MzM3PTc0OTg1NTIzNzg1MzQ0NDU0MzUyMjMyNDQ4NjU0
-NjY1NDQ1NjQ0NTQ4NDQ1NTUyNTU4NjY2MjY5OTUzNjY3NzUzMzQ2NDUzNjQzMTI1
-NTY0NTM0NDY6OTs4Ozk5ODY4NzU2MzQ2Njk7Pz8+PTw5OT1FTF5oYUtAQVdpYlBI
-R0dHR0VBQkBAQD85ODc1NDMzNjI1NTY2ODg2MzY0NDMyNTpBP0E/QUNFR0dJQUA+
-OjU3QkFIUlpbXVlVT0lFREZPV2BqcHZ6fYCChYeEfHBfTEBBREZHR0hDRUhKUVdd
-ZmttdHp8fnx9fX96eXp9gH+ChIGFhoWGiIiJj42KiouKiouJiYeFhYSHiIZ/fnp2
-cmxmXlpSSkNFTFFSS0hEPj06Ojs3ODw7PDk4OEJPWltRRkFES1NbXVZKQDs9ODk6
-PEVSXGBjaGttcXJ0dnl4e3l6fHx9fHt4fn14fH57fH5+fX6Eg4GFhYeGiImJioiI
-i4uGhImMjI6OiouMiY2KiYiLi42MjoqKiIaHiIiLiYqHhoWEg4GAf3x1c3V7hY+a
-mpWOhIB4dnJqYFRNRUFDRT48PTw7Pj0/PTo4Ozs8PD1EVmlwbW1sZ2BlbWZXVmJk
-WE1IVV5QREFHUVdKRUFGSEpIRENDPUFGRz87P0JDOjU3Ojc2ODg5QklKQDs8PEVR
-V1BLREhVXl9TR0JASFVaUUdKWWpxa1pUWmJkV0lMWFtUS1FmamJramxxb2hhTkJD
-QUI8PEFBQT0+PDs9PkBEQTo8PEA/PUBCPUA+PkJAQkA+Pj5DQj08PT0+P0FAOjs7
-P0FBRUI+QUFBQEFCQ0RCQkA/QEJCPj0+PTs+Qj0+QEFAP0JCQkJFPz89QEFCQkJB
-QD5CP0FBQkE+PkNEQUFBQEI/QkZFREVEQkNEQEFAPEBDRkRBQkREQkNFRUJDREVD
-QEBDQkBBQEJEQD5BQkNGRERCRURDQkNFRElLfLTF0Nje4uTm6Ojp6UtGRj9CQkNA
-PkJCQUE+Ojw7PDxAPDs5PT1DQ0E+Ojg6OTo7Pj48Oj45Ojs6Ojo6Pzs7PkNAPj07
-PDo8OTk5Nj05OTY2NTY1NzU3Njk3Nz04Njk6NzY2OTg0NTY3Nzc4ODY0Njc2ODg4
-ODg4NjY5NzU4NDY1ODY1MjEwPTkzMzIzNTQ0MzM1NDQ1MDQzNDY2NjY0MzI0ODg1
-NTY3NTEzNzY2NTc1MzM4Nzc1NTg0MjY1Nzc5NTU6OTc4NDc3NTUzMDM0NTE0MTMy
-NTU2NDM0MjIyMjI4NDQ5NzU3OTg2NzU2MjQ0MzQzNDMzMzU0NTc0NDU4NjU2NDIx
-MzUyMjM0NDU2NjIyMzQ1MzMwMTYzNDQyMzM0NTQ4MzMyNTY0MTM0MTUyNzY1NTIx
-MjIyNDg1NTY3NzUzMjk1ODI1NzU1NDIzMjQzMjU1NDU1NTg2NzQyNDEzMzU0Njc0
-NDc0NDc5NjE0MzE1MzYxMzI0MTM1NjY3OTc2MzU0NTU1NDU3OTU1Nzg2Njg3NDc4
-NDQyMzg2Nzg2NTU3NzQ0NTU5ODc4NjQ3NTY0NTU2NzQzNzg1NTY0NDQ4NDIyNTc2
-ODY0MTAzMTE0NDQzMTAxMjc2Nzc5MzU0NTg1NDg0NDc6ODY1NjY1NDE0NTY1MzQz
-Pzs7Ojo0NzU1NTMzMzMzMzQzNjgyMjQ1NTQ1ODM3NDY8Ozo3Ojs6NzY3NTY1NTU3
-ODo7Ojs8QDw5Oz9CVGZjVURBTmJkV0lER0hFQkE/QUA/Ozc3Nzg2ODc1NjQ2NzM0
-ODo1NTE0MjQzNDg7QUBBQEBDQ0VFRUE8Ojg8Q0xUWl1dWVRMQUFDSE9UXmdtdHp8
-gIiIhYN+cWBPRUZIRkZHQ0JCR05UWFtgZ21wd3h+fHp7enl4e3yAgH+ChYSDg4WF
-iYuIiYiGh4mIiomLiYqIiIiHhoN9fHZxa2RgW1ZORERJT1BOTkhAPjw6ODk4OUA+
-OTk2O0dWXFpQUFVeZ2tubF1QQjw4O0NKWGBma25tbXJzdXZ6enZ4eHt+e3l+fXx7
-fnp7foGEgn1+f4aIhIWFg4WIiYiIhIeHi4iKi4yJio6OioyKjIeJiYiHjI+MiYmF
-g4eMjIqMiYWChISDg4GEgXpva3J8iZGXmJWPh4B5cm9qX1VNSERAPDs6OTo9QTw7
-Oj49QDw5PD5GU2Fobm9pZ2xpYFxdYl1TT1FTU0tAQEtVVks/PUNPTUM/QEZIST8/
-QUE/Pjw4OTo5Ojo3Nzw9R0pEPDxERlVgXVNIRUVMVFZLQ0FEU15fWE9VZXBsXlJM
-TldXSERNXWBZXW92cW9ubm9oYVhJPzxAQD0/PDw9PT8+QkFBQD48Pz49PkFAPj88
-Pj08Oz0+PD89P0RBP0A9QT48PTs+QEJBQ0FBPj88QUFAQEQ/PkJCQ0JBQD48PEBC
-PjtCQj0/Pj9CQkJGUE1BPD0/Pzo8QEVDQEA/Q0BAQERBRENCPj1BQEA+Q0NGQD1B
-QkFCQkNEQUVEQ0VEREVFRkJDRUM8QEBBQ0FBQEBEQURFRUNCQD1BRENEP0RAQ0dI
-SEeDscXQ2Nzh5ebn6errRENDQUI8QUI/P0M+PT1BPDs5OjxBPTw4PUFKPjo6Ozw7
-PT1AOzw6Ojs8PTw+Ojk8PT0/PTs8Ozw4OTg4ODc4ODk0NTc0NTM3NDQ3Njg6NTYz
-Njk2NTc2NjU1NTU0NjY4Oz43ODk3ODo3NTk1MzU0MjQ0NDY2NTc3NDE1MzI0MzM0
-MjExMjMxMjQ2NTU0NTU2ODY1MzUzNDUzMzQxNzQ0MjQ2Mzc1NjY0NTU1NzU1Mzc0
-Njg2NjY1NDc1Nzc1NTQ2ODc3NTM1Mi80Njc2NDY3NTU1MDY4PDs7ODc3Nzo2MzI1
-NTU1MzQzMjAzNDY1NTc4ODc4NzM1NTQzNzY1MjM3NTM5OTU1NjM0NjQzNDU0MjMz
-NDIzNTQ1NDY2NDM4MzY3NzczNjU0NTM0NDIwMzQ1NDU0MTIzNDU2NTc4NzUzMzM2
-NTQ0Nzc0NTg1MzM2OjMzNDQ0NDQ0NTY3Njk3NjQ1NzQwMjIyNDU1MzY1NTMzNDU1
-NzYyNjM1NDQ3NjY1NTc2Mzc1Njo4Nzc3NTc1ODU3NjM3NDY1NTM2NDY3NjYyNjQz
-MzQ3NzY2Njk0NDQ1Nzk2NDQxMjMzMzMzNTQzMjUxNDExMzM1NjYzNTYzNjc4NDI3
-NTc3NDIzMzU3ODg4NzY1NTQ1MzU0MzY3Ozs9OTc2MzUzNTMzMTI0MjAxNDI0NjM0
-NjY1NjQ1MjU3Njk3Nzg4NDQ2MzQ4NjQ1OTk5Ozs7QT47OTtNX2JZSUNNXWRWSUZG
-RkRCPkA+PD86ODY2NDQ2ODY1NzY3NjM1NTM0Njg2Njk4OT4/PT0/QTw+QkZDPzw7
-OztDSlNbW1pZUkhCQD9EUFVfZ29yeYKEhYiKg3xyYU1ERkZHRUVFRUVHTVZZXWNo
-bHB1eHx9fnh5d3x/f4GAe3+Eg4KChYSIiIaIiIeJiYiLi4uLjYyMiYmEhIF/enRw
-bGhiWlNKSUpMSk1RSkY+PD09PDw7QENFRT4/PUNRVV5ndnx+e3BrZFlMQkNITFZe
-ZWhrb25xc3V4e3x7fXx5fHp4eHp6eX1+e3x/foB9fH+DhoOAhoiHhIeJiImHhoaI
-ioqOjYuMjo2Li4mLiomIiIeLjIqIh4aFhYmNjYyHhIOGg4KEhYODfXZtaG56hpOW
-l5SOh355dG9pY1lOQ0FCP0NBODg8ODY3ODg5PDs8Oj5CTF5tcWdnbWleYWhiVE5U
-VlFHSEVFSlJOSENCS1BNQDlDTE5DPT1DTEE8Ozw8Ozo3Njg4OTpAQ0NAQT9FUlxi
-WExEQENMVFFHQEZTY2lgVFFZZGZcSkJDT1VOSVFlcWxja3dvamhlY2FgXEc9PT1A
-QUBAPD1APTw/Pz09Pjw8PD8/QEFAQD47Pj49Pz0+QkNBQkBAPz48QUE+P0JBQkBB
-Pjw+PUFCQEFCQD9CQUBDQUQ/PD08Pz1CQkFAQUJEQURDP0BGRkE/QkNCPjw/QUJB
-QEE9Pj9FPz9BQEI+Pj5ERkREQ0M/QEJAPj9DQkBHQ0NFQ0NEQ0JERUNAQz8/QkRE
-RkdGQkJBQUZCREZCRENHSEJGQkJCREVESoiyxNDY3eLl5ujp6upDREJJREZCQUJE
-RUE/QEM+Ojk6Pz46Oj49PTw6Oz1AOzs4Ojs6Ojw+PT08ODo5ODo7Ozw5PDc8Ozo4
-ODk4OTg1ODs2Mzc0NTc4NTU0NDM3NzY1NjM0NTM4NjQzNTM0NDc6Ojg3Nzg8OFE1
-ODM2NDY3NDU0MzU2ODc2NDM1ODI1NDEvLzEyMjQ0MDQ1NTQ1NDQ3NjU1MjU3NTIz
-Ozg4NTU2NDM2NTU3Nzc1ODQ3Nzc1Njk1NjQ1ODQ1ODk4QD05OTc2NjU2Nzg0NDIw
-NDc4NTY0NTU3NDY4NzQzMjI0OTM0NDIzMTI0NDMzMzEzMzUzNDQyMjMzNDMxMjQ0
-NDU4NTU2NDg1MzU1NTQ0NDIzNTU2NzY4NTQzMzM4NjU2NDQ0NjQ2MzU2NDI1NDY1
-NDIzMzM0NDQ0MzU0NTU1NzYzNTU2NTc1NjgzNTY0NTc1NjQzODU2NTY3ODQ1NzM0
-MzMyMTUzNzU1NTc1NDIwMzM2NTQ0NDM0NjQzNTUzNTQzMzMyMTM1NDQ1NzU2NzQ1
-NDM0NTU0NDQ2NjM0NzU1NTQ2NDU0NzM3MzY2NTQzMzQyNTQ0NjY1NjE0NDY2NjY1
-Njc2Nzc1NjY0NjQ1NzQ2NTY0NTMzNDg2MzY2MjMzNDU1MzQ1NDY0MzU3NTI3NjM2
-NzY2NzM1NDM0NjU0NjQ1NDM0NTMyNDI0NDM1MzY4Nzg4Ojo3ODY0MzQ0NDQ5ODc8
-PkJAP0FCOTM2OEZWYF9USk9bYVlMRkNFQ0FDQj88Ozs5OTg1Njc2NTYzMzIxMzEy
-MzM0ODY2Njc4Ojk5Oz1BQUFCRkVCPj07O0NKUFddWFROSUI+QUVNW2Fmb3N6fYOI
-iYiDfnNjUEdGRUhIRUNBQkdMU1lfY2hvc3Z4e3l6e3l2eX5/f4CCgYF/goWFhIaI
-iomGi4yOjoyMi42LjY2NjoiBf356d3ZvaWVhV1BPSklJSUpHREBAREVDPz1APUBC
-Pz08O0BHWWp4hISCdmpkX1hPS1JYYWNla2xvcnJ1c3Z4d3p6fHx6eXt7e359e3x/
-fH5/gIWCgoeChIqIiYaIh4iKiomIiIqNiouMiIeLjYyLjIuHiYiLioeHh4aIhoiK
-iIiGhYWFhoOBg4GFgoB/fXZsZ254iJOXmJKNh4F5cm5pYllNRkJDQz85Nzo5OTs6
-OTg6PDs6PkNDUGZuZGdxaFxibGNRT1RSSURESEhFQkREQ0dMTkg/PkZPSUE9PkhI
-Pjo6PTo4Njc3NDQ4NzlBR0ZEQURIUVRPS0E/PkhSXFpOSU1bZWZWS0xYXlpQRUZP
-XmJdWGZ3d2hncm9lW1tbW1xgUkVDQj48Pj1APj09PDxAPjw7Ojw9Pz9AQEBAQEJB
-PTw9PTw+QkFCQD8+QEBAPkJDQUI8QEFAQD8+QEFBPDo9REE+Pj9AQUBAQD9CQEJA
-QEBDQUJBPj09PUNERUE+P0FCQ0I+PkBDQ0M+PT1CRURBPj9BPD5BRERJQEJAQUE+
-P0FDQ0NBREVFR0RDQD1BQ0NCRT9DQD4/QENBP0A/PUBBPz4+QkNFQkFGRzs9QERN
-l6rD0Nfe4ePl5+jp60RHQ0JIQkFAPkNHTElCPT09QEA8Ozw+Pz04OD9BPDw+Oz5A
-QDs7PTg4Ojw7ODk+Ojc5PDw7Ojs7PDo6ODk4Ojc2Ojg6PDw4NzU1NzU3OjU3NTUz
-NDY3OTc4Nzc2NjQ1NTo4Nzc1Njc4Ojk3ODY1Nzs3NTIzNzc2Njg1ODUzNTM0MjYv
-MTQ4NTI0MzY1NDU2OTg0MzQ2NDM3NjU0NTg1NjUzNTk1Njc5NzY2NDc2NjM3NTU1
-NTc7OTc3Njg6NTU5NTY2NjU0Njo3NjU0NDU1MzM1NjY2NTY2PDc2NzIzNTMyMjM2
-NDQ0ODUzNDAyMzQ0MjIyMjQ0NTQzMzQyMzUyMTQ1MzE0NjQ1NjU2NTU0MjU2NTk2
-MzIzMzY0MzU3NTg2NDM3NjEzOTU2NzY2MjQyMzIyOzUyMjU2ODg2NDQ1MzM1Njc0
-NDY0NTU1NDY3NTkyNjc4Njc1NTY1NjU0NDE0NjQ2Njg2Nzc0MzY0NDM1NzUyNTYz
-NTQ2NDYzMjM0NTQ3NjU3NTU2NzU1Nzk3NjU1NDY2NDM0NTIzNzo0NjMzNDY3NjU0
-NDIyNDMzMzM0Mzg4NjQyNDU1MzQ1MzQ2Njc2NjI2OTU0MTU0NTMxMzQ0MzU1MzY4
-NjY0NTc2ODg3NjMyMjMxMDE0Ozk1MjY2Njg3MzI1NTM0NzY3NTk6NTQ2NDQ1NDU4
-NjE1NDU4ODw6Ozw6OzQ0NDQ0NTU1NTk6QEJDPjw3ODg4OkZVYlpUW2tzaF5NSEhH
-R0JBPz9APTo4ODM0Mzg4MzQ0MDQ3NTU0NDc1NTg1Njc5PD4+PT5BREVAREZGQTw8
-PURLUldXUktFQTw9RE9XXWdtcnp/goeJh4N8cGFVTEdISUhFR0RERUpQVVxjZ2du
-dHZ5fn17d3t6e32AfYCDgoWCgYKFhYuMi4iGho6Qjo2MjZCLi4yOi4mDgHt5dXBs
-ZmJdVU1JQzs7PUA6Oj9CQkA9Pzo8Ozo7Ozs5NzpDUWBwdHVycGlhXFhVWmFmaWts
-cXBxc3NydnZ2dnh6eXh6fHt7fnt9gYJ/fn1/f4B/goKEh4eHhoaCiImLiYiLioyO
-ipSOhYmIi42LiImIioqKi4mJiomIiIeGhIWEhIaHg4KGhIOBfX6AfHNlXmp8jJSX
-lJCNg312cW1oYVdQSEVDQjo3PDs8Qj0+Oz08OTg2OTxKXWZjZWxlVl9sZFJPVU5G
-RktTSkFAQ0dJSUlHQzw/R0dDPDxCR0Q/Ojg8Ozk4Ozg3Njc3Nz9GT01DPUFITE9N
-RUFDRlVlZ1xPTFddXlZLRUpVXFlQTlZncGphZXJ0Z1tfYlZTVlhgaWxVRUE/Oz0+
-PUA+PD5AQD87P0E/PDs8Ojk7Ozs/PEFAQD08Oz87OjxBPj0/Pjs+PT48PUBFQD0+
-PEFCPT0+QkE/Qj9BPT5AQUNAP0FGQUBDR0VCPT1BQENFQEJBPD5AP0E+P0JCQEFB
-QkBAQUFCQURCQEA/QUBBQ1NFPj5BQEVCQj9AQUBBPkNBQUBCRkNAPj9EQkFCQUA/
-QkA/P0I+Qj49P0JFQkNFRERFRUJDQ0uEnMLQ2d3i5Obn6OrqQT9FR0hGR0g7PD9A
-SEE8PEI5Pz8+Pj05OTs5Ojs4Pjw5Ojw5PT08ODU5OTk2Nzg4Oj08Ojs7Oj0+Ojs4
-Ojg5OTgzOTg1Nzc1Mzc2Nzg2MjM2NjQ4NzY5Ojk1NDY5ODYzNjQ0NTY2Mzc4OjY4
-Nzc2NzY2NzQ0NDQ0NTQ1OjY2NjUxMzc1NDQzNTY0NDQzNzM0MjczNjQ3NTY0NjY2
-NzY2NjY3MzQ3NDQ2ODY3OTg3NzY2NzQ2NTY5NjY5NDI1NDQ3NjU2ODU0Nzk3NzU0
-MjM0NDQ2NDI1MzU6Njc3NTY4NjQzMDEzMzQ1MjE1NTc0MzIyNDQ0NDU3NjUzMzQz
-NTo4MTQ2NjY1MjEyMjIxMzc2MzM0NDQzMjI1NTc2Njc2NzQyMjQ3ODQzNTU1NTY4
-NTY2MzQyMjAyMzQ2Njc3NDY1MzY2Njc3NjMzNTc4NjUzMjUzMjU0MTEzNTg8NTU0
-NTM0NDY4NzY0NjY0MzIyMzU3MzQwNDQ2NTUzMjIyOjM0NDc4NzY2OTY3NjY2ODc1
-NTU1OTQ2OTg4NTk4NTczNDMyMzM0MzI0MzI0MzE0NDUzNTk2MzM0NjU0NDU0NDU1
-NjY1Njc3NDU2Nzc3NjQ0NDMyNDQ1NjgxMzU3NDI0NDY3NTg0MTAzNTQ0NjYzNDMz
-NjQ2NDE1NjY0MzY3Nzc1MzQ4RjU1NjYwNDU0NTY2Ojo4Ozs1OTc2NTY1MzM1Njw9
-QkM+PDs8PDg5QEdUV1ltho2Id1tJSENDQ0FAP0E8Njw1NjU2Nzc1MzEzNDQ3NDY1
-NDk3MzM2ODk7OTk7PkFCRUNERkVBPT0+QUlPUVJSTEI9O0BETFNbZm52en+GiYqD
-gXhtYFRPTUlKSUhFRkVISU1SWmFmaW10d3l5eXl6d3p9gISAgIGChYSGhYWFh4iK
-iYiJh42RkZCMjZCMjIuJiIWGf3p1cG9pY1xVTEc8OTk6OTY4Ojw7Ozg4ODk7Ozk6
-Njk4Ojw9RVBbYWReW1RSWl1gZGprbm1tdHRzcXNzdnZzeXp5ent8e3p7fn99gIB+
-fX+CfX5/f4CCg4OGiIiGhomJh4qMioyMh4aGhoqHh4iLiYmJiYiIiYqLiIiGhYeG
-hYaEgIOFgoOEgYKBf359eGhdYXaHkJGTjoWDgX93cWxpYFdNSEVBPDo6Ojs6Ozk9
-QDs9Ozk4QEFLVV1kZ2BXXWRdUlBRSkhMU1BEOT5GS0xHQ0RDSUlGRUFCSElEQz4+
-Pz06NjY3PTc2OTs3OUZVUktAPUJJTk9NRUNJUl5lYlRIRkxSU0xEQ1FibGVbXGZ1
-c2RaYGtdTVNdVldcX2h2dVtGQEBCPz09PUBAPEFAPT4+Oz0+PTo5Ojs9QUBAP0RB
-Pz07PDw5Oj0+QD0/Pz5APD8+QUM+Oz0+PT49PUM+Pj08QkI+P0FAPz0+R0ZFQURD
-Q0RBQEJDQkBEQ0FCPjw6PT09PUA9PT4+RkZCQURCQUBDQUFBQT89PTo6Pz9DRkQ/
-PT1AQUBEQ0ZDRkVBQkBBQkFCQ0FFRUFEQUJCRUNCPTw/PT5CQUJERERKRkVFSoqz
-xdHZ3uHk5ejo6+pGQkRCRkhHRUA/Q0VAQ0I7Ozo2Ozo7QTs8Ojw8Ojk7Pjs8Oz0/
-Ozs9PDs4ODk4NTY4Ojg5OTk8Pj05OTk5ODY4ODk3Ojg2NDQyNDU5OTg3OzQ4OTU2
-MzU3NDU2Njg1Nzg2NjU1NzY4OTk2NzQ2ODY0NDU1OTYyNTQ2NDMzMzY1NjUwNDY0
-MjY2Njc3MzY4NDM2NDQ1NzY0MjQyMjQzNDM2Njc4NDQ2NzU3OTg2NTU5NjY2NjU2
-NDg2NDQzNDQzMjM1Mzk0NDI1MzQ0NDc1NDIzNzEzMTMzNTY2NjQ3NjMxMzUzMjA0
-MzIzNzY1MzI0MjU1ODU1NDQzNTU1OTs4NDM3NjQ0MzQzMzMzNDc2NDY1MDEyMzI1
-NjIyMjQ0ODU5OTYzMzY2NTMzNDQ1NjY3NDM3NjYzNzUzNTQ0NDUzMjY5NTEyMzEw
-NjY0MjQ0NTY3NjQ2NDMxMzExNDU0MjExNDU1MzI0MjQ0Mzc3NzQ3Njc1NDU4ODg3
-NDM2OjM0MzM0NTc4Nzk2NzY2NjMzNTc2OTY1ODo1OjQ0NTg1MzQ1MzMzMTQ1NjY3
-OTU1Mzg5ODY3MTIzMzY0NzY3NTczMzM1NDY2Njk4NTo4Rzg0NT01Nzk3MjI2NDMz
-NTQzMDUxMzM0MzQ2NTIyNDUyMzM2NjUzNjQ2NzQzNjk4NDMxMzUzNjw7OTs4Nzk1
-NjY3Njg3OTw+PDg3OTg1NjYzNTY4Nzk7PkI8OTo7Ojs4PERHVGyOn5yKaVBHREJC
-QT9DQkA8ODc3NjQzNDUzNDMyNTQ1NDQ0NzQ3NzU7PjY6ODo+Q0VESEhFRUM+PUBB
-RElSVFBJRDw8PEZKVFleZnN4gIeGhn99d2tdUk5MS0lISUZFRUVJSk5UWGBnaXR2
-d3h5end0fH1/f4KDhISChoeGhoaGioqKi4iLjoyLjY+Pj42Ki4iFiIWAe3dzcWxo
-ZVhOQztCOTk5PTo3Ojc4Nzc0Nzg4NTY1NDg7NjY5PUFCQ0NBR1JXXmRpam1ucHBy
-cnJxcXJyc3N1eHh5enl7eHh3eXp6foCAgoOEgYCBgIKFhIaEhYaFhoqIh4mGiYyO
-j4iIiYmKjoyIi4mLiYWHiIqKiIaDhoOEhYWGhYSDgX9/fYB9e3t7cWRgdIKOkpSU
-kIyBfHdwbWxoXE9KRkA9QTs7OTk4Njg3OTk4Nzg8PT1FVWBiXWVlYFhSUk5NTU9N
-RkA/QkhNSEI/Q09QSkA/RU1MRz45PTw8PDo4NTZBOTY2Ojw8QU5PSkM8OkBMU1lT
-SkRLVVxcU0pDRkxSUU1LVWdycGNdYWtrYlJRXlxNTlpaYWhscXBuVkZAPkA+QUE8
-Ojs8PzxAPj09P0BAQD07PT4+Pj49P0BBPT8+PkE9PT8+PTs6PDo8PD49QEJBPD5A
-P0A+Pj9AQD1AP0NCQkE8PUFAQ0A9Oz0+Pz1APj5AQkNCP0BCREJAQj5APD08O0BC
-Q0BAQkJEPUZCQkJDQTw7RkRBQEA+PkFAQEFBQENDQ0VFQEFEQ0NFQkBCRUNDREJD
-QD08Oj0/REJEP0FBQkVERUJDQklSea7F0Nje4eTl5+jq6kVCQEBASEZDREJBPjs6
-OTg5O0BDQj5BPD06OTs+OTg5Ojw8PDg3ODk7Ojk5Nzg3PDg4Ojk4OTk3Ozw5OTw5
-NzY2NjU3NTQzNDg0MjQ3OTk5Ozo4OjMzMjY6NzU2ODg3NTc4NDM1NjY2OTc0NTYy
-NDM0NDQ1NTUzODc2NzYzMzY2NTUyMzQ1NTU3NTU1MzY2NDU2NDU0NDYyMzQyMzUz
-NTk8NTUzNDIzMzY3NzQ3ODY4NjU1NjQ4Nzk2NzU2NTQ1MjIzMzQ2NDw3NjI0NTQ2
-Njg0NzMzMjQ1MzU1NTQyMjY1MjMzMjUzNDQ1MzU1NDM0Mjs1NTYzMjQ2Njc3Pzc6
-NjY3OjY2NTM0NDQ2NTQyMjEzNTUzNDQyNDY3MzAxNTUyMzY3Nzc1NTQyMzIyMzU1
-Mzc1NjI1NzYzMTExNDM1NDMxNDU1Nzc1MzM1NDExMjQ4NzU2MzMxNDIxNTQzMTY1
-NDU1NDY3MzM2Mjc8QDY0MzU1Mzc2NzQ1NjI1NTU1ODg2NTg7OTk6NjU0OD86ODU4
-Ojk2NzcyNTMzNDc4NjU4NTU2MjM2MzU2NTU5NjU1MTM3NjM0NDE1NDY1NTY0MzU0
-NTQ0MzQ2OTYyNDQ1Njg3NDQ0MjIzNTQ1NjM1MzExNjQzNTg6NTU2Njc0NzMxNDQ2
-OTQ0Njc2NDY1NjUyMzQ3ODY3NTY4ODY2NTU1NjU3Ozs7Ozo4NDUzMzU1Njw3Oz49
-PDg3ODg6OTg4ODtAWn+Ul4ltVEhHRUNCQT9DQj47Ojg3NTMzNDY0NTY1NDM0MjU4
-NzIzNT4/NTY5Ojo+REZIRkVEQkA7PEJGSk5RUUlAPz08Oj9KU1tlbnZ7f4J/fnt3
-bl9TTUxOSkpHREI/QEJHSlFXX2Nnb3R4eXp1eHx6eHp+gIKCf4GDgoWFhYiJiIeI
-jIyOj4uLjIqOj42OjIqFg397d3Vyb2tlXVNHPzo4PDo4NzU1NjU3NDcyMTEyNDY1
-NjY6ODY4OTo5OjxHUl1iY2dqa2xub25wcnN2dXd0d3V1d3l6e3t5eHd5e3t7e3+B
-hYSFg4WGhYWFiImJiYmIhYaGh4eJioyLiYaGiImIh4iHhYeFgoKDhIWHhoWGhYeH
-iIaGhYF/goKAfn1/fH13cGVmdIWQkpSRjoqAenN0bWliWE1HQ0RBQDs4NTc3Nzg7
-Pjs3Ozk9PEFOXl1aZ2hZUFRSUVVTS0VER0ZGRkdDP0FFUlJHPj9GUU1FQUA9Ozo7
-OTg4Nzc1Njc6OTk7RExHQTo6QExYXFxPRkNJUlVTTklITFhfXlNUYXByaVZRVVxb
-UExXXlNOXmZpdXFrZGBSQz9CQEBAPD5AOz9APzk5Ojw9PDw/QDs7PT08QEE+PD09
-PkFCQT4/PkBAPzw+Oz1AOzs8PEFAPz89Oz06Oj08PzxAQz4/QkQ/PT9DPUFDQ0RB
-QUFBQENCQT1AQUFAPUFDR0NAQEFEQUNFQEBBQkFBRUE+QkdEPT9GSERDPj9DTUhB
-QUJDQ0VCREJBPz9CRUhFQEFCR0dEREJDQz9DPj9BRUVERENBQj5DREVETX99qsPO
-1tzh4+Xo6OrqRkRFSEREPkREQ0FBPkBBPj49PkBBQUA+PEE9Ozw7OTg5Ozw6OTk0
-Nzg8Ozo7QDo5Ojk6Nzc5ODc5ODk+Ozo3NjM1NDI0NTY2NTY3NjQ2NzY1NTU3NjI1
-Njc3NTc2NjY2ODg6OTY1Ojc1NDc0MjIzNDQ0MzMxMzI2NTY1NTI0NjY2NDY0MjQ1
-NjQ0MzQxNDc1MDEzMzQ2NDE3NTIzMzQ0NDQ1MzEzMzM5OTY5ODg5OTg2MzgzNjM1
-PDc1NjY4MzM0MjM1NjY2NzUyNDM1NzQ2NzUzMzI1MjMzNDMyMzUzMzU4NDIzMzQ2
-NTU1NDQyNjU1ODUxNDY0Njc1NDQ1MTM0Mzc1ODYyMTA1MzU0NDY0NTk2MzU3NDUy
-NDExMDM1Mjc3NzQ1MjQ2NDU2NTY3NDY4MzM0OTU2MTQyMTIxMjM1NDQ2MTI3Mjc1
-MzcyMzIzNDc0MjY1NTQ1NjUyMzIzNTQ2MjEwMjQzNjk1QFhWRjMzMzc4NDQyMjU1
-NDU1NzY1NDc1MzI0NTY4NzQ1PTc3Njk1MzY3NTU0MzY1MjU2NDM2Nzc0MjUzNDY2
-Nzc2MzU4NjYzNjQzNDQ1NDY0MzMzNjU0MjM1NjY1NDk3OjU8NzQyNTM2NTQ0NDI0
-Ozs2NzM2OTk3NzY2MjAzMzc2NzMzNjcyNDMxLzIzMzM2MTMyNTc4NjU0NDQ0NDQ1
-MzU0OTg8PT07Ozw7Njc2NjQ0Njs4P0I/PDo5Ojs6Ojw8PENSZ4GKg3JcSkVIRkRD
-QkA+Ojs8Ozg4MzU1NDY3NjUzNTQ3MjQ1NDIyNDQzNDU7OjtCRkRDQ0NEPzs8PkNE
-R0pLSEM+OT5AREhQWmZtdXuBgYN9eXNoXFNQT0xMS0pEQT9CQkZIT1ZWXWZqcXZ3
-enl1d3h6eXd8f4F9f4CDhYWHiImKiomJioyPjYuNjo2NjIyKh4WCf4B+d3Zya2hj
-WU9FPDo6ODQ0Njc3NDc1NTc2OTc2NDI3NTY2OTU3NjY5P0tYYWRkZWltbG5sa25x
-c3J1dXh4dHZ2dXZ3eXl5eXh5fHx+foKEgYCAgoGEiIaGi4yGhoeHi4mKioqIiYaD
-hoiIh4aHhYKCgYGCg4KBhYaGh4SDhYaFgoGDhIKBgHx+gH1+gX12cGZoeIiSlJSP
-ioF7eHNwbWpjWVFKRUA6Nzo9Ozo3Ojw+PDc2NzY6PElVVlljYVNRWVNVWlJFRUpN
-TkdDQkNBQ0ZKSkNBRUtOSUA+QUU8Nzg6PDc2NTQ3ODo6Ojo7RUVBPT9DSFJbX1ZK
-Q0JGTlRSS0lOYGtpX1RXYGljUUhLUllUTVdqZ2BnbG50bWdhWFFDPj4/PT0+Pj9B
-QD09Ozo6Ojg6QkBCREFAPT9BQjo4OTw9PUBBPTtAPUFEQ0BBSEY7OTw4P0JCPj89
-Pj4+PTg6PT08QEBEQ0NAPj5DQkRCQkJDQEJBQEFBPz5AQD5DQD9CQUNAQ0NAPkJC
-P0JFQUA/Qz9BQEFDQD09QEE9Pj9CQ0I/P0BDQkJDRUNCQERCQkBBP0FCQ0ZCQURG
-RUVDQ0FBQEJDQ0NEQ0ZEQUJKjaCww83V2+Dj5ufo6upDRURFR0RBRUE9OjdBQj48
-PUE5PEFAO0E8OTs9OTk8OTo5Ozg5OD05Nzk7Ojc7Ozw7OTY4OTc3Ojc2NjU4OTg1
-NDM2MzE1Njc1NDUzNDI0ODs1NTU3OzU3ODQ3OTg2NjU5Ojg5NzY4NDQ1NDQ3NjQ2
-NzU0NTM0NDU3NTY2NTQzNDY5NzUvNDMzNDQzNDk5NTQyNDM0NTY3NDI0MjQ0Nzg3
-ODQ2ODY3Nzc4NjY1NTc1NjU1NDU4NjY5Ojg0NjY1NDQ0NzU1NjU1ODg5NjU0ODU0
-MDExMjI0NTYyMzM2Njc6Ojk3NzU0MzY3ODgzNDM3Nzc0NzY2MzIxMjMyMzM1NDIz
-NDU1NjY3NTQ1MzQ0MTQ1NDI0MzE1NzQ2MjMyMjI1MzMyMzMzMjI0NDUzMzMzMzQ1
-MjE0MzQ2MjQ1MjI3NTM0NzU1MTI2MjAzMzIzNjQzMzY0NTU0Nzc3NTMzMzQzMzIy
-MjQzMjQ0Mz1XVE1JMzI3NTU5NzU0NTU4NjQ1NzUzMzI0NzIyMzQ0NjQyNDc2NDM0
-MzU1ODQ0NDIzMDI1NDY0MjQzNTI3Njg3Nzg3NDYzMjQ2MzI2NzY1NTY1MjM0NDY2
-MzM1NTY3Ojk5O09EMjQ0NjU2NTY1NTY0NTU4Njg0NTU1NjY3NzQ1NTM2NTU0NTg3
-NzUzMTUxMzE1Ojk3NjY2MzMyMjIzNTQ5NDc4PTk9PDo8OTk1NTY4ODUzODw8PkNC
-QTs5ODY4OzxAQ0VUbYCGfmNPSkpGQ0NBPD87Ozg4Nzc4NjU2Nzs2NjU2NzMvMjY3
-NTUxMzM0NTo4PUBDRUNEQ0NCPT09PEJGSERCQzs8PDtBR05WYGlzeH2AgH15cmdY
-UU9PTk1MR0JCQD9AREtQWV9jZmdtc3d3eHR2eHl6eXp7fHx/goSDhYeGh4iMiIeM
-jZCQkI2Mh4iKjIqIhYaEgX5+enRuaWNcVkk8OTs5ODg3Nzg2NjY2ODI2NzU3NjY3
-NzQ1NzM1ODtHU1tgZGdoamxra2xua2xucXN0d3V0dHV3eHt0dnd4d3Z6e3x/foGC
-f4CBg4GDiIWIiIOEiIiIh4iIioyLjI+OiYeGhYaFhYOEgICBgoOEg4ODgYCAg4WC
-gYGBgIKBgXx/fHx9fHpyamZtf5CQlZCJgn98eHJtamZfVk1HQTk4Oj08PDw9PTw7
-QDo3ODc5PkhOWWJbT1JST1ZaUUdLVFlRR0JER0lJRkFBQkNGSEdAPjxDRUE5ODk4
-NDM0Nzg3OD89PTxAQ0hHREVETFJVUUlEQkRLWF9dVVNbanRmVE5TXF9XSkdQYGJb
-YXR4bGtvaGZiXltWVEZAPjw8PD0+QT5FPjw/PT0+QTtAQD1BQ0FAQT86PT4/Pjw+
-Pzw/Pz1BQT48QkVCPj0+PUA8Oz09PTs7PkJCQUA8Pj07P0FEQkE9PkJAPTw/REVB
-Q0A9RENAQj5BQj09PT48Q0JAQUBKU0VEQEA9Oz4+PD5BQT9DQEI/QEM9Pj09Pzw8
-P0A+RkZGRUNERUdAO0BCREREQ0JCRENDRUJFQ0RCOkBCQ0FERUNCPkmOua3CzdXa
-3uPk5+jp6TlCQT9CQUJCPj08Qj0+Pzw+QEQ8Ozw9PT08PkE8PT46OTs4Ojc7Oz08
-PDw/Pjs6Ojs4Ojg8OjY6OTg5OTo5OTg3ODc3NjUzNDY0ODg0NTQxMzc4MzQzNTQz
-ODg1ODU3NjU3NjY1NjU1Njc3NTM2NzQzNDQ0NTU3NjQzMzM0NTQxMjM1NTg1MzEz
-MzU2Nzc0MjM2MzY3ODQ2OTk5NTQ0NDM2NTM1Nzo3Nzg2MzQ3ODYzNDc4ODU5Njc3
-NDQ0MzU3NjY4NDMzMzQzNDo7ODc2NjU1NDIzMzU2NzUzMjM6OTY2NTU5NzIyMTIz
-MzMyMjM1NTI0MjQyMzQzNDc1NTQzNzU7NDQ1NTMxMjIzMS8yMzU2Mi8yMzQ0OTUz
-NzQ0NTIzNDMzMzQxMzQzMjc8NTIyMzQ0MTIwMzMyMDE0NTc4NTY1NzI0MDI1NTM0
-MjQ1NjUzNTc1NjQ4NDczNzM0NTMyNDIyMzMyMzY1OztETkU0NDUyMzU2Nzk2NTQ1
-ODc0MjQ2MzQzODMyMzQ1Nzs5OTg4NzQzNTY1NjU0Njc7NjMzMzMyMzM1OTY3MjU3
-NTQzMjEyMDI0Nzo2NTU0NjY3NTMxNDQ0MzM1NDQ0NTc9NjY1NzU2Oj45NzM0NTU2
-MzU1MzM1NjY4ODc2Njg2NjU1MjU3NzQ1MjQzMTY2NjU1NDQyMzEzNTM0NDUxMTg0
-OD05Pj9BQzw4ODc1NjUzMTE1ODg5Ozw+QTw4OTg6Oz1FSEtheoiCbVNLS0dGREM/
-PTs6ODg3Njc4NjY0NjQwMjY0Nzk5ODU2NDQyNTU1Njk6OUBFQ0RCQ0A/PT0+QENC
-QT89PDk3OD1ESlNaZm92eXx8enhxZlxQT09NTElGRUA/QERHUVpkbW5taGxyeHZz
-dnZ3eXh6ent6f4B+gICFiIaGiIiHiIqLjZGNj4+Pi4yOjYmGh4iEgoB6dG9sZ2FZ
-TUM7ODg4OjczNjg4Nzg2NDQzMjQzNTs4OzQzMzU4Q09WWF1iZWlpa21ubW9ucHFw
-cnZzdHNzc3R2d3V1d3Z1d3p+fn5/gISAgYKCgIGBhYmIiYiJhoqIiYmIiIiIi4uK
-iISFiIiEhYiHh4R/goeGhYSDgoF/gIGBgoCBf3+AfXt9e3t+fHdwZWNyhI+TkY6G
-gnt5c25ramRbVUtCPDk6Ozk5OD48OztEOjg0Njc5PktWW1NPTExMUFNOTU9XV09F
-QUlXWU5EPkJGR0hDPTs7PD0+Ozo4NTUyNDY2Njg6Ojw3NjpETlBKQ0E9RElLS0RC
-RU1daGtfUlViaGRVSEhRXF9UTFZpcmxmcnluZWJcWFtbYWBdS0A+Oj5BOzw8QEE9
-PkE+RUBAPzs7PT8+QUI/PEBBQUFCPjw8PTw9PjxAQUBBQT89Oj5BPjxFQz07PT0+
-QUI+PUFAPkA9Pz8+P0A/PkJFQ0ZAP0RCP0FAPUJBP0E+PTs+PkNBRU5GQEFFRkA7
-PUJFQD8/PT1BQkI8QEA+Pz9APkJCPkJAQT5DRkdGQ0FGSUVEQkJBPUBDRERFQkJD
-QkNEQkJBPkBDQEM9QkVFTpe6qLfM1dvg4+Xn5+npRUFFQT4/P0REQz08PDk8Pjw8
-PT87PEA7PTxAQUM7ODs9PD48PDs7Pj85OTo7PjxBOjk6OTg5ODU6ODc4Nzs6Qj86
-NzQ0NzY4NDc0ODc1Njg4ODs1NDQ0NzY1ODQ2ODg1NjU0ODU0NDQ2ODk3Njk2ODYz
-MzM0NzYyMjMzNDYzNTQ0NTc5ODUyMjM0MzU1NDQ2NzY1NDQ1OTg1NDo2NzUvNTM1
-NTQyNDM2Nzg3NT9FODMzNTc3Nzk1Nzc2ODc1MzQyNDQ2NDI1NDY2NjU2NzM1NDc4
-MzIzMjMzNDU3Njg2NDQ1NDY0MzMzMDMxMjIxMjM0NDU3NjU2NzQ0NzQ1NDY1MzMz
-MzM3NzQzMzIyMjIzMTYzNDQzNjUzNDUxNTU0ODg1MzMxNDU0NDc2MzU2Nzc0NTY4
-MzM0NTMyMTM0NDU1NDQ1MzIzNDMzNDMzNTY1MzEyMDMyMzMyNTQ1NDM0MzEuMjMx
-MTMwMzQ6MzE3NzcyMDIzNjk3Njg5NzY3ODUyMjEzNDM0NTMzMTI0NjM3NjI1NTc1
-NDM1NzY1OTg5NDI0NDYzNjg0NDQ4ODczNDM1NTIyMzc2MzU0NDY2NTg2NDc1MzY3
-NTU3NDQ0NTY4OTY0NTM2NTc2Ojg3NzQ0NDQ0NTc0NzU2Ojg3OTk5OTg2NDQ6OTU3
-NDI0Njc2NzUzMjE1NDI0MzMyMzM0NjY5ODk5PUI9Ozg3OTY4OTUzNTE0NTs/PT06
-Ozs4NzY8QklOTlhxgYV1W0pKSEM+QkFAPz89Ozk2NzY1Nzg1NTQ1MzU2NTIzNDU0
-MTMzMjY2OzY4OkBCR0ZFQjw6Njg9QD09PDs6NTI4Oz1BSlZgaHR4end2dG5lV1BL
-S0pKSkVGRUE+Rk1VXmp0dXNzcHV0c3JxcnV4fHp6fX19gIGBgIKFg4SEh4iKiIiK
-jI2NkY2Ni4uKi4yJiYaCf3x3c21qZV5VTkdAPDo6OTo3OTk3OTY2Nzg4NTY1Njc5
-PTc5OkJQV1lZXGJkaGhsbm1wb29vbm9xcHFwc3NxcnR2eHt7eXd4eHl3d3t+fn6A
-gX9/gYSFhomKjIuLiomJh4mJiYqQjYuIhoWGiIWGiYiHh4WDhYWDg4GCgX1+f39/
-fX+BfoB/ent7e3p3eHFkWmN5kJGPjYmGgHd2dXBsZGBZT0Q8Ozk4ODk4OTk5Nzc3
-Njc7OjtAS1NRUUpITlJQTU9aXFdOSUZGT1pcT0I+RU1LQzw7QD08PDs5OTo3NTg4
-OTk2Nzg8QDo6OURRUk9GPDw9Q01PTUlIT19ub2ZZS09aX1hNRk1eaGdfXWt3dWxu
-eXNgWVpcYmptaWFLQ0E9PDs9Oz1BPD4/Pz08PEFBPz1AQEFCQEJBQUJEREFCPzxA
-PT09PDw9QUY+Pz1DQkFDPTw8PjxAPj9BQERGRD5AQT08PkFFQUFAQD9BQkE9Pj8+
-QD5CQTxAQEI+QUBDRUJBQkJBP0I8QkA/P0BBQ0BBQ0E/P0JBQUJEQUZEQUI+Qj0+
-Q0BBQkRFQUBGQ0VHRENFQkJEQkI/QUNGQkBDRUNBQD9EQ0ZAQkRKkrWqucvW2+Dh
-5efo6upCREE+Ojk6PUJBPTo6OTg3PUJBPTw9Pjw8PTxAPUE+Pz06Oz06Ozs6Ojo6
-Pjs8PDs5OTw8NzU0Nzg1PDk4Ojo9ODU4OTk2NzY2NTY4NTU2NzU5ODY1NDU1NzUz
-NTg4NjgzNjQ2NjQzMzQ2Nzg2NDw6OTczNDQ0MjY0MTE0NjY0MTQ0NTk2NDM1MzM2
-MjQ2NjY3NTYzMDI0NTc2NDU3NTY0NzU0Nzk1NzM3NTY2NDUyNDc3NjUyMTU2NDY2
-OzQzMjY0MzE4ODQxMjM3NDc7NTYzMzQ0MTQ0MTU0Mzk3NTI0NDMyNTUyNDAxMjI0
-MzQyMjU1NjQ2NjY1NTMyMzc2NDU4NDU0MzM0MTQzMTA0MjExMTQ6OzY3NTMzMTMz
-NTQ0NTU1NDc1NTYzOTo1NDQ3ODc2NzY1NTU0NjUzNzUyMzUzNDM2NzY2NDY6NzU3
-Njc3MjAzMzIxMjU1NTIyMi81NTQzMjIyMTIyMjI0NTM1NDIzMjU0NDUzNDU0ODU0
-NzU2NTIzNDY2MS4xNDY4ODY3NTQ0NDU3NzY3MjY0NTY2MzEzMjM0NTU3NjU1ODYz
-MTI0NTI1NTM0NzY2MjEyNjU3NTM1NjU1NzQ2Mzc3NTU3NjE0NjY0MzQ1ODU0NzUx
-MzU2NjU0ODQzNTU2NDU0Nzc1MjQ2NzU1NTE3NDQ1NTU3NTAyMzQ0MzY2ODc1Nzg6
-Ojs7PT46PDk2ODY3Nzc4NzU4OTxCQEM9Ojo7Oz1HTlJSVWh6f3hmVE1KRkJCQEBB
-QUFAPDc6Nzo6Nzg4NjMzNDM0MzM3NDY2NDU3NDMzMzdAQUJBQUBBPTg2Njw6Ozk7
-Ojc1Nzc5PkNHT1licXd5eXRvamBVTk1MS0pISERCQkNGTllnc32BfXd1d3FvcXR2
-eXl3en1+fX58fYCAfoGDgoOGhYaHiImLkI2NjI6MjYmMjo6Ig4J9eXZzcG1mYFxU
-S0M+PDo4ODQ2NjczNjg7Ojo5Nzc2NTg4NzlCSVFXWF1fYmZjZWprb3BucG9tbG1u
-a2ttb3BxcXJ2eHh3d3Z5enl4ent8f35/gYKDhoaHiImIh4uHiImJi4mJiYeHhYWG
-h4mKiIKCf4CGiISBgoGDgoGAfH2AgICAfXx6fn57e3x7e3x2cWhdXXKEjo+OioSC
-gHh1cG1oX1tVTkI9Ojw6NzY5OTs9PDc4OTs5OjxGUFJNSU5XWE9LWGtsXkxDS1FX
-V1RMQkNKU0w+Oz5ERDs5Oz86NjU4Oz09Ojg3Nzg7PDg3PEhQTkM7OjtCS1hcVk5R
-V2NoYVZJSE5YW1ZNUGJ0dmxiZnR3bGJpa1tWWmJobHBsZUxAQD89Pjw7PkBBQD49
-QEM/Q0JEQ0E/Pj5CQUNARENBP0FBPDw9Oz47OTk8Pzk9QT5CRENFQj8+Pj09QEFD
-QkFDQ0A+PD09PT0/Pz1CQkJEQDw8QD4/Pz4+Pj9BQENCQz49QUFAQkRCQkBBQT9E
-QD49QEJAQD9DQkJCQ0E8QUI+PEFFQz9APkFBQ0RDQUJFREVAREI9QUNAQUBDQ0NB
-QUJHQURFQkNCRERHTEiEta3Azdbd4OPm5+jp6kZEQT0+PDs/Pz08OzY4Pz88PEE8
-P0I+Pjw8PDo6PEA/Ozk5Nzg7Oj49Pzw5PD47Pjw6Ozo5Nzg7Ozc3Nzg4OTk5Njc4
-OTYyMzQ1ODg1MTMyNDI1Mzg3NzY1NTg4Njc4ODUyNTI0NTQ1NDQ1Njc1ODk4ODU0
-NjM1Njc3NDM1NDI1NzQ0NTYzMTMyMzM1MzU0NDQ0NjM2NDI2NDMzNDM0NTQ3NzY3
-NjU2ODg3NjY3OzYyMzM0NTQ1NjU0MzU2MzMxMjU2NDQ9ODU1NjQ2NDQzNTQzMzM0
-MjIxODQ2Njs3OTU0NTQ2MjMxNjQ0MzU1NTMzODY1NDQzNjQzMzE1Njc1Njc2NDI0
-NTIzMTI0NTM2NDUzMjY1NDQyNDM1NjIyNDY3ODg2NDM1NjU0NTc0NDI0NjYzMTQ1
-NzY0NTY0Njc1NDc1MjQ1NjMyMzQ2Nzg0NTQ2NjIzNDM0MjAuMTAzMjE1NzQwMjIz
-NTI1NjU0MjU1ODY1NDQ0MzUzMzg2NTU0MzQ0NjQzMTAvMTY1Njc5Nzc6OTY0NDM0
-NTc5Mzg2MzQ2NDMyMjY1Nzg3NDM0NTU0MjM1MzAzMzQ0MzM3NjU1MzY2NjY1NTM6
-MzY4OTM1NTg2OTY3NzY1MjcyMjIyNTY1NTU0ODU1NTM1MzY3Nzk1NTc3Nzo2NTY6
-OTQ1NDU0LzU1Njc2Njc2NjU0NTYzNjc4QEI9PDs+Ozc1Nzc2MzQ1Nzo7QURHPjw7
-OTU5OT5MU1NQXHB+fWpXS0hHQz0+Pz5BQT4+PDo6NjU1NTQ2MzI1MTIzMTU3Nzc1
-MzM0MzY2Nzg4P0FEQz07ODU8NjY8Ozk0NjQ5OTk9Q0dNXmdtc3d5dW9mXFJNTEtL
-S0VCQ0M/QUdTX2p1gIN/e3N1cW5udHd6fHp5eXp/fn17enx+e32EhIGFh4WEh4uM
-io2Nj5CNjY2MioiFf356dnNwbGpnX1VMR0NAPTs6PDk3NTY2NTc4Nzg6NzU2ODk3
-PUVPUFRZXmJiZGVlZmtra2xtbnBvaWhqZ2tubnBxc3R3d3Z3eXl5eHh6enx8f4KD
-hIaIhoWHhoSFhoaHiIiFhIaJiYmHhYeLjoyIhIF+f4OGg4GAgYCAfoB/gYGBgYF9
-fYCCfnl5eX14dnV3cGNcan6Ij5KMiIWBeXVwbm1lX1lRRz48Ozs8NzY7PDs9Ozc2
-Nzk5PD9HTU1HUF1WR0hbb3JhS0ZMW11RREJFSElGQTo8REdCOzs8PT07ODc4Ozk1
-[base64-encoded binary payload of a file removed by this change; contents elided]
-OTY6OTc2Nzo4Nzo3ODs8Njk1NTc6OTk6Ozc3OTk4Njc4Nzg3NzM2OzQzMzI0MjM1
-NTU1NTEzMjU6Pjs6OjtBOTk3NDU4NDY2Oz08Ozk5ODk7PDw8Ozs5OTs8Ojo4PTs7
-Nz05OTg5OTw3Njk4Nzc3Ojc2Ojo2MzQ1NDY1NDQzNDQ1Nzc0MjM0ODY2MzQ1NDQ0
-NjQ0Njg3Ozs6Ojg4Nzg7PDs7Oz1APz9AQUJEREJEQ0dJSUlLS0pMTU9QUVNVV1ta
-W1paXVtcXl9hYmRjY2NkZGNlZWRmZmZnZ2VlZGReXlxXVVFPSkhEQD85NTM3ODk5
-Ozo9Ozg7OTg1Ojs6PUI8PTw+QEJCPT1BQ0NAPz9CRENGRkdKSElKS0xMTUxNTE5S
-U1VUVFhaW1xZW11dXV5eXV9gY2RgYmJkZWRmZWVnZ2hmZmhnZmZoaGhnZ2ZkZmZq
-bGtramdoZmlqampqYl1VTklHR0dHQ0E+QU1ZWllXVE9JQjs8Ojo6ODo6OTY4NzU2
-NTg0NDAxMzg2OTQyMzIyNjQ2Nzw7NzY4Nzg1NTQ1NjQ0NjU0NzY1Njo8QU1RRz47
-PEJNSTw0MzU4Ojk2OEA3OTc2Oz5ITEU8OTc8Ozs6Oj5GSEM/ODxEWWx7enBpa3d/
-dl5dbnhpV11rbXl6eoF7d3JubnBucH13blBCQD87OD09Pj0+Oz8+QEA8PDw6PUA+
-Pz08PTs7PDs8PDo5Oj49Oj06RU48PD08PT0+QEE9Pz4/Qj9APzw8P0A/Pz48QD08
-ODtAPT9BP0JCP0E9PDs6PEZAPzw9QEJAPDw5Pz4/Pj08Pz89Pj5BQkJDREM9PT5B
-PTs9Pj48QEE+QEFAPz4+PEVERDxBQkJCRUJCPUE8PkJDQ0BDQkI+PTs8QD88QGqr
-ws3W3N/i5ufo6eo/RUZFRElCPz1CPT47PDw9QEI+Pz49Oz89Ojw+QUA9PT5APDw5
-PTg7OTc4ODY4Nzg6OTo9Pjs4ODw8ODg4NjY3ODg9Ozo8PTw8Ojg4NTY0Nzc1NDo3
-NjU2MzM5NTU1NjY0NDU4NjczNTY2ODU1Nzc5ODQ1NDU1NjYzMTAyMzY2Nzg3NDMz
-NTY2NjU2MjU3Ojc1MzMyMzMyMzg0NTgzMTEyNTYzNjQ1NDcyMjEyMzQ0MjQzMzMy
-MS4wNDIxNTUzMjMzNzgwMTI3NjUyMTQ0MzMzMzQyMTAxNjEyNDQ1MzIyNDM0MjIz
-MzY4MzEyMzMyLzM1NTQ1MzIzMTAzNDIzMzQyNDQ1MjMzNTI0NjU1NTU0MDIyMjMz
-MjIyLzAzMDAxMTA0MzI0MjMzMDAyMzIxMzMyMzA0MTE0NDExNDU1MzIyMTIxMi8y
-MDEwMzIzNDM0MzQyMjU2NDMzNDYzMDEzMTAwMzMzNDQzMzU1NDYyMjU3MjIyLzE0
-NzY1Mzc1NTMzNTY0Nzg1MzM1NDU6ODY4NDY0NDU3NDU1MzQ1NTM1MTI0NDY1NTc2
-NTY1NTU1NDQ0NjM0NzYzNDYzNDM0MzM2NTM1NDM2NDQ2NTc3OTo4NjQzNTU0MzQ1
-MzM0NTg5OTc5NjY1Nzk4NDQ4OjU2Nzk6Ojg4ODo1Nzo3Nzg4NDY3Njc7OTc2Ozw4
-Ozs7Ozo6OTg5ODQ1NzQ1MjQ2MzY4NDQzMzU1NzY0MTU6Ojc2Ozs4Nzo2NTI0Nzg3
-ODk5OTg4OjxCQkBBQDw9OTk7PEVISEZFP0NNUUxEQEFBQ0M+Pj06O0BCOzk5NTQ1
-MTU2MzQ1NzY2NzY1Njc1NDU0MzQ1MzU1NDI1NTk5Ojs6Ojk9OTo8PDo4Oz1BPkBB
-P0JAQEVKTkpLSk1JS01PT01PUVNRU1dXWFVZW1xgYlxfYmJhYWNiZWdjY2NnaGdo
-aGRkYWJfUlBTUk5JSkVAOTUxMzQzMzQ3ODw8OTg4Ojc3Ozo5ODo7Pj4/Q0BBP0BA
-Qj49PkFBQEFGR0dHSEhGR0pMTU1NTVBRVVpbWVdZWVpbW1xbW15bXF1eXV5kZGRi
-ZGRmZWNmamVnaGdnaGpoZmZnZ2ZlZWhsamhqamhmZmlnZ2dlX1ZPRkVFRkdCOT1K
-U1xZVlZTTkNAOzo3Njc6Nzg3NTY3NTU4Njc1Njg0NDg0NDU3NDMzNDM1Ojc1MzY1
-NjU1NTUzNDg3NjIzNzU4NztCRENAP0JDQ0VBOzc0Nj46OTk9PDk4ODk4QElPRjs4
-Njw+Qj87O0FFRUNAP0VUbXyAeWpbYG90Y1NcbXJkYnR+foF/fntwa2tyeHVveXBs
-WEI+QkA+PDs/Pzk7OEBAQEM+PT08PEA9Ojs6ODk7P0A9Ojs7PDw8ODg7STw/QTw7
-PDk8PTxBPj0/QDxAQT1AQkE/Pz08OzlBQkBAPUJAPTw6Ojw9Pjo6PT06PkA+P0BB
-P0Q8Pjw9PkQ+PD09PEBAPj1BQEE8PUJCQT48QEI+PTw/QD47QT88PD9CQEBCQEFC
-QkJAQT48QkJAQT48QUFAQUVFQT9GWqfCzNfb3+Lk5+jp6kBDPz8+Q0NDQkFBOzs4
-Pz4+P0FBREBBPjo7PTs8Pj8+PD09OTs7Ojw6PDY5ODw5Njc4Oz44Ozw7Ojo4ODo6
-Nzc6Nzc1Ojo3Nzc4NjMxNDEyMTU2OTk0NTQ1NTYzMzQyMjM1NTU0NTo3MjU4NjQy
-NTU1NzM1NTQ0NTU0MjIzMzU3MjI2NDQ2NDQ1NTM1OTg2NTk1NjMzMjM3Ojc3NzUy
-MjMyMzQzNTQ5NTk1NDQ2MjQzMzMzNDQyNDMzMzEzMzMyNjQzMDMyNT07NjczMzM2
-NDIzMzM2NDQ0NTQxMTEwMTQ1NDEyMjIzMzQ0NDMxMzo2NjM2NzU1MTMzMzE1NDQ2
-NjQ0NDMzNDI6NzQzMTAzNDI0NDExMTEyMDIxMTI2NDAwNDM0NTMzNDQ0NzUxMjQz
-MTQ0MjQ2NDQyMzUzMjY1MjAyMDMyNjM0MC8zNDEyMjMzMDQ1NTc0NDU2NTQyMjMz
-MzI2NjQ4OTU1NjU1NTY0MzI0MDgzMTk1ODY3MTQ0NDA0MzQ0MjY4ODY1NjY2Njg0
-NDQ3NTY3NjY0MzY1NjM1MzQ1MzU2NTQzMzU2NTU2Nzc1MzYzNDU1NjQ2NTQ1NDUz
-MjMzMjI1NDU4NTY1NTQ4ODczMzM0NTU0NDY1NDU2NzU0NTM2NDY1Njc3OTg3NzU1
-NTg2ODg3NjY1Ojo7OTg6OTk3PDk7OTw6PT48Ozo3NzY0NzYzNjMzNjU1NTc1NTIz
-NDQ2MzU2ODg7OTU0ODk5ODg4NDM3NTY1Ojs4Njc4OT4/P0E8Ojk5PERKUlNPSUdM
-YXqJgWJQTUtOTktFQDs9SVNNQjc1Njc3MzMzNTMyMjIzNTU0NTU1ODQzNDQyMjM1
-NzYzOjs7PT08Ozo7PDs7PDo4OD9CQEFAQUFERkpJSElLS0lKTk9NTU9QUFNWVFJW
-WVpZYWBgXmBfX19iYmVjZGJkZWNkYmdlZmljYl9YWVRUTVBJRzo3NTU1NTMxNTQ0
-ODk6Ojc3ODY5PDs6PT5CREpBQEFBPz89P0A/QUNFREVHQ0NFR0VFRkdJSk1OUVFS
-VVxYWFlXWVlaXFpaWltfX15eYGNjY2NhYmNnZmdoZ2ZoaGlmZWloZmVmY2ZnaGho
-a2pmZmlnZ2poZGJdV01HQ0NEQjw6PktTVlZXT05LPzo5ODQ3OTg4NTc4NTI1NDg4
-PDk9OTo+Ozg3OTk1NjM0NTU0NTM1NDQ2NzQzMzQyMjU2NjQ1Nzc3ODo4OUFJTEhB
-PTs7Ozg5NDk6ODg5Ojc8PTw+Q0hFQDs5PkdGQjs6OT1HSUhDRk1dcHx3aFdOYXBp
-VFVtfXhtc4B9gXl3c2hlaHV6e2xvZmRXREA/PDw7QD88Ojs7Oz5BPj8/Ozs8Pjo3
-OT04Oj8/PT04PDs8Ojk7PTxDPT49PD49PTs8Oj5DQj8/PTs6P0E/Ojo8Pj0/Qz48
-QUJCPDw+Ozo6Oj09PDw6PUJCPD49Pj49QEI+QT89QDs8PkBAQ0Q+PUA/RUJAQkFC
-QD48QUFBQkM/P0A7PUA9PkJCQUBBOzxBQkE9PTw+QEdCPj86PUJOR0NDQUdYo8LO
-1dzf4uXm5+nqQ0JBPkNAREE9QUA/OTs/PUNIQ0A+PkA/QDs9PTo+PDw/OTY7PDo7
-PDg8PDo2Nzk6NTc7Ozg2Ojc4NTg1Njk7ODY6NDM4Ojk3NTY0NDMzNTQ1ODk2MjQ1
-ODU2NjQ0NTgzNTgzMjc4NzYzNDc1MjQ2NzM3ODg5NTI0NTY1MzM0Njw2MjU0Njo0
-Njc0NDU1NDY0NTU0MzEzMjI2NzQ+OTg1NTMyNTM0MzEzNDk3Nzc2MzIyNDAyNTU2
-NDM2NjExMTQyMTQ2MjEyNDIxNTM1Mzg3NDEzMzM1MjM2Mi8vLjIxMjU1NzMyMzMx
-MzMyLzE0NDUzMzM0MzI0NTMyMzEzMjMxMjM4OTQyO0k0Njg2MzQyNjQ1NjMyMTA0
-Njg4NjQ2NTQyMDEyMC8xMzY0NTUyNDQzMjMwMjI3NjIzMjExMTMyMzMzMTAxMDMy
-MzIxMjMyMzQ0NjY0NDU2NDY2NTU1NDU2NjU1NTQ1NDYzMjU1NDQyMjExNDQ1MzAy
-Mjc4ODc2ODg0MTUzNTM2NzM1NDYzMjMzNDU3MzMyMzQ0OTg3NzUyMjIzODU2MzM1
-NDU2NDQ1MzI0Nzs0NTQ1NTY2MzIyMjU2NDIyMjQ3ODk3MjY2NzY4NjIzMjE1NjI2
-NjM0NjU1NTU1NTY0NDU3NTM5NDQ4NTg3ODc3ODc4NTg5OTk3OTk8PDw7OTo5Ojo8
-PTw4ODo5NTk2Nzc2NTo3NzUyMzIzNjU1ODg2ODk2ODs5Njc2Njc3ODc4NTU1Ojc3
-ODY1NTY7P0ZFQj5APD0+RVddTkE7QUtripqSd1lOTEhLSUZEPjxIUlZFODY1NzY1
-NDI4My8zMjIzNDMyNTY1NDk3NTQ2NTQ0Njo3PDo6QEA7Ojk9Ojs5Pzw+QD9APj87
-QENFRkdGR0lKSUpLS01MTlBRUFRTVVJVWFldXF1cXF5fYWRgZGNiZGNkY2RjZGRi
-Y2JdWlpXUU5IR0M9NjY4Pjk1MjU3Ojs5Nzo3NjU6OTw6Ozs6Oj4/PkE+Pz5BPkFB
-Pz89QENBQkZHRkhIS0pGSElMUE9RVFRTV1lZWVxYWFlaXV1fXV9fYGFgX19fZGVm
-aGdoZWVna2xoZ2dnZ2hoaGhoZmpoaGhraGlpamdmZ2VoY1xSTEZEQ0ZEQjpBTlNU
-U01MSEE7Nzs5OjU4ODM3ODU5Ojg3ODg4Ojo5OTYzNTc4NjYzMTU3ODg1NDQ0NTg0
-MzMzNzc3ODg3OTUyNTQ1Mzg8S1RSRj09OT1DPTs5NTg4ODg3OT45PD07Q0ZCPkE9
-Q0dEPTs4O0NLU05GRUxZam9nXFBVa3NnXm6ChnJte314cWluZ2ltfX93ZGtiYV1G
-PD08Pjw+PD49QD09Pj47Oz44OTo5Ozw8PTk+Oz89Pjs5PT06PUI+Ozw6ODg8PDw9
-PT5CQT88Pj07Oz0+PjxCPDw8PkFDQz9APj1BPTw6Nzo7PDs6Oz09QT09PD9AQD1B
-QEFBQkBDPz07Oj5ARUJCQ0JAQkBEQ0BEREU+P0FAQ0NCPzw9QT9AQkVAREFAPkE/
-QEJAQEFESUBAOjk5PENGR0E/Q1Klws3W3ODi5Ojn6elAQUA+REVEQz5CQT06ODo7
-Oj1DQTw8Ozw5Oj49PDs/Ozw/Pzs8Pz09OTc8OTc5ODg4Nzk3ODc4ODY3Ozc2ODk4
-Njc6Nzk5NzY3NDU1Njc3NTY5ODk4NzMzNTg1NjY2Nzk1NjQ1MzQyMzY1NDU1NDIz
-MzQ1NjU0MzQ0NjU3Njc0NjY2NDQzNDM0NDU1NTQ0MTAxMzM1NDYzNDEzMzc2NzQx
-NDQ2NDM3MjI0Njc1MjE3NTMxMzIzNjczMjIzNTMzMzMzMjEwMjMzMzEyNTc0MjQ0
-NjQzMjAyMS8xMDAzMTY2ODYzNTQyNTUyNTIwMDExNTQzMzYyMjU1NTQyMjEyNDUw
-NTQ2MzU8NDM0NTY1MjMzNjc2NDUvMDEzOTUzMjEwMTM1NDI4NTM1NTIxNjY2MzY+
-NzI0NTQzMDQyMzA1NTQ2ODU1MjU0ODU0MjA0NDcxMTIzNDIwMDAxMDI2NDQzMzc6
-OTMyNTMzNTY1MjI0NDY0NDQyMjU0NDMzNjc1NDY1MzEzNDg4NzYzMzM0MzMxMzQ0
-MjQ1NTYzNjQzMTI2NTQ0NDc3NTQ0NDU0MTU4ODQzMjU1NjY0NjczNDQ0MzU1NjY2
-NzY0MzM3NTU1Nzg3NTMyMzQ0MzI1MjY2NjIyNjQ0NjU1NTU1ODg3Ojk3Njg2NjY3
-OTk3Mzc4Nzw8OTw5ODlCPzxAOjg3OTo5ODc3Nzc4Nzc2Nzg9ODo3NTU0NjM0ODU1
-Nzk4NTc3OTk4NDc4Nzg1Njc1MzU8Nz07OTQ0OTpOXV9WSEREPz9FT01EOzk6RVx9
-jopvT0M+PT09Ojs6PkBFR0c3NTMzNTQ0NTU1ODY0NTc1NDQzMzQ0NDY3NTY0ODk2
-NTQ4Ozs8PD06Ozs9PkE+PkVCPUFBPj4/QEFDRERFRkdFRUtMTk9NTU9QUlJXVFZW
-WllcXF5iX15gYmFiZmFkZGFjZGRmZWRiY11aWVRUTkpFQUA4QDk3ODQ3Nzg5Ozs4
-Ozg2OT07Ojk5OT06PD8+PUBBQEA9PT0+P0BDRERBRUdJSEZJS0lJR0tRT05TVFhX
-VVVXV1dZWltdX2JhX19fXV5iY2diY2RnZGVnaGZpaWdnaGhmam5qaWlmZmhmZWls
-aGdoaWtnaGhjYFVNSURHSUA5OkBJUFJRTElCODY5Njg5Oz47PjU2ODc4Nzc0NDQz
-NDU2NzY2NTUzNTU2NTg7NzMzNTY4Nzc1Mzc2NDU2NTY4OTU2NTYzNj5NVU4/OTk7
-P0Q8Njc0NDU4Njc5Nzc3ODk7QUZDPTw+Q0VDPTs6PktXWk9JR0pZaG5oXFZpfYBw
-boCKe2hobGppX2lpcnd6fHBhY1lfYEdAPDs9Ozw9Pjw8PDk7Oj09Oj08PDw7Ojs9
-QUE8Ozo4OTo3OTk7QUM/OTs9PD08Oj07Ozo/QD07Ojw/Q0A8PDpBPjxAQ0NFQkI+
-Pj4/Pj5BQD0/QkA8PD9APD1APj89PjtAPT9CQT0/QEA/Pz5CP0A/PTs7Pj5AQEE9
-P0E/Qjw/QEA9QkFCQEFFQEA+QUE/PT46PT5APz07QEA/QUBCRkNAQUFCUKTCztbb
-4OLl5+jp6kM+QUJFRkdCQEJDQTw6OT4+Qjw7OjpCQjw8Pj48Pz07Ojw4Ozo8QDs6
-PT02Njw5OTg3ODY2NTQ4OTk6Ojs5Nzw5ODk7ODs7OTo3NjU0NTQ2NTQ0Njg1NDM2
-Nzk1NjU6ODY1Mzc3MzY3NTQzMjY1MzIzNDQzNDQzMzM1NTY3NDQzNjg2MjIzNDI4
-NjM2ODYzNDEzMzQ2MjEwMTM0NTk4Mjg1NTM1NDIxMS8vMTUzNDI2NDI0MjUyMzcz
-MjMxMTEzMjAwMzIzNTMxMDExMTUxMTMyMjMzMzIyMjIyMDIzMzM0MzIyNTc3NjYz
-NzQyMjMzMjc3NDQzNjU3NDEwMTIzNzQ0NTMxNDIyMTMyNDQ3NTQwMjUzMjQyMjIz
-MzIwMTM1MzM0NDM0NDMyNTM0NTM2NTMyMTEzMjE1NTk2NzQzNjU1ODY3NDU3Mi8v
-MTE1NDMvNDMzNDUwMTExNDIzMjM1NDU2NDU1NTgzNDU0NTEyMzAxMjQzMjQ2MjAz
-NDQzMzU3NDQ0NzYzMzQ0NTI0NDU0MzU0MTQ3Njg1MzE3NDIyNTQ2MzIxMzMzMzIz
-NDQ0NjY0NDk2MjMyNTg0MzAxMjAxMTM0NjcyNTU3NTU0NTY1NDM1NDMyMTI1MjU2
-NjY1NzY1NTQ2NDU3NTQ0NjY1NzY4NTc3OTY3NTU7QDk7Oj08Ojs6ODk4OTk2ODg3
-OTk3ODc4OTg5OTs3NzYxNTM1NDI0NjQzNTc6Ojo7Ojk2NTU5OTY3NjY4ODg4Nzo5
-OTc4P05fY1lLREJAPT9BQEA7OTtASV1mYEw/Ojo2Njc4OjY3OT49OzQ3ODg2NTU2
-NDU3NTg2NDI0MjM1NTQzNTc1NTU3NDM3OTc7Nzg4Nzs9OjxBQkBAQUFAQUJCQUJA
-Q0NCQUBDREZHSUlNS01PUlFSUFFWVlRZW1tbXmFhYV9fYGJjZGZmZmVkZ2NhYF5c
-WVpVVFFPS0Y/Pjk6OjU1NjY2Nzg4ODc2NTc6ODw7OTo/Pzs6PDw7Pzw+QTw/PkFA
-Qz9BQkNDR0hISEtJSElMT09QUFJSVFJUU1ZYWVZYV1pdXl5dYGFjYGFjZ2dlZmZm
-Y2VoaGhoamlqamtpa2dnZ2toZWZnZ2dlZWhramZjZmZhWE1KRkRIQDo6Q01VUktH
-RD83NjY3NjQ0NDs3NjQ2Ozg4NTQ1Njk6OTk4Ojc4NzM0NTUyOjo3NzU1NTU1NTU5
-OjY3NzY2OTY3Ojg1Nzo4OkJIQz08Oz1CQD82NjU3ODc0NDg2MjI1Njk9RUhBOjg+
-Q0RBOzo7R1JXUkdBQ05md3hxbG2AioFzdX95ZFtlY2ZgaG50fnZxamBkXGhmTT46
-O0BDQD08PDs8Ozw7Ozs7Pjk7OTs5Ojs8QDw5ODY4PUA6Nzg7Ozw9Pz9BRD89PTw5
-Ojo6Ozs8OT5BPTo6PT1BQj0+QT5APj8+QEA9Pz48Pj5BPUE9Pj1BQDtCQT9APzs8
-PTw6PUFBPz5APj88Ozs6PD08Pj5EQD4+PkA+PT9BQ0VBPT1AQ0REPz9AQT07PEE+
-Pj1APUBAQUI9Pz5AQT5AQkJOnMLO1dvg5OXn6erqR0VFQkJDREZHRkM9PT9CQkE7
-PDk7QEBDR0BCRUI9Pz08PUU9Oz09Ojw9Ozo5Oz5BOTg7Nzk4Njk4Ojc5ODg8Oz46
-OTo5ODo7Ozg2Nzc2NTY0MzM0NDY3Njg3NDIzNTk4NTY3NTc4Nzc1NTQ0NjQ3ODQ2
-MzMyLzUzMzQyNjcxNjY0NDU4MjM0NDQyMzIyMTE2OTM2NTM0MjczNDQxMzQ2ODQ5
-OjY2MDMzMzIzMjU1MzU0OT41NTQ0NTI0MzUyNTQzMzEyOzc6NTAzNTMwMzMyMzU0
-MjIyMDQ0MzEuMjIzMzIzNzg4ODY1NTM2ODU2NDQ1NDU0NDYzMzQxMTIyMzQ0NDIw
-NjQxLzIxMzIxMjM0MTAwMTQ1MzMyMzU1MzMyNjQ3NTUzNTYzNzMzNTM1NDM0NDQ1
-MjEyMjY0Nzc1NTUzNDIxNDU5NzczNDEwLjMzNjUyMzQyMTExMTM1ODc2MzM1NzMx
-MjQ1NDQzNTU0NDMxNTQzNDY1MjE0NjYzMy8xNTYzNzc1MTMyMzY2NTMxMDU1NDIx
-MzQ1MzQ0NTIzMzI0MjEyMjMxMzQ1MTM0MjIzNTU6ODQwMzU0NTYzMjAzNDE1NTc1
-ODQ1NTQ1NDM0MzUyMzMzNDU0NDU3NDY2MTM0Njg4NTYzMjQ4NDY0NTQ1Ojs9Ojs8
-OTo3Ojg6PDo6PDs3Ojc5Ozs+PTs7O0BDPT87PD06ODs5OTk4Njc3NTU3NTI2NjY2
-MzU3OTg5Ojg1NTU2NzU3OTs6ODc6OTs4Njg7SVpeVEpCQD48Ozw9PTo4ODg6QkFC
-Ozk2Nzc3OTk0NjU2Nzc6MzEyMjU0NDQ1NjY1NjQ4NzUxMTE0NzQzNzY2NjU0NTY1
-Nzo3ODg7PTs8Oz0+P0E+Oj1CQEJDRENCQERFQ0NDR0hJSU1QUE9RT1FRUVBWWldZ
-X11fXmBjYmFiYWRkZWJjZGNkYmBgXFxbWVdXUk5KRj86Ojg2NjQzNTU2ODk5Nzc4
-Njk4PDs6OjtEQT07Oz1APz0+PUBCQEFASEJDREVDQ0RFSklKSkpMS0tQT09QUFJV
-VVVXV1ZaWltfYF5dXl9eX2FlZWRmZmdnZmdpbWpqaGdpampoaWpoZ2hkZ21nZmVm
-Z25pZmZkY2FZSkZGRUM7OT5DS1JOTUQ8NTY2NjU2NjU4NzU1NDUzNTU3ODc0NDc4
-Njg9Ojo4NzYzNTU4OTY3NjQ3NTc2Nzg4Njg0NTg4NzUzMzMyNDU1NTc7Ojs/QkI9
-Ozg2NzY0MzU3Njc2NDUxNT1HTEY9OztAQUQ+ODk/SlNWTURFTGN8hoZ6cXeChHBf
-anNqWWJnaWRxeXt7cmxtZm1obGhSQkQ9PDw9PTw8PTk7Ozk5Ozw/PTw8PTs9Ojk7
-Ozw8QDw6PDs6Ozo9PT89OTo9PTo6Ojs8Oz07PUQ/PD08Pz06PDw+PTw9P0FBPDs8
-Pjw5PTw+Ozs9QD08Pjw8Oz1ARj88PDs7Ozs9PDs7PT89Ojo9Oz9BPjo7Pj5BPz09
-QD4+P0BCRD0/QkA+QEJCP0NEQ0I9QD9AQD4/Pzw/Pz87Pj9DREVFQk6Zwc7V2+Dj
-5efo6epBQkdAPUVCR0lJQzs5Ozs+PD1APjtEOjxAPUA9PEE6QD0/Pj48Ozo8Ozo8
-Ozs7ODg7Ojg6Ojg2Nzc2ODc6NzM1Njk2OjgzNjQ3ODQ0MzUzNDQ4OTY3NjU0LS8z
-MzY4NjY3NzU1NTU0MjM2NzU1NTk3ODQ1NDU1MzMzMDc3NTU0ODc1NTUzMzUzODs3
-NzczMjMyNTU1NDU0NDIxNTQxMjM1NjQ3NzY0NTIyMjMzMzQzMTYxMzUyMjIzNDU1
-NTQzMzQyMzQzNjQxNDI0MTAwMTEzMjI2MjEzMjMzNTQzMzI2NTk3OjY1Njg2NzY1
-NzMyMzQyNDQ1NDQzMDMyMTIzMzIxNDc1NTMyMDQxMTExMTEyMjIyMzM0Nzk4NDs6
-Nzc1MTM3ODUzMjM2MzE1NTc2NzQ1NDU2NDE0NDMyMzQyNDQ0NzczNjY3OjQ0NTM1
-NjYzNjU1NjMzNTMyMjM0NDc4NDQ0MjU1MjQyNTU0NDM1MjY1NDQ1MzIzNDI1NDU0
-NDMyNTM3NjQ4NTQ3OTU1Mzc3NDUzMzY1NTMwMzM2NTY0Njc3Mzc0MjIyNDIyMzMy
-MjQ1Nz03NjQxMjM0MzMzMTMzNDU2NjU1NjQzNDM0NDMzNDMyMzU4NTQzNzQ0NTQ3
-NTQ0NzcyMTIwLzE0NDg3ODY3ODs9PDo5OTg7ODc5Ozk6OjtAOz08Ozo5Ojo9Pz09
-PUA9OTg3Nzg5ODc4ODc5NzU2NDExNjY1NTU2OTo4OjY0NTU1NDU1NTU3NTg1Nzg3
-Oz9DTlNLQkA6Ojo5OTk4ODk5PEJAPzs5ODU5OjUzNDY3NjYzNjw8OzQ2NjQ0MzU0
-NTIxLzY0MzU1MzU1NDUzNDQ0NDg5NjYzMjU3Ojo7Ojw+QUBAPTs7PD4/QkFCQUFA
-QkNEQ0ZGR0hKTVBSUU9QUFFPVFhYV1lbXl9eYGNhYGNiYGJkZGVkYmZjYFtdW15b
-VVBNTEhFQzw7Ojc4NzY4Njg2OTg2Ozw4PDg4OTc7Ojo6Ojs8PT09QD09QUJDQkRB
-RENGRENERURFR0VGSkxOTE1NUE9SV1dWWFVXXFpZWVxeW1xeX2BgX2NkZmdmaWhm
-Z21raGtqbGtrZWhnY2dpZmpoZmttZ2VncWlpa2hgXllRSERBPzo6PUdNTEpHPjk0
-NTY2NTU4NTQyNjg2NzQ2NTQ1NDY5Nzc4Njc3ODc2NzY1OjY2Njg6NzU1NTM0NTY0
-NTU1NDUxNDU1NTUzNTI0NTc/REA7ODs8Ojc4OTczODc1MjU0ODo0N0FHRUE7OjxC
-R0Y/PD9CSlNXT0hNX3CFiIJ0aG55cFxZbnNjZXF0c3p8d3NpaXJwcWpnYVREPT89
-Ojw+Ozs7QT07PEA7OTw+P0A7Ojo9Oz09OjxAPDs9Ozw9Pj5AOzw/Ozg8QEA9PT46
-ODs+PUA7Oj89PDw5Oj09PD0+Oz1AOzo6Oz0+QDs9Ozs6P0A+Pjw6PEFAPD1APD5A
-PT1AQENAPUI/PDs8QUFAPD8/P0JDQUA/REE9QT5APT5BQUNBQUJAP0FCQkE+Ozw9
-QD89PT8+QkFCQ0VCREI/VqHBzdTb4OPl5+jp6kRCQkVBQj9BREpAOzo4OTs7Oj5B
-QT07ODo9OzhBPTw8Pj47OTo7PT08OTg7OTo3OTs6Njg2NTg6ODY3ODY5OTc4NjU2
-NzU1NDM2NTQzNjQ1NDc3NzY4NjM7MjU3ODYzMzc1ODc3NTQ0NDM1NjI0NzUzMjQy
-MTIzNDIyLzI1NjQ0NjU2MjEzMTU0Njg3NjU2NDY0MzUzMjIzMzIzNDUzMjQ2NzU2
-Nzk0NDQzMzM1Nzc1MzQyNTQyNDY2MjQxNTQyNDc1MjI0MzAyNTM1MzUzMjAyLzQ0
-NDMxNDIyMzUyMjIyMTI1Nzg2Nzc0Nzc1MjQ2NTQ1Mzg0MjExMTM2MzQ5ODQzMzU1
-MzAzMzQzMjU1NzcyMzQzMzMzNTY1MzU1Nzg2NTczNDQ0MzIyNDU1N0I4NTQ3NzUz
-MzIwMzMzNjMyMjQ2Njc2NTIwMjc3Mzw0NjIyMzMxMjIzMjM1Njg2MzIyMzU2NzQ1
-NDIyMDEzNTMyNDU0MzQ1NDQ0MzA0NTIzMTIwNDUzNjU0MzQzNzQ0NDQyMTM2NDU1
-MzMzNTY2NjUzNTo4NjQ2NzUzNDMxMDQ1NDY0MjM1MjMzNDQ1NTczNTY2Nzc0NjYz
-MzY1OTQ0MTAyMzc1NzU1NzY3NjUyNDY4NzY1Mzg3NDQ5NzExNzo5OTc6PDs7Ozc3
-Ojc5ODY4Ozs5Ojk5Ozs+QD4+PDk8PT89PTs4NjY3Ozg5NTQ4NTQ2NTk0MjU0Mzc2
-Njc2ODo4ODg3NjQ4NTIxNDY3NzQ2ODo6Ojo6PDw6OTg4Ozg1Nzc6RUxLRz04ODY2
-OTc2NjU1NjU2Nzk3NzY3OTczMjQ0MzQ0NjQ4NDUzNDY1MzM2Nzc1NTM2NzU0MzM1
-NTU5Oz0/QD5APjo5Ojw7Pz8/Q0NAP0FCQkNDREVGR0lHSVFPTU9OTVBPUldZWVla
-WV1aXmFgX2BgYGBhZGZmY2FeXVxZWldQT05NR0NAPD05NjY1ODg3Nzk2ODg6ODk5
-OTc4OTs6Ozo5OT4/PTs7Oz4/QUA/RUJBQUFDRkhFSERESEtLSU1QUVBSUlJTVVVZ
-WFlYWFtaWFpaWl5eYF9iZWZqZWhpZmZlaGhna2xoZmZmZ2hlZGRmZWJlZmZnZmdp
-aGhoZmNgW1NNSEI+ODY+S1BNSUI7ODk0NTQ1NzU2ODk6Ojc4NzM2NDQ4Nzs4OTc4
-ODg2NjYzMzY1NDM0Njc2NTQyNDYzNDQ1MzY2MzU2OjY0NDI1Njc2NzpAPjs2Njo7
-ODY0Njo5NTY2ODc6OTk5OD1DQUA5Oj9GSUU+Oz5IUltgXFdcaHZ8e29bVWRxaFli
-d3dyeHl0d311aGFgb3RwY1VTSUJBPT0+PT1APTs8Ojs9PTs+PT06ODo9OTk5Ojo5
-Ozo5Nzc5PD5AOzw9Ozw/PTk7Ozs8PD88NzpAPDk9PT0/Njs8Ojo7PTw8Pz48PDw+
-PTo7PDs8OzxAPj47Oj07P0BBPz0/PD4+QURDQ0JBPTs7Oj06Pz5DQD08PEFAPEBA
-PkE+Pj9CQDxBQkA/Pz4/QD4+QT09PTs7O0E/PUJDR0FDQEJEQkVWn7/M1dvg4+Tn
-6OnqQERDQkVGQEJCQUNCQD4/QD08PkFAPT48Ozw6PTc7OTs7PDs7Ozo8Ojw3NTc0
-NDc4OkA+Ozg3NjY5Ozk4OTY2NDY4NzM1OTU2PTg5Nzc2NjU2NjE1NDM0Nzo3NTQ1
-NDczNDg3Njc4NTU2Njc1OTMzNTIxNDM4MjM2NjY2NTMzNDY1NDQxMjIxMDM0ODk4
-Nzg0MjIyMTMzMTE1NzY2NTQ0NTY1ODU2NTQ1MjIzNDM2NDMzMTQzMTI1NDQ0MzE1
-ODUzMDUzMTYzNDM1NzU1MzEwMDE1MDExMTQxMTMzNDEyMDE0NDM2NzczMzc3NzI0
-Mzc0NTMyOTQ0MjEyMjY1MjY4NDY4NDUzNjQzMzU1NDQ4NTU1NDQ2NDMvMjI1Njcy
-MzQ0NDY1MzU2NTY2NDQ0NTU0NjUzNDM3NTM0NTUzMzE0MTM0NTczNDM1NTU2NDQ8
-MzM0NDczMjMzNjQ2NDQzNDU2NTUzMzU0NjQ2NDIyNDU1NTU0MzMyNTMyMDM0NDAz
-MzExMjQ0MjM0MjQ0NDc6OTc0NjM3NTU0ODMyMzc3NzQ0NTY2MzY3ODY2NTQ2NTY2
-MzIyMDQ2NDIzLzAyNjw3NjY4NTQ2NjM1NTU0NTU3NTEzNzc1NDY3NjQ2NDc4NjU3
-NzU4Njc4Nzg2NDk2OTg2NTg4ODo5ODY4NztCPTo8Pj0+Pz48Pj0/Pjo6P0E+Pjw8
-PTo4OkBDODs9Pjg5Nzk0Mzg4NTM0MjQ3NTg7ODY5ODY4OjQ0NDU2NDc3NTU3Ozw5
-OTY6NDY2ODc2NTU2OT5DR0U9OTk4Nzc3NTY3OTU0NDU3OTQyNDU4NjMyMTQ0NDY5
-NDU0NTYzNTU4NjY1ODc2Nzc2MzI0NTY1Njg6Pz48Ozk7QDxAOz09PTo8PDs9Pj5B
-RENCQkRFRUhJS0tPT05QUE9PVFhXXFpaW1teXF9fZGNhY2RjYmNiX2BdWFZUUlJN
-SkxGQEA9OjMxMzQzNDY4Nzg4Nzc5ODc5OTs6PDk6OTk6OTs7Oz09QT0/QEBCQ0NB
-QUJDQ0ZIRkdJSUlLS01PUVFRUVBUVFJXWFhZWVtbWlpbW15fYGJjY2NkZWdkZGdl
-Y2RnaGdoZ2VnZ2RkY2ViY2VmZmZmZWZnZ2RlZGNbVE1KRDk2Nj1GSUU/OTQ0NTY3
-MzI1Njg4PTg3Nzc5MjAzNjk6OTk6Njc6NTU5Njc5ODg3NDQ0NDM1NTYyMzY3NzU0
-MjQzMzY2Nzc1NzU8NjU0NTk3ODg5PDw6OzY3ODc3NDY3ODg/OjY4OkBBPjs8Oz5F
-Q0A7OEBRYWxnYFtYWmZtZ1pOVGNrYF12g3Z0dW1vdW5mYmVzdWxaS0hEQT5AREM/
-PT48PTs8Ozs5Ozw7PDw6OEA9PD47Ojo7Ozw8Oj0+PTs8Ozw8QTs8PDo7P0BBP0Q+
-Oz09PTs9Pz0+PTw9PD88Oz9CQEI/PT5APz08Ojo8Ozw8OjxCPz06PTw7ODs/Pz89
-QkA+Pjw6PTs6Pj48Ozw8Pj48Qj9APkE9QD8+QUJARD5AQEBAPz48PUE/QUE+Ozw8
-P0A7PUE/P0FDREJAQFigvsvV29/j5efn6upEQ0RDQEBAPEE/QUFBPTw9RD0+PTw9
-ODo7PDo6OTw7Nzs+Ozs8Pjs5NTg6OTc1NjU0PDo5Njg3OTY4Njo3OTo4OTk5Njk5
-OjY3NjU0NjQ3NDc1MTIxMzQ3Nzg5NjI0MjA0NjY1MzU4ODo0NDc2NjQxNTM0MjIz
-MzM3NTY1NTQwMTQ3MzMxNTMuNDU0MzQ2NDQ0Mi0vMTQ0MzM0NjU0MjMzNjUzMzM2
-NDMzMjUyODQxMC8yMTM0NTMzMjMyMTQ0NDM1MTQzMjM3NDMwMzQzMjQxMTAyMjMy
-NDIwLTIwMzIzNTQ0MzU1NjY1NDM1NTY3NjQyNTU0NDg3NzU1NDM0MjQ0NTMyMzc1
-MjEzNDM1MzU0NDQ0NTQzMjQ0NDQ0MzQzNTQ1NDY2NTU0MjMxMTA0MzI0NTU4NzU0
-NTIyMzMzMjUxMzc5NTU2NzU0NTQ2ODQ2NjU2Nzc0NTM1NTQzMzM1NDQ1NTY2MjE0
-NDU0NTY0MTUzMjAzMjUyMzQ3NzM2NTQ0MzI0MjI0NTQ1MTM0MzU2NDY0NTY2NjY2
-ODY1NTg1Njc5ODY3NTU1Njc2Nzc3NjYzMzIwMTM0MjU0NDQ0NDU0Njc2NjY1NzY1
-Mzg3NDY2NTU6ODQ0NTQ3Nzk5NzU6Ojw6ODc1NjY5NTY2NzU0NTs5NzU3ODk2ODo4
-Nzw/Pjo8PT08PTw7PD46Pjw6QD47Pjo5Ojs5RU02Oz47OUI+Nzc4NDM0NDIyMjM1
-NzY2NDQ4ODU+NDU3Njc5Njc3Nzc4Ozw6ODU1NTU2NzU3Nzg5OTs5OTk6Nzg3Ojo6
-Ojk3NzQ0Mzo2OjY2NTg4NjM1NDY4Njc1NTM0MjQyMzQzNDQ2Nzg4NTY4OTc1NDM1
-OTg5Ozc4Oj06QD09PDk6Ozw6PDk8QEBBQUBCQkRFSElMSklMTU9RUlFTV1xYWFpc
-XFpdX19gYWFgYmJhX11fWltYVVRVUEtIQ0RDPjk6ODUyMjMzNDc2Nzc2Nzc5Ozo6
-OTc4ODk7Oj08Ojw7OTs9Pzw8QUVDRENEQUNGRkdGR0hISEhMTU5TUlFQUVJUUlJX
-XFtbXV5eW1paWltdYmNkZmhnZmJkZ2NkZGVoZmZoZ2ZnY2JhY2VpZmZnaGdpaGZn
-ZWRkX1tVT0k+NzU6Q0NDQDg6NTY2ODY3NDQ4NDY5ODg2OEE2NDY1OTg2NTQ0Njc3
-Njc4NzY4OTc4NjM0NjY1MjQyMjY3NjU0MTI0NDI2NTY5NzIxNDUzMzQ3Ojo+Pj03
-NTc1NzU3NTY1NDU6Ozc3O0NFQzk8PD8+PTo2PEhdaG5rV0xJSlJQSkVETVRTTlhk
-YFxgXlpkZGJna3VwZlxSRkJAQD4/PDk6Ojs8Ozo7Ozo5OT08PTxCPDo6Oz05Ozw5
-OTk4OzxBPj06Ojo6PDs7OTc6PUA/QEI+PUE/Pzw8QkE8PTo7Ojg8Pj8/QD5BPUBC
-Qzw+PDw9PT07O0A9PTs7PDw+QD07Oz47Ozk6PD1AREBDPDw+PTxAPDw9Oz1BQUA/
-QkE+QEFDQkE9PDxBPUBCPjxBQUJAQkM8PT87PTxCQ0RCQz49WKS/zNTb4OLl6Ojr
-6T5AQUE/QEM/QkVBQT4+QkBEPzw+SD07PDk7Ojs6PDw6OTs8PT05Ojk5OTo8OTs4
-NTY5Ojk4Ojg3PDs2Njg5Nzg4Ojs5Ojk6OTc1OjU1NjY3NDs2NDM0NDI3NTU4NzY0
-MjEyNTQyMjY1ODUxNjczNDIzNDU1NDQ3NDM0NTY0MzU0NDc3NzY3OD82NTczNTM1
-Ojo1MTI0NTU0NDMwMzU2NjQ0NjU1NDMzMzc0NDIzMzUzMjIzMTAxMjE2MzIyNTg0
-MjoxNDRAODgzMi8xMDMzNzQxMTc0MjEyMDQ2LzAyMzQ2MzM1NTU2NzY0NTMzMzIw
-MTk1MjM1NDQ0MjU1NTIyMjI0NDYyNTo1MTI0NzIzMzM0NTMwMi8zNDMzMzUzMzU1
-NjM0MzY0MDA0NDM2NDE0NTMzMi80MjM0NTE0MTA1MzUyMjU0MzMzNTMzNDM1MzU4
-NDIzMzMyNTIzNDQ0NjQxMzc1NjUxNDI3NTE0MzEyMjMyNDQ0MzQ1NjY0MjM2NzUz
-NjU0Nzo4NDU2NzYzNDU3NjY0NDY0Njc2NzU1MjIzNzM0NTY1MjQ1NzY3NjQ2NDQz
-MjM0NTQ1MzQ0NTUzNTY5NTI0NjY0MjMyMjQ1Nzc3NjUzODM3NzY2ODk1MjY3ODs7
-OTg5Njg3Mzg2NTk3NDg4Oz03Njk5Nzk3OT0+PkA8PTs8Ozw5PD05OkBBPzs5OTdA
-Rjw9RkE+PDs4OTo8NjQ0NTQzNDU1NjUzNDMzNTU1Njo0Njg2NTc0NzY2NDQ1ODc3
-NjY5OTg4OTc3Njo5ODs4OTg2Nzk3ODs6Ojo7OjY1ODk3NDI0NDIyNTMyNDU1Njk4
-NzY3NDc0NTcyMjQ1NzQ1NTg3NTQ3NTI0Ojo6PDo7Ozw5Nzc7PD47Ojk6PT9AQj8+
-QENBQUFDREVHSEZISUtKTVFQVVhYWVpVVVheXFxgYWBeX2FgYF1eXFlYVFFOTklI
-QD9APzw5ODc1MjQ2NTY4OTc2NjU2ODk9PDk4ODY2OTk3Ozk6Ozo+PkBBRkM/QkRF
-REZHRkVFRERJTUxLTk5PT1FTVlRUVVVXWFpaXF5fWl5cXl1fZGNmZ2VnaGJeYmNk
-ZGdmaGdoaGVpaGdpZWdmZ2poZmVnY2NiY2JiXlZORT02NTxDRj44NTg4Ozo2NTY4
-Njo4NjMyMjc3Pzw3NTQzMTAyNDY2Njc1NTQ6ODg1Njc5ODQ3OTY3NzY0Mzc4ODk3
-OTg3OTU2NDU0NDY0MTIyMjk2OTg4NzY2MTU1MzE0NjY2Oj48ODY7P0lLRj07PDs7
-PT07RVJeY1pMRT9APzo9PD49Pj1AREVEQ0lLSk1RW2hrbGhlY1xMR0I/PUA7ODo8
-PDo5ODk5Ojo9PUFAPD4+PT1AODc7OTk6PD07QTw/PTs6PD0+PDk8PT09PDw7Pjw6
-PT89PD9AQT49Pzk7PDw9Pj47PDw8OzxBPj1BPkI9PEE/Pzs8QEE7Oz88Oz09Ozo9
-PUA8Oj1BQD47QEBAQj49PDw8QD5CQkFBQUFBQT5BQ0E8PEA+QkFAPj09Oz9AQT87
-Q0BDRERBR0VDQEBbp7/J09rg4+Tn6OrqQ0VBQkI9QUNARUVFRDw7Pj0+Ojw4Oz9B
-Qz09OTs/PUA8PD9CPEA8PDo+PD84ODg7Ojg3OTs4OD47ODg2Nzc3PDU3OTo8Ozk4
-OTo2ODc5ODg6ODUzNDQ1NjU1Njg0NDo2Nzc2NjIyMjUzNjg0MzMzNTg2NTU1NDQ0
-MTMzMzMzNTU6NzY3NTQ0NzYzMjI1NDY0MjU4NTY1NTU1MjY2NTQ1NDYzMzYyNDU1
-NzQyMDAxNTc3MzEyMjIzMDMzMzA0PTU2ODMyNjc2NTUzMzExMjEzNDIyMjIzNDQ1
-MzQvMjE1MzMxNTUzMzIxNDQyNDU0NTczNDI3Nzk2OTg4NTUxMjQzNTUzNT4zNzU2
-NTU0NjU1NzYzMTIyNTY2MzAxMzczNTIzMjIzNDQzMzU2ODMyMjE2NTY5ODU0MzI1
-NTI4NTIzNDY0MjM0NzU2NzMyNjE0MzQzMzIzNDIyNDc1NDU3NDY1NDc1MjE1NjIy
-Mi8yMi8wMzo1NDMxMjU3MjAzNTY0ODc2MjY3NjQ1Njg1NzYzNzUzMzg4OTY2NjQ0
-NTY2NjU1MzIyNzs7OzYzNTY2NDMzMTM2NjU1MzU4Nzg1NDMzMzQxMjAxMTUzMTEy
-MzY3NjQ4Nzk3PDg2NzY5Ojg4NDk5ODs7Ozk2Nzk9OjY0Nzk3Nzo6Ojo5Ojo9QkA5
-Ozw6PDo+Pjs/Pjs6Ozk6Oz4+PTk9PDtCPz03PDs7Ojg5ODg3MzQyNDQxMjIzMjg1
-MjY3Mzc5Ojw5Njc5NzU3Nzc4ODg5Ozw3OTk2Nzg6ODs8ODg5Ozs5OT46Oz0+PDw6
-Ojk7Ozk3NTU2ODc2MzU3NDIwNDU2ODYzNDc0MzE3NzUzNTM2NDU1MzU2NjUzMzM0
-Nzo6Ojo8PTo7ODk6Ojo6PDtBQD5ERUVERENBREJER0hJRklLT1BMTlFRUlRWWVha
-VlleXF9hX19fXl5fYl1XWFJTU1NOSEZDPjw6OTk2NDQ0NzY3NDEyMzY2OTY3NTo5
-NjQ3OTs7Nzk6PkA9P0A9P0A/Q0NCQkFCREZGREVISUhKSkxJTlFSVFFTU1NUVVdW
-V1ZXWFtbX19eXV9gZGZ0aWVnZWdmaGVlZGVpZWNlZWdqa2tpaGdkY2VoaGZmZ2Jk
-Y2JeVktBQDk3PEVEPTk4Nzc4Njs5NTY5OTY2Nzg3NzU2NTc4Ojk2NTQ0NTk5Njc4
-OjY4NTY4Njc0OTg1NzU3NzUzMjI1MzU2NzU2NzU4MzQ1NzQ0NTQ0MzQ2NTU2NjY0
-MjY3Njc7NTc5PD8/Ozo8Q0tIQTo3PEBFRD47Q0hIRT46PT89Pzk8Pzo4ODk8PT1A
-QD4/QENKVWBmZmZrX1FIQDw8Oz0+PDg5PT48PUA9PEE9P0E6PD48Ozg7Ozs+QT48
-Pz44PDo8Ojk8Oz86Oz5BOzs/Qz4+Pj06P0E7Ozw9Pjs9ODw8PDs6Ojk9Ozw8OT4+
-Ozs/P0FDQUE8PT1DPjs5PD07Ozw7Oj8+PkA9PDtBP0BAPj4+P0JEQ0FAQT48Pj5B
-QUA/QEA/Pzw9O0BCQ0ZCPj07PDtBQz0+Q0NISEFDRUVBQGKnwcLP2+Dj5ejo6upD
-Q0VCPjs9QEJGQz89Pjw9PDs+PDg+Pj4/QDw5Ozw5Ojs7PDw8PTs4OTk3ODg3PD49
-Ozk6ODY1Njc2ODs2ODg4Nzc8QD06OTo4Nzc5ODY3NzY4OTYzMjY1NTo3NjU1ODY1
-ODIzNTQyMTEyMjQzNjY1Njc5NTQ1NDg2MzMzMjI2NTQzMzQ2NTU0Mzc0MzIyNDU3
-MzQ2MzI0NTEzMjM2MzQ6OjU2MTM0NjExMzM1MTA0MzUzMjEyMTU0MjE4PDQ1NjM2
-NDMzMzQ0ODc0LjE0NDY0NDI1NTIzMzY1MzY3NjU4NTUxMjQ0MzM2MjEyMzQ1NDUz
-MzEwNDU4NjQzMTQyMjQ0NTE0MzIxNTc1NjUzMzQ2NTQ2NzM2OTQ1NTI1NDQzNDI1
-NDQ0MzU0NTQzNDQyNDQzNDQ0NDYzNTU0MjUzMzM3ODY1MjQyNTU2MzE0NzY0NTM0
-NDM0NDQ2NjQzMzY2MzI1NTIyNDI0NjExMzU1NjUxMjUzMTAwMTExMzE1MjQ0Mzc0
-NzY0MzQyNTQ2NTYzMzc1NDUyMzQ1NDY0Mzg3NzY3NjQ0NkE0NDQyMjY2MzQzNTQ1
-NDQ3Nzc3MzI1ODc2NTMzNTk3NDIyMzY3NTU1Nzk2NTY4OTg/OTc3Ozo2Nzs4Nzg4
-OTc3ODg4OTY3Ojk5OTk6OTs5Pz1BQD89PDo5Oj07OTo7Ozo6Pzs4OTw8PD09Pjw8
-Pj1BOzk4Ozs6ODc2NDQxMzM0MzU3NzUzMzQ2Ojw/PDw3ODk5Ozg7OTo6Oz47QD03
-ODo4Ojw6Ozw8Ozw7PTs7PT47PT09PEBAPTk5Ojg4NjY3NjY2NzgzMzg1NDQ2NDI1
-NjY0ODQ1MjI0NTM2NDUzNDY2NTk5NzU2NDQ4PTo5PDs9OTw5OTo7Pz1AQD5DREJC
-Q0RCRkdLREdIR0lKVFRSVFJTUlVXV1hcXFlaW19hXl5bWltaWVhXVlFQTE5GQj08
-PTc3NzY2ODY1NDg2Ozc1NDY3Nzo4NjY3ODQ6ODo5OTw6Qj08QT89PT9BQ0RDRENH
-RUVHRUdJSUZKSk5QTlJRU1FQU1NWVVZVVllZW1xaWVxaXGJkZWlmZWViZWVkZ2Vn
-ZmdmZWNmamlnaWhkY2ZkYmNjZGRkYmNiX15XT0Q9ODpAQTo4Njg3OjU2ODU2ODc3
-MzM1MzMyMzE0Nzk2Njc1OTYyMzc7ODo6ODU3Nzc1NTc3Ojg5Nzc1ODU1NjQ1NjY2
-NTY2NTY2Nzg3OTc3OTs5NjM0ODw7ODY3NjY1Nzc3Nzs6P0M+Ojo7QUFAOTg6RENA
-PDo6Pz48OTk7Ozk8PT07OT05OTg3Ozo6QD8+P0JETFJdaWxcST88PEA7PDs8OTs4
-Ojs8Ojo9Ozg5ODs4PT08Ojs6Oz9BPzs9Pjs7Ozo8Ojw+P0A9PDs6Ozw5PD08PTs8
-PDw+PUE8Ozs8Pjw7ODo6PDs6PD09Ojw7OT08Oj9APTs8Ozs+Pz84Ozw6OTg5PDw+
-Qz08QD1APD07PTo8P0FCQz9AQD89Pzw9Pz5AQUNCPDw/PD9CQUI+PkFAP0BBPD0+
-Q0VAPEBCQkBGaKjBzdbb4OPl5+jp60hFRj9CQD5BPTo+PkFAOzo7Oz9CQUJDOzs+
-Ojk4OTs4Nzk5Ozk7Oj46NzU2Ojo6PDo7PTk1Nzk4Nzg6OTo8OzY3ODg5Ozo7Nzg3
-OTo3OUQ6Njc5OzY4NTU2NzQ1NTY2MzMyMzQ8MzIyMzIzNDMzNDMzNTQzMjMzNTY2
-Njc1NDMyNTc2NDQ2MjMyNzQzMzI2Njc5NDU2MzY0MzMzMzQxNDc4OTkzNDQ2NDQz
-MTIyMTIzMy8yMjM2MzQ0MkJSNjM1MzY1NTQ0NTEwMTM1NTMzMzQyNTQ0NDMyMTE0
-NDQ0NTQ3NjY1NzUzMjU2ODY0NTM0MzQzMjIzMjU1NTEyMjQ3NTY3NTMyMjEyNTcw
-MjIxMTIxNjY1NjQ2NDQxMjM1MjQ0MzQ0NzU0NTY0NDExMjM0MzQzMzI1NDQ0Mzgz
-NjUwMjAvMTY1NTUzNTQ2NjI2MzIzMjIzMzMyMjQxMTI1MjQ1MzUxNTUyNTU2MTM0
-NDQ1NjIzMzc0MjQzNTIxMjMzNTQ0NTM0NDUyMDEvLTAyNTY0MjM0MjM0MjEzMzc0
-Njc0MjIyNDM6ODI0NTU0NTY5NjYzMjQ3NDQ1NTUyMTM0Nzo1NzU1NjM1Njg2NTU2
-MzM2Njw3NDIxNDc6Ojg7ODU1NzY1ODg5OTc5ODo6ODY0OTk5Ojo9PDo7PDxBREE+
-PDs6NzU2ODk5OTk9Ojc8PT88Oz4+Pz49PTw6Ojc5OTc6PDc1NTgzNDY2MzQ0NjEx
-NDY5OTk4OTs5OTs6Ozs+Pjs8Oz08OT06PTw7Ojw5OjtBPz9APUA/Pzw9PTw/PkA/
-PTo7PDo5NjQ1Nzc2NjY4NzU5NTUxMjIzNDc0MjI0NDQ0MjIyMzU0Njg9MzM0Njk6
-ODo6Ozk6PT88Ojk5PUI/Pj89QEJAQz9CQURFREhHSElKS09PUE9QUVJSVFVaWFdX
-WlhXXV1aWlhbWlhWVVNUU09KSklGQT06OTY4ODY2Njc2NTg4OTcyNTQ1Nzg3Nzo5
-Ozk4OTk5Ozw/QEA/Qj8+QUFAPj1DQ0ZFRUNFRkZHS0pKS01PT01RUlNVUVRYV1lZ
-WFdZWllaX2FfXmBiZWJkZmZjY2NnaGdnZ2ZlZmVkY2FiZGNjXl9jYWJkZWJiY2Ng
-XldOSDk2OD08Njg2NTY1NTQzNjY3MjQzNTQ1NzY0MzQ2ODI2NDQ4NDM0Njk2NjU0
-NTU0Njk4ODk5NjU2ODc0NTYzMzg2Nzc5ODQ0Nzg3OTpFODc3ODYzNTU3ODo0NDQ2
-OT07ODg5ODc9SElBOjk6QEE+OTg8PT88PTgzODk3ODU5OTk2Nzo8Ozk4Ozo8PD4+
-QkE8O0BCRFBjYk5CPTo7PkA8Ozs+Ozs4Ozo5Ojk9Ojk6Ojo6OTg5Ojs9PD46Ojo6
-Ozo5OTo6Ozo4QD88Pjs6ODg6PDw9PTo7Ozs4Ozs7Ojs5OTo6ODs6ODo8Ojw+OT0/
-PT09Ojs6OjxAPT1APjo7PT48PDs3Ojg8Oz09PDxAQEI/Pj4+PUFCQDw9PD08PDw9
-QD8+Q0I/OkBCPUBBP0JCPT07P0hBPEFGQ0FAQ0VFQ0pwqMHN1dzg4+Xo6OrpQkJC
-Qj49P0FCQj0+Ozs9PDk4PEA+Ojk6OzY6Njk0NTo3Ojc7NzU2ODk6ODo6PDs5ODo5
-NzY2Ojo3NjY4Ojo7ODc3Ojo2NjY4Ojg8PDo9QTo3Ojw2OTg4NTQ2NTQ1Njs3MzQ0
-MjI3NDMwMzQxNDY0NTQzMjMyNTg3NTc2NTc1MzMzMTQzMzU1NTc4NjY2NTQ3NDI1
-MjU1NDYyNDY2NTQ2NjQ2NDU3MjI0MTM1MjMyMzQ0NDAxNDMzOTY2RDo1MjQzNDEy
-MDQzMTM0MzQ1NDQxMTMzNTU1NDM1NDM0MzIzNjg3MzQ4NDAxMTY1MzQyNDMxNDQz
-MzEuMzEzNDExNTIzNTY1MzQ1NDY6NDQ0NTQ1NDQ0NTQyNTcyNTY1MzY0NDQyNDQ3
-NDM0NTIzNDQ6ODIwMzc1NDMzMzQ0MDMwMzUzMTE0MzU1MjU0MzM2Nj08NTU0NTI0
-MjAyMzc0MDIzNDE1NDQyMjY0NDIzNjY0NTY0MTU1NjU1NjIzMzQ0NDU2NTU1NDQy
-NTY2NTQ0MTEzMzQ1NzI0MjMyNDU0NTQ2NjM2NTQ2NTQ2NTUzNTI2MzQ1MjU2NDU0
-NDc0NDU4MjMzNzU2Nzc1NzU2NTU1NjU5ODo8Ojg5NTk3Ojw3ODk4Ozg1Njg4Ozw7
-Ozk4PD08Ojo4ODc2NTg5OTY7ODo8Ojg5Nzc4ODc5Ojs5PTw/QT48Ozk6OTk9P0E9
-PDs5Ojs4OTg5OTY1NjQzMzQ0NTQzNTEzODU3NDY3OTo5Ojs7Oz09Pz06Ozw9PT49
-PD0+Ozo5Ozs7Pz49QD47Pz09P0FDPjw/PUE8Ozo4Njc5OTc6Nzg2NTI1NjY2NDc1
-NTMyMDM0NTY1MzIyNDQ0NjY1NjY0OT07Oj47PTc1OD49Oz49QEA9Pz4+Pj5BQEND
-REJEREZFRklOT05MTVBNT1FSU1RWVldZV1RVVlhYXFhZWFVRUE1LS0dEQkJAPD47
-OTQyMzQ1NjU0Nzc0MTc2NTg4ODc1NDY5Ozk6Pjk8QDw9PkA/QENBQUBDQkRGS0VI
-R0VDRUZKSElLSklOTlFSUlRTUlFVVlRaV1hZW11dXlxeYGBfYGRkaWdjYmNkY2Jj
-Y2VlY2NkZWJiZWBWYGBhY2NgYmVkYl5cWU5BNjc6ODU1NDUzMjQ5OjgyMzEyNDUz
-NTY1OTo3NDU3NzY4OTY4ODY4NzQ3Nzc1NTc3NTExMjM1Njc2NTQ0NTU1NzU2NzQ1
-NDQ1NTg3NzY4ODY+NDc1NDY3NjU0NDc4ODc0Mzg4NkBLTUY7Njg6QUE7Ozk6Ozg2
-NTg4OTk4ODg5Njg3ODg6Ozs9Ozw9Pjw6Ojs8PT5CRk1TRz9APD0+Ojw5Nzc6PTs7
-Oz09OTs7Pjw8OTc5OTk5OkE8OTs8OjY5OT88Ojw3Ojs6PD88PTw7OTg5ODo+Ojg5
-OTw5Pjo5Nzo9PTs7PTo6OT1AQDo8Ojo5Ozk4Ojw/PkFBPj0+Q0E9QD07PDc7Ozs9
-PkE8OTxAPDxAQkJARD89OTs+PDw8PDw+Pjw9PT8+Pzw9Oz5AQjxBPz1FR0ZCQEA8
-P0BAQUNDRHirwczU2t/i5Obo6ulAQEVEQEFBQUM7PT0+Pjs6PTw9PDw8Oz86ODs4
-Oj09Nzg3Nzc6PTw5Nzg2Njc3PDs2Njc3Nzg4PDg3NzY3NTc5ODY3Ojg3OTc2ODw5
-Ozw8NzY1ODQ2Ojg4OjY2NDM0Njg5NDQ1NzY0NDMzMjM3NDM1NTQxMzI0NjU4NzQ2
-NTc0NTc3ODk5NTg3NzY1NzQ0MzQzMzMzMzMzODY0NzY5OTc3MzU3OTc1MzI1NDk1
-NDQ1Nz83NDU1NzdLODIzNTIyMjIzNTQyMjEzMjQ4MzIxMjMyMjEyMjI0MjQwMjIz
-NjY0NTUzMzMyNTM3NTU0MzM0MTI0NTQyMzMxMjQyMy8wMTIzNDM1MzMyMzIzNTQ1
-PDMzMjQzNjY0MzQ1ODUxMjU0MjExMzAyNDQ0MDI0Pjs1MTEzNDQyMjExMTMzMTM0
-NDI0MzQ3ODQ0NDQ3NDM1OkE0NTMxMTM0NjM2NjY3NzMyNTQ2NjY1NjYzNDMyNjk2
-NjQzNDczNjY2OTg2Nzc4NjQ4NTQ5MzM1OjY0NDU0NTMyNzo5NTQyNTIzODUzNjU2
-NDY1NTQ2NDE0NDMxMzM0NDc0NTY0MzM1NTI1NTQzMjg0MzM1NTc2NDI1NDc4NTY5
-OTc5ODk5Ojo5Ojg3Ojw6OTk5ODk8Ozs6Ojo9Ozs4NDs6Njo7OTg4ODg4Nzg3Nzk4
-Ojs5Oj07Nzk/PTs8QTs7Ojw7PDw8Ojk5NTU7OjY0Nzg2NTMzMjQ0NDQ1NzQzMDAw
-MTM3NjY1NTc3Nzc5OTg5Ojw9O0FBQDs8Ojo7Pjs7Ozo6PkA7Pzw6Oz4+PD09PTxA
-Qjs5ODo4NjY2NjQ1OTUyMzY0MzU2NDQ0NDY7MzAzNjc0ODUzMzQzNjI3ODs4OTs2
-ODo4Pjk8PTo/PTk8Pzs9P0JBQD8/QEBGRENFRUVFSUhRTUtOTUpNTk9RU1dSU1FS
-VFJTVFZVVFNXUk1MTElJR0RBREQ/Ozg2NDQ2MzM0MjU0NjU4NTU3ODY0NTk6Njo1
-NTg/Ozo6Ojw7PUA/QUFBQEJCREZERkRGR0VERkpJSklKSU1QT0xQUlJQUFNUVFdX
-VlhaWVxfX1xbXV5eX19gX2JgX19iYmJgXmBgYmRiXl5jYltbWl1dX19eXV9cW1lS
-S0E8OTk1Nzg4OTY2Nzc3NjY0MzIzNzo7NzQ1Njg1OTc6OTY2NjU1ODc2NTQ0NTc6
-ODQ0NDU0NDMzNDY5NjY3ODk3NzY4NjE0NzM6NTU1OTY4ODg4NTMzNTY3OTg4ODY1
-ODo5NTY4OUFKSUA4OTs9QTg3Nzk4Nzk1MzY1NjY3OT48Ozk3Nzk3Njc7PT08Ojw7
-Ojs8P0E/QkVDPzxBQD07Ozk9PDk6Ojk5Ojk9PTw5OTc4Nzc3OTo+OTs4Oz08PDc6
-Ojk7QD89Ojk6PDw7PDo7Ozw7PTw8Ozs5OTk5Ozk4Ozw8Ozw8PDs7PDs6PD46Ojc5
-Ojk5PD8+PTw7QEJCQj47Ozw7Ozs9Nzs3Ojo/Ojs+QDxAPkFBPkBCPTw/Pzw/QD4/
-Pz8+PD07PEBAPT9BPkE7QWNGQT1BP0A/QUNBREVGbqvAy9XZ3+Lk5+fq6T9BQT89
-QkY/Qz05PD1APUE/PTxARD07PD0+PTk9PDtAOjg8OTs6PTg3Nzg3Ojw6OTs5OTg5
-NjY6NzUzNzs4Ojo5Njg0NTU3NzU0NzY2Nzk7OTk2ODk6OTo4ODk3NDc0NTM1OTg3
-NDw5OjQyMzg2NjQ1NTQ0NTM4NTU2NTU0ODc1Njc3NzY1NTc0NTk1Nzc0NDU2NTQy
-NjY3OTY2NjU2MzQ3NjU3ODU4NjQ0NTY2NTM5Qjg6NTY1OkA1ODY3NTIyNjU2NTUz
-NTQ1NDQyNC4xMzI0NTM0NDIyNTMyMzMzMDQ1MzIzMjI0MjE0NDMxMjI0NDEvLzEy
-MTIyMjM0LzE0MzYzNDQ0NTM1NDQzMzM9NDQzMzI0NTQyMTMxMDMyMjM0MzUyNDU0
-MzQxMjEyMjE0NzIwMjA0MzUyMjE1NTQ0NTY1NDQ2NTY3NDQ2NzI0MzM1NjQyMjIw
-MjQ1NDMyNC8xNDUzMzg0NTUzMzUzNDY1MzY0NzUzNDU2NTQ3NTg3MDQyNTUwNDY0
-NjY1NTU3NDU2NzYwODc2NTg2NjQyMzU0Mzc2NTQxMTMyNDY1MjExMzU0MzIzNDI0
-Njc2MjIzMTI1MzI0MzM0NDU4Ojk6NzY0NTc6Nzo4NzU1NzY4Ojk7Ojs6Ozs6PDs7
-OTk3Nzg/OTk5Ozg4Ojg3Njg4Njc4Nzk8PDs6ODc4Ojs6Nz09Ojs7PD88Ozs1ODs2
-NzU1Njg0NDg3NzQ0MjQyNDMzNjY2MjI0NjM0NDU1NjU3ODc5ODU3OTo9PkFCPz48
-Ojo5OTo5OT09PT06Ojw/Ozs7Pzs9PT89PDs5ODg3NDQ0NzUyMTM0Mzg2NzYzNjY1
-Njk2MzM0NjQ0ODU0NDIzODY4NjU0OTo5Nzg7Pzw5PD05PDs7PDs+Pj89QEBBQkRD
-QkJERkVERkpISUtKTE1KTE9QVFZVWlpYVVVSUlFQT09PS0xJRkVISEhIQT86OTY0
-NDI0MjEyNjY2Njc4NjU3NzU3NzY5ODs5Ozo4Ojs9Ozw9Pz48P0BCQUFCRERERUND
-Q0NHRkdLSk1PUFBRV1BSUVJQU1ZWVFZXV1dXWFxcWlpcX1xcXVxdWlldYGBgYF1c
-W1haW15bVlxhXllaWFlaWlxbV1dUVFJKQz85ODo3NjU4OTo4Njc6NzY5Nzc4ODU4
-NzU1ODo5NjM2Ojg0ODY2NDU1NjY2NTU1NTQ1Nzc2NTQyNzY1NjY0NDU3ODUyMzIz
-NDY1NTU1MzM1OjY2MzQ9NzQ2NDY5Ojk5OTY2ODo4OkJHRT43ODw7PDw2NjU5OTc5
-NTM2ODc5ODs7OTk5Njk4Ozs7Nzc4ODw7ODs9PD49PTw6PTY5OTg4PTo7Ojw7Ozw6
-ODo+Ozk3Nzo3NjY5Ojk6OTs7OTw7PD48Ojg4Oz09PD08PTo6Ojo7PDs9PDs6PDo6
-PDY4PD08PD07Ojs6Ozw4OTo6PTs7PT07P0FBPTs7PUM+PkBBPTs8PT9BOj0+Ozo6
-Ojo5Ojo9PDw9PTxEQT8+PDw+PUFAQUFBREFCPDo/PT5CPjo8QD49Pzw7PT88QEBB
-QEFBQEdxrsDL09ne4uTm5ujqRUhFPkBBPTw7PkE8Pz5APz5EQT4/QDw4ODs9Ozo4
-Njk5ODc3Ojk1Nzk4NDk3OTc3OTY9Njc5Nzk6ODc4NDY2Nzg5NzY0NzY2NzU0NjY3
-ODw8OTc2ODg2NTY2NjU1NDE0NjU2NDY0Nzc2Njk7NTQzMzI2ODczMjIxMjQ1NjM0
-NTM0NDY3OjU1NTg5NTQ0Njg6OTo6OjY5Nzc5NjU0NTc2ODc3NjY3Ojg5OTk6Ojg3
-NzY4Ozs3Nzk4NzY3ODc4ODk5Nzg+OTc6Nzk6Nzc2NTg0MzIyNjQ0MjQ4MzY1NzUz
-NDM0MzIwMzIwMjAyMTc3NTUzMzIwNDE2MzEvMDAyMzQ2OTU0NDM0MzA3NjMzMTE0
-NDYyMTExMzIzMzA0NjgyMDEyMjQ1MjMyMzM0NDEwMjU3NDUzNjMzMzM1MjM1NDc2
-MjU0Njc3NTIzNDg4MzY2NjQ0NDIvMDAxMjEzMTA1Njc2ODw5Qjk0NDMxMzY1MzQ2
-Njc2NjY1MzMyMzIzMzQ2NTY2NzQ2Nzg8NjQ1NzY1NDc3OTk3NTU3NzQ2NjU1MzMu
-NDMzMzIzNDQyNzY0NDY1NTYyMjU1NjY0NDU1NDQzMzM0NjQxMTU1MjY4NzY4OTY4
-Njc5Ozw4ODg5ODg4ODk6ODk8OTs7Ozs8OTs/Pzw6ODg7Ojg6Ojs5Njc3Nzg6Ozo8
-OTk6PDs6Ojw9Ojk3ODs5Ojk4ODk6Oj09OTY3Njk5Nzc2NjU1NTMxMC80Nzg0NjU3
-NDIzNTU1NTQ2Njc6Ojg7OzxAQUNBOz47Ozs8Ozw8Ozw8PDxAPT88Pz49PDw7Ozs7
-Ojo4Nzc0NDc0NDc2MjQ2ODc5NjkzNjQ1My8xMjIzNDI0MzQ0NDMyNTY0NDU1Njo3
-ODs5Ozo3OT89Ozs8Oj09PT09PT08QT4/QEJDQUNHR0pLSUlMSktOUE9RUlFSUFFU
-U1FPTU9QUU9QS0hKSUhFQkI9PDg0NDU2Nzg1MjI4NDY2NTg1NTY4ODc1Nzk4ODg5
-ODs3Nzk+Pzw+Ozw7PEBBREJBQkZHRUNCRERGSUhJTFBOUk9ST1BSU1RSUVhVVFRX
-U1VWWFZVVldWXVlYV1paWlpcXlpdVlVZWFdUU1dXU1VTVFFRUVFOTlFOUE5OTEdD
-QEA7OjtAPDc4OD03ODo4OTk3NDU2NTg6Njc4OTk3MzM1NTk4OjU0NTU4Njc3NjQ0
-NTY2NDU0NTU3NDM2NTc2NTQyMjIzNjc1NDU2ODU1NDY0NDc1Nzc0NDQ0NjIzNzY1
-NDU5Nzc5PD4/Ozg5OTo4NzY4ODk2Ozo3OTs8OTg4Nzo6ODc5Ojw5Ojs6Ozo4Ozs+
-QDs7Ojo7OTU7PDw9PDw5Nzk/Ozo7PDs8PTg8O0FBQD85PTg5PD44Njc6Ozs7Ozw4
-Nz9BOzo5QTo7Pjs8Nzw7PDw8Oz89Pjo2Nzk6OTo7Ozw8OTg4Njk7Ozw7Oj47PEA6
-OTo6OTk7PT09Pjs8PDw8Ojs8PEA/PD1APTo+PEBAPj9APT88PDs9PT4+PT1APz5B
-Q0JDQT8/Q0VAOjk7Pj09PUZDPzw/QT1AQ0A/Q2SowMzV2t/h5efn6epAPj49PD89
-PTo+PUNAQkE/QkI/QDs9P0A8Ojk7ODs8OTg8Ozg6OTc2ODo6OTs1NjY0OTo6Nzg5
-Nzc6ODc2NDY1Nzg5Ozo3NTQ3ODk1NzU0Ojo5ODc3ODo3NTQ0ODcyNTQ3NTc0NDUz
-Nzg4Nzc1NTc3NTU1NDQ0NTYzNTU2OjQ2NzUzMzU2Nzc2Nzk8PDs+Ozo6Ojo5OTw3
-ODo5Ojg6ODg5PT06OTw8Ozc2ODg5OTk3PD05Njc2Nzg4OTY2Ojg4ODg3ODc4ODk5
-ODk2OTc4NjU5NjIxMzI1MzU5NTY0NTQ0NDIzMjQzLzEzMTMyOkI1NTUzMTAyMzg0
-MjQ0MjI0MzEzMzU4NTIzNTQ1NDIyMjMzMzUzMTM3MzMzNjIxNDYwMy80Nzc1NDMy
-MzMxMTM4NjU2NTczMzM0NTMzMjIzNTg0Nzc5PjU2NDg2NjY1MTIyMTExMjQzMjUz
-MzI2NTk1NjY3MzMyNDI2NjMyMzg0NDYzNDQyMTMzMzYzMzY0MzI2NjQ2NDI0NDY1
-Njc2NTU3NDMvMjI1NTQ1MzM1Nzg2NjY0NTQyNjQzNTk2NDc3ODc2NDQ2Njg2Njc1
-NzUyMzMzMzU3NzY1NjY4ODY0OTk5OTc2Nzg6Ojk4ODo4NzY0Njg5OTg5Ojk5ODY2
-Njg4Nzs4OTs8OTk8QTw5NjY4ODY2Ojo5OTg6P0E+PDo2Njo9OjY2NjM1Nzg4Ojo7
-Ojo4ODc7NjQ2Nzk1NjQ0OTU1NzU0NjQxNDYyMzY1Nzg1NTc3PDo9Ozg8PT09OTk5
-Ojs+Pj49PDw9Pz49PkFDPz06OT89Pj09PTo5Nzg1MTM2NTQyNDU7OzU3PDU0MS8x
-MjEwLzAzMjE0NTExMTIyMjU1NDM3ODo5OTs6PD0+Ojw7Oz08Ozw8Ojs8Pz88PUBC
-QUVFREVHR0ZFR0pLSk5OT0xPTUxPT1JTUlBPTk9OTU1KSEZCQ0NDPzs5ODY3NjU2
-NjU4NTM1Nzc5NzY1ODk2NjU1MjY1Nzk4NjU6Oj48Oz09PD4/QUU+PkFAQURIRURI
-RUdJSEtIT0xOT09MTU5PVFRWU1RSUlRSUlRTVFNSUFBQU1JUVFlUUlBUT1ZVTk5P
-S0tOS0tIR0JFTE5LTEZER0dIR0ZGRkM8Pjw8PDw7PTk7Oz05OTk4PDs5Nzg5Ozs4
-Njk5NjU2NDMzNTc0Nzg3NTY0Njg4ODc4OTU1NjQ0Njc4NjY1NjY1NTQxNjY3NTY3
-NjQ1OTc1MTQ5NjUzNTQ0MjYyNDY0NTUzOTc6ODY6OTo5ODg3ODo6Ozc5Njk7Nzk7
-Ozs7Ozw5ODo4ODg5ODs5Ojw9PTw5PkA7PDw7PTo7OTg6ODg5PDpAOzw6OTw8Ozs8
-ODo/Pjs6OTo6PT07Ojo3ODk6Ozk9P0BCPjs8Ozc5Oj4/OTk7OjtBQTtAPDg2OTg5
-Ojo7Ozs7OTg5PDs7Ojo8OTo5Ozo7Ojs+PDk5PTw7Oj4+Pz48Ojk6Ozs+QD07Oz88
-PDw+PT0/Pz09PUA+QEE7PD49QkFAPj88PUA9QT9APDw/PD5DSEREQkNCRENBQEJA
-QENAUKLBzNTa3+Pl5+jp6kRDPUJBPj87PjpBQ0A+QEREPz5DQj8+OTc4OTw9Pzg3
-OTU5PDg4Nzg4OUA7ODg2Nzk2NDc5NzMzNjg3Ojo0NTY5Njg2NjQ4OTk5NzY2Njc3
-OTc5OTY5NzQ4NTk5OTQ1NTYyNDY2MzI4NzU3Nzc3NTY2NTI1ODQzNTY2NDU0NjU2
-NTE3NDY6Nzc2ODY6PDw8Ojs6Ozs3OTc1NTc2Nzg4Ojc6OTo7OTs1NDQ4NzY2NTY5
-ODg3NTQzNDY0NjY2NjY1Nzg4Njk3OTo5ODg3NzU4OTY4NjczNDQ0NDEyNDMyNDE1
-MzA0MzI0NDIyNTUzNTQzMzQyMDI1NTU1MjQyMjIxNDM0NDU1MTM2MzU1MTEyMjM1
-NTQ0NDEyMjQ1NDM1MjQzNDM7ODMzMTMyMTYzMzQzNDM4Nzg2NTY1MjM0OTg5N0k4
-NDM4NDg1MzMzMzc1MTI0MzIzODg1Njc1MjUyMjM0MjEzMjQ2MzI0ODY1NDEzNTQz
-MjEyMjAvMjQ2NDQ3ODc2MjQ1NjY1MjM1NzQzODMyMTQyNTMzMzMzMzU2NTY2NTQy
-MzQ2Nzc2ODg0Njg4NDQ2MTQ2NTQyNjYzMjIzNjY2MzQzNDU0Nzo6NzU2Nzc4ODg4
-OTk2OTc3OTk4NDY4NzY4Ojk5OTk4ODY3Nzc4Ojc5OkE8OTo9PDo6OjY5Ojw5ODk6
-ODk7Ozs9Ojs9PDk4NzU0NTc6Njc4NDQ4OTc2OTY2ODU2NzY5Ojg4ODg4NzU1NTY3
-MzQ0MzU2NjU0NTU5PTc5Nzg4OTo6OT08PDw7PTo7Ozw6Ozw/P0A9PT06PDw+PT07
-OTg3NzQzNTQ0MzQ1NjU0MzI2NjQyMzMyNjU0NjQ1NzEyNDUyMTQzNDY1ODc6PTs5
-ODk3Ozk7PUE5NzlAPDw6Ozw8OENAP0NDQ0JBQkRFR0dIQ0dLS0xMTExMTE1QTU1O
-UU1MS0lNS0pFRUJCQUA9Ozs7OTY1NjM3NTY2NjU0NzQzNTc4Mjk3NjM1ODY1Ojc5
-Ojc6OTo7PDk7QEBEQkBBREJDRUVDSENER0lKSkdLTEtJSE5PTk1QT1BTUE9PUU1N
-TE9NTk9OTU1QTU1OTUpJTUxHSkxMSkdHRkZERkNGR0ZHRUVEQ0VIRURBRUJBQEBD
-Pj5APT0/P0A/Pzw7Oj49Pjw6OTk6ODg4NjU3Nzk5ODQ0MzEzMzM2PUM5ODo9PDs7
-Nzc2Ojs4NjQ2NzQ0NjM1NDY6ODc0NTIyMzg3ODQ0MzQ2Njg1NTU1NTQ0NjQyNDY6
-Njc7PD07OTo4NTY3NjY6OTc5OTo7Nzc0NTc1OTs8Nzg4OTc6ODc5Oz89QDw6Ozs5
-Ozo7OTk7Ojw7OTk8PT49PDw9ODg+PDo4PTo4ODs7PDs9Pjg5OTw6OTY8Pzs6Pj08
-Ojs7OUE/PDtAPDw5PT49PTs7PTs7Ozs6OTs7ODs7PDo3Ozo9PTw8Ojs7Ojg4ODs6
-OkNDOzo9OzxBPz4/PTk7Oz09Oz06PTw7PD87PT48Ojs7Pj09PUJCQEI/QEFGQT4/
-Pz4+PT88PD1AQ0VEQ0FAQ0FDPz9AREZDRUJTocLM1Nvg4uXm6OnqRkVDQEE/OTw9
-QEBCQ0NCOz08OkQ9OTc5Ojo6PDk4NzY3Ojk4Njo6Nzg+OTo5Ojc5NzU4Ojs7Ozo5
-OTg4OTU3NTg4ODc6Nzc6OzY2NDc1NzY2NzU1NTc1NDY1NDc0NjUzNDU1NjU0NjY2
-NDI0NTUzNjU0NDQyNDY2ODYzODU1NjY5Nzg2NjU6ODk8PDw8OTg5Nzs9Ozk5Ojk5
-NDY4Nzw8NTY2OTY1Nzk7NzY4PTY3OTo3Nzk2NzUzMjc1NTUzNjc5NTg2Nzg5ODg4
-ODU3Njc2Nzk0MzIzNDU0OT41NDMzMzQ0Njk0NTAyMjY6NzU3NDU1MjU2NTMzNDY1
-MTE0ODQzNjc3Nzk4NzY1MjM0NDI0PDU1NTEwMjQyMjIxNzQ1NjU0NDU0MTAwMDEy
-MzU0MjMxNDY4NjI0MzQ5NzU1NTQzODY0NTM0NDQ1NTY2NTU0MzQ2MTAxMzI0NTU1
-MzQvMjMyNzUzMzQ1MzI0Nzc3OTY1NDAzNzM1NDQ3NTc1Nzg7QENDOjk5NjU0NDQ0
-NTM0NDI1NTgxMTEzMjI0MzQ0Njg1NDUzNzQ1OTY1Nzg3NTY2NDM0MjMzMjEzMzMz
-NDg3NjY1MzQ4NTU2NzY5ODc2NjY2NDU2ODk5ODc2Nzk4ODc3Nzc4OTg5Nzk5ODk6
-Ozk4OTg6OTo7ODg6Ozo6Ozo5NjY4ODg6PDk2OTo7OTg4NjU6NTY4Njg7Ojc4Ojg6
-OTg2Njo4ODg6OjY4OTo8Ojw7ODY1NTY0MzY2NTM3Ozc1NTk4Ojg5Njo4Ozs6Oj09
-Pz08PDs6Ojg5ODs9Ojw8PUA9Pj0/PDc5Ojs1NjUzMzMyMjM2NDQ0OTk2MzU1Mzg3
-NTM0MjIxMzUzLzExNTY6NTU1Nzs2OTg1Ojk4OTo5OTk3Njk8Oz1AQj9BREZBQEJA
-QD5BRUZGRUNEQkhJSUhLSklISUlJSktLSUlKS0pKR0ZEQ0E9PT0+Ojg2NzY1NTY2
-NTY0NDU2Nzg3NzY5ODY2Nzg4ODk6ODg5PTk6PD09PjxAQUJBQUNBQ0BERENFRkdH
-RUVGRkhISEdITEtMTE1LSUhHSktISlRISEhJSkhISkpKS0hGRkVDRERCRkpGR0NC
-Q0JFQ0NGQEBBQUFFRUdAQUE8QEA7PT5AQTs+PD47PkE/QEE+QEJAPjs6Pjw7PTw3
-ODw5Nzc6NjU3Nzc1MzM3Nzc3NjU4OUA6OTY3NzU3ODY4NjYzNzg2OTk5OTg0MzQ4
-Njg1ODU0MjMzMzM1Njk1NDYyMzM1NTg2Nz88Nzg5NjU0NzU2NDU2Nzo5OTk3OTY4
-ODs6Ojs6OTc5Ozs5Nzk7Ozs7Ojo5Ojo7PDo4PEA9Ozw7Pzo8PT4+PT46Ojk4Njw7
-Ojs8OTtAQDo4Oz48Pjs9Ozw9PDs8Ozk5Ojw9PD05Ojs5OTg7Ojo6O0E8PUBAPj4+
-PDw8QT9BOjg4PT87Nzw4PDo5Oj09PDk9QkE/PT0+Ozw5Pjw7PDc4PTs5O0E8Qz48
-Pj08Pjw8PjpCREM8Ojo6Oj8+PTw+QD1CQERRUktCQj89RElEQj5EREJEQURCQUZK
-SVagwc3W2t/i5efo6epAQkdIR0FAPkFDQkFAPjs5Oj09Ojo5ODk6PD03OTw8QTo4
-Nzc6OTk4OTo5Nzk5ODY3ODg3Ozc7OT07ODQ1NDU1Njc1NDc0MzQ2OzU1Nzk2Nzg3
-ODs3NjU2NTU2NjY3NzY0Njg2NTc2OTMyMzU0MjM0NTIyNjU1NDU1NTMyNTY0NzQ2
-NDY5Nzc7Ozs8Ozo8OjY3NDg4Nzk4NzU1NjY3Nzc3NTQ2Njc2Ojg3NTY1NTg0NjM0
-Nzg1NjY1MjM0MzY4OTk1NDY0Njo6NzU0NjYzMzQzNDY3MjMyNDQ1MjM1NDIwMjEz
-NjY8NDIyNDY1MjMzMzYyMzM0MzI0NDQzMzU2NjcyNjQ1NjY2NzczNDc1MjM1Nzc2
-NDMyMjM0MzM0NDY2NjY0MzQzNDUyNTM0MzQyMjc1MjgzNDYyNDk3MjQzMDEwMTIz
-MjQzNzo0NDQyMzI5NDMyNTExMjM0Njc0NjczMDMzNjU1NTE1MzExNzU4ODc3OzM3
-NzQ0MTI0NDU4NjU4ODk3NTc3Ojg4NjU3OTQyNDY2NTEwMTAwMjQzNDc1Njc4ODg2
-NDY2Nzg4ODU0NDQ3MjQyMjI1NDQ0NDU0NTY4NjQ0MzQzNDY1ODk3NjU2NTY4NjY4
-OTw6Njc5Nzc2Nzc3ODs4Njg2NTY3OTo4NzU2ODc3NzY4ODs8Ojk3Nzk4OTg6Ojg7
-OTk5OTo4Njo4Ozg6ODQ3Nzk5ODg3OTk5OjU3OTk3Nzg4Ojk7PDs7Ozg6NTc2NjY1
-NjU1MTc0NzU2ODs8Ozg3OTk6OTs4OTk7PjxBQj04OTo8Pzo9Pj9BPzw9QDw9Ozo6
-Ozs1NTg2MzU1NDMwNDMyMjI0NTU4NzI0NjU0NjUzNTMzMTIzNzY5NTY5Ozo3ODY3
-OTo4ODY2OTk3Nzc7Pj5CQEBBPz5APj8/QUBCQEJCRUhGREZFSEhJS0lGR0VIR0lK
-SktIRkZEQkRDQUA+Ozs4NzY2NTUyNzc2NDUzNzU2NzM0NDU1Nzg6ODg7PDk7Ozw5
-OTw5Oj8+QUBBPUFCQ0BAQEJDQkBBREVDRUFBR0RCRkdFSUhHR0dIRERDREJGR0ZE
-RklHQkJCQ0NFREdEQEBCQUJDQkNDQUJCREBCQ0FDREA+QEVBQEA9REQ/PD88PT0/
-Pj07PUFBQD4+PD89PEJCPjo6PD09Pjs6OjxBQDw7PDg5Ojk3NTYzMzY1Njg5OTg3
-NjY0Ojk2NDQzODg3NTg3Mzk2NTQ3Ozc3NjY3NzU1MzU0NTY0NTI0NzYyMjQ2MjU3
-ODk2Njc4NjQ0NTU1NDY2Njc1NjY2Ozo8PDk6Ojg6Nzo6OTs+PDo9Pz8+Oz48Ojo4
-OkFBPTs7Ojk8OTo5Ozo7Oj06Ojs5Ojo9Pz49Pjw7Ozw8Oz07PD5CPTw7QDs6Ojk2
-NTw7Ozk5OTk6Ozk5PD0+PT4+PD08OTg8PT9BQEA/Ojk5Qjw/PDw7PD47OTc2OTtA
-QENCPTo+PT09Ozk6PEA8PD47PDw7Ojo9QDw9Oz0+QUFDRT9BPDw6Ojs9QkFCQFFP
-VVVFRkJBQEBGQ0E/P0JCREJEQ0RDRUZFWJvBzNXb3+Ll5+jp6j5DRUJEREQ6Oz08
-PkBDQkE+PTw7NDk4Njg6Ozo/PkFDPjg0NDg2NDg3ODk6Ozs4OTk3ODk7OT06OTk5
-Ojs7Ozg4NDg3Njo3NDU1NzM1NDU1NDU2Ojo3Njc2NDI2Nzo2NjQ1NDc1NjQ1NTY1
-NjM0MjQ1NzU0MjQ2NTQ0OTY0NDY2NTc2OTg5NzY3Ojo8Ozg6OTU4ODg3ODg4NjIz
-NDU0ODY2NTQ0NjU5ODc1NDQ2NTk3ODQ1Nzk6ODQ2NjY3OTk4Nzc2MjY7ODc4ODk3
-NTY0MzczNjMxMjIzMzI0MjY2NTQxMS8zNTc5ODc0MzI1MzExMjAzMjM0NTY2NTQ1
-MzU2NTc1Njc1NDQyMTQ0MzQzNTU1NTY2NTM0NDMzNTQ2NDc1My8yMjI1MzM1ODU2
-MzExN0MyMTQ0NTYzMTY1MjU2MjQwNDIxMjEzMzUzMzQxMzY9NDMyMzQyMzQ3PDY2
-MzY1NDc6NzU0NjUyMDQ1NTY4NTY3ODg4ODc3NDc3OTc1NTc3Nzc0NTc1MzQ1NjMz
-MzMyNDQyMjUzMTM0NTMxNDM0NTk4OT06NzQzMjU0MzI0NTc3MzIwMTMzNTU1NTY1
-NjY0MzM0NTU0Njc2Ojw5NDI3ODk4OTc6OTc2ODY4OTY0NDU3ODg0ODg5Ojk5Ojs8
-Ozo4Ojc4Ojk5Ozo6ODc3Nzc8NTc3Nzk4PDo6Ojw9OTs5Ozw2ODk5ODo7Ozs8Nzc4
-OTY3Oz46OTo4ODg4NzU3Ojs6NTY0MjEzODU3MjUzNDQ2ODc7Nzs2Njc6Njc3ODk7
-OzhAOTo9Ozk7Oz5DQD08PDo9PDo/QT46Ozs5NTc4NjU1MjIyNTY3NjUyMDM0MzEx
-MzQzNDc1MzY6PTU0MzU0Nzk3OTo5Nzc7ODg4OTc6OTY5Ozg6Pzw7Pj5APTw8Pz89
-QUBAQEJFR0VFRERFRUhIR0ZIR0ZIR0lJSURDRERCQEJAOzw9PDg4MjY1NDc3NzUz
-MjI0MzMzNDIzOTg4OTY4ODk4Ojg4Ojo5OTo6Ojk6PD9AQUFDQ0FCQUNAPz4/QUFD
-QkJAQENDQUFBQUNDREBAQEVDPkBFQ0FBRUM9Pz4/P0BEQ0NGRUJCQkBFQ0JCPTw+
-QkBAQUBDQEBDQkA8P0A9QEI/QD49QDs6QUI/Pz49QD4/Pz86OzxCQT4/PTw/Qjw8
-PT8+PT5APj06PDs9OTU1Njg5NzY2NzU6NjI0ODo4NjQ2NzU1MzU2NjY1NDMyMzQ1
-NDMzMjMzMzU1NjU0MzQ0NDU1NTY0MzQ4OTs6ODc6ODg2Njc2OTY4NzY4OjY3ODk4
-OTo6PTo8OTo7Oz0+Ozs8ODo9Ozo5ODk8Ozo5Ojw6Ozs7Ojk5Pj07Ojo5Ozw6PEBA
-QTw6PTw8Ozw9Oz5AQD89Pzs7Ojk5OTxBPDw7Ozc8Oj1AOjw7Ojo8PD49PTs8Pz1A
-QEA9QD1BPDg6PEA8OTo7Ozs9Ozo4Oz08PT4+QUA9Ojk6Oj9FQj47Ozo5Ojs6Ojg7
-O0A+PEBDQD0+QD05PUBAQUA8PTs9Pz47Nzc6P0JCPz07Pj88O0RDQ0NAQEBDQ0Zp
-msHM1tzg4+Xo6OrpPkVIPkpHQT49QUI/QEBCQUI9Pz88Ojc9QDw5ODc7PD89OjU3
-NzU3Ozc7Nzk4OTo6Nzk7Nzk7PDo6PDo4Ozc3Mzc2MjU2ODo5MzU2NjY2NDo6NTc3
-OTc4NTU1PTg0NDY2ODU1NTY1MzU0MzQ1MjY1NDQ1ODc3NTYyMjMzNDU2NzY5Njk5
-NzU4NjY1NjQ1Ojg4Njc0Njg3NzU1OTU0NTc3ODc1MzYzNTY1NzY2OTY3NjY6NTU3
-NDQ2ODc2Njg3ODU2NDc3NDg5NjU4NzU4ODU2NzY2NTQ1NzUzNDQzMTMyMjMyNDYz
-MTQ0NzIzMjUzMTMzMjMxNDQ2MzYzMjI2OTYzMzEzNTYzMjI2NDM2NDY4NTMzMzIz
-MTU2MzY0NTg3NjQzNTY0NTEyMzc2NjQ0NTIyNTMzNTMzMzAwMTQxNDMzNDk4MTMz
-NzM0NTg2NTQ0MC8wODUyNzUyMzQ3NTQ0MjQ2NjcxMTQzNDMzMzAxMjI3NDM0NDc7
-NTIzMjQ0NTc2NDIzNTU2NzY0NjYyMTMyMjMwNDQ1MjQ5NjU0MzMyNDMzNTQ3NDUz
-MjM4NTQxNDQ1NDM2NjEzMzczNDY1MzQ0MzM2NTU0NDY7Nzg7aUg6Ojc3OTk5ODc2
-NDU4Ozg2NTQ3Nzk6OT85Nzc4Njc4OTY4ODc4ODs6ODk4Njk4ODc2NTc4ODY3OTc7
-Oj04Ojo+Ojs5Ojw6ODk2NTg2Ojc1NTU3OTg3Ojo2ODo2NzY3OTc5Ojg4NjMzNDM0
-MzU0NDc1MjI3MzU2NDQ2NTQ3NzY3OTk4Ojw+PDs5OTg7QD5AQDs4OTo5PD49OTo8
-PTw2Nzc2NDY3MzU4NjM1NTEwMjMyMTEzMzUzNjQ1Njw4MzMyMzY3NzY2ODY4ODU0
-ODk4NzY6OTo4ODg5OjxAPT88OTo8Pz8/QUFAQEBAREZFR0NFQ0RHS0tHSUhIRkRG
-REFFQ0FBQT49Ozw6ODc1Njc2NjM1NTY2NjY0MzU1Njc2NjU2Njg2Njc6ODg6Ozo8
-Pj09PEA+P0BDQkBAQEA+QEJDQUFAP0BDQ0BBQ0BCQUE+QURDREE/Q0FGPj9FREBB
-PT9APzo9QkJCQUJDQ0JAPT9EQ0VCQEBAQkZCQUM+PkJDPz06QEA9QUBAQT46OT0+
-PDo7PDw+PT5APjo6PTo7Pj4+OTo8PT4/Pz49PTk/QT07PTw8PTs6ODc3Nzo5Ojk4
-Ozk0NTY2ODY2Njc1NDQ0MjU2NTM2NjU0NTU1NTUyMTQyMzc2NDMzMzM1NzY1Mzc4
-Nzg5Ojs9Ojs5ODs6Ojg6Ozo2NDc7Ozs5Ojw8PD08ODs+Ojo7Ozk6OTo5OTk9Ozo7
-ODs6Ozo6OTs5ODk5Ozw3PDs5ODpAPjs+Q0I/QUE7PDw5Ozk5ODo9PDc5Ojo6Ojs7
-Ozo5Ozk9PT08Ozs5OTk9QD0+QDw8QDw7Ojw8Ozk6OTc8PTw9PDs7PDs7PDs6Ozs6
-Oz0/Qj8+PDs5PkA+PDw7OTg6PT89Pzw8PDs8QEA9OTxCQjw6PDo6Ozs7Ozc6PDo8
-PD5APUNEQUNBQkA+QkhHQ0pEQURHSHSqws7W3ODk5ejo6upFREVEQkFBQUQ9Pz45
-PDw7PDw+Oj9BQD07PDo6PDg7Ozw4Nzc+Pjw5ODk6Nzc2NjQ0Ozs9OTc4ODs7Ozg4
-Nzg4ODg6NTY4Ojo4MjI4Ozg5NjU4NzY1NjM4ODU2NTUzNjU2ODQ1NDQyMzc4NTU2
-NDU1MjQ3NTUyNTYzNDQ2NjU3Njc4ODc5OTg3NzY1ODw3OTk3Nzc2OD05NTk2NjY0
-NjMzNDc3OTc3ODU1NjczNDY1NTczOTg1Njc2NTg2NjU2NzY4Njk5ODc2NjU1NzY3
-NzQ1MzYzNDUzNjM1NTIwMTEzNDQ1MjAxMzE3NDM2MzY0NDU1NjM0NDMzMTMzNTQ6
-OTI2NTQzMzQ2NDM1NDQ2ODY3NTIyNDIzMjI0MjY0MzczMjM0NDMyNjMyNDQ0NDI0
-MzIxMDIzNTMxMzUzMjAyMzQ3OTY1NDMzNDQ1NDI0NjYzNDAzMzQ1NDUxMzczMzAy
-NDQxNDkzNDIxMjQ0NjM1Mjg2NzIyNjUzNjM1MzQ2MjIzMzAyMTUzNTUyMjM1MzIz
-NjUxNzg1MzM0NTU0NTQ1NTQ2MzU2NDQ1Njg0NTc2NTYzMzY2MzY4NDE2MzU0NTc0
-NDY2NjY1NzUzOTxdOzU3NTQ1Nzc2ODo4ODc3NjU0ODQ2OD0+Ozc6ODY6ODg0Nzk4
-ODg5OTY5ODY2OTw7OTo7Ozc4OTs7PDg4PEE8Ojo6Ojk6PDw6OTk4OTo3Ojg4ODg3
-OTc3NjY5OTk4NzY4Nzk1NDc3NTU0MzY2MzI0MjUvMjE1MzU1NDQ0NTc2NTc3OTc4
-Ojw9PDg7Ozo7QUI9Ojs+Ozw+QD07Ozs7OTs3Ojc7OTc1NjU3NzI0NDI1NjY2NzM0
-NDM0NTEyNDUzMzExMjY2NjQ1Nzo3OTo6OzY1NDY4ODk4OTo6OTg7PTk/Pj88PkFC
-QURBQUZEQUNEQ0JBQkdJSUhGRURFRkRDQkJBQUA9Pjs6Ojo7OTg2NTczNjMyNzgz
-NDY1OTY1NjQzNTg4Nzo6ODg7Ozg5Ojs6Ozw8PD09QD49P0FAPz49Pz89PkBBPT5A
-REJAPkFEQT8+QEFDRElJSENAQ0FBREA/QUA+PD5AP0RERUJDPz4/PkBDRENEQD4+
-QEE/QEI+P0FCREBAQUA7Pz89RT5APz4+P0A/P0A/QUA/Qz07Ozw9PTw7PDw7PT5A
-Q0RAQD4/PT5AQUBEQz87PTk4OTs9ODY3Njc2NTk3NTY2NjY3ODg2NjU3NDM1ODQx
-Mjk6NjY0MzM2Njc2MzQ3NjU2NzU1Njg4Nzc5OTk6PDg3Nzo5OTg1NztAOjk7PDY2
-ODo8PDw5Ojk6QD06PDw8PTo3Ozo4Ojk6Ojs7Ozo6Nzk6Ozo4ODs6Ojo6O0VBOz07
-PD9AOzo5NjY5Nzo6Nzk9Ojg4Nzs4ODk6OTk9PkE+Ozk5ODo6P0E8PD08Ozs9Ozw4
-ODg5NzY4OjY4Ojs5Nzg5Ozc2Ozk6Ozw9QDw+PDs7PD09Oz49PDw8Ojk9QDxAQkE9
-PTs8Ozs8PDw+Pjs7PT07OTw5PDk8Oz47PDo+REREP0BBQEA9QEA8QUFGSUlNfazC
-ztjc4ePm5+nq6j8+PUE9PUBGRUNAQDk+QD0/Ozo8PDo9QTs5ODw+Ojo4Nzk7Ojk7
-Ozg2Ozg5OjY1Njc3OUE3Ojk0NDc5Ojg2NzY4OTo4OTg4ODUzPDo5NDc5ODo4NjU3
-NDM0NDUzMzM0NTU3NzU0MjYxNjw2NjU1NTI0NDY4NzQ1ODY0OjM1MzU1Nzc4NzY1
-ODc3NjU2NzU1ODk2NzY2NDY5ODg3NDg1MzY0NTU6NTQ2NjY3NDU0NDM1Nzk1NjU1
-NDU2Njk7ODc3NjU4NzY2NTY2NjU0NDU3NDU1MzQ2NTUzNzM1NTMzMjQ1MTAyMy4v
-MTE0NjU1MjM1NTU2MzQ1NT01MzMyNjQ0OTc1NDI3MjY0MzAxNjU0MzU1NjYzMTA0
-LzIzMjQyNDU1NTUyMzQ0MzA2NTQyNjQ0NTQyNDU1NDc0MzYyMzMzMzc0ODY2NjQ3
-NzQ5Nj00MzIxNDQ0MzM0Njc0NDQ0MjExNTU3Njg0MjU2NzExNzU1NDY1MzQ2Mzc1
-NTM3NTI0NjIzNTQ1MjQ2Nzc0Njc0NjQzMzIyMzY4NTcyNjc3NjQ0NTUyMDQ1NTg1
-NjY1NTQ0NTc3ODk5OTg3MjQxMjY0NDc0NTQ1MzM0NTU1Njc6ODM0Nzg2ODY5Ozo4
-ODo4ODc2Ojc2NzU4Ozw4ODY2Nzc3OTg2ODo5Ojk6ODk6Nzg5NjU0NjU6Ozs8Ozs5
-OTk5OTs/OkM8PT08OTo5ODk1ODg2Nzk3Ojo6OTc5Njk5NTc2ODk5NzY3NzU2OTYz
-NDU0MzczNDQ1MzI2NDM0NTU2Nzg3Njc2OTo5Ojg4Ozk9PDs8PDw8PD48OTo7PDs/
-QDs7PDo6Nzk7OzM2NTYzNDAwNDc0NTM2NzM0Njo3ODYyNDQxNDU3Njk2NTk4OTk4
-Nzc6ODk4Oz06Ojo5Ozo9Q0A7Ozo9P0I/QUJBQkBDQEJDREFERENEREJERENBQURC
-QD07Pj47Ozs5ODo4NDMzMzM1NzY0NDQzODU+Njc4NjY2ODo7ODk8OTc7Ozs7Ozs7
-PDo8PT48PTw/QUFAPDw8PT9APT9BOzs9PkBCQj1BQj8/Ozk9Q0RRREBBRUQ+Q0FA
-Pj1BQUNARENDREdGQUA/PkBCQD08Oz1AQz4/PkE/QT5DQUFBPTo8QUFBPj0+PkBB
-PUFDQ0RBQkM9P0BDQj5AP0NBQ0M8QEREQURDQD08Oz9BQkNEQkA+PT4+PTs5Ozg2
-Nzc2OTg3NzU1MzY3NTYzNDc2NTQ2ODo0NDUzNTc2Njc2NjM1NzU1MzQ1Nzc4Oj47
-PDw6OTc9PDg7OTc4ODk6PUE8OTk9PDw5ODw7PDs6OTo7Ojo5Ojk5PD46Ozs4ODo7
-PTk8Ozc1ODg4Nzs6OTY9Oz0+Qz45Ojo5Ojg6OTo6OTo5Qzs6OTY7Ozs5OTg6ODk5
-OjlBPDk6Ojo4ODg6PD02OTs7Ozs8PDw4Ozo9P0A8PTk7Ojs/Pzo2OT06ODY4Ozs9
-PkA/PT08Ojo8Ozs+PD1BPzk7Pzw/Pj09PTs6Pzo6O0A9Ozo6Ojo9PT0/Qz48P0I8
-PDxAQkJBQz1BPkJBPTtBQkFFSFKgrcLO2Nzh4+bn6OrqRUBAO0BGSEFFQz0/PTo9
-QDw9Ozo8PTs7OTs+PDw/OTo3ODc0Njo4NTc6NjU4NzQ5ODg5Ojk5NzY2Mjg5NTU1
-ODo5Ojk8OTc6PTk3Ozg1NDQ3Njc0ODczODgzMjQ0NTQ3NDM0MjUyNDM0Mzc6NzQz
-MjM1NTY2NTo0NTo7ODU1NjM1NjQ2Ojo3OTk4Ozw5Nzk2Nzs3NzU2NDUyNDI1NTQ1
-NjU2MzU1NDQ2Nzg2MzM1NDM1NjU2OTg3NjQyNTY3NzQ1NTc3Nzc1Njg2NTY2NzQz
-Njc0MzEzNzU0NTI4NjQzNTM0MTI0NzQ1MTY0MzEyNDIxMzYzMzQ2ODM0NTQ2NTAz
-NzQyMjI2MzIxMzQ0NTUyNDEwMjM4NzQxMjE2MjU1NTU2Njc2NjgzNTM2Nzc2OjYy
-MjUyMzUzNTc3NjMwLzI1NzY1NjU0NTY1OzY1PTY2Njc3NTY2NTQ3NjY2NTQxMjQ1
-ODUyNTM0NDMyMzY0MjM1NDU0MzM1NTUzMzQzNzU1NzE1NjY2NzY3NTY2NDQ2NDUy
-MjI0MjM2NTIzMzY2NjUxNDIwNDI0NTMzMzM2MzQzNDIzNjc2NzY4MTU0NDc3Nzc2
-MzU4PDY0NjU1NjY2ODs4ODU1NTc4OTg3Nzs6OTo6OTc4Nzc3Njc2NjU1Nzo5OTs5
-ODc0Nzo6NzU2Ozs3NTc7PDc2Ozo5Oj06OTk5PDw7Pzs6Ozs5OjY2ODQ1OTw7OTk5
-ODg4OTk3NzU3Nzo4OTo6Nzc1ODk0NTU1ODIxMzU4Nzc3ODUzNDU2NjMyMzU4ODY1
-NDc4ODg3Nzg7Pjw/P0E/Q0A8PDk7PTs9Pzw9Ozo6ODU1ODQ0NDY2Nzc1Nzg3NTU1
-NzY2ODg1NjM5OTU3NzY3NjU2OTk6Nzc3PDk5PDs6ODg4OTk6Njo9OTs8Pj5AQUA9
-Pjw+RERAQUFARUNAPkNBQUBDREVBQj9APT08Ojs7QD47NjUzNTY2ODc3NDM0MzU0
-MjQ5MzE0NjU5OTw/OjY4OTs5QD49QEA/PT4+PkA+PTw9PkBCPz49PDw8PDw8PD49
-PD8/QD9BQTxARkRGRkNDQUBBPkBAQUA+RkVEQ0BERkVHQ0JEQEA/PT8/PDw9Oj49
-Ozk5P0BAQEA7PT0/QkFEQj49QkA+PD0/Q0FEQUNBRUVAQUNCQD1AQT8+QkRBQD9D
-QTw/Pz5AQkFBQkBAQkBBQUA+Qj05ODc0ODo4Ojo9ODY0NjQ2NDY0Mzc4OTUxMjE1
-Njc1Nzk5Ojo5OjU0MzU1MzEyNTo6PTk7Ojo7PDk4Ozk4ODk5Nzc3Ojs2Nzo7PTs3
-ODs9Ozo5Ozs5OTo7Oj4/Pjo5ODU4Ozo6Ozs3O0A5Nzg7PDk3Njg7Pzw7Ojo6Oz4/
-Ozk6PDo4Oj08PDw5OTs4Ojw6ODg7OTs7PTw4PTw9Ojk3Mzg7PTo5ODo+PDw7Pjs/
-PD9DQT09PDw8PTs7OT07OTo8PT08Ozo7Oz09Ozw+QEA/PUA/Qj8/QD06PUA+PDw7
-Oz87Pj47Pj89Qj86Oz0+PztAQEBBPzw+PTxAQUJAPUBEQkNBREJCQl1tTZ6ww8/X
-3eHj5efo6upCPkJEPkNGQz8+Pj8+PUA/OTs8Ojk6Ozw4PDs3ODo3Nzk3NzU4TDw7
-PDo4ODY2ODo4NjY9OzY0Njc6Ojc1NDQ2NzY3NjY3NDg6PD06NzM0Njc3Njg2NTU5
-Njc4OTU3NzEyNTY3NTMzNDUzNTM1NDQyNDM2MzQzNDU1NTY4OTc2NTUzNDU2NDY6
-OTY5OTc0OTg4NDgyNDY2NTY1NDI1MzI2NDYzNjc4Njk4NzU1MzM1NTY1NDU2OTg3
-NDYyNTY2Nzg3Njg0NTU1MjY3OTc3OjM0NDQyNjUzNTU2NTU2NzM0NDY0NjgyMzQ0
-MjMuLzAxMTY1MjEwMjg4NDU1MzEzNTIzMzQ0NTMzNjMxMjExNzMzMzIyNDc1NTYz
-MjM0NDU1MzAyNTM1NjQzMjQ2NTg2NDU2NDczNDQ0Nzg2NzYzMjU0NTU3MzQ2NTg2
-NjM1NDc1NDQzMjM2MzQzNzU1NDU0MDM0Njc0Nzc2NDM0MjE0NTI1NTQ2ODg3NDIz
-NDM1NDQ0Njg1Njo5Njk3NTQ0NDQ2NDQ0NjQ1MzQ0NTQ2MzQ2NDc3NjM4OjUzMzM0
-MjQyMzQzNjY3Njc0NTk7NTM0NjU0NzU2NTQ2NzQ3NzQ0NDY2OTg3NjQ4ODY2NTQ1
-Nzo6ODg4OzY2NTQ0OTY4OTc0OEQ8OzY2NzY5ODg9Ozg4ODk4NTc8OTo4OTo6Ozw5
-Nzg4Ozg7Ozc0ODg4Nzo7Ozs6PDs+Ozk6OTY2Nzg1ODg3Nzc5Ozo1NjQ4Nzc0NzY0
-NDQ4NDY3ODc1NDM1NTY0MTI0NTYzNDQ5Njc4Ojg7Ozs6Ozs5QkNBQT87PDs5PDk4
-PUBBQDs8ODo4NTU0NTY1Njk3NTU3NjM0NjQzMjI0Njc4ODc4ODo2NTU2NzY1Njc3
-NjY5Nzg5OTg6NzU1Ojk6ODs/PTw+QUJAPT49PUFCPj1APkBBPUA+PT8/Pz89PD5A
-PD48Oz08Ojk6OTozOTo8NjUyMjU0NjY0MzU2NTc1ODg7Pzw7Oj08PDw9PzxAQD4/
-PkA8PT8/Pjw9Ozs8PDw9Ozs7PEE9QEBBPkA9PUE/PUBEQUFBQkE+QEA9PT9CQEBE
-S0xAQkNCQD9DQkBBQD48QT8+PT8+Ozw8PT9AQEBCQkI+QEE+QD07PkE+QEBBP0FC
-QUFAQEBAQ0BBQkFAQUFAP0FCPT4+QD89QENCQj9BQ0JAQ0I/QD8/Pz8/PTw9PEFA
-PTo7Ozg3NjYzNjQ1NDU3OTk3NDYzNTMzMzU1Nzc2NzY1NDY2Njc1Njc1Njg3Nzg4
-OTs4OTw6Ojo8PDc3ODw7OTs4OTo7Ozs8OTs9PDw5OTo7PDw7ODo8Ozo4PDo8PD08
-PTo9Ojo+Oz48PD8+Ozo6Ojo3Ojk6Ozs7Ojo4PTs6Ozo3Ojs6OTo3Ozo6OTs6OTk8
-Ojk3Ozg5Ozc1OTw8Ojo4OTg6Oz08Pjw7Oz07PD49PDtAPDs+REA/PTw8Ojo4Ojo6
-PD08PD1APTw/PkE/QEA/PkA9Ozk6Oz1BPT09P0JBQUJBP0BCQkE+PD4/QUFCQUBA
-PDxCQD89PVpKQT9BQ0NKW1BPm7HEztjc4OPl6Ojp6kZIPUA7QEE+Pj08PkVFQjw8
-PT5CPT45Njk4Njk5PD49PT07Njc8OTc1NTw5ODo0Nzc7Ojk7OjUzMzY7OTg5NjU0
-NjQ3NjU3ODc2OTg4NjQ1OTg9PTc4Nzk1Njg1MjMyNDUzMzUzNTU4Nzg0MjQ3NTg3
-NzU1Mzc1Njg3OjUyNzU2NDM2NDU1NjQ2PDg4Ojo1OTc5NDg2NTU0NTY3Nzc2Nzk0
-NDg2NTY2Nzg3NTI3NTY2NzQ1NTU0NDY6ODUzNDQ0Ojo3NjY1Njc4NzU4OTg2NjE0
-NDQ0MjQzMzM2NDY1MzI1NjM0NjQ3ODwyMDAuMDAyNDQzNDA1Ly8yMjQ1MDIyMjMy
-MjY0MzI0NTYxMTAyNDIyMjMxNTY1NDQzMjQ1NDA1NjczNDQ0MzIwMjM3NjMyNjU2
-NTUzNzU2Njc1NzQxMzc3NjM0MjYzNzY0MzY1MzY0Nzo1NTY2NTg2NjU0NTM0MzM2
-ODUzNjM0MTAxMzQ0MjI1NjQzNDQ2Njg2ODMxNTM0NDo0MzQzNzM0MjM0NjUyNzk2
-NDM0NTQ3NTQ0MzQzNjc2OTg1MjMxNTQyMjEyNTQyMzUzNzk5NjQ0NTg0MjI0NDU1
-NTk4Njg3MzM1NjYzNTU1NTg2Njc2ODk6ODY2NTc5ODc8NzY3Ozk9OjY6RTo5NjQ1
-Nzk9PDs5ODg1NDY5NjY5ODU5ODo6Pj48OTo4OTk1OTo3NDY4ODk6Ozk4Ozs4OTg4
-Ozg5Ojo7Ozs6Ozc4PTs6OTY3NTc4ODg4OzY2ODY1ODU1MzI1NTc3Njg3ODc3ODk0
-NTg4ODc1Njo7OUBDPzw7PD4+Pj09Ozs8Pzs9Pzw7OTk2Ojg5ODc3NTczNjY0NzU0
-NDU0MTM2NjY0OTg0Nzk4NTMyNTQ2NTQ0MzM0OTY3NzY4Nzg5NjY4Nzc3ODs7O0JA
-PDw+Pz5BQD5EQD49PT0/Pjo5Oz49Oz87OTk9Ozk4Ojo6OTY2OD03NjU1Njk5OTY3
-Njk8Ojo7Ojs8Ojw9PTs7Ozk7Pj5APT9AQ0JCQD5BQT49PkA6PDs7PEI9QT1BQD0+
-QD0+Pj9BQkNCQkBEREE8P0JCPTw9QkVER0NAQEA+QkRCQkI/QD09O0BAPj08PT49
-OzxAPj9AQUJDRUA+Pz08QEFBQD9CPT4/QEBAQT9BQ0VBQT4/QkJCQ0FDP0FAPkFC
-QEFBPz49QERBPkBEQT4+QUA+P0FAPz09PDo7Ojk4Ojk4Nzg1NTU1NTc1NTY4NTY2
-NTQ0Nzc2NDI2Ozs4NzU3OTc6OTo4OTk5Ozk3Nzg5Oz48Ozo4Ojk8PTs5ODk5Ozk2
-NzY5ODc5Ozw4Ojo7Ojw6QD8/PT09OTo8PDg7OTk7OTs7PD46Ojs8OTo5ODs3Njc6
-Ozs8Qjw8PT0/Ozs3ODk5PD05Nzk3Ozg8Ozg2NzY4OD49PT07ODk7Ozg3OT45PDw7
-Ojo7Ozw9PD4+PTs8Ozs6OTU2Ojs7OTo9Pjw+QD0+Ozk6PD1APzs9OTc3Oz1APT07
-OTg6Ojs9QD9AQEA+PDw9Q0FBQUJGQ0M8Ozo+P0A/Qz5DQUFEQkZLSF2sssTP19zh
-4+Tm6OvrR0FCR0VDP0I/REJCQ0A+Pjs9PDo5OT48Pjw6Pjs6Ozk5Oj0/Pjk6OTg7
-Ojc4PTc5Ojo7Ozs3OjY6OT4+PTw5NDc4NTU4OTs5Nzk6ODg5NjY4N0ZCODw6OTc2
-Njk4NTU3Ojc2ODU0MTY+NjIyOTM2NTc3NzY1Njc2NzcyMTU1NTY2NTY3Nzc3OTc1
-NzY5ODk0NTY2NTY2NTMzNjo5Nzg2Njc0NDU1NTMzNTQ2NTY0NTQ0NDU2OTc0NTU1
-NTUzNjg3ODg2ODY5OTk2NjQ2OTk2MzIxMTA0NDMyMzY3NTk1NTQ1NTQ0NTU0MzQ0
-NTUzMTAxMjIwMjoxMTQ0NjI3NTQ1MzU0NTY1MzM1NTgxMzEyNDQ0NTQyNTU4NjIx
-MTMzNTI0MTQ4NzMyMTAyMzMzMzYzNTIzMjQ2OTYzMzg1NTg2NjU2NTM3NjU0NjY2
-NDU3NTM1Ojk4ODQzMzM1NzcyMzE1NTMzMzQ/NTIzMTExMzMyMjU0MzQyMTQzMzI0
-MjMxNjUzNTUzNTY0NDg1NDc0NTQzODk2NjU1NjQ1NDc1NjY6NjU2NTY1Mzg0NDM0
-NjkzMDEyMTE0NDUxNjo3MzU1NzU2NTc5NjY6QTs3Nzg2NDU0NDUyNDY2NjY2ODk4
-Njc3Njo4OTo5NzU2OTs6Ojc2Nzk1NjY4OTk8OTk6Ojg3NTY2ODs8OTk4OTc3OTo6
-PDw9OjY3NzY5ODc4Njc5ODk4OTs7ODs9Pj49Pjc6PTk6NzU/OTc3ODk4ODY2OTc3
-NjY1NTQyNDY3ODY1NTU0Njc3ODUzNDYzNjQzNjY4OTk7ODo9OTk7PTw7PT89PDo8
-Oz5APTo4ODs4OTY1NTcwNDY2NDQ2NzY1NTY4ODMzNDU2NzY4Nzc5ODUyNDU4Nzc1
-NDU1Nzc4ODg6ODQ2PDs7Ozo3Nzg5ODs7Ojs9PjxBQz8+PT09PDw6Ozw6Ozs6Ozo6
-PDk6ODU3OTg6OTc1NDc0Ozg2OTk8OTc6OTk7Ozo6OTs7Pjw9P0BBRkRCQ0FAQUNC
-P0E/QD9CPkJAP0M/Oz1EPD49QUFAQT47Oz88Pj07PD5AQT4/QUE9Pj9AQEJBQT9A
-Qj0+P0BCQEBBPz48PT9BPkA/PT8/QUA8OTo/QT8/QUBBQUA/QD49QEFAQEBAPT4/
-P0NBQD8/Q0JBQUJAQD9AQ0FAQEFBQkBAQkBDPkFAQEJBQEFCQT4/QEI/PT4+Pz89
-OTo7Oz05NzY2NjY1NjQzMjQ2NTU2MzU1MjMxMjQ1Njg2ODk3NTg4NjU4NzY3OTk6
-Ojo4ODc6Ojs9PDs7Ojs5ODs7Ozk5Ojc0Njg4Ozg6OTc5ODY5NzpAQDk7OTw8ODc4
-ODY5ODk4Ojo5OTo4ODs5Ozo7Ojs9Ojk2Nzg5Ozo8Ojs6Ozs3PDs6NzY4Pzw4Ojo5
-OTw7NT1APD07Ozs8PT86OTc3Nzo3Ojs5ODY5ODg6PDk8Oz49Oz05OTk5Ozw9PDs9
-QD06Pjs5ODg5PT9CPzs6OTc7PUE9PDs+QDs5Oz09Pz4/PTw7PDw8Pj4/QkBBQkBB
-QD5BPDxDQ0ZHQkJERUdIZKqvw87W29/j5efo6epCQkFAQUE+PEFCQkA8PkE+QDo8
-Pzw5Ojk9Ozw8Pjc4OTo5Nzw/PDg3NjU2Njc7PDw9Ojs6NDY3NzY3OTg7Ozk1Ojk6
-NzU4OTo2NTc2MjY5OTc2Njc3OTg6Nzc2NjM1NDQ4OTg3NzQxNjs2MzM1MzEwMzQ2
-NzU0NDU0NTU6Nzk0NDY3OjY4NTY0Njc0NjY2Njg5ODczNDU1NTM2OTc2Nzc3NjY3
-NTc2NTUyMzU0NDcyNDU0NTQ0NTY1NDY2Nzc3OTc3Njk4OjY1NzU4Nzc2MzExMjIw
-NDIxNC8yNTMxMzQ0NDU3NTQ0NDM0NDM1NjQ1NjQ0MjQ0MzEyNDQ3Mzc4ODQ0NDUz
-[base64-encoded binary payload elided: several hundred lines of encoded data removed by this diff, with no human-readable content]
-NDY4ODg6NzY1Njk4ODY4PDo4ODk4ODg4Nzs7ODg5OTk4OTdEODg5ODY3Nzk3ODo5
-Ozk5ODs5ODg4Njk4ODg4ODo3NDY3Njc4ODc2Ojc3Ojw5ODo5NjY3ODc4ODc2Ojg3
-NTg8Ozs5OD47PDw4ODc6PTo5ODw+OzxAPDw7PTs/Ozo/PD09PD1BPz9APj05Ojs7
-PUA8PD45OTs6PT9IVkREREdFRkdGRUJDT6DAytTa3uLk5+jp6ktIQ0BCRkhFRj89
-QEA7PTo6QERAQD06Oz06QTo5Ojk5Ojg3NTg4Nzk6Nzk1Nzs3MzM0NDY3NDUyNDY3
-ODo2NTIzNjg5Nzc3NjY0NDg2NTc2Nzs5NzY4Nzo5NzM2ODUzNTU3ODk0NDM0NDQ0
-NTc1NjQ1NTQ1NjM0NDU4NDg3Njg6NjY1NDs2NDUyPDg4NzY5NjY1NjYwMTM2NjUz
-NTY1NDQzMzU1NDczMjQ0MzU2MzIzMTAzNTIxMzQ1NDM0NTY2MjI1NDY2NjY1NTc1
-ODo2NDMzMTE0MjIzNDY0NTM0NjczNDY2ODY2NT43MTQ1NTYzMzY3Nzg7NDI0MjI0
-MjExMjEzNDc0MDMyMzM0NjIzMDI0Njg1NTQ2NTM1NjU1NTM2ODY7NzQ1NzU5ODc0
-MTIzMzY1NzI0NjU0Njg1MzU4NTM0MzIzNTQ1NjY2NTc1NTU1NjM1NDQ1NjY3NDU2
-NzQ0MzQ1Ozk3OTc1NTMxMzY0MjM2Njk7MzM0NjY1NTc2ODY2NjQ1Nzc3Nzc3NzY4
-ODU1Nzc1NTU4NzY1Nzc4ODg1MzQ0MzU3Nzc6Ozc3NjU5ODc1Nzc0ODs6OTk6Ozg4
-OTc2Nzg3ODc4MzU3ODY0MzQ0NTQ0NzY3NjY2NzczNDM0Njs8OTo3NjY0NDY3ODc0
-NDc2Njg1NDU1NTk6NzU2Njg3ODk2Oj84NzY3OTc4ODo2Nzc6NzY3Nzg3ODg3Ojo3
-Ozk4Nzc3Nzo6NjU2Nzk6OTk5OTc3OTw4OTs7OTo5Ojo3Nzk6OTg6PDs8PDs5ODs+
-ODo7OTc4OTw8Ozo5Ojg5Ojw2NzY3OTg4Ozw6ODY2PTk7OTY5OTk5Ojk7ODc9Ojw5
-Njg8PDo7Njg7Ozo5Ojo5Ozg+OTo7Ojo4Nzo6Ojw6OTg3ODg2Njk8OTg3Njg5Ojk6
-Ojw5Ojk4ODo6OTs6Oj05NzY6PDg7Oz08PTs5OTs8Oz47Ojo5OTg4ODc3Nzo8Ozs8
-Ozo4Ojs8ODc5ODc6Ozs3Njk6OTk5Ozs5OTs7Ojc4OTc4Nzk6ODo4ODU3NTU2ODc2
-NTc4ODs5PDo7Ozg4OTk5Ozs6PDg2OUA3ODg6Ozs7Ojk4OTk7OTk6Ozs5OTc6Ozs5
-OkE2Nzg4Njg3NTU2Nzg5OTk6NzQ3NTc2Njc4Njw5NjQ2NTc7PTw9PEI+QT06PDo5
-Nzc5Ozo5Ozs4Oj09PDw5OTo6Ozo5Ojs8Ozo5OTk8Ozo5PDc3Nzs6ODk5Ozo5Ojo7
-PDw9PDw6PDw8PTs7O0BAPj88Ozs8PDs9PDw7OTk7Ojo3Nzg5ODg3Njc2Njc2MzQ2
-NzU5Ojo4Nzk5OTk6Ojc3OTc5Nzo3NTQ2Nzc3Nzc2NzY3Nzo5ODc1ODc4ODc5ODg5
-ODc2OTc3NzU4Nzk5Njg3NjY2ODg3Nzg8Ojc5ODo7Ojk6OTo3ODk7Nzg4ODc3ODg3
-Nzw3NTg5O0A4Ojk3PDc5OTk5OT08PTw8Ozk7Ojs6Ojw+Ojk3Ojk8Ozk6O0E9PD89
-PDs8QTs7Pzo6OTo+QkFAPT48Pj47Pjw4Ojk7Oz0/QUE9QVBjTkNFSERDRURISERP
-n8DL1dre4uTn6OnrQkhGPkJMS0VBQDw+QTo9Ojs8Pj48Ozs4QDc6PkE9OzY3NTU1
-ODw8OTU0NzUyODY3MzI2NzU3NTYzNTY4OTg2NjgzMjU1NzY0NTc0NDU0Njc5NzU0
-NDc2NjY6Ojs6Nzg1OTU2NjU1NDE1NTYzMzU2MzQ1NDEyMzU0NzY0Pj43NjU4NTUx
-OjM3NjI4NjM0NDY1NTIzNTc2NTQ0NjY1NjUzNTQyNDg4NDIwNjg3NjY2NzM0MjMz
-MDAwNjQzMzMzNTE1NjQ1NjU0NjQyOjU2NzUzMzIyNDM3ODYzNjQ2NTQzNjY1NTY1
-NTY0OTUzNDUyNDk0MzAwNDI7ODQ0MjEzMTIzNTAwMjg0MzIwMzUzMjMzMjQ2MzQ2
-NDg2OTQ0NDQ0NTQ0MTUzNDI2NTQ1NjQzMzU1NDQ1Nzc4NzUzMzM5Nzg0NTk4NDQ2
-NTY2NjU3MjI2NjY0MzU1MzQ1NDM0MzAyNDIxNDQ1NTU5NTQ0NDI1NDk3NTU1ODc3
-NTY0Nzc5NzU1ODg2NjY1NTk2ODk5Ozk4Ozk2NTc2Njg5OTs4Nzc3NTY2NTE0OTo5
-Nzc3OTg3NjY1Njo3NjU4OTg5Nzw5Nzc2NDc4Njc2NjU3NTc1ODQ1Nzg1Nzc4ODg2
-MzQ0NTUzNDQ2NTg2NTc4NjY4NTk5PDY4NTYzNjM2Nzg4ODc5NzU2Njc4Nzk5OTc1
-NTU0NTg5Ojo5Nzc5OTk5NjY2ODc5OTs7PDg5OTY2NzU3Nzk6OTk5OTo5ODg3Nzc4
-ODk8Ojw5Ojg3ODo8OTo+Ojg7OTY3Ozk7Ojk6Nzc2Nzk7Pjw4ODc4Ojk4Njg6Ojc4
-NzUzNjc4OTs6ODc2NzY4ODk6PDo5OTo8Pzo7OTk5ODk6ODk7Ojc4Ozs5ODY2ODg2
-ODk6QDk1ODo7OTo8ODo6ODc4ODg6ODk6Ojo5Nzk6Ojo6Ojo8PDo/OTg5Ozk6OTk5
-NjU4OTk8Pjo5Ozk4Ozk5ODc4Nzk7Ozs7Ozc1Ojs3Oj46Ozs6ODg4OTc4Nzg3Njg5
-Nzc4NjQ2OTg6OTg3NjU1NzY/PTo3NTQ3OTo/Ojg5Ojk5Nzk2Nzk5ODk6Pjo6PDo6
-OTk7PT08ODg3OTg4Ojw5Njg6PDs3ODc+VTo5NTY5Ozo4ODc5OTo4Nzc3ODk3NTo2
-NDY5Ojg2OTg5NzY5Ojk8PD0+PDw7OTk4Ojg5Ojo8Oz87OTo5Oj47ODo7Ozk6PT08
-Ozc6PTo5PDk7Ozo9Pzo7Ozk7Ojs6OTw9Pjw6PDo5ODxBPD48PT08OTs6PDs4OTk7
-PTo5ODc5ODk3Nzk6Nzc6Ojo7NjU4ODY3Ojg4ODg4OTk5OTo5ODo8Ozk5ODg5ODg4
-NzY3NzY4Nzs5OTg2OTc4ODo4Njg3OTg3ODg2Nzc5ODc5Nzg5ODg4Nzc4NTc4ODU3
-ODc4NjY5Nzk3ODY2NTo5ODo3Nzc2Nzc3Nzk0Nzk5OTo7Ozk4Njk6NzlAPDk6PDo6
-OTk2Nzg6PDg9OzxAOzo9Ojc5Oj46PTw7Ozs9QT46Oz08Ojo8Ojo7P0A/QT48Oz86
-Ojo6PT4/Q0NESEA8PURAQERFRUNGQkucwc3V2uDi5ebn6upEQkJAPkREQ0w+PD9D
-Pz1BQz4/PDs9Pjk4Ojk3PDs7Ojo5NTg4NzY3Nzc1NS82NTc1MzQzNTY2ODU1OjQ3
-ODc4ODg4MTQyMjY0NjU0Njg1NTQ1NjM2NDMzMzc5Nzc2NTc2NjMzMzc1NTQ0NjI2
-NDY2NTY1NjE0NjQ4MTRIOTc4Ojc0MTE1NDY3Nzc3Njc2NzY3NTU0NTY1MzRCOzc2
-NjY1MzY2MjY0MzQ0NTYzNDg4Njc0NDQ0NDUxNDI2NDM0NDQzMjIyMjI0NjM3OTc4
-OTg1MzMzNTU1NTczMjQ3Nzc4ODU0NDU1NDI0MzIzNDU0ODQ0NTI1NUI5Njc0MzIy
-NTQzMjExNDE6NzQ1NzQxMTQzMzQ0Njc5Njc1NDY1NjQ0MjM6OTg1MzQ1Njg1NjQ0
-MzIzMzU1NzQ0My8wMjExNTc2NzU1OTg1NjY2NjU1NTQyMjE0NjQ0NjY1MzUyNTMz
-MDU1NDQ2NDQ1NTQ0NDM5NDUyNjY3MTQ3NDY3Nzc1NTU4ODs4NjM2NzY4NzU1NTY3
-ODY0NTk5OTg5OTU7OzU1MzY6Ozo4NzY4Nzk4Njc3OTg2NjY0OTc3Njk2NzU2NTY3
-Nzk3NjY0NDU1NDU2NjY7Nzg3NjY4NjU2NTUzOjg2NDY4OTk5ODY4Ojc6Ojg2Nzc3
-NTc3NjY3NTU2Nzg3Ojg1Nzc2Njg4NjcyMTQ0NTM4ODk6OTg6ODY2Njc1Nzg6NjM1
-OTo5ODo4NTY3Nzo5Nzc3ODo4Nzg4ODg6Ojk3ODk3OTs7PDw8Ojk7Ojc4Ozs4Nz04
-Nzc4ODk3ODk4Nzo6OTk4Ozw7Nzc1NjQ0Nzk3ODo4PDo5NzY3NjMzOjw6PDs5Pj49
-PDw8Pj47Nzk3PTo8Ozo5ODY3ODs5Nzk3Ojw5Ozo7Ojk4OjY2ODc2OTk3OTs7Ojo6
-ODc5NzY8Ozk5Nzs6OTg4OUE8Ojc4Ojk4OTo4Nzc7PDo4ODk1NTo3PDo6Ojk5ODs6
-ODc4Ojw5OTc4Ojo3ODg4NTY0Nzk3Nzc1Njg4NDk4ODk5Njg2NTM0OTw7ODU0Njc5
-OTg4OTk4Ojs7ODc4Ojg6ODw8Pzs5Ojo8Nzs7OTc4OTo7OTk6OTY1NTk7Ozk8ODY4
-ODk3OTo3OTo9OTc4NTY5ODc3NjQ1NTo3Nzc0NTc5Ojk2ODo9OTg4Ozs6Ojc5PTg4
-Nzk7PTs6OT05NzY6Ozs5Pz0+Ozo6Ojk4Ojo6Oz07ODk4OTk8Pj0+Ozw5ODk7Ozs9
-PT06PDo8PT09QEA+Pzs5Ojg6Ojk8Pjs6PTs7Ojk5ODo3NzY5Nzc4OTk3OTc4OTo5
-Nzg2ODc5OTc5NjU2OTk3ODg6OTc2Nzc5ODk6Ojs6OTg3ODg2NjM2Nzc2NTY4OTk4
-Nzk4Njg5Nzg6ODg2NDc3OTc5ODc4NTk3NTc2ODk2Nzg7ODY3ODo8Ozg3ODk3OTw9
-OTk2ODk7Ojo7Ojs4OTc5PTg6Ojk6Ozk8PDo5Ojo6Ozo9Ojk5QDw7Ozs8PTs6PTo6
-Ozc9PUA6PT1APTo6PDo9PT1BQj87PD5APD4/Pjw8TW1DOz0+P0NERENGREBGTJvD
-zdbc3+Ll5+jq6z8/RT49QERCSEtAQEhCQEI9PTs9PT03OTs8OTw8PD44Njo5Ozo5
-NDQ2Ozg3NDYzNDY0MTczMzM3NTY4NzY3OTg1NTg3NTQ0MjY2NzU1NzM3NDU1NTI0
-NjYzMzM0NTY3NTM0NDU1Nzg4ODg2NjU0NjY1OTs9NDU0NjQ2NTU2NTU1NjM6PTU3
-ODU4NzY5PDg2ODYzMzU4Njc4NTQ1NjQyNjU3NDQ2NDY2NTMzMzU0NzU3ODg5NTg1
-MzU2NTU0MTE2NDM1NDYzNDMzMzQzNDc3ODMzMjM0Njc0MzIzMzM1NTE1NjMzNzk1
-MDIzNjY1Njg2NTU1MzVCPzQ3NDY4STMyMjQyMjExMjI0MDkzMDAyMjQ1NDQ2Nzg1
-MzQ0NjUyNDYyMDU4OTo1NTAyMzY0NDQ1NjQ4NjUzNjUzNjIyMzEzNzc2OTc3Nzc2
-NjQ0ODY3OTMzMzY1NTQ3NTQ2NTY1NjI2NjM2NDg5NDc0MjYzNTU4OTY3ODg0NDM3
-NjU4OTY0NTY3Njo4NjU3OTc3NTQ3NjQ0MjU0ODg5NjQ3ODg1NDM3ODc1OTg2NDg6
-NDI4NDU4NTY4ODo4NDczNzc4ODg4NTg8NjU3NTY4NTQzNTY1NjY4NjY0NTc1Nzc1
-NTY2Nzg5ODo6NjY2Njc1Nzg3Nzc4OTs3MzY2Njg5NjQ3NTU3Nzc2ODg4ODc4NDQ5
-NjQ3NzQ0NjQ1ODk3NTc4OTc4Njg1NjU3ODg4Ojk4NjY3Nzc3OTg5Ojo5OTY1NzY1
-NDQ4OTg6OTk5OTs6Ozk4OTk5OTs6ODY2Nzg3NjU6Ojg6ODg4OTk6Ojs3ODc1OTs7
-ODs7Oj47Ojw3ODs5Ozw7Oj06Oj07Ozo5OTk7PDk7OTs9PTk6Ozk5OTY1OTg4Oj06
-OTo7PDs4ODk1NTU3Nzk4Nzo7NzY2ODo5ODk7Ojg7Ozg6OTk5Nzk9ZVlGOjs6Nzc4
-Ojc3OTc7Ojw8PTs5Oj47OTg3OTo7OjY4ODc7OTs3OTg4ODk6ODc1NjY1NTQzOTc3
-ODk5Njc4ODc2NjQ1ODc3Njc4NDY1Njg6OTg5Ojw9Ojo6Ojg/Oz1CODs6OT08PTs8
-ODY3ODk4OTk6ODg2NTU3Njg6Ojg4ODg6OTo6OTY0NDU6ODc3Nzc3NTQ0Ojw6OTc3
-Nzk5OTo7OTo7Ozo9PDw7Ojk2NTg4Ozs5Njk5NTU6Ozo6Nzg6OD09PDo6OTg6Ojo6
-Ozk5ODU1NTc3ODo8Ozk4ODo6PD0/PTo8OzpAPjs8Ojk5PD09QEI8Ojs6PTo5NTc5
-Ojk5Ozg4NzY1Nzg5NzY4Nzc2Nzc2OTk4Nzk5Nzg5Ojg3ODg1OD08OTg3NDQ0ODc4
-ODU1NTc1NTU2ODo5Nzc2NTg2NzQ2NTg4NzU2NDU2NzY2NjU2Njg1Ojo6PDw9OTU2
-NTc7OTc4Ojo4ODg3OTk3NzY1Njk+QTs6OTc5Ozs6OTg3ODc5ODo7Ojk3OTo4Ozs6
-Nzk6ODk5Nzc7OTo3Nzs6OTY4Oj0+QDo5PT05PTs9Pj9BPDw7OT48PDc6PTs8PD9B
-QD8+Pz1KZUs9PkI/Pj9BQUI9QURVnMHN1tzf4+Xn6enqPjxBRUNFR0NFRERCREU8
-PTs8OzpCPjw/QD89Nzk2Pj07NzY7PzkzMjQ2Ojg4Nzk2NzU1NDY4MzQ2NzMyNzQ0
-NDg1NjY0Nzg4ODQzNzk2Ozc4NzY2NDU0NDQ3MzY1NTU1NzI2NTQ0Njc4PDc3NzQx
-MjQ0ODY3NjMwMDQ0MjIzOTg4OTU5ODc5OTk1Njc2NDM3Nzc2MjQ5NTU1NzQ0NTg2
-NTUzODQzNjUzMjI3NDQ3Njg3NTQ1NzU6PDY1Njc1MjU1NjQ2NDg1NTUyNDY0ODY2
-NzY1MjQ0MzI0NDIzNDM1NTM0NjMxNDIyMzUyNTQ2NDc2NDg/STs4NTUyNDVDNTMw
-MzIyMTIzMzQ0NTAyMzEyNDU0MDM0NTUyMjQ1NDU1NTY0MTM3Njc0MzQ0MzU0NTU4
-Njk3MzYyMzM1MjIzNDQ2NTc0Mzo5NzY2OjU3Ojg0NjI3NTM1NjY9NzU1NTY2ODU2
-NjU4NTc2Njc3NDU0Njc8OTU0Njc4MjI3NzczNTg2NTc4Nzo2Nzs6Ojg5NTQ1OTg4
-ODg3Ozk2Njg5NTY3Nzk4ODYyNzY1NTU4Nzg2NTQ0MzU3ODY4Nzk2Nzg3NjU1NjU1
-NzY1MjMyNTQyNTc1NjQzMzQ0MjU2NjQ1NjY2NTY4OjY1NDQ2NTc0Mzc4Ojk6NjU5
-NTg2NzU1Nzc5NDY2ODQ2Nzg2NDg5Ojg9Ozc4OTw6NzY3Nzc5ODk6OTg5OT03Njo5
-OTk4ODg3Njc2Njg5ODg3Nzc4OjY2NTY3OTY3Nzc4OTc3Njc6NzY3Njc2NzY1Njc2
-Mzc6ODo3Nzc2ODY5Ojo5ODk5OTg4NzY5PDw9PT0+PTs3OTk7OTk6OkE+PDk6Ojs9
-Pj88Ojo6Ojs5PTk5ODk3Njk5ODo6Ojo9PDs5OTU5ODg6Ojg4OTs8OTc6Ojc3Nzo2
-ODk6ODg5ODo6ODk5OTo9Pz86Ozo4ODg3OTs4NzU3Ojc5OTk6PTk5Nzg4Njg5ODs6
-PDw7ODg3NTc4OTg5ODc1NTU4MzU3ODc3NTU2MzY3ODg2NDc4OTw4Nzg2Njk6PT03
-NjU6ODg3OTs+PUE+PDw6OTg6P0A5PD07ODk6Ojk4Nzk4NTc5NzM2NzgzNjk5Ojg5
-OTk0NTc4NzU1Njo5Ojg3OTw4OTs5OTg5Ozg3ODk7PDo4ODg5ODo3NTg/Ojk8Ozs8
-PTY4Nzk4QD06Ojg9PDo4Nzk4ODY5OTk3ODo7Ojc3Ojw9PD44Ojk6OTs+Pjw7Ozo7
-Pz08Pjs8PDs+Ozo7Ojg7OTk6PTw6Ojo5OTs8Ozo5NzY0NTY5NzY3ODg1Nzg2ODk2
-ODc3Njo4Ozw9Ozo4Ojo5Njc4NTU4NzY5OTk6ODc4NjU2Nzc3ODc5NDk5ODU1MzU2
-NDUyNTc3OTc3ODg4Nzg3NDY4Nzo2Nzg3OTk6Ojg2OTc3OTk4Nzk0NzY4Nzs8OTg3
-ODo5ODg5Nzc6OTk6PDg7Ozo6PD45Ojs9PDk8Ozc5PDg5ODo5Ozw9PTo7Ojw8PD47
-Ozc8Pj4+PT09PD0+Ojs8QTs7PDw/QEE/PUBAREI8OkE+QDxBQkJEPzw7Q1WjwczX
-3ODj5efo6epAQj49Q0VFRT9CQT06PT86ODo4Oz49Ojw3PDg3Nzg5Ojg8OTo6ODo5
-NTYzNzs4Ojs5Nzc3Njg1NzU1NjQzNDU2Ozg2NTQ2NTU1NjY0Ozk4Ojk4NDY3NTUz
-NTU5Njc4NzQ2NDUzNjI1MzQzNTU2NDQzMDM2NTU2NzU1NDQ2NDAwMjQ5Ojk2MzM2
-NjgzMzY4NjU3ODU5NjU3Ol1IODU4Nzc2NTU0NDM0Njc1NjIyMzU3NTQ1NjU2Nzc3
-OTc3NjYzMTIyMzQ2NTU4Njs4NzQ2NDk2NDQ3MzUyMzQ0NjIwLzQyMzQ1NTMzMjM0
-Mzc3ODdDPzo6Pzw3Nzc0NDE1NTc2NDAvMzI0MzMyMDM1MjI0NTEvMjQ1NTQ0NTY1
-NTM1Njg0MjQ0MTQ4ODU0NTU0Nzg0Mzg2NTQ1MjI0NDQyMzY4NjU0NDMxNDU1NjY3
-NjY3NTc1OjQ1ODU0Njg2NTU2NDQ4NzQ2ODY3ODs5NTc3Nzk3ODg5OTc2NTc4NjQ3
-NDQ1NjIzNjY0NDg2NjU0MjU3NzU5ODo3ODQ1Nzo6Njg4NTQ3ODc4NzQ3Nzc1Nzg4
-OTk4PDk2NTU0Ojg3Njc4NDU1NDY0NDg2NjQzNDU1NDE0NThCOjY3OTczNDYzNDQy
-MjQ3Njo2NTo3MzdAOTc2NjU1Nzg2ODc2Nzc2NzU6NTg2NTU1NDQ0NjU6ODQ2ODc3
-ODU3OTg7Nzc6NTY4ODg3Ojw4OTw3Nzc2Nzo6Ojo3NTc4Njo6Nzo3ODk4Nzc3Nzg4
-OTo8OTY2ODk2Nzc2Njc2Njk2NDQ2Nzc3NzY2Njk4NzU2Njk4Ojo5OTo8OTk4PDk6
-Ozg7Ojc7ODs4Ojo5Njk6Ojs7OTk4OTo5ODY5ODg6OTs7PDk5ODo9PDk1Nzo6ODo5
-OTk5OTg5OTg7OjY1NjY3ODo7Pjo6Njc6Ojg4ODc3OTk4ODY3Nzk9PDo5OD08Nzo6
-PDk5OTQ2PDc2ODk5Ozk3Nzc2NjY2Ojs4OT46ODg7NTU3NTY4Nzg1Ozc3NjU2Ojg2
-Njc0MzQ2NjU3NzU4Njc8Ojg6PUA8Ozk3Njg2Ozw5ODY6ODk6OTo4OTw4Ojk7Ozw9
-Ozw5Ozo4NjU1Nzk5ODQzNjg6Ojk5ODY1Ojg6Ozk5NjY2NjU0Nzo8PDs4ODo5ODg3
-ODg2Nzk6ODY4OTs6OTk2OTk5OTk5Pzw5PTo7ODk5Ojw6Ozk6Ojg3OTk2ODg5PTo3
-ODg3Njc2Nzc8OTk5ODs6PDs8PT49Ozw/Pj9BOzs7OTk6Njc7Ozg9PDs8Ozc6Ojo4
-NDc5Ojg3NzY1ODc8Ojc1Ozc4ODY3ODs6ODU1NzU5OTc4Nzc3ODk3Nzk6NzU1OTg3
-Nzg4PEJANjQ2NzU3NTc8NjY2Nzc4NjY3NzU2OTg5NzU5Ojc2NjYzNDg3NjY3ODk7
-OTk6OTo2Nzc5ODk4ODY3NTU0ODk4Ozo5OTk6OTc4Nzc5ODg7Nzg5ODg6Ozw6ODk8
-Ojk5ODg4Nzo2OTg4Ojs6ODs5Nzk5OT9BQTw7PjxAPDs6Oz9APj0/QUFCQTs7Ojw6
-PD0/Q0M9Pz09PTpERkJAQj5DX6nCzdbc4OPl6Ojp6kI+PTtDRT5BQD4/PT4+PEM/
-QTs+PT08Ozo8NjM2NTc7PDs6OTs4Nzs3ODczNjc3NDY5Nzc2MjE2NzQ0NTY0NzY3
-NjM1NDM/NzY1NDk6NTk4NDY4NjUyNTQ3OTY0NjQyNDQ1NTYyMzQyMzM0MjQ0NDs1
-MDQ1NDc7NDc0MzIzMzMzNTU1NTY6NzU0ODg2NTc1MjY1MzM3NTNFVEwyNjQ1NTY2
-Njg2NDQ0MjU0NDU0NzU1MzU5NzU0MjY2ODYyNjY0MzI0NDY0NTk2NjY1NjU2NTc3
-NDU3NTI0NzU0NDU1MzU0NTQ1Nzc3NTM2NDIyNDU3ODY2MjQzMjM3NzA0NDU0MjEz
-MjI1MzM0NDMzMzM2MzIwMzU4NTMzMzM0NDU3ODgyNTIyMzQ3ODIzNDE2ODg2NTc3
-Nzc1Njc3MzQ2NjU1NDUyNjgyNTM0NTY3NDU4ODg4NTQ0Ojk2OTc3NzU1NjUzODk7
-OTg2ODg4NjY6OTg5ODc0Njc2OTU0ODc4NzQyNDM3NTg3NjUzNDMzNjk4Ozo5NzY3
-OTU0NTg6ODY0NjUzNTg4NjU1NzQ0NDQ3OTk4ODc4NzY3NDU2NzU3ODc1NTk4ODY1
-NDc4MzMyNDMwNDU1NTk3NjY1NjM0NzM0NDg2NTU5Nzc3NTc3Njc2MzQ0NTQ2NjY3
-ODg5OjY5Ojk5NjQ5ODY0NTU3OTk5ODg0NTY3ODc1NDEzNzY3OTY4OTc4Ozw6NzU4
-NjU4NjU2NTc5ODc4ODg5Ojk5NjU1MzY3Ojg3Ojc2ODo6NTc5OTk1ODU4ODU3OTUz
-MzQ3OTg5OTc3ODo8PDk8Ojg6OTo6PDo6Ojk6Ozo3OTg2ODg3PT0+PD46OTg7Ozg3
-NjU5OTo3ODo7OTk5Njk6Njg5ODg6Njc4Njg6Ozc4Ojc2Ozk4NjM3ODs7Ozk5Nzg5
-NzY6OTo6PDk5OTg5Nzs8Ojg3Nzg2PDo7Ojo6ODc4Nzg4Ozc7OTY2Nzs4Njg8Ozc4
-PDs9ODo7NzY2Nzc5PDc2ODU3Ojc2NTc3NTc3Nzg1NjY4ODM0NTY3OTs6Nzk5NjpB
-PDo5ODc2OzpBPTk6ODc4ODo6ODg7Ojw+Ozk3ODc4NTk4OTg1NTg4OTU2Njo5NzY1
-OTs7Nzk4Nzc1NzQ0Nzo6OTo4Nzg2NTg5Nzc5Nzk5Ojk5ODs7Nzk4ODk5Ojo5Ozw7
-Ozo4OTs/PTw6OjY3Nzo5Njo/PT09PDc3Ojk4NTg8OTc6PDo7PDw6Ojw8PDs9PT07
-PDk5ODs5Oz1APDs8Pjw6Ojw5Ozk4Pzs7OTg6ODg4NTU4Nzo7Ozw6OTk4NTY4Nzc3
-NDQ4NTg4OTU3ODg4ODg6OTg2Nzg4OTk4ODZAOTo4Ojo6Ozc7OzY4NTU3NjY2Nzg3
-ODc1NTg3Njc3Njo6ODY3NTc4ODc4OjY4ODs3ODg6ODg5ODk2ODc3NjU5OTo4Ozk6
-ODs1NTU3Njs8Ojg7ODY4ODc5ODk5PDs5Ojc4ODY3ODw3OTo5Ozo4ODo8OTk9OTs6
-ODo8PTxBQkA+QT06PD89PD08Pz0+QTw7QENAPj05Pz9BRUFBRUlCQkaEscHN1dvf
-4+Xo6efqREE8Q0FCPzw7Ojo+QUJAQDo8OTg9Pjw6PDc+PjY0Nzs4Nzc4Ozo9OUA4
-NTU1Njc3NTU4MzIzOjY1NzU3Njc1OTo4Nzc5NTY1NjQxMTQ2NjY1NTU0NjY2OTM1
-NzcxMjU4Mz0zNDMyNTM1NjQxMzUzODY1NTQ0NDIzNzQ1MzM0MTExMzI2Nzc3NTMw
-MjE3NTU1MjU4NjQ3NEhiPzY2NjQ1NjU1NTU6ODY2NDQ1MjY7OTYxNDg1NTU2NTY2
-MzU3NjYzMzQyNTc1NTYyMzc5Ojs4ODk5NzI1NzY3NjQ0NzQzNTEzNDc4ODU4NjUz
-NDc1MzQ0MjI3MzMzMzc4OzQ1MzU3MTI0NDcxNDIxMjM1MjU1MTIyMjQ3MjQ1OTg0
-NDYyNDYzNDI2MjQ3NjQxMjM2NjY3NjY2Nzg2Nzc5NzY2NDQ2MzM1NjY3NzczMzU0
-NTUyMjc3Ojg5OTk2NTY4NzY2NTMzODk5MzU2Nzg7ODg4NzY0ODg1NTo4NTU3ODU2
-NTU1NDQ3ODk0MDQzMzM3Nzo8OTs4Nzk5Ozs5Nzg6ODg3OTc5Ozk5NDY0Njg0Njo8
-Ozo3Nzc2NTQ5NzU3Ozg1ODk7NTc2NzY1Mzc2NDU3MjY2ODY3Ojc5OTo5Ojk6NjQ1
-MzU4NjM2MzU3OTg4Ojc1NjY3ODY2NzY2Njk6OTQ0Nzc5ODc2NTc0NjY4ODo2NjY4
-Nzo3Nzg9PDc4NzY1ODc3NzY5ODg4NTU0Njc3Nzg3NzY3Ozs6OTY2OTs9OTc4Ojg6
-NTY5Nzg6Ojg3PDs6OTc1NTM5OTc2NzU2ODU4OD46Ojc2OTk9PDo4ODQ7PD08Ozs5
-Nzk5ODg3ODk4ODk6Ozo9Ojg3OTg6Ojw6Nzc3Njc4ODg2Nzk6Ojw8Ozs6Nzg4ODU3
-Nzs5NjY4OTg2Pjw7Ozo6Ojs4Ozo7Ojk5OTg7Ozo7PD07Ojs6OTk2NjM0NTk6PTw5
-Njg5Nzc7Ojk3Nzc5NzY3NjY5Njc9OTk6Nzg4Nzg3ODg5NjY6ODc3NzU2NTQ0NTU2
-Nzc1NDQ0ND87ODg2ODdBPDk4NjY6Ojs8Nzs4Nzk7Ojo4Njs7OTo7Ojk4Ozs5OTk6
-Ojo3MzY6OTo4Nzk2ODc4NzQ0Nzk4Ozk4Ojk3Nzo4NTc4Ojc2Nzo6ODc5Nzk4Njk4
-Ojo4Oz47Ojo5Ojk7Ojs6OTk7PDo8Nzo6PDk3Njo5ODg6PDk8PDo8Oz4+PTs6OTo4
-Ojo6ODw8Ojs6PT07Pz09Ozo9PTw6QDo7PDg9Ojo4PD04OTs8Ojg6OTo/PDo5Oj07
-Ozk5ODc5Ojc4NTg5OTg4OTc4OTk4Ozk3OTc1NTc0NjY3OTk6OTg4ODg3ODs6Ozo4
-OkE4NTg6ODg5OTY4Mzc7OTc3OTk5Ojg4NzU3Ozc4Nzg5OTo4ODo2NjY2NzY2NzU1
-NTY2Njc4Nzg4ODk6NjY3ODc5ODk5Nzg4OTk3Nzg3Nzk5Ojg7OTk6OTc4OTo4OjU4
-Nzg2OTk5Ojk6PTw8PT06Ozo5ODs6PD0+Pzs7Pj4/QkA+Pz49PTs8Ozs9Pzw6PTs6
-Pjs7OTs8Pz5BQUNERkJAQ2ytw83V2+Dh5efn1+ZGRT5ERkZBPDs8OD8/PjxBPjw7
-Ozs6Ozk9PDw6OzQzNzc2NjY7PTY4NDY2NzY2ODs6Nzg0MzU2NzU3NTc1NTY5OTk5
-ODY5NTM2Njg2MzU2NTY5OTQ1NTo4OTQ0NzY1MjMyNjc0MzM1NDMxMTM1Ojg2NDIy
-NDY3PDo2ODUyMTIzMzQ5NTk3Nzc0ODg1NDQ0MzM2MzU0NDM2NTY2NzY5NDE1MTI1
-Mzc3OTg1MzYzMzU2NjQ3NDIzNTcyMTI0Nzc6Nzc2NTQ1Mzc0NTY3Nzc2ODU3NTY2
-NDQ1NjQ2MzIxMjU1OjM3NTY3Nzc2OTU0NDU4Nzg0MjU0MS81NjQ5Ojc4NjQxMjAz
-NDU0MTQzNDUxMjMzMjQ1MTQ3NDM0MzQyMzo3NTQ0NDg4Njg2Nzk2NzY0NDg2Nzg2
-NjU1NjY3NDc2NjQ1NTIyNzYzNDY0NTk4Mzc3OjY5ODgzNTk2NzY1NTc5Nzs4Njo2
-NTU2NDQ1ODg1Mzc1NTQ1Nzc1NjY2Njk7NTY2NDU0NDc4NDg5OTY3ODg5Nzc4Nzs5
-NTY4OTc6OTk3Ozs8Ojs5Nzk1Nzk3Nzc6Nzk3NDYzNzY5NjY2Njg3NjY2NDU1NDQ4
-OzY2NjU4NzQ0MjIyNTY3ODU0NTY6NzU0NTc2ODU2NTQ1N0I4NTc1MjQ3Nzk3NDc3
-Ozg6NzU2NzY2NTU2Nzk4OTg6Nzk3Njc5NjQ2ODs8Nzc3NjU5NzQ4Nzg1Njc1NTg6
-ODY6NzU4OjY4ODk2Njk4ODw6Ozw4OTk5Ozk3Ojc6Ojk5OTg4Nzg4PDc3OTo7PTk2
-OjY6Oj04NDg3ODk7Ojs5ODg4Ojs9Pz47Ojg6PDg4ODc6OTs7OTc3NzY5Ojk4ODs6
-ODk1NTc4ODY3ODo2MzU7Ojw8OTc4ODg3NzY3NTY2OTw5NzY4Nzk3Nzg4Njg3ODs8
-Nzs5OTk6PDs6PDo6OTo4OTc3PDk8PTk3ODk6Ozk2ODU0Njo6Nzc5Njg6Njo7Ozo5
-NjU2Njk2Nzc2NjYzODg4Njc2Njc3OTo6NzUyMTU3OTk5NjQ0Njw6Ojo4OTk5OTw+
-OTQ3Njo5OTk6Ojg7Qzs7Ojk4ODY4OTY5ODQ1NDc2NjUzNDY0MjU3ODg3ODc6Ojc1
-Njg5Njk4NDY1Njc2Njc6OjY3Njc4OTo4ODk5Ojs7OTo4Ojo7Pjo6OTk5Ojw+ODs6
-PDo6ODo4Ojs8PDo4Ozo8Ojc5PDo4ODk7PTk6ODk6Ozg5OTk6OT46PT89Pjs7OTo5
-ODs6Ozw7Ozs7Ojc4Oz1APzw7OTs5Ojo6ODs4PDc4OTc3OjY3OTk7Ojs9OTk4Nzs7
-Ozk1NjU2OTg6Njc3ODk6PDg3Ojs6NjY3OTY5Ozg5ODY2NDY5NjY3OTg3NDQ2NzU0
-Njc4ODc2OTg3Njc3Njc2Nzc5Nzg2NTY4Nzk3NjY2Njc1Nzo6ODw5OTc5OTY3ODg2
-Ojs6NjU3OTk6OTk5ODo3ODk6ODs+Pjg6Ojo6Ojc4Ojk9Oz9BOzg5NztAPTs8PTw8
-PD07PT1APjw9PTs5OTo8Ozw7PD0+Pj87PUA/QT8/P0BERUVCQUBBTJ/CzNXb3+Ll
-5ufo6jtEQTs9P0I9Ojo5Ojs7PT89PTs7Nzg9PTs6PTs6ODg7Nzc5Njo3NTc7ODc1
-NDM1Njo5NTc0NjU1MjM2NjIzNDQ7Njc9NjM0NzczNDc0NDc3NzQ0NDY5Nzc5NDU3
-ODM5NDU1Nzk5MzQyMzEvMDY2NjU2NDU2NTY1NDg2Nz44NTc9Nzg3Nzc0NTQzODY3
-NzM0NTU2NjU3OTc5PTc2NTY2NjY1NjI1ODg1ODc1MjU0MjQ3OTc0NjIzNzc0NjQ2
-ODU1NjMzMzM0NjM0NjQ0NTU0NzIxNDU2NDUyMzQzMTEyNTMzNTU2NjY1Nzc2NTM0
-NDUzNjU6NDIzNjQ1NTU1ODY3NzUzNTQ0NTIyNDQyMDAzMjI0Nzc1NDI0NTIyMDE0
-NjQ2MzA1Njc0Nzc3Njc3OTk2Nzc3ODY1NjY3Nzg4NTY3NDg4NDc2NzU1Njg1Njk1
-NzY2Nzg4ODY0Njc2NTw1NTVAUTw5NzY1NzM0MTMzNTg3ODY3Nzg2Njk4OTc5OTo3
-ODgzMTY1NTY6OTk1NjQ2NTc2NDY5ODo7Njc1NDc5Nzg9OTo5PDs7ODY1ODY0ODc5
-Nzk0MzIzOTk3NTQ4MzY3Nzc2MzM0ODc0NzY0NDU3Njg3NzQ1NTM1NTMzNDM1NTU0
-NTU2Nzc1NDc3Nzc2NzQ1NTc5Nzg2NzY6NzY1ODY1NDU2OD04Rjs5OTc1NjQ2Nzo2
-ODY3Ojk5NzU5ODY2NzY2NjU3NjU2NzQ1Nzg5Nzg5NzU1NzY4Nzo6PDc7Ojo4Nzc6
-ODo8ODg3Nzg3Nzc3ODk7ODU5Ojo6OTg3ODY4Ojs5Njc2Nzc3Ojk+Ozs3Nzg6Ojo4
-ODg7Nzc6ODs5ODc6ODc2NzY5Ojk3OTk7OTg7OTg5OTo5ODg7OTc4Njc2ODg6OTc5
-Nzg7Nzg5Ozk8ODk4ODk8ODg4Nzg6ODg8OTg7PD07PDw7PDg4ODY2Nzk5OTs6PDg2
-NTg3Nzg6Nzc2Njk6Nzg3NjY3ODs7PDY2ODc1Njg5ODY3NTU2OTk1NTg5NzY1NDQ1
-NDQ3Mzk8NzQ1NjU3OTc6ODg9OT82NjY2Ozk7Ojc1NTg4ODc4Njg4Nzo2NzY1Nzc2
-Nzc5OjY2NDMzNTc5Ojc4Nzg5Ojk1Nzs6Ojg2NjY3ODg4NzU4ODg4Nzg5Nzg4OTc7
-PTk4Ojs8Ojo3OTs7OTg6OTk6OTo5Ozo7PT46Oz06Ozk4Njo6Ojk5PDg4Nzc3Ojo7
-PDw7Ozk6PTw6Ozo5Ozw7Ozo5Ozs9OkA9Nzk5PTw6OTo3OT09Ozw9Oj48Ojk6PDs8
-Ojs2Njc7NzY1NTY6ODk7PDo5ODc4ODg4OTk3OTs3OTo3ODg4OTk7Ozk2NzY7NTg4
-NTY5OTY2NTo4ODs3NzY3ODY5NzY1MzU1ODk5ODY3OTg2NzQ3Nzg6NzU2ODc2Njg1
-ODY3ODY4Njc3ODo7Nzs5Ozg7OTk3NjY3ODg4ODo7OTk6OTg7Ojc4OTg4OTg8PDg4
-ODo3Ojo4OTw7PDk6OztBPjk6PTs7Ojk6PUM+Pj4+PDg6Oj09Pz4+OTg6Pz88Ozk6
-OD89Pj5BPj5DQkM9QUFJmsHL1dre4uTm6OnqREQ/QD08OjxAPDk6Oz8/Pzk5Ojo7
-PT09PDxAPT49Ozw6Nzo3Nzo2Njg0NTY1OTY2Ojg5NzUzMzI0NDc1Ojg3NTI1Njc4
-Njc0NjY3NTU1NjU3NjI0NTk5NzM1NjM0NzM1ODUzMzM0MjIyMTE0MS8xMTI0NDQz
-ODg4Nzc0NTc6NjY3ODU5NzY3NTM0NjQ2NDQzMzQ5ODU3NTs7Nzo2NTc0MjU3MzU1
-ODg0NjM0MzQ1NDU2Njk4NjU4NjgzMjAxNDc2NjQzNjkzNDM1NDU2NTU2NTU2NDUy
-MjEyMzIxNDYyNDEyMjY1NTQ1NzM1NjQyNDQ2NTg2ODQ2NTQzNzg4OTc4NjExMzMz
-MTIxMTM3NTM0NTM0NzQzNDU0NTQyNDM0NjM0MS81NDQ3OjY5Ojo6OTo3OTQ2NDQ1
-NjY3ODg0NDc2NzY5OTc3NDY2NzQ0MzMzNTY3Nzg0NjQ0NDU2NTY3Nzc6OTczMzM2
-NzY1NjY2NzY3ODc2NjQ2NzU0NTc2ODw7NTU1NTc3Nzg3NjMzNjQ2NTc2Njc6Ozo4
-PDc3NzY2NTk4Ozw4Ojw3NjU2Nzc4ODg2NDc5NDM0NjU0NTU0NzM2ODc1NTk1MzY1
-NDQ2NzU4NjY4NDU2NzU1NzM0NDU0NDYyNTQ1NTc4NjY2NjY1Njc1Nzg3Nzk2NzY2
-Nzs3NTc5ODo5Ojg4OTo5NzY2NTY1OTY2Njg2Njg1Nzg4Nzk5ODg2NzU3Ojk5ODk4
-NjY9Ojc4NjU4ODc4Nzg5PDg6OTo8Ozo3OTc6Nzg7ODk5ODk+PTw7Ozc4Nzg2Nzg2
-Nzc3OTg5ODc3OTs3Nzk6OTs7PDg4ODY4NzY5ODc5Ojc5Ozg2NzY2PUQ4Njg8PDs7
-PDk5Ojk7ODk7OTo9NjU1NDY1NDc3Njg5Njg3PD87OTc6NzU3ODY6OTc4OTU5Nzg2
-Ojo6OTo6PDo4ODo4Njk7OTk2NTU5Nzo4NzY2Ozk5ODQ0NzY3OTo4OjY5OTo4OTY3
-OTc4NjY1NDU0ODo6PTQ2NjY2MzU2NTY0NTY6NzY1ODY2ODk5Ojs4ODg7ODY2NTo6
-Pjc3ODU1NTo2ODs4ODY1ODw+NzI0NTc3Ojg5Njg3OTc4Njc6Oj85OTk6Ojw6ODg4
-Ozs6OTk2ODY3NzY7ODs5Njc5ODk7Qzw5ODk5Ojk4NjY3Njc2NzY2Njc4ODk4Ojk6
-OTw7Ozk4OTo1Ojo8OTo5NjY4Ozc5PDo+Pj08OTo6Ojo8Ozs4OTw6ODk7Pjs6Ozo5
-NjY6PDw7Pzs+PTs+PT08Ozo5Ojo8Ozo5Nzg5NjY3ODc0Nzc1Nzc6NzY1NTU8NDU4
-Njk5NTU1Nzg1NTU2ODg3ODY1OTY2Nzc3Nzk5ODY5NzY2Njg3NTM1Nzc4MjY3Nzc0
-NTY4ODc5ODo2NTMzNDY1NDc2NTc3Mzc4Njc4ODY3Qj85OTg2ODg5Ozg5OTc4ODc4
-OTs6Ojo5Njc5Ozk3OTo3NzU5OjU3OTg6Njg6OTc9Pz46Ozs7PUZJPjs8PkBDPTo6
-Oj89PTo+PT07QT88PDtBPj1DPzs8OjxAPUFBPkFAQUNDPz5CRFCfwMvV29/i5Obo
-6upFQkM9Pj0+PD5BPDk6Oz1AODw5Ojs4Oj08PDxDPjo7PDk6Ojs3OTc3Ojg0MzY5
-NTw1ODc4NTw2NDg6Ozg4Nzg1MzQ2Njs6NjU2NTc1NDQ0NTQ1NjY3NjY3NzQ0NjE1
-NDY2NTM0NTY1MzU2NjUyMzc0NjQ0MjEzMzU2NjQzNDY2ODY4NjQ0NTY0NjM0NTMz
-MjQzNDc1MzM0MTQ2Ojk1NTYzMzM1NDY1NDQ4MzMzNjQ3OTY2NDQ2NDQzNDEzMzEz
-NjU1NDQzNjM2NDc2NjQ1NTUyNjc1NTQ0NTI0NTQxMjMzNTY1NDM1NTU1NzQzNDQz
-NTU1NDM1NDIyNDU0MTI0Nzc1MzExNTAxMTAwNDMyMzU4NDMzNjU2MzU0MTI0Mzk3
-NDY1NDY1Nzg1Njg3NzU0Nj02NTQyMjQzNjg1MzM0NzY6OTYzNjc1ODY3NDQ2NTQ1
-NzY1Njg3ODY2NDY1MTM0MzUzNDI6MzQ0NDM0NjQ0Nzg3OTc3Njo3NTc4Njc3NjY5
-OTY3Njc3OTg3MjY0NDI2NDU5OTk5PDo6Ojk1NDY3NjY3ODU1ODg4ODc4ODo4Njc1
-NDQ2MzIzMzY1NTY6ODU2Njg2ODQzNjI0Ojg4ODY3OTk1NTc4NjU1MzM1NDIzNDc2
-NTY4OTc4NTY1NDQ0NTg5NTk1Njk3NzU2Ojo2Njk3Njg4OTg5Njk4ODg4OTw1Nzo5
-Nzk4ODY2ODY5ODk6NjY2Njc5ODk4ODo5Pjg6Ojk4ODg5Njo4Ozk6OTs6Nzg5Nzk4
-OTw4OTo5Nzw7OTk5ODk7Ozo4Ojs8Ojo3Ojo6Ozo4Ojo5Ojc4Ozc4Ozo5Nzc2OTk4
-Ojk4ODc3ODk6OjY5OTc5OTg4Njg6PDk5OTg4Ozs3OTY4Njo7Ojc3NzU1Nzc2ODc3
-Njk5ODg3OTo7Njc5ODk7PTk2Njk4Ojs4Njc7OTk6Ojs4Njk6PTs8ODc1Nzk4Nzw4
-Njk5ODg2OTo1Mzc3Nzk5ODo5OTw6NjU1OTc2NzY3NTYzNTk4OTU3ODc5NTY2Njg5
-OTk3NTQ0Nzg3OTk6OTo4PUg9Njc2OTo4NzY0Njg3OTo4Ojk4OTo2NTg5ODY3Nzk6
-PTc3Njc1Nzg0NDQ+Rzo7Ozw5PDo4Nzk4ODc5Ojc3ODg3OTo6Oz46Nzg3ODlDPDk3
-ODk3ODo2Nzk2Mzc5OTs4NjU1ODc3Nzk7PDo4Ozg5O0A5Nzc6Ojo5OTo6OTg7PEE8
-PD49PTk4Ojk5Ojs9Ojs8Ojo5OT08PDo7Ojg5PT4+PDg6PDs+PDs6Ojk2Ojg4OTk5
-ODg4Nzk5ODY2Njc5NjQ3Nzg4ODg2ODg7Nzc3ODk2ODg3NzU3OTo7Ozg5ODg4Nzc4
-ODg6QDk1NzY2NjA1NzY3Nzc1Njc3ODs4Njg6ODY4OTc2NzY5Nzk8ODY2Njc3ODo5
-NDU5Ozk8QEE8QDo5NzU4ODg5NjY3ODo5Ojg3ODc2NTc3Njg6ODg2OTc6ODg3ODk6
-Ozw8PDo6Ojc4ODg4OUBHRTw8QT49PD8+PDs9Oj5BOzs+PD08PDw7QD0+PT8/PTs/
-QD89Pz5CQEJAQENKWKHBy9Xa3+Hj5+jp6UNAQUNCQDw4PkRCPz46OTo8PDw4Njc4
-OTpAPkE+PDc3PTg7OTo3NDc3OjYyNTg0NjQyMzQ0NTY2NTU2NDU1NTU3ODY5ODs6
-PD84ODQzNDM1NDUzMzQ1NTY3NTIzMjU1MDE0MzQyNTM1NTQ2NDU1NjY4OTk3OTQ0
-NDMzMjQ6NTc4OTg2NDU2NDQ1NDI0MzI1MzU2MzQ1MzQ3NDM3ODU0MjQyNjYzNTQ2
-OTY0MjQ1NDI0MjIzMjU2ODk0NzM1NDU2Njc3MjY3NDU0MTQ0MzQ1NDU0MTU1MzQ0
-ODc2NDIyMzI1NjYzNTUxMjMwMzQzNDQ0MzMyMzQ1NDQzNDQyNDU4OTY1MzIzNTMx
-MDc1ODg1MzM0MjQzMzQ2Nzg4NTM5OTY2MzUzMzQ3NzM0Ojc1NDQ0MzY2NDI4ODg0
-NjY0MzQ1NTc2ODMzNTc4ODczNTc5NTQyNTY2MzM3NzU2Nzk1ODU1MzU3NTQ2NTQ1
-MzU1MzY2Njc3NTY4NzY3NTk1NjY3NTk4ODQzOTg4NjU2NzY3NDc1Nzg5OTo6Ozo6
-ODg4ODY2NDU2NzY0MjM2NTg5NjY1NTUyNTM0MTQ3Nzg3NDU2NjY1MjU1ODY1NTU1
-Njg2NjU1NjczMjM2NTI2Njc0NzQxMjM2NjY2Njg2NjU2NDg3ODQ3ODg3ODc5OTY3
-NjU0NjY2NTU3Ojo2NzU2MzUzMzY4Ojg4Ojw4ODo4ODg3NzY0NTc4NjY2Nzg4ODg5
-Ojo4ODo8Nzc3Njo4OTw6ODo3NjY1NDc5OTs5Nzk9Ozk3Njk7NTg2ODo5Ojk2ODo6
-Ozs3Nzc4PDk4ODk3Nzk5NzU1ODc2NzY0Nzk8PTs4Ojo5OTk5Nzg3NTY3OTo6OTg6
-OTg5OTc5Nzc5OTg4PDs7PDk6PTo5NTg6ODg5ODU4PTk3NjU4ODY4NTc4NjU2NjU1
-Nzc4NTg5Ojo3OTk6Nzk3Njc3OTg4NTU2OTo5ODc2NjY4NzY4ODY3Nzc4Ojc1Njg4
-Njg3Nzc2NTY0NzY5OTg3OTo1NDY2Njc2ODc0Njc4Nzg3Nzg3Nzc5PTw3ODk4ODg4
-OTs6OTg6Oz05ODg4OTs4ODg2Nzc3ODk3NzUzNjg3NjU1Mz07MzY3Nzc6Ojk3NjVA
-OTg1Nzc1NTc4Nzc2OTo5PTs5Njc4Ojo3NzY4Ozg4NjU1NTg5PDo4Njk5Ozc3Njg4
-ODg7Ozs8Ojs6Ojk4OTo8Ojk6ODg6Ojk6Ojo9Ojw5Ozw6OTs7Pj05Ojg5Ojw/PTs+
-QTs5Ozs8Pj06OTk6Ojg4OTs3OTg5Ojs5Ojs6Njc3Njk6OTc6Nzc3Njg8OTo5ODY4
-OTc1NTM0ODk9PDk6ODg4NTQ5NTk3Ojs6ODo6Nzc4NjY1ODc2Nzg3Njg5Nzo4Nzg1
-NzY3NzUzNDc3ODg6NzY4NjU0Nzk4ODg4NjU2Nzc1NDc/Pjk3ODk4OTc0NjY3OTk3
-Ojo3Nzk4OTg5PDo5ODc6ODY5Ojo3OTo7OzxAPjs5Nzo4Nzo8Ozo9PD07OT49OTtA
-Pjs9PkI9PTs7Oz08PD09Pjo8PDw8Pj0+QkE9QD9CQz5FREddmb/M1Nne4eTm5+nr
-Qz8/QUFBQURCPENEQkI/PT0/Ojo3NDk6Ojo4PDw6Nzs6ODY1NTYzMzY0Nzc2NjU2
-NTU0NDc6Njs6NjE1NTQ2MjQ1NDQ1Nzg2OTc2NS8vMjM4NDUyNjQ0MzU1NTY2NjQz
-MzM0NjM0NDI0MDY3NzQ2Nzo1Njc2ODQ1MTQ3NDQ0MzM1NzU1ODM1NTI1NTIyMzUw
-NDM1NDg2NTM0NjY0NTMyMjAyMzU0NDYzNDc1NDEzNTc0MzM1NDY1MjU0NDU2NTY2
-Njc6OTQ0NTQ2MjI0NTQyMzQ5NzY0MzY4ODY0NTY0NTQ0NDY1NTQzMTIuNDY0MzQ1
-OzQ1MzU3NDYzNTQ8ODk3NTM0MjIzMzM2Njg4MzMyMzEzMTU2NzY2ODc1NDY1NDY2
-NjgxMDMzNTQxNDQ2NjUzMzQ0NTY0NzU3Nzk9NTY4Njg1NTY2Njc3ODo1Nzk4Njc0
-MjM0NjY4Nzg1OTQ0Nzc3MzMyODQxMzU0NTQ2NTk3NjQ7NTM0NzU1Nzg2NzU2Ojw6
-OTU1NDUyNTY3ODc5NTk5PDc2ODg3Ojg8Ojo7ODU2NjU2NjU5NTYzMzE0NTY1MzU2
-ODg5ODQ2NTc1NTY2Njg4ODU2Ojk5NzU5Nzk4Nzc4NTIzMzIzNDc3ODU1NzU1MzU2
-OTc0NzczNTY3NjM2NzhCPDg3ODo6OTg2NDU2NjY3Njc5NTIzNDk5Nzc2OTk4ODk3
-OTk7ODc4Njc3NTg5ODc5ODc4Ojk2Nzc4Nzc4OjY4ODc6ODc3Nzg2Nzc3NzY3OTo6
-ODc7Ozs5OTg5Nzg2NDQ1NTQ2Njc6Ojo3NDU1NTY5NTo5Nzc6OTk6OTo6OTg3NjQ4
-Ojs9PDo3OTg6Ozw8OzU5NzU3OTo5ODg4Njo5Nzc4Ojg5Ojg4ODg6Ojg5OTc4NzU1
-NTQ2Njg4NTY2Njc3ODc7OTo4ODc0Njc4ODg2Njk4NjU0ODU2Njg5NzU3ODo5OTw2
-NzY1NTU1NTU1NjU2PDs5Njo1NTc3NzU1NzY3NTUyNTU0Njg2NTQ1Njc2Ojc1MzM2
-NTg3NTg3Nzs4ODo7OTg2Ojo3ODo3OTc3NDc6NzQ3OTY3NTo5Ojg5OTk2MzY3Nzc3
-NzY4NjQ3Nzc2NjUzNTk4NTc1Nzg6NDM3Nzg3NTY2Nzg5OTg3OTk2ODc2ODc4ODc2
-Njo4Nzk5OTc5Ojs6Pj08Ojk5Ojw4Njc4Oz08Ojc4ODo6Ozo4ODc3Nzo5Nzg2Ozo5
-Ojo9OTw7PDo6Oz89Oj06Ojw+OTo7PD46PT09Ozk8Ozs6PDs6Ojg7Oj85OTw9Ozg3
-OTk5OTo5Nzk3Nzc4NzY4OTk4Njk2Ojo7Ojo3NjQ4OTo8Ojg4ODk8OTc4NDk5ODo3
-MzY3ODc5ODg5NTk4Nzk3OTY2ODo7Ozk3NTU1MjMzNjU2NDQzMzQ1ODY0NDUzMjQ3
-NzY1NjU2NjY3Nzk4Njg1Njg2Njk6Ojk5ODk4Ojk3Nzg3Nzg3NTc4OTo6OTY6OTs7
-OTo7PDo5Ojk4OT09Ozo6Ojw9QD4/Pzs9Pj8/PUNCPj4+Pzs8PTw9PkBEPUFAQj0+
-P0FBQkA/Q0U/QUybwMvU2d3j4+bo6etBPTw7Q0E+PD09PUJGREM9Ojk6OjY2OTc5
-Oz44NzY7Ozk1MzM1NzYxODg1Njg1NTQ2NTc4Nzc3NTU5Ojo6NzQ0NjY2NTU3NTU8
-NjU2NTM1NTY4MzY1MjYyNjY4ODk5NzUzNDIyOTQxMTE2NzY4NzMzMjMyNjI0Nzc0
-NjU4NTIxMTIzNTY1NTU1NTc6Mzc3NTUyNDU2Njc4NDYxMzU2NTIzNjk2NTQ0NDY0
-NTUzMzEzMTU0NDU0MjY5ODU1Nzg0MjQ1NDU1MzU3NzU3NDc2NjYzNDY1MzI1ODMy
-Njg1MjQ0NTU1NDMzNjU1ODYyPDc0NDQ3NjY2ODQ2NTg1NTc4Njc2ODY2MzM0NTY3
-NDQ0NDUzMjY1NjQ3NjY4NzU2NTY0NTU3NjQ0NDU1NDU0ODg2OzY2MzYzNjc3ODg2
-NDg3NzU4NjY3NjU2NTQ3NTg4NzU4OTc1NTg3ODc2NDQ2ODg/OTY2OTk2NDU1NDY5
-NzQ3NzY4OTk4OTc5Ojc2NDc3NjY5Njc2NjQzNDQ0NzY3OTo4NTY2NTY5NzU0ODo5
-Nzs3Nzc1Nzk5Ojc4NDQ1NjEzNTQ1NjUyMjQ3NDQ1ODc3ODg4ODc0NTQ4ODU3NTc7
-ODo2NjY0NDQzMzQyNjg3Ojc4OTM1NTQ1NDQzMjc3NTM2NzY2Nzc7NjU4Nzg3ODY4
-OTk4ODY4NzQ3ODU0NDc6PDo5PDc4ODc5ODc3Njc3NjU4Ojw7NzY4NzY1OTg2ODY4
-Nzc3Ojk3ODY2Nzk3Njc3Njg5Nzk3NzY2OTc5Ozk8OTg2NjU2ODQ2OzU3OTY4Nzo7
-ODU0NjY3Njc3NTc5OTg5OTo2NDU4Nzk2NTw8OTg6Ojw8ODg4Ozs4OjY1NTI4OTk2
-NDc7Oj04Njk5ODU1NTU1Nzc2OTo3ODY3NTY3ODc1OTs2NDU6NjY7Ozo5Njg4Nzc3
-NzY2NjM2ODs1NjU0NDc3NzY3NzY4OTY5NjU0NDY0Njk1NjQ3Njc9OTg3Njc0NjU1
-NDQ2NDg4NTU2NjMzNTQyNDU2Njc7ODc2Rzw4NjQ3NTY5Nzo8ODk4Ojk3OTc1NTw7
-ODo5OTY4Ojg3Njc7Ozk3NzQ5NjY6ODU3NzM3NzI3NjY5NTI1ODY2MjY2NTQ2NjY3
-ODg6ODc5ODY6PDo7OTg3Njg5Njc5Ozk4ODk4ODo9Ozk3Ojs9Ojo8PDs9Pzw6Ojw5
-ODg5OTw8Ojo6Nzg4Ojg2OTs6OTg6Ozw/PT5BQEA6Ojw9PDs7Ozs9Ojw6Ojs7PDk4
-ODk7Oz09PD06ODk9Pzs8Oj88OTo7PDw4Ojs4Njk6ODo2OTk4ODk4OTo3Nzk9Oz47
-Ozs5ODU3ODk4ODg2ODo6OTU2NjY3NzY3Nzc0NTU3Nzc5OTc4NTU2Njc7Ojo4Ozk3
-ODg5NTY3NjY3ODg8Nzg2Njg1NTg7ODo5ODg4Njg3Njc4NjY0NTY6OTg7NTU2OTg7
-OTk4ODg7NzU3Ojs7OTs5NzU5Nzo5Ojs7PDw6Nzg4PDw/PDw9Ojs7Oj0+Pzw6Ojs8
-Ozk9Qj8+Pz88PTo5PD08O0I9QERER0NAPz1AQkRFQUI/TJ/AzNTa3+Lk6Ojp6jw7
-Ozs7P0NDOzo6PUE5Oz47OztAPDs7Ozo9Ojg6PTo6PDU0NTY1NzgzNTU0Ojc6NjQz
-NTU5NDU0OTg6Nzc6NjczMjQ1NDU0NDc2ODo2OTk2NTEyMzY2NzU0NTg5OTk4NTUy
-Njg3NzIyMTM3MzQ5NDQ0MzQ0MzU3Mzc1Njs3NjQzNTg3NjQ1Nzc2NjY3NDY4MjIz
-NjM2NTc1ODQ1MzU1NjU3OTk1NDU1NTQ0MzM0MjMxMDM2MzI0MzM2OTc4NjY1NTc4
-NzQzNDU4NjQ1NzU0MzU0ODE0MzAvNjM0NzM0NDAwNDU0MzI0NTZCOjQyNzU3NjY1
-MzY2NDY2NTc3MTQ0Nzc3Nzg1NTM1MzIyNDM2NTQ4NzUzNTc0MjU2NTY0MzM1OT09
-MjM5MzQ1Nzc2NjU1NjcyNDUzNDU0NzU1NTk4OzQ0NzQzNTg0NDI0Njc5NjYzNTUz
-NjU0NjU1NTU4Ojs4Nzg3NzQzMzM1NDQ1NzU1NDMzMzQ4ODk2NTI1NjY1NzQ1NTQ1
-NjY2NjI3OjY1Ojg2NDM3ODk4NTg3ODg4Ojk4ODg4OTs8Nzc1NTI1NDEzODc1MzUy
-ODY1NDQ1Njg3MjQ3NjIzMzY5OTk5ODo3NTo3NDI0NDUzMzU3NTY1NjY0NTU1MzQ0
-OjM1NDg0Nzc2ODk2NDY4Nzc4Nz01Nzg6NzYyMjY2NjUzMzY5ODg5Ojo6Ozc4Nzg2
-OTs7Ojg3NjY4NTc4NTU4PDk5NTY2Njc4NTk4NzU1NTc1NTY1NzY4NjY4NzY2NzU2
-ODg3Mzg7ODY2NTM0Nzk2ODg4Nzc3Njc2Nzg3Njc3Nzg5ODo5ODY3Nzg2Nzc4NzY6
-PTo4OTc6Ojw6ODg5Nzg9OTs8PTo7ODo4Ojs7Ozg6Nzk4OjY4ODY3Njg1ODk4OTc7
-OTY2Njc5Nzg8OzU3OTo4OT0/Ojg1Njg4NjU2NzU4ODg0MzQ0NzY2Nzk6OTU1NzY0
-NTU0NTg2NjY1Nzc2ODo2NTQ4ODc5ODk3NjY3ODk5ODU2NjM0NTU1MzU2NjY2ODk4
-NzI1NjY2NzQ6Ozg4Njg+OTY4OTg4PDk6OTc4NzQ3ODg2NDg6Ojc4ODg3Nzo3Nzo2
-Ojk8OjY3NTY1Nzc1NDY1NDY2Nzg6Nzk3Nzc1NTk9OTk2Nzo2OTc2NjY4NDc2Nzo6
-Oj85Njs7Njg4Nzg6OT04OTk7PD0+OTg5Ojo5Ozs6Ojc5ODU2NjY4ODc3Oj06Ozs8
-Pj8/PD47PDs6Ojw7Pjs7Ozw9PD08PT0+PTo6Ojs9PDs8Oz09PD09PDs5Ojo5OD06
-Ojg6Ojs4Ojk4OTg4ODk5Ojg3ODk8Ojk5Ozg1NTU4NzQ5ODY1Njk6Ojc1NTY1NTk4
-OjY3OTc2ODs6ODc4ODc3NDo4NTY3Njk5Ojo6OTo4NjY2OTg5NjU4NjY4Nzo7PDc0
-Nzc1Nzc1NzU1NDY3ODg6Ozg2NTQzODo6PTs4ODo6Ojw7OTg4ODg3ODk7Ozw+OTs/
-P0E7ODo+QTw6OUA+PT08PT8/PD09Ojs9Pjs9P0A/Pj07PkE9OjpAPUFAQkI/Pj09
-Q0JCRUJFRT9OocHL1Nnf4eTn5+jqPkE9QEBBQD4+PT9BPzk8Pzs6Ozs7Ozk7OTg5
-Ozk+Ojc5NDU1NTg5Oj01NTk4NzU2Nzg6ODQ1Njg4Nzs4Ozs1ODU0Njg3NzYzMjU1
-NDIzOjs0MzY0OTo2Ojg5Ojk4Nzg2MTQ5Njc0MTI2MzQzNTY5NjMzNTU2NTo7NTQy
-Nzc4NDM1OTg2MzI3NTU0MzQzMjMzMjIyNjQzNDo4NjU4NTc6PDs3NDUzMjM2MjI4
-MzIzMzIyMjE0MjIyNTQ1NTY2NDU1MjQ1MzY1ODY2NzQyMjM1Mzg7NTI1NDQzMTE0
-MzIxMTczMzEzMjU6NTQ2ODk1NDQ0NTU0MTQ1NTY1NzM0MzE1MzM4OTQzNTMyNjQ1
-ODc3NjY0MzQzMzU3MzU3Nzg0MzI3QzYzNDQ2MzY0NTQ2NTQzNjo4Njg1MzY0MzM3
-NjQ2OTc3NDg1OTc2NTQ1NjU3Nzo1NjM1Nzc1NjQzNjg3OTo3ODY0NDI0Njk1Njg5
-NjU0MjM2Nzg3OTk2NTM2NjU2NTQ3Njg4Njc5OTk4ODc3NTIzNTIyNjY3Nzg1NTg6
-ODk5Ojc4NTU0NDU3MzMyNTg2OTU1NjY1Nzg5NjQ1NTY1MjQ0NDU3NjY4Ojk4ODg3
-NjUyNDc5NDY2NjM0MzU1NzQ1NTUzMjE0NDYyMzc4OTg2NTM2Njg4NTU2Nzc4NTU7
-Ojs3Mzc3Nzg2PDs5ODg5NzY4NzY2NTk3ODo5ODY3NTg1NzY3OTc4ODo4NjU2MzY7
-ODc0NDU3ODg1Njg1OTg5ODc3NjY1NTU3NjY3NDQ2OT48NzY2NjYzNTg2NjY5NTY4
-ODs7PTg3NjY5NTY6OTU1NzY1Ojw6Ozo6Nzc5OTk3Nzg5OTo8Ojo2OT86PTg5Ozg3
-Ojc2Ozo4ODo6OTo1Njg5OTo5Nzk6Nzo5ODU4NzY4ODg3Nzc4ODg3OEA9Nzc4OTg4
-ODg3NzU3NTQ0NTU0Nzg2Njc4Ojg3NTU3NTU1NDU1Nzs2NDY3NTM1ODc1Njk6OzY2
-Njc4OTc3ODc2NjQzNzk2ODMzNDc0NTc5ODQ0NTU3ODo3ODg5PTo3OTU1NTo4Ojc3
-NTU3Nzc3Njc2NTQ1ODg3ODg4OTg3ODw6Ojc2OTs4NDY3NTc3ODY2NTc3NjY2Nzg3
-ODc1NzY6OTg2Ozc3NzY3ODc5NTg6OTg6OTo6OTc4Nzw5Nzg3Njc6Ojo5OTk2Ojo4
-Ojo6Ozs7Pzk5Nzk3Ojg2Njg5OjpAOzs6PDo8Ojk6Ozo6Ozo8Ozk8PDw8Ojs6PT4/
-Pj47Pzw6Ozw/Ojg5Ojo6Ozo6Ojs6ODg5Ojo6OTo6Ojc3Njc2ODc2NzU6PDk3Nzk4
-ODY2Njc4NjY3ODY0Nzk4NzQ0NTU0Njk4Nzc1NTY5Njc4Nzg5ODc4PTc4NDU1Njg4
-Nzc7NzY0NTY0Njg5Ojw7Ojc6Ozg1Njc1Njo3Nzk4Njc3ODk4ODc5OTw4ODo4ODc2
-Njc5Ojk7PDk3Ozk3Ojk9Ojk6Mzg6Ojw8RDs8Pjk4Nzw8PD89Ozk4Ozw/Pz8+PD1C
-QT0+PD08PDs9Pjs6Ojs/QT89QUI7OkA9PD88PENCQ1CfwcvV29/i5Ofn6uo9PT5B
-O0NAP0A9Oz9BPz06Ozs9PTs8PDg4NTs2NDY6ODg5Nzk2NTo6ODU3Njk2NzQ3ODY1
-MzQzNzg9Pjg2OTk4MzQzNzg2NTQ1NzM0Ojw5NTY1NTY4ODY1NjY4Nzo0NDY4NTQ8
-MzM0MjM1NjQ1NTY4ODc2NDQ1MzU1NjYzNDU4NjY4NTY2NTY4NzU2MjY0MDM2NTc2
-NDY2NTQ0NDI1MjQ1NjU3NjYzMjQ0MzMyMjA0MjQ0NDY3NTU0MS8wMjMzMzUzMzU2
-NzU0NDQ1NTQyMzMyMTAzNTY1OTIvLzI2MTI1MjQzMjMzMzIzMzUyMzA1Njc3NjQy
-NDU1NDYzNDc2NTQ2NDUzNTUyMTA0NTU2NTQ2NDY2NjY2NTc2NTY0NTQ2NTQ0NTQz
-NTg2MzY4ODc3MzY2NDc3NDY2Njc3NTY4NTU5NzY2ODg3NTY1NjE2MzU2MzA0NDU1
-OTc1Njc2NjQ3NTQ1NDU1OTU1MzM3ODk2ODU3NzY0NDY6NjY7Ozg1NTQ1OzQ1ODs5
-NDU4OTU1NzQzNTY3NjM1NTU5Njc3Njc2ODo1NjU0NzY1NDE1NjY0NzU7NTY4NjY0
-MjQvMTE0NTYyNDM3NTI0Nzc5NjIyMjYzMzQ2NzY1NjY1NjU1NDY5Ozk5NTQ0NTI4
-NjY0NTQ4NzY2ODY2Nzc5NjY1Njg2Ojo3Nzc4Sjg2NDo5OTs4ODU5Ozg3Ojs7ODg4
-ODg5OTo6NjY1ODg3Nzk2Nzk1NjY3NTU2MzM3Njk0ODc5Njc1OTg2Nzo4ODg6ODk2
-NTY7OTY4ODg4OTU2ODc1Nzc2NTc5Nzc2OTo3Nzk4OTg6ODc2ODY4Ozk3NzU6Ojs5
-ODc3OTk5Ozo7OTc1Nzg4ODo2OTk1NTY2Nzg5Njg1Nzk7Nzk2NTU3Njc3Nzc3NjU2
-OTk2NTYzNDg3NzY3NzY2NjQ2ODc3ODk3Nzs4NjU0NTQ3NTc2ODU2Njc3NTU1NDg3
-ODU3ODg1NzU3NzY0MzY2ODU3NTQ3NDY2NTg2NzY5OTk2NDQ0NjQ1NjM2NTY5NTU0
-NDU6ODc3NzU4Njw9NjY5OjU6OjU2NDQ4NzY1NjIzODY1Ojo2ODU3OTc0NDYzNTU3
-NzY3ODc6Nzc1NTk2Nzc4OTY1Nzo4Nzc0Nzg0NDU2ODc4ODg3Nzg+ODg2ODk8Ozc3
-PDw4ODo5OTo8OTY5PUU5OTc3Njk6Ojk4PDo5OTs3Nzk6Ojg5Nzc6Oz4+PTo5Ozo5
-Ozk6PT0/Ojk7Oj08Ojs7Ozo6Ojw/Ojs8PDs5Ojk7Ozo5Ojg5Ojo8Ojg8Pjw+OTU5
-OTo4OTg3ODk4ODg1NDY5Ojg4OTc3ODk2NjY1NTM3NTU4Ojk3Nzk8OTY1NzY1Mzc3
-NTY3NjY1Njk5OTY0NDc1Njk4NzU2OTY2NjY1NTk4OTo3NDU3Nzc4NTQ0NDI1NDc2
-Njg4OTU1NTc2ODo7Ozo7Pzk4Ozk3MzU2Nzw6ODc4Ojw6Ozk6ODs6ODc7Nzs8PTs4
-Oj06ODo7Ojs9Pzw/PTs4Ozs+PkA9PDo9Ozw+PD08Ojs5Ojo+QD5APzo7PUBCQT07
-PkBDQTxBUJ7CzNTa3+Pm5ujo6z07Oz46PEE9PDw8OjxART4+Nzo7PkI+Nzg4OTg3
-NzY0Ojk7OT07OTk4NTU2Njc2NzU2Njg3NDM7NjM1NjY4OjY2Nzc1NTY1MzIzNTQ3
-PTs1Njk4NTc3NDM1Njk6MzU2Nzc3Njc1MjM3OTg1NTcyMzQ2NTE0MzM1NjY5NDYy
-NTU2NTM3ODg3NTMzNDQ2NTQ0NTc3NTM1NTczNjUzNDM0NTY1NTY2ODU0Nzk1NDMv
-MzI0NDM0MzIzNDI0NjUyMzQ1NDU1MzQzMDEzNTYzNDg0NDY0NTQ9Njc4OTQ0MzY2
-NzU1MjU0NTM0NTQ1NzUzMzYzNzg2NTE4NTMyMTM1MzIyNDQ1NDUyMDEyMzM0MzMy
-MTQ0NzM1NTIxMDU1MzQ0NT01NTc2NzY2OTc4NTU1OTc5Ojc7NjU1NTY3NTY0NTU1
-NDU3NzY5Njc2ODY3NjQyMzw1NDY4Njc1NTc2NDM0NTc0ODQ4OTo4Nzo3ODU3ODk3
-ODc2NjY1NDQ6OT85Nzc3NjQ0NDU1NDY1NDU1Ojk3Njk5Njg4NTU1NjQ2OTk3OjY2
-ODg4NTY3OTc0NjUzNTc3MzI6OTg2NDIzMzQ3ODM0NjY1NTM1Nzg2NTU1NTY0NDMz
-ODQ1OTU2NjQ1NzQ1NTU3NTU1NDU0NDc5NTU1NTQ0NDM1ODQzNjQ1NTU4Njo4NjY1
-NTc+Njc8Ojo5Ojk6ODY1NjY6OTo4ODU2Njg4NzI0NTc8Ozg3Nzg5Njg6OTc2NDU4
-OTg3OTg3Nzc1NDc2Nzk2OTk4ODc6NjU4ODc4OjczNTg3Nzc3Ojk5OTk4Ozo4Nzk3
-Ojo6Ojc0PDs5NTY3OTk2ODY6Ojo/Ozk5PDs5Ozg3ODhBODg2Ojc4OTs6NjY3ODY4
-OTY4ODs4ODk6OTc2NTc0Njc3NjY1ODc4Ozs6NTY4ODY3Nzc2NTMzNTU1NDU5OTY3
-ODc3Njc1NzU0Njo6Nzc5NzQ0ODg7OTc2Nzc2Njk3NTQzNDY2NTU2NTY3NzY3Njg2
-NTU4NjY3Njg5NzY2NTc6ODc4Njc3ODs7Ojc6Nzc3ODc5ODg5OTk4NTg5OTY0MzM0
-NDY5OTk7Ojo4Nzc0NDU1NTI1NDQ2Nzc1Njk3NzY3NTY1NTc2NTY3NTc5ODQ1NTM0
-Nzc5Njk5Nzk6Njc2OTY4Oj06ODc6Njk7Ojg6PDo3ODk6Ozk5Ojg2NzU0ODk3ODg5
-Njg5NjU4Nzs6PDw9OTw8Ozk7PDo7PDk3Ozo8PDw8Oz0/Ojk7Ozo4OTs6OTo6OTo7
-Ozk4Ojo4OTk5ODs6ODc4Ozo6Ojw6ODc4ODc3Njc5ODk4ODQ4NTg3ODg4Ojk4Ozw5
-ODk2NjU3ODY6Nzc4Nzk3NTY2Ojg4Nzg4NjY2NTg2NjU0NTU1NDM3NzY4Nzg2Njk4
-Njk4Ozo4NzY3NjY2NDU4Ojg6ODk4NTY1NTY1NzY3Njk4ODc3ODs8Ojg5OTg7OTk5
-Nzo6Ozk5Oj08Ojg5PDw6Oj4+OTk5PTo6PDo6Ojo4Ozo4PDtAPzw9Oz07O0BAPT48
-Oz07PDs8Ozc4Pj49PDw8Pjw9PkNJSENAQj9CP0RVn8HM1trg4uXn6OrqQUA+O0BB
-QD5CPTtDQkY/QDw3Nzk8PTk2OTk5ODo3NTg3NjU3ODY1Ojw7ODc1NDY3ODk5NjQ0
-NTU1Njg9ODc6OTg6Nzc0NDY2NzUyODpENTU2NDMzNDc0MzI0MzM1NDQyNjY0NTQ2
-NDEzNDc2Njk2NjU2NTQzMjQ1NDQ2NTg3Njc3Nzg3NjU3ODM1NTQ1NTk2NzQ1Mzc1
-NDU1NTMzNTQ1NjQ1NDQ2NDQ0NDU1ODY1NTM1NzUzMjQzNTQ2NDMzMzM0NDYxMjY2
-Mzc1NTU1NDQ0NDMzNEk4NDQ0NDc8ODcxMjYyMTEzNDMzNDc0MjI1Mzo6NjY5NzY1
-MjM1NjUvMTYyMjY1NDUzNjQxMzQzNDI0NTgzMzIxMzQ1NTU2NjQzNTQ2NzQ2NzY3
-Ozg0MjQ1Mzk3NjU0MjI1ODc0NTc1NDM0NTc1NDUyMjU0NTc2Nzg0Mzg7NzU2Njc1
-NjM1NjM2NTIyNjU7Ozg3NjY0NDU1MzQ5NzU2NTY3NjY5Njc0MTYxNDI0MzQ0NDUz
-NDM3NzY3NTY3Ozc4NDM3Nzc4OTk5Ojk3Njg1MzM3MjE0NTU1Njc0NTc0ODwzMTI1
-NjY2OTo2MzMyNDQzMzMyMzU4NTY3ODc5Ojg2ODQ3MzQ2Nzc5NTUyNjg4NzQ1NzY0
-Nj0zNTM1OzY2NjQ1NjQ1NUI7NTQ3NjM0NTU2ODY3ODc4ODU1NTQ2Nzg4NTc4ODg3
-Nzo7PDk4OjQ1Njg5Ozg6ODU0OTc2Mzc1ODg6OTY1Nzc4Nzg4Ojc3Nz08Ojk5OTk8
-ODc2NjY3ODc3ODg5Ojs6ODY3NzY4NjQ0NjY2Njg5ODs9ODc4ODk5OTg0NjY3OTg7
-ODg4OTc2NDQ6PDs+OjU4OTs8OTQ2NzQ1Njo3NzU1Njc3NTY2MzEzNTY1NTg4ODc3
-ODg0Nzg5NTc4ODY0MjY3MzI0Njc3Ojo4Njc3OTk3NjY4NjY2NTg4ODY1Njc1MTU1
-NTY1NDc4NjMzNjU1Nzc2NjY1NTU3NTU3NTc3Njc4OTk3ODk2NDQ3NjU0Njg6Ozk4
-Nzg3ODg8ODk8ODc4ODo8NjQ2NDY2NjU2NTY5NTs5Ojc2Ojo2NjMzNTU1ODY5NTU3
-Nzg0Nzk2NTE0NDEzNzk5OTk5OTg4ODc2ODc5Ozo5Nzg2ODo3Nzg5OTc4ODY4OTk4
-ODc4Ojo6PD08Ozc4ODg3Nzo2NjY4Ojk4OTg4Njc3OTg6OTg8PD09PDs9Ozo7Ozs7
-PDs8PTs6Ozo7PD06Ojs9OTY7Ojw9Pz07ODY6PTw6OTs7Ojo3Nzc4OTo4Njc5Njg4
-ODc4OTc1Njc5OTU4OTg3Nzk3Nzg7PTs4NTU1OTg3NjY3NjU2Njk6Nzg3NTc4NjQ0
-MzU3NjU3NTg7Ojc3OTk5NjU0Nzg2NTU2ODc4OTg8PDw7Njc2ODg2ODk3OTk3ODg5
-Njc7Nzc5Ojc2Ozk5Ojs6NzU3Nzc5Ojw7OTo2ODw8Ozw5ODs8Ozc4Oj07PDtBPjs4
-Mzg4ODg9Ojk7Oz09QUE/PDs9PDw8OTs6OTs8Pj88Oz46Oz09PD87OTo/QExHREVH
-REE/RU+bws7W29/j5ubo6epCQkJBQ0NEPjo7PkJCQDs/Pjo4OT06OD08Ozs6Oz89
-Ojs9Nzc3ODg1Ozs4NzY4NzY2PTk3NDQ3NDQ1ODg1Ojo4NjY1Mzg4NTY2NjM3Njc8
-NjUyMjI0MzUyNTg1ODc3NDUzNDUzNTQ3OTUzMTIzNzMxNTUzMjg5MjI0NjU1Mzc2
-NDQ1NDU4NjU2NzQ1NzQ0MzEyMDIyNDQyNDU1NDIwNDU0NzQyMDQ0MzExNDY4ODU0
-NDEyNDY3NTQ2NDM1NDU0MjIyMzMzNDU1NDQ1NTo+NC4zNTQ0ODg5NDM1NDU0NjM0
-MzIxMzMyNDY3ODQ0NDc2NDU1NTc4MzUyMzU5NzM1NDIzNjY2NTc2NTc1NDczMzY1
-ODIwNTYxMTI2Njg3NjY0MzU0ODU0Njc2OzY3NzU0ODQzNzY2NTY1ODY2Njc3Nzg1
-MjUzMjY3NDg2NzY3NDM2Nzs5ODk5NjY0MTY2OTY1OTY2NTU4Ojk5ODk5NTU1NTQy
-NTc5NjY3Nzc6NTU4ODk1NDA0MzU1NzU0NjU2NDY0NTg1NzczNjU6ODc3ODg3NTYy
-NTExNDQ1ODs2MzQ4NTU0NjYzOTI1Mzs2NTg6OTc1MTQzMzQ3Mzc3NjU2ODg2NDU3
-ODc3NjY0MzQ2NjU3Nzg3NTU1NDU1Njc0NDUxMjQ3NzQ1NjQ1Ozc2N0M2NTQ0MjAy
-MTU5OjY2NzU3NzU5NTU1ODU6Ojc5NjY2NTU3PTo5ODY3OTc4ODU0NzY2Nzg4ODo5
-OTU1NDU3ODk2Njg5OTk5Ojk3OTo6ODg4NTU1OTo3PDk5NTY2ODQ2ODg4Nzk6Ozg3
-NzQzNzU0NTo5Ojo5ODc2NTk2NTg6NTY3OTc5ODY0NDg4Nzk4OTY1Nzk6NTQ1ODc1
-NDY2Nzc3NjQzMzQ1NjU0NDYzOjw3ODc5Ozk3NTk6ODo3OTg5NjU4MzQ1Nzk5OTg3
-NTM2ODk5NzU2NDU4Nzg4NjY1NTQ4NTQ1NjU1NTU4NTY1ODY3Nzc2NTc2NDU3NzY5
-NjY1Nzg2ODU1Njc1NDY1ODc1OTg5ODs4ODc1NTQ2NDQzODg5PTs3Nzs8Ozc5NDQ0
-MjU8PDs4NzY0Nzc2NzU4NTM1NDQ3NzU2NjUxNjo2NTQyOTY0Nzo5ODk5OTg6Ojs6
-OTk5PDk3ODs8PDg4OTY5OTg3Nzk5Nzc5Ojg3OTo7OTo6Ojc5Ojo5OTg4Njc4ODc4
-NjU3ODk3ODU4Ojw3Ojo8Ojs7PDo5Ozo7Ozk6PTs6Ozo7Ozk5Ojo8OTY5PD9BPDk5
-Ojc4Ojk5ODo7Ojs7PDk3Ozg5Nzc3NDU3ODc4NjY3NzY4ODg4ODY2Ojo6Nzg3OTc5
-Ozc2NDY3ODg7ODc2Nzg2NzY4Nzc2NTg5Nzg3ODU4OTs6Ojs5NzY3Njc4Nzg4OTc1
-ODc3OTk5OTc1NTo5NTg7PTw6OTg4Nzc3Nzc4ODk4OTo2NjQ3ODg2NTY3Njk5Njk3
-Njc3ODo5ODs5OTc5ODk5Ojo7Ojk6Ojs8Nzc2ODo3ODo/QDw7PT5BPTs8Ojw9Ozo9
-Ojo7PT04QD07PD47PUA+PD1CQUNFQkZFQ0RJV6HCztXb4ePl5+jq6kJBPz0+PUFD
-QD89QUI/Pz08Pjs8OTo6Ojo4OTc5ODg6OTc6ODU2NzY1ODY0ODU3OTo1Mzc6Njk4
-ODk4ODY0NzU0NDc3ODY0NDIxMzE3NzczMzg5NjM0ODM0NTU3ODc0NzQzMTMyNDI0
-MzEyNDQyNjQ0OTYzMjU0Mzc1MzU3NTU0Mjc2NDU4OTg1NDU1NzY3NTIyMTExOjYz
-MjQxNC8xNDQ1NTUyMjc0MTMzMTI2NTQ1NTMyMzM1NDM1MzY9OzkyMzY0NjU4Nzg3
-MTU5OD0zNTQ0MzU1MzYzMzQzNDI1NDQ1MS8zNTY0NTMzNjU3MzM0Mzc0MjE1MjEz
-NDo8NjU3NjYzNTMzNDU4NjQzMzU3NzQ0OzY2NTQ1Njc2Njg2NjU0Njc5NjY0MzMz
-Njo4NDM1NDIzNTczNDU1NjUzNDk0NTM0MzU1NDc5NTg3NzMyNDU2Njc0NDQzNjQ1
-NDc2ODo3NTY2NTQ0ODo7Ozo4NzYyMjU0MzMyNTQ0NTc3Njo4NjY0ODc4Njc8OjQx
-MDM2NTc2NjMxMjc2Njc5ODo6NTQ1ODY1Nzg1NDc3NzQ0NDY1NDQzNjExMzU3NjYz
-NDU2Nzc2OTY4MzQzNDg3ODY1MzU4NTQyNjUyMjQ0NjY2Njc3NzU1NTU4OTU2NjM2
-MzU2NjU3Nzc1NjQ5NjY3PEQ3MTE0NTU2NDU1NzY3Nzg4Ozo3Ojo3ODY1NjY1Njc3
-OTk4NTY4NjU3OUI8Ozs6Ojs4OTg6NzU2NjY4Ojc3Ojg3Mzk1NTQ2ODc3Nzo3NzQ1
-NDY2NjQ5Ojg4NjY2NDMzNDY1Njk5ODc3Nzc2NzY1ODk6Nzk4NTg6OTc5OTk4NjQ2
-ODc4Njg4ODc3OTs3Nzg2ODc3Nzg5Ozc3NTg5OTozNzY6OTc2NDI1NjQ0Ojg0NDc3
-ODg3NzU4ODk4NzY1Nzk4MjY3Nzk2ODc0MzY2NjY2Njc5OTUzNTc4NzQzNDc4NTQ0
-NjY2NTQ2NzM0Nzc4Nzc1NDQzNjc6OTY0MzU2NTY6ODc2NDk3Nzo6OTc3Ojw9NzQ3
-OTc5NzU4Mzg6OTg5Ojk3Njc6OTc2NTY1ODk4Nzg5OTU3ODU3ODc3NDc4NjM0MTQy
-NjUzNTQ1NDMzNzg1ODs5Nzw4NTY7OTo4ODk7Ozs5ODs6Nzk6Ojk6OjY3NTY3NTc3
-NjY2OTk3ODg3ODc6OTw8ODo5NzU4Nzk2NzY2Ojg3Nzc5Oz03OTs8ODc3ODo7Ojg7
-Ozs9PD07PDw6OTo6Ozw9Pzo7PDw6ODk5Njk7Ozs6PDo6PD09PDk6OTY5OTc2NTYz
-NDY3NjY2Nzo7PTg3OTo5Njc4OTo6OTo5OTg4ODc1NjU2NDQ3ODc0NjU2Njg2Njk4
-Nzk5OTs5ODs6Nzo3Nzg1NzU2OTk4OTc5ODg4NzU4OjY2Nzc7PTk4NTo7Njc2NTY3
-Nzo5NDY5OTc4ODg4ODw3Nzk6Njc1Nzc9PDo5OTc4Ojk4ODg3Njk5PDs4Ozo5OTg5
-Njc3OTk6PDw8PT89Pz4/Pjs8Pj05Ojo5NTs/PT46Oz4+PT8+Pj09OzxDRUZLRktK
-S05ZpMLN19zg4ubn6OrqQT5BQUA9QUI+QT49Ozk9PD8/Pz46Nzo6OTc0Ojk3NTg5
-ODo3Nzg5Ojc4Ojc6ODY7OTc1ODg6Mzk2OTU1NzUzNTYxMzI1NzMzMTY3NzYyMTQ2
-ODc5NjU1NzU4ODQ5NDMxMzQxMjI2NTg1NDI0NDQ0NjU1ODg2MzU0NDYyNzk3NTQ5
-Njc4Njc5NzQzNTU1NDQ2ODU1MjQ1NDU0MzU2NjMyMjU2Njc0NTUyMTE2NT41ODQz
-NTQ2OTc1ODU0NTs5OzQ2ODk2MjQ0NDAvMDUyMjY2NTIzMjQ0NTE0NTQ4NzIzNjQz
-NTMyNTEzMjQyMzU1MzQ0MjE0MzI1NTU2Nzc2NTU3MTMyNTM1MzM1MzM1NDYyNDM1
-MzAyMzM0MzQ2Njc0NTU0NTg4NDI1NjQ2OTg4OjYzMjEzNjc4Njc2NTMvMzU3MjU3
-ODQ2NjQ3Ojc1NTQ0NTQzNTY1MTU1ODUzMjU1NjM2NDg0NTg5ODc2Njk5NTc0NTMz
-NTY0MzMzNDY4OTk6NjQ1ODg1Njc3Nzc1NTc4ODU0ODc7NzU3OjU5OTg4NjEzNjQ1
-MjM3MzQ0NDMzNDQ0NjM1MzQ1NjM1NjUzNDM1OTg2NzYzNzQ0MzY0NzUzMjQ4Nzc5
-NTYzNDE0MTU0Mjs5MzY3Njc2NTc0NzQ2NDU3NTU2NzY0NTY2ODY4Qzw5OTc5Ojk1
-NDU4Nzk3NTc3OTo4ODk4ODk3ODg3ODk3NjQ4OTc6PDk6ODU5Ozs6NzY4Mzc2NTY0
-NTk4ODg4NDU1NDU4NjQ1ODg4OTY0NDU2NzY3Nzc3Ozo0NTQ2Njg5ODg3ODc2NDM2
-NTQ1Njg1Njc2NTc1Ojk5Nzg3Njg5ODY1Njg4NTg5ODg5ODg1Njc4ODk5OTs6ODc4
-NzY4NzQ0Njc5ODY2NTY1NTc2NTo4NDY1Nzc4OTc2Nzg5NzY1NTc2Nzc4NzQ2NTQ1
-NTQ3NTY2Nzk3NzY1NTY2Njg3NzU1NDY1MzU4NzQzNDQ1Njg1NzY0Njg2NTg2MzM2
-OTc0Nzc4NjY3Njc3OTk4NDY4Nzc4ODc3ODk3Pz03ODc3OTo6ODw6Nzc3ODk4Njc0
-Njg3NjU5ODo5NjQ2NTY5OTY+OTs3MzQ0NDk3NTQ1NTM0Njk6Ozw6ODg5Ozo5OTo7
-PDg8Nzc6OTg4OTk5Ojg4OTY2ODo6OTg2NTU6ODk7OTY3ODk6ODg6Nzc3OTc3OD04
-Njg5NzY5ODk6Nzk6PDw6OTc3ODk+PDg5Ozk4PDw7Oj0+PDs7Ojs8Ojo7Ojo6OTk5
-OTs6Nzo7ODk4OTo6Ojk7Ozs7OTc3NjU1NDU2Nzg5Ozs8Ozw7Ojc2NDU4Ozc4Ojk3
-ODg2NDY2NjU4OTY5NTU3NjU1NDg5NjY1NTk4Nzc4NzU4Nzc4NzY0Njc1Nzk5Nzo5
-Njc2Nzc1NzY2NjY2Njc4ODg4Njg4ODo5ODc3Nzk4OTg3ODg4Ojg4ODk4Nzg4OTo5
-Nzg4ODk7Ozg5ODY3Nzo4ODs4ODw7PTk7Ojs8Ozw7Ojs9PDs7Ozo5PT0+PT45PD08
-PT1APj07Ojk7Oz5CQT88Oz4/VGhTRkQ+RVajwc7W3eHj5ejq6upBPkJCQT4+Q0VA
-P0FAPD08OjlBPjg8OTs8Pjk5OjY1Ojw3NTg2NjY5OT04NzU3NTY4NTo3MjM0ODc1
-MzQ0Mzc2ODYyMjQ2NDU6Ozg1NDMzNzYzNzQ1NTY4NzU0NjU2MzY3NDM0NDY2ODY3
-NzY5NTI2NDY2NjMwMDY1NDQyNTk1NjUzNDY3NjY0NTQ0MzU2NTI2OTczNDU0NTM1
-Ojk5NTQyLzIyNDQzMjQ1ODY1OzU2NjI0MjZTODY0NDUyMzY2ODY1NTY3NDIyNTEw
-MjMzMjQ0NjM0MzY1NjU0NTM0NTUzNTc3NDM2NTQwMjQ2NTU0NDc1NjIxMzU2NjY2
-NjU1NzI0Njc2NjQ7NjI1NDUyNTc2NDU0ODMyMzM1ODY2NjY4NTU1NTM0MzQzNTU1
-NzMzMzI3NDU3NTU3Njg2NjM1MzUyMzU6OTU2NTYyNDM4ODg1MzQ1MzQ2NTc1NTQz
-NDU4Njg1Mzg5Nzo2Mzg3NTQ1Pjc3NjU1NTY3NTUzNTY3NjY4NDI1OTs1MzM2OTc6
-Njg4NzY0Nzk3ODk7OTc2NTY1MzU1MjM3NDw5NzU1MzE1NTU3Njk2NDU1NzU0NTY2
-ODk3OTY0NTg0NTM0NDU3NDYzNTU3NTU3NjY1NDM0Nzg5NTg1MjU2ODY1NDc1NTU5
-Nzc2NzY4NjU3NTY2Njg7STg2Mzw5Njo4Nzg4Njc4Nzc5Ojg3Nzk5Ozg4ODc4OTk1
-OTk6ODk4NzY3Nzo8PDo3NzQ2ODc3NzQ3MzY4MzM4Nzg6NzQzNjU1Njc4OTg1Nzk4
-NjY4ODQ3NjY3NjU2Nzc4ODU2Njk4Njg7Ojc5OTc1NDc7ODg3NTY3NjY1Nzc2NDM0
-NjQ2Nzc2NzQ3NTQ1NTY3OTs1MzU2NzU4ODg0Njc3Njo6Ojg3NTU2ODg5NzY2NjUz
-NjU3NzU2Njc3NDQzMzUzNjc1NTU2NTc1NTQ1NDM0NTc4NTQzNTU0ODc2ODY3NTUz
-NDY0NjY3NzY3NTc4NTU0NzU3Nzc6OTk1Nzc4NTY2Njo4OTk4NjY6ODU2Nzc4OD84
-OjxDOjg2NDU1NTU3Nzk9Ojc2Nzc1Njg3ODc0Njc3NjgzNjY1ODo2Njc2Njw3ODg0
-NDc4OTs7NzU4Njg4Ozk6Njc8OTw5Ozw6ODc0NTc8ODk7PDk2Njk3OTo8OTg1Njc2
-Nzg3ODY3Ozo4Nzk5ODg4Ozg4Nzg1Nzc4OTk3OTo8Ojw4Pjs4Ojs6Ozs4Nzo7OTxA
-Ozs5Ozw7Ozk7Oj0/Ojk7Ozg7PDw7Ojg4OD04Ojk+Ozo5Ojg6Nzk5OTo4OTc3NjY1
-OTk7Ojg4Ojs9Ojo6ODg4NTY4Ojg5ODg2Njc5NTQzNzk5NzY1NzY3NjY2ODY2NzYz
-Nzg4Nzg5ODc4ODo5NjY1MzU2Njk2Njg4ODY2NDY2Ojk0ODo6OTY1ODY2ODk5ODk7
-OTg3OTg3Njg2OTY3ODo8OTg3ODk4Ozs8Nzg3OTs5OTw9ODc3Ojs4PDw7Ozs9Pz49
-PDo5Ojc4Nzo7OTs7Ojo5PT07Ojo8OD09Oz08ODY3Ozo7PE5BPDw8Pz49WEtFREZD
-VKTBzNbc4OPl6Ofq6Ts8PT9BQjtBRUI9PzxFQT05ODU0OT49Oz5BQTk5ODo8PD02
-ODg0Ojo3Njo5Ozg1ODkzNjc5NTMzMzQzMzIzNTM0NTg0NDs4MzY1NzQ0NTg1NTY2
-MzMzNDE2MzM3NDI0MzY4NjU2NjY2NzIxMjI2Nzc1NTQ1NTMxNTg1NjYxNTY0NDQz
-MzU3OTg1NDMyMjE0MzQ2NzUzNDU1NzU2NTc1MTU6NTQ1NTQ1NTQ0Njc1NjMzNzQz
-NTc0MTMzNjU2Njc3OTc2NzczMzEyNTUyMzM0MzU4NTU0NjY2NDQwMTM1NzIxNDY0
-MzM5NzMzMjI0NjQ0NDU4MzQzNDU4Njc4NDUzMzMwMTMzMzM0MzU0LzIzMDI0NzQ0
-NjMzNDY4OTQ1Nzc1NTg0NjQ1NTg6TUc2NjMzNjc4NjQ2NDM0NTY2MzIwMzQ1Njc4
-Ojg7NjU4OTY2ODY0MzE0NzYzNTs4NDU0NTM1ODU2MTU4Njc1Nzc2NTc2ODc2NjQ0
-MTQ0NDU1NTMyMzM3MzY2NDc1NTMzMzU4ODg3NjY1Nzs3Nzc4ODY0OTQ1MjQ2Nzc1
-NDU1NDY0NDM1ODU2Njg1NjU1NzQ2ODU1NTM0NDQzNTo6Njk0ODY1NTU0NTY2NjU0
-OTY1ODc2NjY1OTY4Njk2Njg3NDM3ODY2NDY1NTU2ODk5NzU4OTg4NzU4NjU1ODc4
-Njc4NzU1NjY2Njg4ODg3OTk2Ojk4ODpGOjo5ODU3ODg3Ojs6Ozo2NzM2Ojo4OTc2
-NTY4ODk3Nzk2ODc6OTc2NTg4OjY0Nzc7OTg2NzU4NzU6OTcyNjY1NDY2Nzc4NTg6
-OTc5ODk4ODY3Nzc3NTQ0NTU4NzU1NzUzMzU1Njg3ODg7ODY2NTk4NzU0ODc3Ozk2
-NTY3Nzc4Nzc2Ojg4PTk4NjQ4NzU4NTY4NzU2NDM2NjY2NTU0NjU0MzU2NDY6NjM1
-NzY3OjY4OTg1NDU1NTUzNTU0NjQyNDg0MjM1NDY0NDY4NzY2Njc2ODc3Njg7ODY0
-NDY2NjU5Nzc4Njc4PTw7OTc6ODk7OTY9OTs6ODg3NjU1NjQ5PzhJOjk2NDg4ODY3
-OTo3Ojw8ODc3OTs5Ojs4ODg3Nzk3ODU1MjU3ODo8ODY3ODg5ODY0NTY4ODMzNTY4
-NjU1NDc3Ojk2NjM3NjY2OTk4ODk4Ojk4Nzc2NTU3Ojs8Ojg3Nzc5Nzc4Njc3NzY4
-NzY5ODo9Ojo6ODs6Ojw2ODw5Ojw8PD09Pjs6Pjs5PEE/Ozs6Ojw7Ozk6PDs9Ojg3
-OTs7Ozs7OT47ODg4Nzc2NTc1OTg2OTk4NjZAOzo4ODc5OjY3Nzo5NjY4ODc5Ojg3
-NjIyMzY3Nzk3NDY3Njc2NDY0NzU0Njg3NDc2Nzg3Njk4Nzc4ODg0NjQ3ODY5ODc0
-MTI1NTYzNDQ0NjM4NTc4ODg2NzM3OTc3Nzo3OTo3OTg4OTk5ODo7Ojc3ODc5OjY2
-NDY0Nzk6OTw9Ozw5OT07PUA5PDw9Ozo7Ozs6Oj0+Ozo7OjY4OT09Ozk5OTk6Ozs7
-PT07OTk4Ozk3Ujs8QD09Q0NETkhGQ1BTpsHN1Nzf4+Xo5+nqQUBCQjw8QkA+PDo6
-Pj4+Pjo4Ojo4Pj09ODo5Nzg5MzY1Njg4Nzg2NTc0NzczOTc2Nzc4OTk1NjY5NDU2
-ODQzNDM1NDM2NDM1NTg2Nzc2NTY2NDU2Njc1MDI1MjQyMzg0NDU3NDk6OTQ1NTM0
-NTQ0NjIyMTE0NTY1NTU2ODc3NjY0MjQ0Nzc6NTY0ODc2NTM0NDYzMzQ0NTQ1NzU2
-OTQ0MzQ2NDIwMDQ4NzUzNTc1NjMyMjY0NDY5MjI0NDU0NDQ0NTg1MjM1NTYzNDY1
-NDQ2NDU0NjQ0NDU1NDIwMTI0NDQ2MzM2MzQ4Njc3NTQ0Njk3Mzk5NjQzMzU1NTU1
-NTQ0MjM0MzIyNjc5NzU2NDc3NTUzMjQyMzM1NDY3NjQ1ODc1NDM0NTI0Nzg4OTU3
-Nzg5NzUzNDYyMTU1NDQ1NTQyNjY0NjU1NDU1Nzk3Oj49OTY3OTY1NjY2NjY2NjU1
-Njs5NTQ5NTY1NTU3Nzc5NzUzOTQ0Mzw5NTU1NzQ1NTc1NDg6NjU0MzU2NjY4NTU3
-NzU2Nzk0NTk6OTg8Ojc3ODY4NDQzNzg2NTYxMTE0Nzc2NDQ2Nzc2NjQ1NzU4NjU2
-NjY3MzQzMzU1Nzo5OTk2NjU0MjczOjU1MjIzNDY2NjQ2OTc3Nzk1MzU5NzY2NTw1
-Nzg3NjMzNTQ0NjY2Njg4NTY3NTU1NTc5ODc2OTk4NTQ3Nz8+Ojs2NTg5OTY6Ozk3
-ODk5ODU1NjY3ODs7PDo2ODg6ODg4Njc2ODc3NDY3NjY5OTc3NjY3NDY5PTo5OTg1
-Nzc2NjY3Njg2MzQ0NDQ2Njc2NDQ1NTo6OTc5Ojk3NTY3Nzc2OTY3ODQ1NTo5Ojg1
-Nzk4NjY4NjYzNjg7ODk3Nzk2Nzg5ODY0PDc1ODc0Njg3NDU3NTM2NTY2OTc6ODY3
-MzM2NDMxMzU3Nzc2NzY4OTg4NjY3MzU2Nzc2Ozg2NzQ0NjY2Njg3MzMyNTQ0NDM2
-Njc3NjY2NjY5OTY1NTU2NjY1Nzk2ODg4Nzg4ODk4Nzc1NTQ1NzY0Nzk7Ojs6Ozk4
-OTk6OzY0NzY0NjY5PDo1NTQ2Njc4OTg5Ojc5Ozc4Ozc0NjY3Ojg5Ojc4ODo4OTc0
-Njk5NzU1NjY4NTU4Ojw4NTY2NTY4Nzc3Nzs5OTk5Nzs5OTg2ODc5ODg7Ozo5Ojk4
-NzY3OTg4OTk2Njk4ODg3OTU1NTc3Njc4ODg7Ojk4OTs7Ozk6PDo6Nzs8PTw8PTk7
-PDs7PD07OTo3OTo1Njc4OTk2ODo7PDg2Njk6OTo5QDs6ODo5OTg2Njk4Ojg5Ojk5
-Nzs5Ojo7OTg2ODk6ODs8OTk4NzY4Ojg2NjQ0NTU3NjY2NTY1NjU2NjQ0NzY0Nzk5
-NzY2ODg2NTY2Njk2ODk2NDM3OTU2NjIyNTg1ODY1NTc2OTc6NTY3ODk3OTg2NDY3
-ODg7Ozo6NjU2Njc5OD44OTc4NzY3OTc0OTo3OTo9Oz86Ozs6Ozo5OTg3ODw8ODs8
-Ozw7Oj46Ojo6Ozo8Ozo6Ojs5Oz49Pj1BOzs6Ozw8OTdLOUA6Pz5BSEJOTUJFTFOg
-wMnV2+Di5ebo6upCR0Q8PD5BQkFCPTs7PD5DQDo6QDw7PD04OTM2Nzs7Ozc3NDk5
-NjQ6QDg1NTc0NTg0NjY0OzczNTQ4MjQ3NDc3Ojc1MjMxMzU5NTY4OTY3NzY2NDQx
-NTQ1MzM1Mzc7NTM0NjU1NTQ2NDY1ODY3NjczMzE0NDAzNzY4MzMzMzczMzQyNDY0
-NTQ1NjM2NTQ3NTQzMjIzMjQ1NDQxMTIyNjc0MTEzMzEyMDQ3NjU0NzU0NDQ1MzQz
-NTU3NjMzMzY0MjQ0NDY0NTg2Njc0NjUxMjQ2NjY0MjMyMS8vNDYyMjQ1NjQ1MzI3
-NDU5ODY0NTIyMTY0NTc2ODczMzY1MzM2MzQzMTM0NDY3NDM4NDY4ODgzMTIyNDU3
-NTEzNDU4ODU1NTIzNDY2NTYzMzI1MjQ4OTY3NDY1NTQ0MjUzMTIyNDQ2NjU3NTU1
-NTQ2NzU1Njo4ODc5NTQ2NzQ0NjQ1OTY3NjQ2Nzg2Njk4NTY2NjQ4Ozg2NjQ4NTc0
-MjU2NjY1NjQ3Nzg2NDY3NDc5Nzc0MzU3MzU3NDY3ODc1ODY0NDU1NTM0NjU5OTU0
-NTI0MzM0Mzc2MzQ3NTc4NzY2OTM0NzY5ODg2NjI1NTY0NTUzMzQ1MzU1NDQ1NjQ0
-NDQ0NDY4NTY1NDU1NDQ0NTg1NTg2NTQ2NTY9OTg1MzY2OTY3REE7NzU3ODY1Njc3
-NTc2Mzc5OTQzNTc7Ojo6ODk5PDk5PTo7ODo6OjY3ODg6Ojk6Ojk4Ozs3ODY3Nzc2
-OTk1NDY4NTc3ODY2ODc6Ojo5Nzo4NDc1NjU4Nzg3NjM4MzMzNDU6Nzg2Njk6OTc2
-Njg1OTk4NDU1OTs2ODU2NjQ0NDY4Nzk3Njg5ODg1NDg1NjU2NTY2ODc4NzU1Nzg3
-OTQ1Nzc0MzQ1NTc4NTQ2NjQ3OTc3MzY0NTQ1Nzg2NjU1MjQzNjc2NjU1Nzg6NjY2
-NDQ3NjY3NzU0NTU2Njg3NzY1NjY2NDk2NjY0NDQ2NTQ2NjU0MzU0NDc5NjY4Ozk4
-OTk6ODo5Ojg3Nzc4Njk7OTc5Ojk4OTc1Njk6ODc0NTY4MzUyNjQ1NTQ1NTQ3PT04
-Njc3Nzs2NjY2Njc1Njk5ODU4ODk3ODo7OTs+PDg0NDI2NTQ2Ojk6OjU4PTo6Nzg4
-ODk4OTo5ODY2MzY2ODg4OTk5Nzg2Njc3Ojs7Pzs4OTw8ODk5ODg4ODU1NjU6OTc4
-PDg6OTk4Nzs7Oz07Nzk6Ozk4Ozs4Ojo5Nzk8Ozs5OTo6OTo3Nzk6Ojo3OTk5PTs6
-ODo6ODk5OTo4NTU2ODg5ODc3NzU2ODg4OTk3NzU3NTU3OTc3Njc5OjY3Ozo4Nzg5
-Ojk6ODc1NTY3NTU1NTMzNzQ4OTk2NzY4ODc2NDQ2ODg4OT09ODo3OTY3NjM0Nzc4
-NzU0Njc5Ojs5Njc6Nzc1Njo7ODc5NTQ1NTg3Nzg5ODg4Nzo1NDk8Ozc3OTg3ODw8
-Ozw9Pzo4OTs6OTo7PTs/PTk5PTo4ODk6OTw8OTo6Ozw6Ojk7PTo+PDs6Ojs6Ozw9
-QD84Ojs7QEU2PkRDPz07PExYS0hLWqHBytTb3+Ll5+jp6Uk/QEVDPTw+P0NBPEVD
-PD48Ojk5QTw6Oz09OjY4Ozc4Njk3NzU1NDo6Oj04Njo1NjY4ODY2NjUyMjk3NDY3
-OTg0NDEyNDQzMTM3ODY1NTg5NTUzNDI0NTQ0NTUxNDg4NDQ4NjEtNTcyOjY2NzQy
-NTIwMzI3MzMzNDQ1NTQ0MjEyMTMzMzM0NTQ0MjQ1NDQ0MzgzNTU5NzYzNDQyNTQz
-NDMzNjQzNDU0NzQzNDU1NDY0NzY2NDczNjc2NjY3MzE1NDU2NDU5NTY0NDQyMjAw
-MzAyMzYzMzIzMjMyNDkyLzAxMzI0NjQ1NjY1Njc1MzEyNDQ1NTo2NzMyNTU1NzQ3
-NTczMjc1NDM1NjU2NTQyMTI0MzI2ODg3NjMyMzQ4NjQ0NjQ0NjczMzYzNDIzMzU1
-MzM1NTM2NzYzMjU1MzQ1NTc1NTg1NTQ2NTY3NTU1NDc3NzczNTU1MzI3Nzk1NTQ0
-NDY4NjQ1NTY3Nzg0OTo4NzY3Ojo5OTY4NjU1OTg2NTY2NTU3NzU3ODo3MjAxNDM4
-NzU3Ojw3MjY4ODUzMzU2NDY5OTY2Nzc1MTQ2NDI0Njw2NjU1NDQ2NjMzNDQzNTU0
-NjY0NTM1Njk1MjU5NDY3OTUzNTY5ODk2NjY1NTM2MzQ0NjU1Ojg4NzY4OTo3ODg6
-Nz05OTY2MTY4Njc3MjU5ODI0NjY2MjM4OTk5NzU1NTQ1NTk6OTg4OTg5OTk7PTw8
-OjY1NzY3Njg2ODo7Nzk6OTY3Njk3Ojg4ODs6NTY0NTc5ODc5ODs4Nzg5NjQ1NzU2
-ODk4NzU5NTY3OTo3ODY0Mzc6Ojk4OTo5Nzg4ODo5NjY4ODg4NzY2NDQ0NjY5Nzg2
-NDU3NTg5Njc2Njg1NTg4OTQyOTY4ODc8Ojg4NjU0NjU2NTk3NTEzNjQ1Njg3NjU1
-Nzc4OkA5NTc1MzM0NzU1NTY4OTo0Njk4NTY1MzY1NTY0NjU3NTY3ODg2NDY1Njk2
-NDMzMzM1Njg4NTMzMjU5NTk6Nzc3Ojc6Ojk3ODs4Njc5NzU5ODU0ODc3ODo6Nzg5
-Nzc1Njc0NDI3OjczNzg3MzM1MzY5OTg3NTU1NTY5ODY3OTc1OTg3NTQ0NDg3NjY3
-Ozs4Njg5ODU6OTk6Ozg1Nzc5OTc4ODo5NTc5OTY4Nzo7NzU2NTU5ODs5ODY5OTk4
-Njo7Oj85ODk1Nzo5Nzk7NzU2NzY5PDs6Ojk3Njg4OTY5ODg5Nzg5Ojs4ODs5ODg3
-Njc5ODg3Ojs7Ojo6OTg6Ojg3ODk2Nzk7Ojg4ODo3Nzk5Ojc3Nzk6Njc3OTc1Njg4
-NzU3NDg2ODs7Ojk6PDk4NTU3OTg5Ojo6PDk2NDE2ODY3NTM2NjY2Njg2NjY2ODg0
-MzQ3OTU1NTU0NTY3NDg5NTQ0NTU0NjU0Njo4ODc3ODk3Nzc1ODY4Ojg6Oz41NDU1
-NzY4OTg5Nzc6ODs7Nzc6ODc5Nzg4Mzk5NTc4Ojg5OTk7PDw8PD08Ojs7Ozc6Ozk6
-Nzg5Nzg6OUE9PTg8Pjk8Pzw8PDk7Ojw+REM7PD47OTpAQ1FBOTlBSW9sTEZTncHL
-1Nre4eXn6OnpSEJHRUA6OT5APTo9PDs6PDs8PEA9ODk5Ozk3OTc4Nzc2Nzg3Ojc7
-Ojg3Nzg1OTo4OTY4Ozg1NzY1MjQ1NTc5PTY0NjYxMjY0MjQ2MzUyNjYzMzU1NzUy
-NzY3NzY5OkI2Ozc0NjMyMjU1ODc0NTIzMTQzMzcyMjI0NTUyMjIxMTM3My8zNTM2
-NDQzMjE2MjIyMzc3NzMzMTIyNDQyMTMzNTY0NTQ2MS4xMTM0NDQyNDQ1ODo1NjdD
-NTMxMzI0MzI0NTU1Mzg4NjYwMzM1NDMyMjI1Nzg1NDEwNDQzNTExMjQ1NTQyMjE2
-NTc1NzczNDMzNDQzMzY0ODY1NTY3NDQ1Mzk6NTQ0MzM0NDY3MzU0NDU2NTQ0ODI0
-NTU3NzQ2NzY2NzU1NTQ0MzYyMTM2NTU2Njc3NzY4NjI0MjY1Njc0NDc4MzQzNDYz
-NjQ0NjY1NjU4NDY1MjU1NTc4Nzg3NjQzNTY1NjQ1NjQ4NzU5NjQyNjU3NzY4OTg0
-MjQ6Nzc3MTU3NjM1NTc5NTU2Nzw3NjU4ODY2NzY7Ojc6ODcyNDY0MzQzNzU0MjM1
-ODY2NTU1ODg1NDMyNTQ0MzU2MzQ1NzQzMzc1NTQ3Ozc1NjY1NzUzNTk2NTU4Nzc3
-MzQ1ODY0NTMyMzI1Njs6Nzc0Ojo6ODk7ODc1NTc9Nzg1OTg5NzU3Ojk1NjY0NjU3
-NjY3ODo3Ozk5MzU2NjU2Nzg4PDs5ODc3ODc3Njc5OTg5ODY4ODY5OTk4ODc4NjY1
-NDc4ODg5NTU3NTQ2Nzc0OTo2Nzo4Nzc3NTU1NzY3NTc7OzczNDU1ODk4OjY5Ojc2
-ODg4PDw9OTw6Nzc4Nzk2NTM2NDY5OTc2NjU4ODY6Ozk6ODo6ODc3Nzc2MjM2OTwz
-Mzc2NjY3Nzc1Mzc0NTU3ODY6OTc1Nzc2NzU1ODc5OTU0NjU1Nzk4NTY3NzQzNDY0
-MjIzMzc5ODc2NjQ2ODY3NTM0NTk1NTQ0NjQyMjM0MzM1OTQzNDQ0NTY4NjU2Nzc3
-ODg1NjY4OTY4NjQ0NTs4Njk5ODU1NjQ1Ojk6ODc2Njg3NTU4NzQ5OzY2Nzc1Nzc3
-NDU4Nzg3NTQ2ODw0NDU2NTY2OTg3Ozk2Njg3Njk4Ojw6Ozg6OTU1Nzk3Nzc6OTg3
-NDY2NjY5OTo4NzY3ODg5Ojo6NzY4Ojs6Njc5Pjs6Nj05ODg6OTg0ODg2NTc4Ozw6
-ODo6NjQ3NDU4OTY3Ojc3OTk5Ozo4OTo8Ojc2ODc6ODk4ODc5PDk4OTk5Ojw+OTY2
-Nzc5ODo4OTg4OTg4Nzc3Nzg3MzM0NjU4NzY1Nzk5Ojo4ODk5Nzg6ODg3ODk4Ozs4
-Njc2Ojk6ODUzNTU4MzU4NTU3NjY2Nzg3NTU4Nzg4NTQ2NTc3Nzg1NjY3NjYzNjc6
-NDc4NzY4NjY3NzY4OTk4ODc9PTU5NzY3Njg4OTc3Nzg4Ozg3ODc5NjU1NDc7Ozk3
-ODg5Ozg5Ozg3Nzg6PDw9Oz05PTs5ODg5OT4/Pjk5OTk6PTg5PTw7Ozk4Ojo7Ojw9
-PUBBPjw6QDo6PDw+REZRbltCSV+dv8rT2d7i5Obo6OlFQkVEPTw/Pj5CPj4+NztA
-Ozo8Ozs4Oz87Njk3Njc6Ojg9ODc3PDo5NzU7Ojg1NTUzMzQ1Nzg4NjIxMzY5NzQ2
-NDQ3Njk4MzEzMjU2NDc3NjY1NTY4NTQyNDUyNDNDOjU1NTo4ODYzMjQzMzM0NDYy
-Nzk3NTU0NTczNTQ3ODgxMzI0MzM0Nzg0NTIzMjQ0MjEyMjU0MzIzNTMzNDU3MzY3
-NTM0NTU2ODMyNDQ2ODY3NjY0ODQ1ODc3NTg1NTQ1NzU3NDI2NjY0MjQyMzc2MzMy
-MDA0MzM3NDc1NTY2MzIyNDUxMjM0NTMzMzUzNDMzMzI1MzQ3NTMxNDI0NTY2MzM2
-NDIyNTgzNDMzMDAyMjU3NjU4NDQ1NDU2MzUzNDc2NTU0NTEwNDQ1MzU3NjU3NjU2
-NzY3NzU1NjU0MzQ2NjY0NDU2MjM0NTY0NTc2NTQ0NTQ1NTY0NDU3NjY3NjQ2ODQz
-Nzc1NTY1NzU1Nzc3NDY2Nzc3NjY2NTc4Nzg6NjU2NTY4Ojc1ODY0NDc3NTQzMjQ3
-Nzs1OTo6OTg5ODQ0NTk2NTMxMTIzNTUyNTY1NDU5NjQ1MzU1OTY5MzMzMzU0MzM5
-NzY4NzM2NTY3Rjc1NDY0NDQ2NjQ3OTY0Mzc2NDE1NjczMzU2NzY0NjU2OTg2Nzc4
-Njg5OTU0NDg7ODU2Mzc4NDY4RTk0NjU2NjE1OTc3Ojc5Njg4ODY2OjUxNTY3ODY3
-Nzc6OTs7Ojc0NTk4Njc4ODg2OjY2NjU2ODY4OTo3NTQ3NTMzNjc4OTk5RTg3NjU2
-Njc2NjY1NTQ1NDQ1MzY3Ojk3ODU1OTk4OTw7OTk3Ojg2OTg4Nzc0NDU2NDc1NjY4
-NzQ3OTY5Ozk3ODs3Nzc5NTU1MTU8NzU3MzU4OTg5Njc1MzQ1ODo3Njg2Njg6ODg3
-NzY1NTU3Njk3NTY5NTY1OTg2ODQyMjQ1MzM1NjU2NjUzNjY2ODYzNDQ0MzQ1NjY4
-OzozNDQ3NDg3NTYzNjg4NTg6ODg4ODU4ODc2Nzg4NjU1Njk3PDU3NjU0NTk1NTY3
-Nzc4OTlBODU4ODY3ODs4NjQ1Njk4OTg3NTY5NjIzNTc3ODU4NzU1Njc5Ojg3Njk3
-Nzw6OTw6ODc3ODg7NzY3Nzc3ODg7ODg4Nzg3Ozg6ODY4OTg5Ojc5OTk6Nzc2ODg4
-PDs5PDs7PDo5ODk8ODg3Nzo3Nzk8Ozs6OTc4Nzk4OTc5Ozg4Ojo2Njo6Oz49PDs8
-PDs7PT43Ozo+OTk8Ojs/QD08OztAOjk5NjU3Ojo4ODk2NTg3Nzc1Nzc2NjU2ODk3
-ODg2ODc4NzY3ODY0NjY1OTc3Nzo5Ojg3NjY3NDU4NzY1Njc4NzQ3NTY4NjU0NTU0
-NTU3NzU1Nzc4OTk3NzY2NzY3OTc4Ozo5NTU5NzYzNjg3Nzc2ODY5Ozg3ODo3Nzg5
-OTo7OjY4NzU1Nzg4NjY3NTc6Ojg6OTk3Njg4ODo4Nzg4ODw4Ojs9ODk8Oz08OTo6
-ODg5Ojk2Nzg4Njg8Ozs+PDg6Oj1BPDs8PTo5Oz5JTjw6QkFBQkpOTEBGXp2/y9PZ
-3+Lk5ujp6UNBPj4+PUA5Ojg5OTs5Ozs3OD08PTk6ODk5Njc4OT89PTg6Nzo4Ozc3
-MzQ3ODg2Nzk6NTg3Ozg4NTg2NjQ3NTU0NTg3MzQ0NDQxMjM3MjEyMTM5NzY2MzUx
-NTg2NjQ1NDU0NTU2Njg2NTY0NTM0NDc2NjM2NzUyMzM1NDMzNTIwMzU3NDQ2NjQ0
-NjM1OjY1NDQ2NDU1MjEzMjEyMzM0NDA1NDM0MzQ2MjQ3NDUzMzMzMzQ1MzQ1NTc1
-NTU1NDY1NzY1NjMyMzU0MTI0MzI3NTMyMjI2MzMzMjM1NjY1NDM0MjIzMzMzMzYz
-NDM4NTc2MzM2NjU0Njo5NTEzNDMyODY4OTU4NDc0NDMxMjQ4ODY2NzIyNDU1NDU0
-MjQ3NjQzNTM1MjMzNzU1NTc3NjQ1Njc4NTY1NDc5Njc0NDc3Mzc2NDc3Njk2NjM0
-NjMzNDQ1NTIzNDY1Nzc1OTc4Nzg5NTQ4NDU2ODo2Mzs6OTY3MzY0Nzo4Nzg3Nzk4
-OjQyNDY2Nzc3NTg4NDY2OTg7ODg3Ojg4Nzc3ODc3NjY2NzM3NTc2NTEzMTQ2MTEy
-MjQ1NjY5Njc3ODk2NzQ2NzIyMzMzNjUyNjk2OTU5NjU1NTY2ODU1NDo2NjY5Nzg3
-OTg1OTY3NDM0NDU2OTk1Ojg3Ozc1NTM2Nzc2MzQ1NjY0NDU4ODQzNDY4ODQzNDYz
-MzM1ODg1Ozg4Njc3NzU1OTYzNDU2Ojo4Nzk5ODc5OTY4Ozo7ODk1Njs5MzMzNTg2
-Njc5ODk4Nzc2Njc5NTQ3ODY1NTY1NjY0Njc2Nzk7ODY4NTY3NDQ1Nzs2ODY2ODY1
-Ojg3ODg5Nzg5Nzc4Nzg2NTU1NTg3NTY4Nzc4ODY3NTc1NTU0NjY4NzY2Ojc1NzY4
-NjY4Njo3NjQ2Njk3NTk3NTY2ODY3ODc3OTs7OTs5Njg2ODc3Nzg4NDU4NjM0MjQz
-NjQ3Njc0NTY1Mzc1NTc2NjY0NTc3NjY2ODc2ODo4NTU2NDQ0NjM3Njo6NjQ4ODc3
-OTY1NDQ1NDg0Njg7Njc1NjQzNjg3NTUzMjI0NTMyMzI0MjMwMDQ0MzIxMjQzMS8z
-NTYzNTQ2Nzg1MzM1NDQxMjQ1NDU5NTY1MzA1MTM0NjU0MTM3NTUzNDM0NDQ3NjM0
-NDY1NTIxNDM0NDQ0NTc2NzI1ODQ2NTU1MjIxNDMyNTczMjM0NTUzMjMyMjQ1NDY3
-NDg2NDY1Njg1QEU1NjQyNzY3ODU1MzU3NjUzMjQ2NzQ1NjY8NTUzMjQyNDQ3NzU1
-NjQ0NTc2NTY2ODQzNTY0NjU1NDIxODQyMzgzNDU2NDU0NDE0MjU0NTU1NzYyMjc1
-NjcyNDY2MzU2MzQ2MTM1NjQyMjM3OTk2NzM1NjY1Njg1NDM0NzItMDc3NTkyNDU0
-Nzc3NTY0MzY3Njc1MjIzMzU2MjM1NTQ1NjU2NTIyNDQxMzU3NzQyMzQzMjUyMTQy
-NjU3NjU4MzIzMjU0NDMzNDc0MzU1NTc2NTU1NTc2NTUzNDU1NjU5Ojg4ODQ1NTc3
-NTQyNDc4NTg3MzUzMTM0MjExMjIxMzQzNDUzNzYzNTY3NzU0NjQ3NzUyNDU0MjM3
-NTU2Njk1NTU6ODQ0MzM2NDU0NDM2OTY1NjU3ODg3Ozo5NjQ7ODg6Nzk3NTY7Ojk4
-Mjg1NTg7OTI2Pjo5NzY6Ozk4ODs7Ozg3NjQ1Njg3NTU7PDg4Oj88Oj5AQT9DP09E
-QEM/PzxAPkA8OTY5ODs4NTY3ODE1NTg3NzY1OUA7ODU1Nzc5NTM2NjQ2ODk5OTo6
-ODo9Ozk6ODs6Ojg4ODY3Ojs6OTo5NzY5NzY4ODs5OTc7ODg3OTc6Ojg7Ojk3OTo7
-PDk2NzU3Njk6OTo4NTc2Ojg1NjY0NDc0NDU4NTU1NjY1NTg2Njk1NTQ2NjY6ODUz
-Njo2NzY1NTY1Njg4OTY1Njc4ODY2Nzc2NTU0Nzg0NTY0NjY3ODk5Nzg2MzU2NTU2
-Njc1NDI3ODg4NTU3ODk6ODU0PDQ1NTU0ODg6NzQ0Njc8ODc2NjU4Nzg4Nzc4OTw6
-Ojg4Nzc5NjY4OTg3Ojw6Ozw6Ozg3ODo4Ozk5Ojk7Oj09Ozg5Nzw/PDg5OTo7PTw5
-Ojg3PDs8QDw9PUBBPjw8OT5AOTo+PD09PEeRv8rT2d3g4uTk5uZEQTo+Pzs+PT88
-ODc4PTw7OTk5PDk7NzU4ODg2NTU3OTk3PUA7OTg2OTc1MzMzNTkzNjUzNzU2NTY2
-NjUzNTAzNzM0MzMxMzM1NDY0Njk0MzUyNzQ1N0lINTQzNDY1ODg0NTM0NTQ3ODQ2
-NjU0MzI2MjI1MjQ0Njk3Ozk1MjMzNDU0NjMyNzY1MzI3NTU0NTIxMzM5Njc3NTc2
-NDUzMjMzNDIyMjI0NDE1NjMyMjAzNDg1Njk0Njc3NjQ2MTM0NjY4NzY2NzQ0MzQ0
-MzM1NTQzMzQ0NDY1MTQzNDQ1NTk0MzY1NDQ1NjUyNzUzNjUyMzMzNTIyMDIzNTU3
-Nzg0MzQ2NjUzNjU1Nzc3NjUzNTc5NTo6ODY2NzczMjIzNzY2NDI0NzU0Njc2NjU2
-OTk1OTUzNDc2NDU1NDU0MzU1NDU2Nzc2NDU1NTU1Njc1NTc3ODY1MzQ2Njo4MzY0
-NDM1NjY1NzMzMjExNDU1MzEyLzEzMzQ0NzY1NjU2NjU2NTY0NTc2NDIyNTU0NDQ0
-NDExMTEzNzQyNTQ2NDc2NDQyMzg3NjY4NDczNDYyNDQ2NTIzNDU3NDEyNDw0MTA1
-OjUxMjEyOTIyMDI0NjQ1MzQ3NTQ0MjU1NTU2NDY2ODY0QUpDOjczNDU3OTY1NDU1
-NjY3ODYyMzM0Ojg5NTQ0NDc0NjIyNDU0NTQ0NTU3NzM0NTE0Njc3OzY2OTc1NTM0
-PTY3Nzg1NDUyNTU0MzM0NjY3OTk1NDU0Nzw3MzM4MzQ1MjI2Nzc3NDY3NDM2ODQz
-NTU2OTg2ODg5NzU1NzY5NDg5OTYzMjg4NjYzNTc5ODo1MTE0ODU2NjQ1NTY1NTQ2
-NjM2ODg2ODk5ODEyMjI0NDMzNDMzMTEzNzs3MzM2MjMzNDU3NTM0Nzc4MjIxNTQz
-NTUzOTQ1MzU0Njc2NTc1NDQ2Nzo4NzczNDQyNjg4NjM1NTU2NTUzNDQzMzI0NDQ3
-NjQyMzUxMzU4NDEzODY2NTMxNTc2MzQ4NjY3MjQ1NTY3NDMzNzc2NTU2NzU4Nzg0
-NTU1Njc2MzQ2NTc2Njg2NDUzNzk5ODk2Njg+ODc3NjY5Ojc4PTk4Nzc4Ojs3NTg2
-NzY6NzY2Ojg3Nzc2Ozw+OTs7QUA7Ojs7QTw/QkZBQUY/PDk4PDs6OTo5NjU4ODk4
-Njg4ODY0NTQ1Nzo4ODU1NjU5PDg5OTg3ODQ3OTo7PTo6Ojg5PTw8OT48Nzc3ODk3
-OUM3ODo6ODg5PDo6OTw7Ozg3Nzc2Nzg3OTc0NzY4OTc3Nzc2Ojo3OTw5NDc1NDU3
-NjY2Njc5OTUyMjY3NTc6OTo3ODo2NzY4Ozo4Nzc4OTc4ODY0NDU1NTg4NjU4ODY2
-NzY1ODY4Njc2ODY3Nzk2Njg4NTc4ODc2NjYyNDU2Nzc4ODo6OTY1NDc3Njc3ODo4
-ODg1ODo4NTU1OTk5Ojw9ODg7Ojk5OTk6OTo4OTc6PTk4OTo5OTs6Ojs5PTs6Ozo6
-Ozk4ODk8PT8+Oz9DPjs7PT0/Pz89PT06Ojo4Njs6PTw6PUA9Ozw5OTo7PD5BQUhF
-SZW+ytPY3N/h4uXl5kBDQTw9PTk6QTo4ODg5OTk1NjlBPDk2NjY6NjQ3Nzs+ODg1
-OTo3ODk7NjQ3ODY3NTU2NTUzNjQ1Njw2NjY1NDQyMjU4ODY2NDQ0MTU3NjUzMzM7
-OTg3P11SNTQ1NDI0NjU0MzU6NzU2NDM1OjgzNjQ0MzEyMjQwMzg1NTI0MzMwLzAx
-MjEvMjg1MjM0NzU1MzQ2Njc1MzQ2NDU4MzAzNjMzNTM2NTMzMzIzMzMxMTE0MTI0
-NTYyNTU1NDYzNjY0NTUzMjI1NTQ1MjM0NTU2MzEzNDQ0MzM2NzY2NTM0NTc3NDM3
-NTU0NzUzNDAzMjU0NTU1OTUxMjIxNTg2NjYzNDM0NTc1NDQ0NjM0NDU1MzMzNDU4
-Nzc0MTE2NzU1ODQzMjg3NTU3NDU3NTQ0NjI1MzMzNDU2NzU2NTY0MjQyMjQ0NDMz
-MjUzMzM2NDU1NjU2OTg4NTk3NTQ0OTQ0NzE1Nzc4Ojg2MzIzMjMyNDU4NDQ2NDU0
-NTQ0NDI1MjMxNjY1Njg4MzQ0MjM0NTUzMTM2NDY5NDUzMjM0NTIxMjQzNjo3OjY1
-MzM0MzI0NzUyMjM2MjE0NzI2NjU0Nzc2MzY0NzYzNTAyMzQ2NTYxMzU1MjM0NjY1
-NDU2OTc3NTQ1Nzc0NDQ1NTc4Njo2NDI0NTg1NDQyMDEyNDk6NjUzMzMzMjQ2NDc3
-NjQ0Nzc3NTQzNjMzMjM0NzU1NjQyNDIyNTc4NzQ0NTQ1Nzc6NjU1MzU3NDUzMzQ2
-NTQ0NDQzNTg2NDU1NjQ3NTU1MzM1NzM2ODc1NTY1NDQ2NTg2ODo4NTY2NTg3NjY4
-NzQyNTg5NjY0MzU3MzMzNDQ0NjQxNDQ0NTUzNjg0MzY0MTM0NTc3NjM1MzIzNTMy
-NTI0MjQ0NjI0NTc1Njc2NzU0MzQxNzQ2NTc0MTQ0MzQ4ODU6NDM1Mz85NjY2MjQz
-NTs0MjY1OTs2NTU0NjU2NDIzMjExNTYzNDc3NTY0MTQ3ODMzNDU0NjU1NjUzNjY4
-ODY1NDQ4MzI2NTQ3NzQ2Njc2NDc5NTU2ODU5Nzg2NjY2MzQ2NjY3NTc5Ozk3Nzc4
-NDY3Nzc5ODk5PDs3NTY1NjY3Nzg3Njk5Ojg0NDk8Pjo2Njo4OTs5OkJBPkBAQD1B
-Qjs+Pz88OkA8ODk9Ojo4OD07Ojg6OTg3OT08Njk3NTc3Nzg5ODg6Oj06ODY2Nzc1
-NDc5OTc5OTg3ODk7PDs5PDw4NzU2NTQ2Njg1Njc3Nzk4ODo6OTo4OzY4ODg3NDk3
-MzQ1NzU1Njc2ODc1ODo4NzU3Nzo5OTU0Njo2NjUzNjY2NzY2Nzg5Nzc4Nzc1OTY3
-OzY3NTQ0NjU3NDY3MzU4NzU1Nzg3ODc3Nzc0ODg2Njg4ODg5NDc3ODQzNjg5NDU7
-OD06OTg4Nzc2OTY2NzU5OTY1MzU2NTc3ODY2ODg2Njo3Nzo5ODo3ODg2ODg4ODs5
-Nzg4OTg4Nzc8PDs5Ozk7PD09PTo5PDg7Ojg4Nzk5Ojw8PDw+PD07Oj09Ojk9Pzk5
-Ojo8Oz09Pj48O0A9PTs+OzpBSUNAQURJkrzK09jd4OHj5ObnQEQ9QkA9PDs+Pzs/
-PDs+Ozc1Ozc1ODo4Nzg4OTk9Ozk5NzY1Njk2NTM0OjU4Nzc3Ojc5NzYzNTc3NTY8
-ODc3NDAyNTU1NjI1Nzk4NDU4NzYyMjM3MzY1PGE6NTQ0MzU0Njc3NTU1OzM0OTM0
-NzY2MzM0NzU3MzQyMjI0NDIyMTIxMzE0MzIxNTU2NDQ1MjQ2NTU3NDQyMzQ2Njc4
-Nzg4NjQzNjU2NDI1ODg2NDU3NTM0NDU3NDM2NDU1MzM5Nzc3NTo2NTU4NzU2NjM2
-NTI0MzQ1MzI0MzMzNDY1NTQzODMyMzIzNzk3NDIyMzQzNjQ0MTY0OjczNDMyNTYx
-NTg3ODc2ODg4NDIzNDQzNjQ2MzMwNDk3NzU0NzY2NzYzNDI2Nzc1MTM2OTk2NDgz
-NDUxNy4yMjIxMzY0NDM3Nzk4NDY4NTE0NjMzMjU0MjU0PTc3ODk3NDUzMzg2NDQ1
-NTY3Njc0NTg4NTM0PDY1MjI4NDYzLzI1MjQzNTM0NjM0OjpAOTg6ODU0NDQ1NzQ2
-NTI1ODI0NDIyNzYyMjMzNTY2NjMxMjM0MjQ1ODc3NTQ1NDExLS8wMDExMzg1MTM0
-NDg1NDc3ODc2OTcyMjY1NDMzMzU0MzI2Njs2NTU2NDQ3NDc4NzY1NjY1ODU2NTU1
-NTc3NzY2MjI1Njc5NzU1MjQ0NDU2NTc4OTU0MzQ2NTMzNTQ2NTUzNTUyMjUzNTc1
-Nzc4Njg4NjY2Ojk7NjQ0Njk4NjU0MzQ0ODc2Mzc2NTU1NTc0MTU1NDQ2NDQ3OTc4
-Nzc4NjU3NDI1NTk7Ozg2OTU3NjU1NDQ1NjczNTY3Nzg4NzM3MTM1NjY0NjQ1Ly4y
-NDg3NzQxNDk3NTAxNTIyNDQ1MzI0MjExMzIvMjYyMjU0NDs4NzQ0NDQyMjM6NjMz
-NTMzNjk3NjYyMzIzMzU2NzgzMzY3NjQ2NDM2OTg4OzY1NDQ1NDYzNjU1MzQ2NjU3
-NzMvNDQ2OTk2NDYzMzMzNDM4MzU0ODc3NzY0NT41NTc2ODUzNzg6Oj06OTY4NzUz
-NTY3Ozo3Ozw1ODc1NTU3NzY4NzU3Nzs5Ojc2OTc2NTo6ODg3Ozc9NjY2ODg2NTg0
-NzU0Mzg6NjY2NTY5OTs6Oz09QUREQjo9OT0+PTs8OTo7OTs4NTY4OTo7ODc3Nzg5
-NjY3NjU2NTk9Nzg4Njc3ODU5ODo4NTc6ODg2Ozs6OTc3ODo6Nzg5OTs4NTY1ODk3
-ODU3OTo3NTY4ODc3OTg4OTc4ODc3ODk4NzY3Ojg1Nzo6Ojw3NDQ4OTw3NTQ3NjQ4
-Ojg4OTk5Nzc2NTU0MjM2Nzc2NjUzNTY2NDY1NzU3NjU2NzY5NjY1Njc3Njg4OTs5
-NzYzNDU2NTY1OTY0MTI1ODk2ODo5NDM0NTg4Nzc5Nzg0NTc2NTk2NzU2ODo4Ojk4
-OjY3Njg4Ozg4ODg3OTs8OTY3Ozg1NjY5Njc2Nzs3Ojs8Ojs+OzY2ODk5ODc6PDk6
-Ojg4OTs6OD49PDw+Ozk5ODk3ODg7Ozk8Pj4+ODxAQDw9PT0+P0A7PUFBRkVEQEmU
-vsrS2d7h4+Tm5+hFQjs9PT1BRUI7PD47Ozw6ODc1OjU3OTw3Nzs9OTY3Njg3OTs7
-ODU0Nzc2OTc3Njc6Ozo4NDM0NTY3NTQ1NjU4OzU0MTUxMzY1NjY4NjQ4ODU1NjMz
-NTY+PzYxMjM0NDU4OTQxMjUzNTI0NTIzODYzMTEzMzU1NDUzMjQ1NDMyNjIxMC8v
-MjAyMzQzMjIzMzc1NDMzNDo5ODMxMzY1NzY3NzY2NTQ0MzExMjU1NDMzMzY3NDMx
-NDIyNDc0NDIzNTQ3Ojs5Nzg1MjIxNDMzMzEyMTI1NTMzMzMyNTQzMzE1MjMyNDQz
-MzI1NjI0NDg1NDI0MzI0NTU2NzM2NTQzNDk1NjU1Njk2NjUxMzI1NTY2NjUzNDo3
-NzQ3NTU3MjYzNzQ0NDQ4ODYzNjU3NDQ1NDw0PDc1Mzg1NDY0NzU1Njc2MzIzNDMy
-NTM0MTQyMjZNNzY2Njo0MzU2NDc5ODk2OTg4NzY0NTRAPzxCNDM1NTc0NTc2NDc2
-NDQ2NDU0MzI1ODw3Nzc4NTQyNDIzMjM2MTU1MzM1NjU0MjMwMTI3NDUzMjQ0NTI1
-NDM0NjY3NDc3NDQ1MzIzMzQzNzk4ODk5Nzg1MzY4NjU3NjU0NzY0MzMzMzUyMTU0
-NjYyMjMzMTIyOTU3NTg4Nzg0MjM0NzQ3NTY2NjY4NTQ2MjU4NjQyMTU1NzY1NTY1
-NDM2NjY2NzY1MjUxNTEzNjczNjY5NTU0Njc2NTQ2MzU1MzMzMjQ1NDY2NDUzMjUy
-NDQ0NjQ0NDY3Nzg2NTQ0NDQ1NTI0Nzc4NTc1MzM1NDY4NTQzNjQzMjQ0NDU2NTY4
-NzU0MzU2Njc4MzQ2MjM2NjI0NTIwMTc1NjU3ODU1NzcxNDMzNDM1MjUyMTM0NDIy
-Mzc2NjY3NDE0NDYzMTE1MzY6NzY2MTQ1Nzc5ODk2MzQ1MzE0NDU3NjU1NjU1NDMy
-MzQ1NTM7NzY1NjU0MzY2Njc1MzAyNjM0Njk3Njc1NjY4Nzg2MjU1NTM0NTEzNjg3
-NTYzNzk0MjUzNTU2Njk5Nzc3NTY1NTQzMzQ3OTg1NTY4NzU4Nzg5OTY3Nzc5Nzg3
-Njc3Ozo3OTk3Ozo6NjU5OjY1Nzk2Nzk4Ozg3NjY3Nzk3Nzs8PDw6Ojw/QUNDQjo5
-Ojk5Ozk3NzY1OTg3NjM2Ojc2Nzk3Ojk5Nzg4OTg3ODY1NDc2Nzk3ODc5Nzk3NzY4
-Njc3OUQ5Ojc4Ojo8PTc4NTg4NDc7OTY5OTc6OjY5Nzg3ODo4ODk3Nzk5ODc4Nzc4
-Nzg7ODc3OTc4Nzg1NjY2OTU2NjQ1MzU3ODY4ODQ0NDU1ODg2ODg5Njg3Ojo2Nzcz
-Nzk1Njc1MzU2ODY4NTU2ODg3Njg3ODc2Njk2Nzg2Njc2NjQ5PDc1Nzk3Nzc2NzY2
-ODQ1Njc0Njc4NTQ3NTU2NjUyNjg4Nzs4ODc2Ojo6OjY2Ozk3OTo3ODU4NzU3OTY1
-Nzk6Ozo8OTg3PDg6NTY2Nzg4NzY4Nzc8OTs6ODg5Ozw8PDs5OTo4NTc5OzpBOzk5
-O0E9Oz4+QD5BOTQ4Ozg6PEFCQkJETpq+ytPZ3uDk5ufo6UQ8PT5DQEBFQjk5ODc7
-ODg4ODc4Ozo7Ojs4ODg3OTk4NTY2Njc7MzU5ODc2NzUzNDk7ODc1NzQ2MzU5ODU1
-MzIzNjQyNDMzNTMzNDIyMzE0NTUzNTU2NTQ4NTQxMTIzMjQ1NTU2NjQ0Mjg1MjU0
-MTM0NDI2NTY4NjUyNDQ1NzQzNDQ1MzQyNDQyNTYzNDU2NDIzMTIyMjY2NDI1MzM1
-NzY1MzUyNTIyNjY1NDE0MjQ2NTM0MzU0NTY0NTY0NjQ0NDY3NzIzMTMyNDIyNTc6
-ODQ1MjEyNDMxMjEzMzU1NDQyMzExNDQyMTMzMTMzNDIyNTYzNDc2MzM1NjExMjQ1
-NzI0MzQ1NzY1OTg0NTEyMjQ3NzQ2NjY2NjQyNDU2NDU2NTQ2NDU1NzM0NjY4NzU1
-ODc2Nzc1NTg1NDg3Njg3ODU0MzUzNDc4NjYzMzY0Mzg3Nzc5Nzg0NjtYTjM2NzU1
-NTM3OTUzMzJANTY0NDUzNDY7ODY3NTQzNjUzMjQzNDM0ODQ1Njc1MzEyMTExNDI3
-NDQ0NzEyMDMxMzQ0Njc2MzM0MzQzNjQ0NTM0NDY0NjYyMTI1NjU1NTcyMDIzMzg3
-NTQxNDY3PDU1NjY4NjEyMzMzMjMzNTY0MjIyNDQ0MTQ0NTU3ODU0NTk1ODQ1NTc1
-NDQ0NTY1MzM1NjY2NDM3MzYyMzM2NTQzNTQ0NDY2NTU0NDU1NTc2ODc4NjU2NjQz
-NTU2OTg3NTUzNDExMjQ2ODY1NjY2NDY1NTg5Nzc2NjMzODczMjEwMzAzNDU3NTQ3
-MTIzMjI1NzUzMzU1MzIzNDQ1NjQyMzMyMjY2Nzg4NzY4Nzc1NTU0MTIxNDQzMjEw
-MzUzMjQ1MzIwMzExNDU1MjQ8ODg2NjIzNzg1NDY3NTg2Nzc0NDM1NzY2NjQ1NjU4
-OTU2NTQzMjU0ODY0NTczMzY5NTY1MzQ0MzA3NjY2Njc0NTQ0MjUyMDI5NDQyLzI1
-NTg3NDMyNjc2NDQ1NTQzMjI2MzI1MzQ2NjU1NTc1NDUyNzo5NTg5NjY0NTU0NTY3
-PDk4NzY1NDY1NDc3ODI2Nzk4OTk4OTs5OTY6OTg4ODs4ODc4NTg6PTg3ODk1Njg4
-NzQ5Nzk3NzpDPDw7PT08PTw8QEREQDpBQDk6PDs6OTY2OTw3NTQ1NzY2NTc4NzY4
-NzY4OTo5ODg4OTQ1ODc1Njc1NjY3NjY0Njo6Sjk3ODo4ODc4OTs6Nzc2OTo9Pjs/
-OjY1ODg4NDc3OTo3Ojc3ODg5NzU2Ojo6NjY5Nzc0Nzc2Njg3Nzg4OTc3NDI3OTg4
-Nzg6OTU2Njc2ODU7Qjk4Nzg5OTc2ODY2MzQ1NjQ1NDQ0NTQ2NTQzNDU2NDY1MzM1
-NjY3ODk5OTY0NDs6NzY4ODY2Njk3NzY4NzMzNTg6ODc3NTY1ODg1MjI0NzU2NDY2
-OTk4Oj04NzdANzc5NzY5NjU5Ojc4Ojs4Ozk5OTg1NTU3PDk6ODc6OTo5ODg+PTtB
-Ozk6Ojo5OTs5Ozs9Ojo2OTs6Ojs9OjU4PDw+P0A+Ojo5PDw+Pj46QUNFQz1QocHM
-09ne4uXn5+nqPDo6OUE/O0A9Nzc2MzU6ODc6ODo6Ozs8QDs4Nzc9NzU5NjU5PDc4
-NDY6OTg3NzU2Nzg3NTY0Njc3OTk5NTU1MjUzMjU1NTE0NDM1MjI1MzczNTEzNDY0
-NjM4ODQwMjY1NDM2NzczNDI2NDUzNTY2NjUzMzM2MzAzNjU0NjY3MzIyMDI0OTg1
-NjY2MzQ2ODM0My8wMzU3Nzc1NTQ1NzU0Mzc0MTM0MTMzNTExMjQyMzc2Njg4ODc1
-NjY0NDUzMjI2NTI1NjQ1MzM0NDM1NTo4NzM1OTY1MTEwMTMzMzU2NTMyMzMwMjIz
-MzI1MzY0Njc0NkE5ODY2NTg1NTM0NjY2NTU1NDMwNTYyNDQ1NjY0NTMyNjM4NjY0
-NzgzMDI1NTI0NTY1NTUzMzMzNjM4NTY2NTQ3NDQ2NjY0NDY3ODc2NTg3NjY4Njs3
-NjM0NTg0NTY0NzQyNjtARHNGNzMzNTUyMzE2NTcyMjQzNDI0NDU1ODQ4NzM3NTM2
-OjQzMzc4NTU2NzQyNzc3MzMyMjI4MzYzMjQ2MjU0NDc1Njg1Njc1Njg2NjU1MzQ1
-MTI1MTIxMjI1NDo3MjQ0MzM3ODQ2NTQ4Nzc0MzI1MzU1NTU0NzM0NTM4LzEyMzM0
-MzM0NDM0NTQ1NTQ1NzU2NTY2NTYzNTUxNDU1NjU1NDM1NTEyODw1MzIyNTQ1NTU4
-NjUzMzQ2NTk2NjQ2MzM7NzU0MjU2NTQ0NzY4NzU2MDI0NjU4NDU0NTg2NTY1NTQz
-MzI0ODczMzM0NDY1NTU0NzY1NTU0NDU1NjI2MzYzNzo3NDM0NTY0NDMyMzA0NDMz
-NTg4NDc6NzQ1MzUzMTE0NzQ2NTQ0MjI0MjIxMDEzMzEyMjM1NjQ0Nzo1Mzc0NDg0
-MzYzNDQzNTc4ODc3ODQ0NDYxNjo0NDMzNjMxMzMzNDU1OTUyMzQ1NDU2OTY0NTY0
-MjQ2MzMzNzU2NzY2NDk1MDc0NTYzMjExMzQ0NTU2NTQ0Mzo2NTc3MzQyNTE0NTcz
-MjIyMzQ1NDg2NDY3ODc1NTQ3NDI3Nzc3NjY2ODc3MjY6NzU2Njg3NTc4NTY1NTc1
-Njc5OTw5NjU0ODo2Nzg8Ojk2ODg4NTQ4NzY5NTg4Pzw3OkE8PT08OjxAPz9DQUJB
-QTs+QDo5Ojg2ODc2OTs5Nzc3OTg6Nzg6ODg5OTk5Nzc2NTY3NTU4Ojg1NzQ3NTQx
-NTk5ODg3NDY7QkM8Ojk4Njc5Ozs3OTc3NzU1MjU4Njg4OTo6Ojg5OTk6ODc9Ozo4
-Njk4Nzg4NjY2Nzc4Ojw5NjU2Njc4Ojo3NTk6Ojg7Ojk4OUlSODc4NzQ0NTU2NzU0
-NDU1NDU2NjU1NTY1NDU2NTU1NDQzMjQ4OTk6ODg5ODc4ODc4OjU2NjM4Nzk4NjY3
-ODk4ODg3NDU4Nzk5NTM2Ojg4ODY1Ojg5ODY2Nzo4Ozg4ODY3ODs9PDw7Ojg7Ozs7
-ODc2NTc3OTk8ODY6ODg4OT06PUU9OTo8Oz87Ojo5NzY5Ojo5Njc5Oz08PT0/Q0E9
-Pj5CPz0/P0FEQj0+Pj9EQkJAP1OgwMzU2t/i5Ojn6ek9Oz4+P0I/Pjc4Nzc3Njc3
-NDs8ODg9OTk4Nzg3NDo3NzY1NDU0NzY2OjY4ODUzNDk1NTc3ODc3ODY3NjU2ODQ3
-NDkzNTI1MjU4NzQ3NjU1NjY3MjMyODc1NDM0MTIyMjU1NjQ1MzM1NzU0MzY0NjMx
-MTMzMzMzNTc3NTUyMTI1MjIyNTc3NDQ6NzU1ODQzNTIxNDAwMDEyNDQ1MjEyMzM0
-NzUxMDM0NzQ2NDc3NTY0NDQ1NDc6NDMzNjI1MzMwMTExNTY3NjIwMzQ2MjEzOTs2
-Nzc0OTc2NDQyMjI0MzExMzIzMjAyNTMzNDQ0MzM3ODQ0NjI1NzMyNTU1NTg4OTc3
-NTQzNjY1NTY1NDY1MzU1MzI2Njc1NTg1NTQyMzMxMjMyMzQzMzMxNDQyMzY4OTk1
-NjQyMzUzNjc3NTc4NTQ1OTo3ODg4OTc0LzI1ODc1ODo2NjU3R25vVDMwMjI0NTUw
-MTQ1MzM0NzY0NDQzNDIzNTg1NTk2ODc3NDU2NzUzNDg1NDQ6ODc1MjI1Nzg4MzU0
-MjE0NDU0NjMyNTUyMzQ2MzM3NDU0MjIzNTU7NjIyMjk3NjMyNDU4NTQ0ODc0NTY2
-OTk0MjMxNjU1NTQyMDIzMjIyMzIvLzAyNDMyMjAuMjQ1NDU0NTQ2NzY0NDY0NDM2
-NTU1NDMzMzYxLzEyODc1NzYzMjY0NDQzMjMzMzI1NjMwNTQ0NDc6OTQ0NzI0Nzg2
-Nzw2MjQ5NDY0NTM6Nzc4NzY1Nzc6Ojg2ODc3OTQ3ODUyNTQ4ODY1MzM3NzQ2NjY2
-NDM2NTQ0Nzk6NjYyMzQ0NTc3NDMyMzIzNDU2NTQ1NDQ2NDIzMzQ1NDY5ODc0NTM0
-MjIzNTQ0MTU0NDU0NjU1NTk0OjU7MzMzNjIyNDY2NzY0NTU2ODQ0NDM1NjQzNjQ0
-NDI1NjMyOTY6OTc2NDMzMjQ5OTQzNTM1MjEyNTU0ODk5NzM0NjY3MzU0MzQ0Nzgz
-NDQ3MjEyMjE0MzM0NjY1NDU0OTYzNjc3NjQ1NTI0NDcyNDY2NjMxNTI3NjY2Njc3
-Nzg4Njk5OTg1NzMzODc3OTc1NjYzNTo3Njk3Nzg6ODg5QDs8OTc5Oz06ODY7OTo0
-MjU3ODpBPjo2OD5CPTs7PkA8PUFBOjg6PD08PTw3NjY4OTo5NzY3PDY4ODg1NjY1
-MzY/ODQyNjQ3Njg5Ozo1ODo2Ozg1OTk3OTg3Ojk5NTVFQDs6OTc4ODc4NzU1Nzc4
-NTY3NDQ2Njg8OjY4OTk4OjY2ODg4ODo5Nzc4OTY2NTc2Nzc6OTg2Nzg2ODc3Njk4
-NTg5Nzc1ODo5Oz44NzY4Njc2Njc3ODg3NTQ4OTk3NjM1NDQ0NTc4NTMxNTU0MzY5
-NjY4Njc3OTg2ODg4NTk3NjU3NzU1NDM2Ozc2Nzc0NDc6PDo4OTg5OTY0NDM0Nzc1
-NDQ1Nzc6Nzc5OTtBOD46NzY5ODg5Ojk5OzQ0NDk8PDo6OTo4OTs+PDw5OTg7Nzs9
-PTw+PDw5OTk4ODo5Njg4Oj07ODw9PD48Oz1BS1BLQT09Pj9AQkBDQkBDWaK/zNba
-3+Ll5+jo6kI6QDw7QEFAPT82OTs7Ojo4OTg3Nz5AOTY2OTQ2NjY5OzY1NjQ2MzQ1
-MzY0NTY1NDQ1NTU0NjUzNTU1NTc2NjU3NTU0NTc6NzQ1NTc7OzMzMzI0NDU2MjIx
-MTQyMjAwMjM2NjU0NDQ3NDQzNDE0Nzk1MDMyNTQzMjAyMzQ0MzM0NTQ2NDQ2Njc0
-MzM3NjU0MTM3NjI0MTExMTAvLzEyNDI0ODg3NTk0MzU3ODU0NjY2NTYxNDQ4NjQ0
-NjIvLzIzNTQ0NTUzNTg1NDEyNTg0Nzc5NjUxMzU1NDMzNTUzMzUxNDQ1NDI0MjM0
-Njc0MjM1NTk2MTIyMzQ0NjQ2NjM7PTg0NDI0MjM1NjU0NDY5NjQxNDM1NzY2NjY0
-MjM0MjMzMzMxNTMzMTMyMzQyNjg3NzYzMzY4OTo3NzMzNzY1ODY1Njg3MzU1NDM2
-NzM1NjQ3NDg1NTMySWZALjAvNTUzMTE0MzY0MzMzNjU1MzIzNTY2MzI0NjQ3NTMy
-MzU1NTI1NTUzMjQ1NjU1QDk0NTMzNDUzNTkzNTU1NDI0NzQ1MjQ0NjQzMzY1MzQz
-NjU2NzY5OTU0NzU0MDE0NDU3NTMzNzI0MTQ1NDMxNDc1NDMyMjI3NDQzMzQzNzMz
-MjAyMzIyMTM4NzQzMTU2NTc1NjUxNDY3NzU0NTI0NjI0NDU1Nzc1NTM0NDQ1NjY3
-MjEwMjU0NDczMjM2NTg1Nzg1NTQzOTQ2NjU4NjQxNjQ1MzQ1MzU0NDM0NjY3Nzc2
-NTMzNTI0Njs3NjY2NTYzMzM0NTU2ODg4OzU3NDU1ODg2Njc1MzI0MzQ2NTUvMTAx
-MjI0NTg2ODU2NDU1NTQ1NTU1MjU0NDU4MS8xMzY2NzI1Njc3NzY3Njc2Nzg2NDMx
-NTkzNDY2MzIyODY4NDY1ODc2ODM0ODQ1ODc1Pzc3NjY7Ozc2NDQyNDozNDQ1NDg3
-NzU4Nzg2ODY3ODU0Njc2NDM2NTUzMzUzNzQ1NjQ0NTU0NjQ1MzEyNjY2NTU2NzQy
-NTg3NjU0NDE4NzQ1NTU0OTk4Nzc4ODk1NjU2OTo6Ozg3ODQ2Njk6Oj4+OTg3NDk4
-Njg5Njg6Ozg8QDo3OTg6ODg3Nzc9ODs6Nzk6PDk3QEJFQD8/P0I/PTg8Oj07Oz09
-Ozo5Njc4ODg5Ozc4Nzc3NzY0NDU2NzY4OTMzNDI0ODc2OTk7NjY5NTU4OTc3NTc4
-Nzo6OTc5Ojs6OTk3OTs6NzY6OTU3Njk6ODs6Ojo6ODY1Njg2Nzw5NjY3Njc3Nzg3
-ODY1NzY2NzY4NjY2NzY0NTw5OTc4Njc2Njk2NDU1NTY4ODU3NzY3OTc4NjU2NjY1
-NTU4NjY0MTM3Nzk4ODY3OTc3ODg4Njg5NTM1NjY2NTU1OTo4Nzc4NzU1NTQ1ODY5
-Nzc1Nzk3Nzc6Oz09PDk3Nzg4ODk7ODk4OTU2NTY6Ozs8OTk8Ojo3NzY5Pjw8PT07
-Ojo2ODk7Pz5APDs5Ozo9Ozo4ODk7PDs3Ozo6PDg4PDw6Nzg6Oj08OTs7Ozw8OT48
-Ojg7PDo7PT08O0E/QEBCQ01dor7L09rg4+Tm6OnqQDs4OjxBP0FCPzs3Ozk9PTw5
-ODs3OTo6Nzc3NjU4NTc5OjY3NjQzNDMyMTM0NDU1NTQ1NTc1NTc7Nzg2PDg3PDU2
-NDI2NjU2NzY5Njc6ODUzNDUxMTMyMTIyMjU0NTM0MzU0NTY4NjM1NDozNTQ0MzM3
-NDQxMjY0MTAxNTg6Njg6OTIzNDc3Njk4NjQ0NDY2MjU3OjIyMTI0NzEyMzQyMjIx
-MjQyMTI3MzI2MzMxMzM2NTczMzM2NDUwLTExMzMzNDM0NjU1NTQyMzEyNzI0MzM2
-NzI1MjMzMzQyNTg2ODY0MTQ1NTMzMjE2ODUzNDQwNDM1NDM3NDIyNTY1MzM1NDE0
-NzQ1MjU1OTUzNDg2NTY0Njc2NTczNjUyNDY2MzU2NDU1NTM2MjAyNjQ0NTg3NjMx
-MjIzNjc2NjUzOTc2Njg4NTU4NzY1NzY1NjU2NzY4NDg2NTIzWkYwMTQxNDI1NDM2
-NjY2MzI2MjQ1NTUzMjIyMjU1ODg4MjI0MjQzNTQ1NjU3NTQ0NzQ0NjM0NjY1NTM5
-PTcyLzQzNDUzNjg0NDQxMS8yNDY0MzE1MzU5MzAyNTQ2NTUzMzMyNTY1Nzc1NDM1
-MzM1Mzg3NTQzMzU1NDc1MjEyNDMzODYzMjI0NDIyNDMyNDk3NTQ1NTY0NDMyOTQy
-MzI1NTUzMzYyMzU0NzY2NzQyNDI3NzI1MjE0MjQ0MzU1OjQzNDc6Njg0NDI1NTMz
-NDM0MjM3OTc1NTU1NTE1NTM0NTY2NDc2ODQ0NDk2NjU1MzU1NTY1MzExMDI4NTk4
-ODc3MjIyNDU2Nzc3Nzc4ODozMDIyMzAvMDIzMjE1Nzc2Nzc3ODs5NTQ0NDQyMjU2
-Nzc1NDQ3NjQ2NDU4NTQ4OTg2NTg5MzM1NDIzNDI1NDEvMDEzMzM1QDQyMzc1MzQ1
-NDs8ODg1NTY4MzU4NzU4NzY2NjQ0Njc3OTg2NTQyNTQ2NzU1Nzc2MzU4NjIyMTM1
-Nzc3Nzg1Nzk4NzM0MjU2NDc1MzQ2NTQyMjIzNTY2ODQyNjY3ODQ2OTc2Njg5OTs/
-PDs2Nzo5QDc3NzUzNTQ3ODs2Nzk2OTg5Ojg4Njk5Nzo4Ojc6Ojs6OTk4OT08OTw6
-NTo7Oz5AOj9APTw9QkY/Nzw8Nzg7OTs8Ozo2Njo3Njc5Nzo7ODk6Nzk2ODk2ODU1
-NzM2Ozc2ODkzNjU0NjY5NzYyNTY4OTo3Nzc1ODo5OTg5Nzo5NTU2NTU2NzY1Ojo8
-Ozk5Oj44ODk4ODo3ODg5OTY3Nzw2OTc3ODg4NzY4OjY1NzU2Njg4Njg7NzY2NjQ2
-NDI1Njg4NzY1MjU2ODc3NTQ3NjY3NjQ1NTU3NDM4NTY3ODk3Nzk5Ozs6Ojk2ODs5
-OTc1Mzc2NDY1Njc6Ojs6OjQyMzMyNTU2NzU3ODg2ODk6OTg2OTc6PTo6Ozo5Ojg3
-ODc1Njc3ODc5Pjw4OTk7OTs7OTg4OTw6OTU1NTk8PTo7PTo6Ojs/OTg6Ozo5Ojo7
-Ojo5Ozo7Ojg4OTo7PDk7Oz08ODg9Ojw9PT46Oz09PUI/QUJBQEFDSVOfvsvU297i
-5Obo6eo7Ojs6Ojk+PD09PD1CPTk5Oj06PTk6OTY4Njc4NzU4Nzk7Ozg2ODY4NTM1
-MjI0NDQ0NTQ0ODU1NzQzNTU1NTg4NTQ2NDU1Njc3NDQ4Njc0MjM1NTEwMzQ0MzE0
-MDQ3Nzc3MzU0MjQzMzAzNjg2NDU2NTU2MzM2NDU0NTIzMTEvNTQ5NzQzNDQzNDg1
-MjE1NDMyNDU2NTM1NDQ0NjIzNDM0MzIyMTI1NDUzMjIyNjY0NDQ3OjY2NjU1MzM5
-NzU1NTQ1NTY0NjQ3NTU2MC8xNTQyMTE1NjU1NTQ3NjQ0NjY1NTQxMzI0NjU3NTIy
-NTU0MjItMzMyMTo0MTQzMTE2NDIzNDU3NzY1MzU0OTY2NTY4NzczNTQ0NjU1NDY3
-NTU3ODY2NjM1NjMyMzI2ODQ1MzY0NTU2Nzg6Ozs6NjY0NDMzNDQ1Nzo9ODc5Ojc3
-NDg3NTM3ODY0NjxNMzMxNTU0ODc5NzU1MzYyNDQ1NjY5NzQwLzAzODY3OTUyMjZC
-NDIzOjc1MjM0MzkzNDQ1MzM2OTMzNDY3NTU0MzIyMjM0MzQ3MjMzNzQ2MTQ1NDI1
-NDAxNDU5ODU2NjU1NDQyNDU4NjM0NDQ0NTQ2NjM1MzU0NDQ1MjI0Njc0Njc0MzQ0
-Mjk3NDYyMTQ0MzQvMTI0NzY2NjY0NjQzNTU3NTY3NjQ1NjU3NjY2NjUyNjc4NTM0
-MzE0NTc7ODk3NzY4Nzc5ODc0NjIzNjY4MjQ1NTQ4Njk0MzY0MjQ1NjMyOTU0NTM0
-NTY1NTU0Mzg3Njo5OTY1MzMyNDU1NzM3NTMwMTI0MzIyMjEyMjg2PDYyMzUyMS8x
-MjQzNjc0MzY4NTQ0NjU4NzQ1NDM1NTU0NjUxMzM1NTU0MzQ3NjU1Nzg4NjQ1NDQy
-Njc2MjMzODgzNDMyMTQyMTM1NjU0ODg2Njc4NTU3MzQ0MjQ2NTY1NDM0NDU2NzQ5
-NjMyNjYxMzQ2NTY1NDI0Njc3ODc3NzY0NDU3NDI3Ojs4ODQ0MjE3NTU2NDQ1NTc0
-MjM0MjUzMi8xODU1NDM1NDM0Njk6Ozc5OTs3OTs/OTg3ODc2NzQ2OjY2Ojg5NjM1
-OjY4OTc4ODk3OTw6Njc1ODo4OT03NTk5Ojc6OT1CPTxBPUZiY0A/Pzw8Ojc6OTs7
-OTo7ODY0NTg2NjU3OTc7OTc4NjY1NjUzOTc2Ojk6NjUyNDg5NTI2Nzc2NzY4NjU1
-Nzk3NzU2OTk4ODc1Nzg2MDU2Nzg5ODk4OTs6OTo2Ozs2Nzc6Ozo7Nzc2Nzk2Njg5
-OTY4NTU1MzMzNzY3NDEyNTY4ODg3Nzc3NDM0MzY4OTg2NDQ2NTQ1NzU1NDU1NjU2
-NzU1NTk4NzU4ODk3Nzg2Nzg2ODU3NjY3NzU3ODY0ODU2ODc3Nzg5NTM0MjM1NjY2
-ODg2ODc3Nzc4NzY1NzY3NjU3ODg5Nzg5OTw3Nzg7Pjs5OTg6PTo4ODc6OTc3OT08
-OTc4Ojs6Oz07PTg1Njs8Ozs4Njk5OTs6Njg7Ojs6OTk3ODo7Ojs7Ojs3OTs7Pj8+
-Pjs/PT0+PEBBQ0BCR0hEU5/AzdTb3+Ll5ufp6jc9Ojw9OUBDPD05OUA8Njk5Ozo6
-Ojc1Njc3NTQ3Ojs5MjY2Nzc3ODgyMzc1NTc6Ozk4Njg1NTM0MzQ2NTM2OTc0MjU2
-NjQ2OTU2OTY1NjM2MjIwMTEyNDM0NTY0NDY4NjY2MTQ4MzA0NjQ1NTQ2NzU2NzU1
-NDY1Njc3NjUzMzk0MjQ2NTU2NTM2NjQ1NzU1NTUzMzE3NTE0MjM1MDAxMzM1MjE4
-QDo2NTUxMzU1Nzc1NDYzNTU0MzQ3NTM3NzU0MzQ0MTIxNTU0MzM1NDMzNDI0NDMz
-MDQ0NDQ1NDQ2ODgzMjIzNTI0Njg1NTIzNzM2MzQzNDIwMi8yMjExMjQyNDYzNzc5
-NzMzMzUxMzQyNzc2NTY3NzU3NjMzNjQ2NjU2NDUzNDc2NzU1Njg1ODM1NDU1Ojg3
-NTc5Nzk5NzQyNjI2NTM2NzU2NjU1MzY1MzQzNzY0NDY2NjIxMzY1MzM2Nzg0NjY0
-MjM0NzczNjU1NDc1Nzo3NDY4MTU1NTczNjY1NjU2NzU2MjMwMjU0MjU0OTQzODUx
-NDI0MjY0NTY2ODQxMjM2NTczMjcuMTIvMDI0NDIyMS8zNTk7NzU2NjY0ODQ0NjY3
-NTIyNjY5ODU0NjUzMzY1NTUzNDUxNDI0Nzg4NDU0MzM2NjU1NTUzNDc4NzYzOTUy
-MzQ0ODY1NDQ1NTQxNTQ0NzszMzcxMTw3NzU1NzY0NTY3Njc2NjU1NzQyNTAyMzQz
-NDQ1NTY2NjU0NzY3Nzk3NTg0ODUzNzUyNDU2NzQ2Njk5ODU4ODg1Njc2NjY1NTIx
-MTMxNTQ0NTY2MjE1Njc6OTQ1NzUzMzM1MjE0MzUzMjQwMjQ2NTU0NDY2NTM0Njg1
-NDk5NTQzNjY2MTQ0ODIyNDgzNjUzNTU2ODY2NTY0ODIyNDIzNDU9PDo2NDY3NzMy
-MjI2NTMzNDc1MjI1MjQ5ODU4Nzc0Njc5ODc1MzU0NTMzNjQzMjQwMjM4NjU0NTYz
-NDY2NDQ0MzQ0Njg2ODc4NjYyNTc3NjQzMzM1NjM2NjY3ODQzODU1ODg4Njc3NTc6
-PDo4OUM+PDo8OTU1NTY3ODg3OTU1NTY5Njg2NDY1Nzo2OTczOjo4Njo7PT87Ojg4
-NzY3Oz49Oz08RmBJOD49PDo6Ojs7PDo3Njg3NjUzNjMyMzUzNDQ1Njc1Njc3Nzc5
-Ojo6ODY3ODg3OTg2ODk2ODg4OTg2NTU4Ojo2Njc5Ozs5OTo5NzU2ODo6OTk3Ozg4
-ODg3ODUzODg3ODg4Ojw5NTU1Njg3NTc4OTQ5OTg1NDU3NTI0NTg3NzY0NDY0ODY3
-NTM0NDM1Njc4Njg3NTEzNTY1NjY3NTY3Nzs5Njc1NzU1MzU5ODk1Njc2NzU0ODc2
-NzY4ODg4NjU1NzY2Njc6OTc1NTM1NDU2NTY2OTY3OTg3Nzk6OTk8Ojk1NDU6OT48
-Ojo5Ojc3OTs7Ojc3OTs3NzU4Ozo6Ojw4ODg2Nzk7Ozo6OTk9PDs4OTo4Njk/Ozo3
-OTk6Ojc3Ozk5Nz0+Pz48Ozo4OTs9Q0M7PD1BPT08PUBBR0BERERSncHJ0tnf4uTm
-5+npPDs4PEJBPD0/Ojs+Pj86OTc6OTo7Ozo4Njc5NTU1QTc4NTQ0NTY3NjQ0NzU5
-ODY4NjM2NTU0NDM1NTY2MzczNTU4MzM1NjMzMzU2NTc1MzM1NTY2NTIwNDMyMjIz
-NDc1NDM3NDQzNjk3NTQ4MzI1Njk2NDQ1NDY0NDM0MzM1NTY1MzQxMjg2My4xMzg5
-NjQzMzYzMzI2NDU3Njg4MzEyMDAzNUFOREM1NTk3NzY3NTU4Njg2NTM0NTI2Nzc2
-NTQ2Njc4NDQ1MzY1NDU3MjM1NzY1MzY5NTMzNjMzNzY2MzMyNzYzNTE1OTU0MjE1
-NTQzMzIyMDM1NDM0NDY1NDMyMzY0NTM2PDc1NDU1Mi8xMzM3OzU1NTU2NTgzMzI0
-NDQ0NDk3MzI2MzM2MjAzNTMzNTQ2ODQ1Nzk5NzY1NzM0NTQ2NDY3NzY2MjMyMzU0
-MDc3NTU0NTYyMzM1NjQzNjc5Qjk3NDY5NTMyMjM0MjIzMzQ4MzQ1NDQzMjQ2ODY3
-NTo9ODYyMTQyNDIzNDc1NDQzNTQxMjM1NjMzMzU1NDk1NTY2OjQ4Ozc1NjIzMzIz
-MjExNDA0NDM1NTg3OTQzNjY2NzY3OTYyMzIzNDg0NDc0MzY1NTYzNzY0MjQzMjQy
-NDQzNDQ1MTM2NjQ2NTQ3NDI0MjIzNDIzNDQzNTEyNDc1MjQzNTM2NDMwMTc2Njg5
-OTU0NTIzNTg2Nzc2NjU2NTY0MzI1MzQ1NDY2Ojc4NjUyNTg2OTc3Nzk1NTUyMzY0
-MzQ2OTUzNTY3NDU3Ojs6Nzc/NDg2NTMzNDExMjM0NTYzNDIzNTc3ODU3NTQyMzM2
-NjIzNDI0MjM0MDc1NzU0MzAxNDIyNjQ2OTQzNjM1MzQ2NjUxNTU0Njg1NjU3Nzcy
-MjM2NzU2MjM0MjUxMjM0NDUzMzY1NDMzNjQ2NDIxNDk2NjI1MjY1OTo2MzY1MzQz
-NTc2NTQ0NTM1MjQ4NTQ2NzQ1NTU2NDQ4NTUzNTUyMjQ0ODk1NjMxNDg2NTY2NTQ2
-MzM1MzY1NjU1MzY6NjE2NDU0NTc2NDY/PDs4NzY7Nzg5Njg4OTM2Njg5Nzg6NTc5
-ODk4NTU0ODY2OTo2ODQ2OD49Ozc4ODc3OTs8QEA7Ozo9QDo8Pjs/QD88PDo7PT45
-Ojg3ODg2NzUyNjIzNjk5Nzk4NjY2Ozg1OTc3PDs5Ozg0Nzk4ODc6ODU1NDY3OD07
-Ojw9Ojo6ODo6OTg6Ozs5OTs5NTg4ODg6Ozk2NzY3NzpCOzg4Ojk4NTU4NjY1NTU3
-Njc3Nzg3OTk4Nzg6PDk1NTQ1NTU0ODo5Njg1MzU6Njk4Nzc3ODc3NjY5NzU4ODg4
-Ozc4Ojs4ODc5Nzc6OTc2Nzk7ODU3OjY2OTg5NzY2NjY1OTg3NzU1Nzk2OTg1NTQ0
-NjQ3Nzg4OTk5ODs3OTs6NzU5ODc3ODk4Nzc2Nzg5OTk7PDw9Ojo3OTg5Ojg8Oz06
-OjY2OTk7OTk5Pjs4ODk6Ozo4Ozs/Ozo7Ozo7Ojs5Ozo8OTo9PD06Ojs+Oz08O0VD
-Pj0+ODs9PEJEPEFFQlWcwcrT2t7i5ufo6ek9QT1AREFBPUFAOjw6PkA+Ozk4Nzo4
-Ozs6OjY3NzY6PTo3NjY0NTY4NjM0Njk5OjY1NTU0NDQzMzU3NTY5NjYyNDU2ODg0
-NDY1MTEzNTQzMzc0NTQ1MDAxNTUzNjQ0MTIzNjU2NTM0Ozc1NzY2NzYzMzQ0NDU1
-MzAyNDM0MzMyMzc2MzIzMzQyMzUzNDMxMTIzMjEyMTQ1MTM0NzY1MjEyNDIxNDc1
-MjE0NzY1NTAyMTQzMzM2NTMzMjY4MzU3ODQ0NTMyMjUyMS8yLzI0NDM3NDQyMjQz
-NjMyMjM6OTUyNDY1NjU0MzQyOTU0MzEzMjA0NDU5NjI6NjU1ODc0Mzg2NTQ2NjI2
-NTMyNTU2NjI1OTY3OTg2NzU3NDI0NjMyNTc3NzU2NjQ1NjMyNjUzNjQ0NTg4OTc2
-Njo2NjU1MzI1NzUzNTU0ODY1Njc3NTU3OTU2NjU1NTg2Nzc0NzkyMz9eODQzMzY1
-MTM2NDIzMzY3NjU4NjU1MzMyMjk2NDQ2Nzg4NzY2NDY1NDc1NDYzMjQxNjUzNDM0
-Njc9NTQ0MzM0Ozc1NjU8NjQ3NDE1MTYzNDY1NjQ0NjUyNTM0NTY1Mjo1NDg2NTU1
-MjEzNDQ0MzQ3MzUyMjMxMjIxMDM2NDM0NDQ1NTMzMzU2ODcxNTQ1NDIwNTQ0MzQ0
-NTQyMTI2NzQ1NDM1Nzc1NTY2Mzc5Njo6NjY0MzQ0NTY2NTY2NDQ2MjI1NjQ1NjM2
-OTc3MjQ0MjQ0NzY2Njc1NDMyNDM2NzU2MzY1MzE1NTY4MzY4NjVCNjczMzQxMzYy
-MzIzNDIyMzM2NDEyOjc7NTIzMTc2MzE2NDIyNTc3NzU1NDY1NzUzNzw3Ozw2Nzg2
-NTc1NTM1NDY3NDUzMzYyMDI4OjU1MzM1NTk0MzM1NTc0NDEyNDMxMjw0NjQ2Nzc1
-OTg2NTQ2Nzc5MzQ2MzM2MzMzNDM0NzUzNTMwMzU1NjU2NTY0Njg3NjY2NTIzMTA0
-NTYyMDUzMzQ3OTc5Nzg1ODU1NDU1MjM0NDY4NTc3NTU1NjQ2NjQzNTg2ODY2ODc3
-NTY4OTg4OjY3ODY3Ojg7NTY6Ojo3Njg5Nzg3Nzc3NTI2ODs7NTU3OTY3NjY3NTY4
-Nzc1N0FAOkJBREE+PD48PDY4PkA6ODs6ODY1OTU2Nzg1NTY5Nzg5Ojk5Ojo4PDs5
-Nzk4OTk3ODs4NzU0ODY5NzY4ODg4Ojo5Ojo6ODY6OTo6PDo3Nzk6OTc5Nzo8Ojs6
-OjY4OTc7ODk4OTw6NjU2NDM2NTc2NTg3OTg0NjM1NTY5OTo8ODc3NzQ0NDQ0NTg1
-NDU0NTc3Njc5NzQ4NjU3OTk6ODg3NzY2ODc2ODk4OTY2NzU0NjY2ODc4OzY3ODU1
-NTY4ODQ2NTU2ODY3ODc3Nzg3NTM0NDY4OTc0Njg5OTY2NTo5ODg5ODg3OjY5Ojo3
-ODk3ODo5ODk8Ojs8PDs+Ojo6OTs6PD89Ozo3ODY4NTY3ODg4OTo4Nzs3Oz07QDw8
-Oj08Ojk7Pz08PD07Ojw8PT48PT49PDs7PTw8Oj88P0NEQkI+WJzBzNXa3uLl5ufo
-6j9BQDw9PTg6OT8+QUE8Ojs7PTs6PDk5Ojw2NDc6NTU2OTY0Mzc1Njo3Nzc1NDg4
-NzQ0MzY6NzQ2NDMyODk4NDQzMzU0MzY2MjE2OTc1MzQ1Nzg2OTY0NDQxMzQzNDU0
-MTQ0Nzk4NDIyMzQ4NDMxODY2MzMzMjAyNDQ4NzEyNDMzNDM1NTM2ODU3NjIwMjQ1
-NDEyMzQ3NTc3NDUyMjMzMDEzMzQyNDU0ODIyNjY1MjYzMTU1NDIzMjE3NTM0MzU2
-NzUyMTY0MjUwMC8wMTQ1NDQyMjQyMzMxNDk0NDU1NTYzNjc3NDM0MjI1Njk0NjYz
-ODY1MzI1Ojk5ODg5ODc3Njc1NDUzMzU1NzY0NTY1MzM3MzQ2NDQ1MzYzMjQyNDUz
-NDM0Njc4NTk0MzM3NjQxMTY3NTY0NTQ3NTc2ODU0MTM1NjU2NTU2Nzg3NjU1MzM2
-ODQ0NTE0NzU3Njc3NTQwNDk5NzUzNDc0NDMxNzMxNDU0NTU4NDc0MzQzNzU3MzY3
-NTQ4NjUzMjU1MzQ4NjQ2OTQzNDo7NTQ2Nzk2NDQzMjUzMjI1NTgzNzM3NzU3Mzc1
-NTc3NDY1NzQyNDU4MjEzNDQ0MzM0NTY2MzEyMTIyMjA0NDA0OTc0MjI3NjQyNTMy
-NDg2NDY0NDMyMjQ0NTYzMTEzMzc4MzM3NDc0NDU3NjU2NTI5OzQ0NTQ1NTY2NTY3
-NzY3NjU3ODM1MzQ1MjI1NDY1NTUyNTU0NjU1NTUzMzIxNjU3NjMzNDM0NDQ4OTg3
-NTg1NjU1MjU5ODM2NDc3NTg1MDMxMzQ0MzQ0NDQ1NzU2NTQ1MjM0NjQ2NjY3NDQ1
-NTU1NDYzNDEyMjM4NjQzNjc4OTczNTY3NzM0NTUyMzUzNTY0NDQ3ODI8NTU1NTI0
-MTIzNDg3NTI4NzUyMTI1OTU1NzY3OTY1ODg4NTI2OTQ3NjU1NDI0Nzc2NTYzNTY2
-MzY1Njc4MzYzNzg5NjY2NTQzMzEyMTE2OTUxMjU1NDU3Njc3ODYyNTUzNTMzMzQ3
-NzY3NTc3Nzc5ODc2NjYzMzY2OTg6Nzk3Njc2OTU3PDk3Nzc2Ojo3NDc4Nzg4Nzg3
-Njs3Njk3Ojc6ODg7PTg4OTc6OTY2ODk5Ojo+PUBDPkBBQj06PDs7Oj47NzU0NTc3
-ODY6NTY4Njk3Njc2ODg2ODo3Nzc5ODY5Ojk5NDU5Njc0MzU2NTc2NjY4Nzg4NTc6
-OTc4ODg6Njk4Nzc6NTQ3Nzc4Nzg3NzY5OTs5NjU2NTk3Nzk6ODg4Ozc0ODk3Nzk5
-NDM1NDQ3Ojk6OzY3NjU2Njc2NDc1Nzc3NzY1NjY1NDQ2ODU2NTg5OTc5ODg3NzU3
-NzU4Nzc0ODY1NTc2NDc1ODs7NTQ2Nzc5Ojc1NjQzNjY0NTU4ODg5MzUzNDY5ODg3
-NTU0NTg4ODU4Njk4ODg3PDc4Ozw6Ojc5Nzg3NjU3ODo7Ozs9PDk4OTk6OTs4Oz4+
-Ozk6OTk5OTk5ODg7ODo3ODo5Ozw3Nzw9Ozk6Ozk3OTo6Ozs7Ozs9PUA7Ozs8ODs8
-PTw/PkA9PEBAQ0hdocDL09re4uPm5+rpO0BAPTk2PDw4ODtBQTs7OTg4Ozw8Njc2
-Njg3Nzc2NzY0NzY1Nzg1NTQ1NTY1NjU2NTU1ODczODczMjc2ODc4MzYzNDc3NjQz
-NTU1NTMxNDg1Njc4NTY4ODk3MjY1MzUyNDY4NzQ3NTQ2MzM0NTQ1NDY1Mzc1Mi8v
-NDY2NjQ1NTMzNTU1NDI0NTQzMzEwMTMxMjExMzQ3NjY4MzQxNTAxMTI0Njc2NTU1
-NDQ2NDY1LzIzNTM0MTExNTU3OTY1NDY2MjExMjM0NDUzMzY1MjM0NDQxNDM0Njc0
-NDQ1NTAwNzg1NDExMjAyMzQ1NzU0NDU4MzQ4NjY4NjU1Njk3NzQ1MzIwNDY2NDU1
-MzU2OTYwMTM2NTQzMzU2MzQ1NjQ0MzMxNzY2Njo4NjY2Nzc3Nzk6Nzc3NDUzNTI2
-OTY3MzQzNTU2NjY0NjY0NDU1NDU0MjI1NTU1NTU5OjY2NTQyNTQyMzQ3MzQyMjUz
-NTMyNTQ1NTg4ODc4NTU1NDxJNzc3MzQ1NjY2MjM3NjU0NDI1NDY1NjQxNDYzMzM1
-NTI2MzY2NDM2NTI0MzM3OTMzMzY2MzUzOTY2NjQ0NDU1MTI1Mzc5NjU1NDQ5MzU4
-MzM0NDQzNTIyNjc5OTU3MzEyMDAyMjIzMzM0MTM1NDAwNDQ1NDMyMzI2MjY4MjI1
-MjQ3NDU1Nzc2NDUyNTc4NjU1NTMzNTU3NzM1NTY2MzU1NzMzMjQ1NDQ1NDUzNTk2
-MjczOTYzNDQ1NDM1NDI0NDM2NjY2ODQ0NTQzMzUxNjY3NTU2NTU0MzM0NzU0NTIy
-NDM1NTU0Mzg5NDQ1MzM0MzM1MzQ2NDIzMzc3NTQ0MTAyNDI3PDc3NTUzNjQ1NjU3
-NjQ1NTY0MDM2MzQ2NTQyNDYzMzU5OTc0MTMzMjI2NjQ2NTQ1MjM3NzY2Nzk4NTMz
-NTQ1NTU2Mzc3NDU0NjU3NDQzMjMxNTMyMzQ0NjU1NTQyMzU0NTM0NjkzNTU0NTQz
-Njc0Njo4NjY2NzU2NDc2NTY3NjY2NDQzNjU1NDY3NTc2NTc4NzI0NTY2NjY7OTs8
-Njs3Nzc6Ojc5Ojs1Njo3NzU1Ojk6Ojg0Njg0NjU0NDc4NzY2Nzg4Njg7OjU1Njg5
-PDs6PkFCQD49PDo9Ozs+PTs5NzU0Njg4Nzc2NTs8Ojg7ODg1NDg5ODg7OTk7Nzk6
-OTk3NjY2NDg3MzU6ODY0NjY2NDYzNDY5OTg7Ojk4OTk4Njc4NjU2OTg3Nzo1NTY5
-ODY1NDc4Njc3ODs4ODY3ODg3OTg9OjU1ODg2NjQ2NzY3MzM2Nzk5Ojk4Ojc4OTg1
-NTY1NjYzMzU0ODY3Nzc3Njg1NjU1NTU2NDY3NTc1NzU3OTc5Ozo6OTg5PDk4NzQz
-Nzc4OTo2NTc1ODk7Pz45NjY1Nzc0NTc4NTM1NjY4ODc6ODg3ODY2NTk6Ozo7ODs7
-ODU1Ojg5OTw8Ojk3Ojo7Ojs3Njk6OTk8Oz08Ozs2Njc5Ojo4Oj06Ojo6ODc5OjtB
-QDw4Ozo4Nzo8Njk4PUA9PT06PD08PDs8QD8/Pj8/PUFER1ejwcvT2d7h4+bn6Ok6
-OEBCQT88PDo7QD86Ojk7ODg6PDk2NjU4OTo1NjY2NjQ0NjU0OTQyNjk3OTU1Nzc1
-NzQ1NDQzNDIyMzI0MzU2NDQ1ODYzMy8yMjY2NTYyNTo1NDY1NjY4ODU3NDUzMzY0
-NTQ3OTU3NTY3ODMzMjc3NDU3NTM1NjI1NTU0NDQ1NTIyNjIzMjI0NTIyMDAxMTIy
-NTMxMjc2NTM1NTMzNjE1PjYzNTY1NDg1NDUzMTA0NjMyMzM1MjQ2MzM3MzMyNTg1
-NTU1NDM0NjQ7Njg0NjE1NTIwMjQzNDU3NDc1NDU0NTU1NTI1MjIzMzIyNTI0NzY2
-ODY3NjQ3NDY0MzU1MzQ2MDIwNjUzNDc1NTM6NTY3NTczNDU5Nzc4NDMwMTQ0NDU0
-NzU3NzU1ODY3ODU2Njc5ODg3NzY1Nzg4NjQ3NTkzNDY0NDc2MzI0NjIxNTU1MzU1
-NTM2NjY0MjU0MzM1NTM0NDM0NTYyMzU1MTQzNDQ7RDg1ODM0NDY2NTg3NzY0MzU0
-MjM2MzQ0NTU0MzMxMzQ3NTczLy4xNjQ1NDU0NTc0NTY1MTIzMjQ4Nzg5ODMxLjQ2
-NTMzNTU0NjQ0MjU0MzI0NjMyNTIyMjQ0MjMzNTQyMzM2NjY3NDY0NDIzNDIzNDUy
-MjMyNTU2NDQ1NjUzMzQ1NzMxNDMzMzEzNDU1NjY1NTQ0NjUzNzM4NDM0NTc5Nzc1
-Njg3NzUzMzY4NjY1NDU1MTEzMzIyNTI1NjQzNDQ3MzE0NC8yNTM1MzU3Ojc2MzYz
-MzQ2MjQ1NTQ5NjIyMzE2NTU2NzMxMzMzNDU1NjUzNDUyNDQ1NDIyMzQ2MzA0NDMz
-NDM1ODM1NTU0NTY0Nzg1NDgyMzMzMTIzNjUzNjE2MjI3NDE0NzYyLzU0Njg0NTY0
-NTUzOTc7OzUxNTQ5NjU2NTk4NzY1NjU2MzY2NzQ5NTM0NDY0Njc2MzY2NjYxMjMy
-MDI3MzIzMzQ0NDU0MzMzNDM1NjU1NDU1Ojc2Njg0Mzc2NzY4NjMzNzs2Njg1MjE2
-NjEyNjQ7OTg0Nzg6OTM1NTc2NTs3Ojg6Njc4Ozk1NTc3NzU2OTk4ODc2Ojg2NTY4
-ODg3Nzc1OTc5PDk+Ojk7Ojg3NjQ4Njo4N0BAPTs+Pj87Ojo9PD45Nzc2OTk6OTU3
-ODg4Nzc3Nzg5Ozk5Njk5OTg4OTc6Nzo5NTc3NDY1ODg4ODU0NTY4NTc6ODg5ODk7
-Ojo4ODg4Nzo6ODY4ODg4Nzc6ODg0NTc6OTo6OTo5ODY5Ozo4NDg3NjU4PTs7Ojs7
-OTo0MzQ2NjQ0MzM5ODk5Nzo7Qzg2NTk3NTc3ODI2NjQ0NjU3NTY1NjY3OzY2ODk6
-NTM0Njg4ODk8ODg4ODg4ODs6Ojk4Njo2NTc5OTg4NTU1Nzg5Ozc3Oj86ODYzNTg4
-ODY2Njw8ODc3NzQ2Njg4OTg3Njg4ODk5OTg2OTg4Ojo6PDw5Ojo4PD1CPDs9Ojo5
-Ozs5ODo8Ojk4OTk5QTw7Ojs4ODo5OD1APjs4PT08OjU1ODw5Ozw7Pjw+Ozo7PT48
-PU49QUJFREFKY6XAy9PZ3uHk5ebp6T07QTo6QEE9PkA9PDg3Njo3Nzg4Njc4Njg1
-NTY2NTg4NTY1Nzg1NTU9YF5bPzw5ODk2NzM0NDQ2MzQ0MzM0ODUyNDY2NjY0NDI1
-NzY0NjQ3NjM2NjU1NTM0MjUxNDY0NDIyMzM1NjQ2NjY0MzM0MTYyODQyNTYzMzIz
-MjQyNDQ1NTI0NjkyMjAzNjUzMjAyNDQ2NTQ0MzQ1ODc2NjI1NDY9NDQ1MzQ1NjU1
-MTQzMjU1NjU0MTAzMTMzMzAzMjI2NTY1NTM0MzIzNDU0MzM2NzMyMjMxNjY3Njcy
-MjU1NzUyNTMzMjUzMTAwMzMzNjQxNTMzMzI0NTY1MzQwMzQ3NjU0NjIzNTc2Nzg2
-ODI4NjU1NTY5OTk0Mzk4MzY1NDc0MzQ3NDY1NjQ0Nzg3ODk7ODMyNTc3OTo3NjY1
-NjY2NTQ0NDg0NDkzMTAyMTE0MzY0Njg1NDU1NTY3NDM0NTU7NTUzNjUyMTI0NTM6
-MzQ0NjU4Njo3MzQ0NDY5PjQ2MjM0NTQ0NDU0NDU1NTI1NTczNDc3OTY0MzMzNzQ0
-NDU3NTc3ODg1NDY3OTk4NTk4Mzc3MTgyMTQzNDI0NzQ0NjY3NDExMjQzMzQ2NjMy
-NDU1NTM2NzQ0NDU0NTQwNjAzMjU0NDU0NTY0NjQ2NjU2MjI0NjU0ODIyNTUzMzM1
-NTU1NjYzMzY1NTYzMzI1Njc2NTQ2NTg4ODo2NTI0NTU0NDM1NDMyNTU0Njc1NjU0
-NTQ2NDQ+NjQxMDMzMjQ0MzMzNTM1MzM1NDA0MjM2NzU1ODMxMjc1Njc4ODc2MzEy
-NDY3NjMxMDY1MTI0MDAyMzUyNDY2NjU1NDQ7NTMyMTU4NzY2NDM1NTQ2ODY0MzAw
-MjUyNjUzMTU0NTQ0NTY5NjU3NTY0NjgzNDY4NzY2ODk0NDg0Nzo4ODk1NTU1Nzk3
-OTc0MzY4MzM0NTQzMzY1OD88NDg1NTU4NTQ1NTY5NTUzMjM1MS8xMTU4NzQ3NzQ2
-NTQ3ODg3Ozg3Nzc4NzY1Nzc1NjU3NjQ1MzY0Nzg3Njc4ODk4NzU2Njk4OEI/Ojg0
-NjQ1NzQ2Njk4Nzw5ODY3Nzc3OTY1ODc7PDYzNjU2NzZAPDo5OTU2NjU0ODk3NjQ5
-ODY3Oj06OTk5Ozo8Ozw6ODg4OTg5ODk5NzU4ODg3Nzg2Nzg5ODg4Ozs8ODczOTo4
-NzY1MzU2Njc3NTQ5OTY3Njk3OTo3OTo6PDY1ODg3Nzo5OTc4Njc3ODc3Njg6OTg4
-OTk6ODk4Nzc2ODg8Ojg3ODc2OTk5NjU1NzY3NzQ3ODU5ODs3NzU4Njk2NDc1NjU1
-NjY3NDQzNDU4NzQ3NzY3NTY1Ojo5QD87NzY2Nzg5ODU1ODw7ODU0Nz48Ozk6ODk2
-NTU1MzU0Njc3Njc1Njc5OTg3NjM1ODg4Ojk2Nzc4ODQzOTs7Ojs1OTs4Njc5Ojo7
-PTtAPjs5Ozk5OTY6Ojk5Ojs6PDw5Ojc2PDs6ODc4ODc6PUE7PTs3Nzk7OTc3OTo5
-PTk6ODk5Ozo5Ozk+Pjw8PTw8PD8/PkJBQUVCQkRDQEVXn8DL09nf4eTm5+joQkI9
-Ojw/Q0A8PT06Ojc5ODg8OjY4Njc0NDg3OTw5Nzo5ODU0PTs8MzlQiZVTOzk3NjU2
-NzU4NDQ0NDY3OTY0NTQ2NTQ0ODk3MzI1NjI0NzY4Ojk6OTc1MzAyMzU1NDY0NDg1
-MzU1MzU2NDI0MzQzNzk4MzM2NTgzMDMzMzY2NTk6NjQyNDQzMjQ1NTM0MTQyMzU0
-NTU0NjY4NTU3NTI2MzAyMTE0NTQ0MTYxMTQ5Nzc3NDM1MzI0MjQwMDIyMjIyMjIx
-MTM1MzcyMzY0NTY0NDM3OTc1NTQ2NDQwMTM0MzU2NDI1MzAxMjQzNTAyNjU1NjQ1
-NDQzNDY1NTM0MzQ1Nzc1Nzc4NTc0MjU1NTU3NTU2ODc2Nzk2NzU6NzI0MzA2Nzc2
-NDc2NTc2NDU3NDc3Njg0MjY2Njc3NzY1NTM1MzQ0NTQ2Njg1MTIzMTA1Njc0NjY1
-Njg1Nzs1NTY0OTw6NDQyMzExMjEzNzQ1NDQ1NDk3NzIxMzMyMzg0NTIyMzE3NzMz
-MjEwMDE1MTExMC0xMDI0Nzg4Ny8wMzAyNTY7Oj06ODgyMzU1Nzk2NTQxNzQwMTEy
-MzMyMzQ3NTQ2NjQ0NDM1MjI3NDIyNjY2NjQ1ODQ2NDQ0NDIwMzM0NDUyMjc2NDcz
-MDEyNDc2NTc2NjYzMzU1NDEwMjA0NjY2OjQ0Mzc1MzMyNTMzNTc6NTg2NTY4Ojs6
-NzU1MjM1MzY0NDA3NjM1NDI2NjY0NTk3NDM2NDg2MjQ4NTU3NTY1NTIyNjc2NTM0
-OjU4NTUzMzEwNTU1MzY2NTM3ODc4NDU3NTU3NzMxMjE0NjY2NDI0MjI1NDQ1NTQz
-MTUwMjI0MzM4OzY2NDQzNTQ0NjcyLjQ1NDUyNjUyNDM5NjMzMzM4NzIzMDE0NTY4
-Oz03NjY1OTU1MzQ2MjM4NTY1NTY2NTU0MjY0Mzc1MzQ0NDU2NzM3OD03NjQ5OTU3
-NTMzNTc0NDMzNDYzNDIyMzQzNTU1NjQ2NjQ1Nzk4ODU3OTc1NTIxMjk5Nzg2NTY2
-NDM1NDY2Nzc3Nzc2NzY3Nzk3OjY5Njg0OTY3NzY2NDY2Njc4NTE1MzU3ODk4OTo6
-OjY3NjU4Ojo5Ojg2Njc2NTY3NTc3Ojg2Nzg3Ojs5Nz45Ojg1NDlGOjc4ODg7ODo5
-Nzg4Ozk5OTU0Njc3Ojg4OTg3Nzs4Nzc3Njc6NTU0Nzc7Pjw8Ojg3NTg5OTg3ODc2
-NTc4Ojg8OTU4Ozg6ODo5Njg5OTg3Njg4NTU3ODc3NTQyNTY4OTU2Njg3NjY7Ozo5
-Nzc4ODU1NDY+PDw6NzY1Ojc3Njg3Njg1MjQ5NjM1NjY1NzU4NjYzNTY2Nzc6Ojo4
-NjY2NzY3NjQ0NTc6OTk3NjY4Nzg4OzY0NTU0NDc7Ozg3NTU0NDQ5OjY6Ojo8OTc4
-Oj04Ozg1NDY2OTg2NzU4OTk4NjY2Ojs5OTw8OTk7Ozo7Ozk6Ozo7Ozk5Ozw5OTc5
-PTs5Ozo8Ozs7Ojw4OTc8Pjs9PDk3ODc4Ojs5Oz48PEI+PT48Nzg7Ozk8P0BCQj5C
-QUJDSEFCQ0+cv8rT2d3h5Obo6Ok7Pz4+Pj48QEJCODY2ODk7Ozs5Ojo7OjUzODo4
-Ojc6OTY0ODc1OTY4OkNxlIhZRDo3ODY0NDIxMjExMzI2ODc1NDMzMjE0MjYzMjQ0
-MjU4Njc2NzY1OTkzMjY2NzM1NTI1NjM0NDQ1NDU2NzU4MzQ5NTY7NjU4NTo4NDEx
-LjA0NjQ5NjY1NTY1NjM2ODMyMzIyODU0MzU3NDU2Njk2MzIyMjIyMjAyMzQ1NDQy
-NjU3MzUyMjc0MzM0MTEzLzA3NDM0MDQ3MzI0MjM1NTQyMTAvMTE1MzQ1NTU0NjI0
-OTc2NTM1NDQzMDAzNDIxMjMyNTM1MzU4NDQ1NTU0MzY1MjU4QEE6NzU0MjQ4MjEy
-NDU0NTY0NzY1NTIzNTc1NjQzNTM0MjU1NDI1NjI0NjM0Njc2NjU3NDU2NEE4Njs4
-NjQ0NDQ1NzcxMjY2NDU0NzY1MjQ1NDY3NjQ1Njc0NDY4ODQ3MjExNTQ1NzYzNjQ0
-MzI2Nzc2NDQzNzY2NTc0MzUzNTMyMzIzMTAxMTUxMTMzNjM0MzExMzU0ODQ2NTQz
-OTs1ODU2NTYyNDM1OTkzNDUyMjc3MDE2NDEyMDI0NTM2MzI0MzU2NTMzNTM2MzQ0
-NDY1NjUyMzQ3PTY1NjU2NzQzMzIyMzM2NTExMjMzMzY5NjM1NjQ1MzE1NDQzNzc2
-NjU1NzQxNDQ1MzMzNTQ3PDU1NTg6NzY0NTI1MzQzMzQ2OTU1NzYzMzc4Njk3NjYz
-NjY1NjIxNDY0NjQzNTM1Njg0MjIwNDQ0NjY4NTEyMC4xMjUyNDM1NjM0ODY4ODMz
-NTY1NDU3NjQ0NTc4NTQ0NzUyNTQzMzQ1ND05ODMyNjUzMjMyNDY0NzY0MjI0MjY1
-NTc3Ojc0MjY1NDU1NzMyNTY0MzY2NDg4NjgzNDMzODM0MjY2NTM0NDY1NjQyNTMz
-MjI0Mjg5NTYzNTI0NTk3NjU1NTU3OjYzNTYxMzY0MzQzNTM2NTM1NTU1NDc2NjY1
-NTY1MjU4OTc3NTM2NTU2NTY3Njc2NDc1NjMwMjY0OTs5NzozMzU1NTc3ODg3NTQ3
-PDU1NjU1NTk5ODU4OjU0MzQ3OTg2ODg4Ozs4Njk+Ozc4OTk9Oz08ODY0NDg5OjY1
-NzY0NTY2OT45Nzo4OUk6ODc4Njg4ODg5Ozk4Nzc3NTU5OjY2NjY3NjY4NzY4Nzc5
-NzU4Nzc5OTc3OTg7Ojg2Njk3Njc4Njg2Njo4OT06Ojk7OTk3Nzk5ODg5OTY5NzY2
-NTU3OTo5ODc4ODc4Nzk5ODU1Nzg6NzQ3NjU2NjY4Njs6Ojs4NTg3NTQ0NTc2NjMz
-NDY0MzQ2NzU0NTc2NjY3Njg3NjQ3Njc1NDU0NDQ2NzY3NThAOjk3Nzc4ODc1ODY0
-Njc5Njc4Ojg2NTc2NzQ1NTk3Njg3NTU3Ojo4Ozw8ODY2Nzg3Ozg3OTg4Njg5OTc6
-OTw/Ojk5Ozw6OTo4Ojw5Ozs6ODs3OTc9PTs4ODk7PD45Ozs3Oj07PD0+Ojg6Nzc5
-OTw8Oz47PT89PTs4NjY7PTo6PD1BSEI/QUREQj49SZ2/y9PZ3eLk5ufo6jxAQj86
-PD1DQz45Njs/Pjg5ODk7OTg1NjQ1NDM6OTk2NTo4NjU6Nzg6PFBYaWtYSjg4NTQz
-NDUzMTI0ODc4ODU1NDc0MzY0MzU2OTM+ODc0Nzg0NTc1NTI0NDs0NTU1NjY0NTQ0
-NDUzNTY5OTo3NTY3NDI3ODU0NDQ0MTIxNTIxMDI0NDQ1MjM1NDM1MzUyMjIzMTEy
-MjE0NjU1NjQzMjQ2NDI2NTEyNDUzMi8yNDQzMzY1NDU0NDY3NjU0NDEzNDQ0MTIz
-MzMxMDM0NTMxMTI1ODExMzQyMzU3ODY4NjU0MzIzNjc0MDI2NzQ2NTQ3NTc3OTU2
-NzU0NDU2PDQ0NjY4ODo5NDQ1MzQ1NjU1NTU2NjYyMzMxNTQ1MzU2NzA3NDY0NTY1
-Nzg5ODY1MzM1NDM0NzY2Mzg4NDU1Njg2NjU2MzU2NTM1NTU0NTY1Njg0NjMzNDg3
-NzQ1ODg6Njg1NDQ1MS8wMTQ3Nzc4NDIyMjM2NDQ2Mzk4NjQ2Nzg6NjQ6NjQ1NjQz
-MzM1MDIzNzQ7ODMzMS8xNTU1NDU5NDQzMzQ0NzY1NTU3NzY2NTg1MTUzMzMyNDY1
-NDExNDM1NDUzMzU1MzY0MzMxMzI1NjUyNTIxMjU6NTI3Njg0NTQ2NDY2NzIxNTEz
-MzMxMTYzMzI1NDY0NDMzMzk2NTY0MzQwNjQ1NTc1NTY3NDc1NzUyNjY1MjI0NTU2
-NDQ1MzQ0MzU1MjU0NDQ2NDI1OTg2NDY0NTQ3ODg1ODY2NDYzNTY1NTU3NjYzNDU3
-NTYzMjQ3NjIyNTIyMjM2NzY3ODU0NTg1NTY3OjY0NTQ3NjU3MzQzNTU0NTU2NzMy
-NDY1NzI2NjY2NjY2NTY2NTQzNjU2NTY3Nzc1NjY2MzMzNDQ1MzQ4Ojc2NjY2NTc2
-NzgzMzQ0MzkzMjEvMzY2NjY1NjY0MzY3Oz04NDMxMjUyMjEzNTgzMTQ1NDM2NDY2
-NjQzNDUzNDI0NzY1Njg2OTY3Nzc2MjM2ODgzNDQ0NzY3OTQxMzU1NTczNDQ1NTY1
-NTMzNDY3QDg3Nzg4Njg2NzU4OjU2OTg7ODc3Ojs5Nzk2OTg4OTk2ODc2NjY3Njc5
-ODs6ODs8Ozc6OTg5Ojs5Oj08Njg3OTgzNzs1Njc6PD89PDk4Rz06OTY4ODc8NzU3
-Ojo5OTdAOz47OTY4NTY3NTU4ODY2NzU0ODk4ODc4ODU6ODs6OTk3ODk4ODY4ODk5
-ODo6PDs3Ozo5PDg5NjU1Ozg1Nzc1OjY2NTY3NTk5ODg2Njg4Njc0NTY2OTs4ODM1
-NDc2ODg2Nzk5ODg2ODQ2NTQ0NjY1Mzg5NjI0NzY0NTY1NTU2Nzc4OTk5OTo3ODo4
-NjU2OTk3ODk2MjU3OTk3NjY3Njc2NTc3Ojg4Ozk5ODU2ODg6Ojc4ODg3NzQ0Nzk4
-Ozs4Oj09Ozk3OTk8PDs5Ojo8ODg5Ojs4NzY2NTk4OTs8Ojg5OTs3OTo5OTg4OTc8
-Ozo5OTw8OTo8Ozw6PD47Ozs6Ozw8Ojo5ODw9Oz1BPDs5OTo7Ozk3Nzs4OkA/Q0NA
-O0A/P0NKmL/M09je4ePl5+joQDs/PDo9PD49Ojo8Ojc3Njc5Nzc1NTg5OzU3ODg4
-Njg2NjU2NTQ4NDY1OEQ4PUhJOTY0NTIzMjI1ODYyNDc5NzY3Nzc2NjU1NjU1NTQz
-MjM4Ojk6Pzo2NDQ4NjQzMjQzNTYzMjQyMzQ0NTY2NzU0NTU2NzM0NTYyMzM2MTU0
-MzU0NzY1NjY0NTc0MjAzNTUxLjI1MjgyMTQyMzMyMzY2NTY1NTQyLjExMTMyMTQz
-NDQzMjM2NTQzNjY1Nzk3NDM0Mzg4NDM2NDMxMjM1MzIzNjg1MzUzNDQyNTM1NTc3
-MjMzMDI0NjY1NTc1MzM5OTY3NDM0NTc3ODU3NDU0Mzc3Njo0Njk1NDk5NjY2Nzc2
-ODc1NDQ0NDUzMzAyNjYzMjM3NjM2NjY3NzQ3NTQ2OTY1NDQ3NzY5ODY4NjQzNDI0
-MzQzNTQ0NjY1NzM0OTM2NDUzNDQ1NDQzNDc1ODs4NDMzNTU0NDExNDQ2NTc1NDQy
-MTIxNDM2NjQ3NjU0NjY3NTUyMzUzNTMzNDQ1NTE0NDUyMzY2NDUzMzQ0MS8yMzA0
-NjY1NDY3ODg3NTgxODUzNjU1NzIyMTQyMDE2NjQ0NDUzOTU1MzQ4ODc1NDQ1NDI0
-NDIxMjUyNDI0NDY2NDQzMjU2NTUzMTE2NjMzNDM1NjY3MzExNDEzODU1MzY1Mzcz
-MTQ2NTQ1NDc3PDkzNTQvNTUzMzY1NTg2ODY2NzMyNDQzNDc2MzQ3MjU2NzY3NjQz
-NTM2NjYzODQ1MjQ1NTIyNjc1NjQxMzc4MzQ3NTY2MjU0OTQ1NTQ1NDM2NzM0MjMy
-NDU2NjY1NDYzNTQzMzc3NjY1NjU0NjQ1NTY7NjU0MzY3Nzg0Njc3ODg3Ni80NTk3
-NzY0LzY1MzI1NTE0NTYxMzQ1NDQ2MjIzMzM1MzM4OzU1NTk1NTQwMDU3NjY0NDU4
-OTozNTc1NDc1NjY3NTU1MjY3NjUzNDM2NTQyMDM2NDQ2NjI1NzQ2Nzg2NTc2NzQ0
-MjU2NTQ0NDI0MzQ0NTMzMzU0NTg5NTc7ODk3ODY5ODo4OTk1NTU3OTg4Nzc3Njg4
-NTU6Ojs4ODg4ODc3OTg1MzY5Ojs5Ojk5PDw4Njk5OTg3NjU4ODU2Nzg4NjY1NjU4
-Ojg5Oj08OTo4NzU0Nzo4NjU1Njc2NzY5Nzc4OTs6Ojo6PDo5Nzc5Ojg3Njc1Njo4
-NTY2Njc4OTc4OTo7Nzg3Njg2OTg8Ojo6OTg4OTk6ODc4Ojc5OjY2NjY2OTk7PTs7
-Njc4Nzg2NTg3Ojg3NTg1Ojo4Nzc2NjQ0NjY3OTY2ODY2PDY1NzY1Njc6ODc1NDc1
-NzY0NzY0NTU1Njc1NTM1NzY4ODc3Njo2NzY2Nzo7ODg4NjU3NzY4NjQ5OjY5ODU0
-ODY2OTg6NTY1ODk4Nzs7ODs4ODc2OTg4OTg4OTk7Ozc3Nzo8OTs6Ozc6Ojc3Ojg2
-NzY3Nzk7Ojo4PDo5OTw6ODc7PTw5PDc5Ojo5Ozw6Ojo7Ozc4Ozw5OTw/Ozg8Ojs8
-ODs6OkA+PTo7Ojw6Ozo/QDs7P0M/PT0+QkdIQ06ewMvU2d3g4+Xo6OdAPT1BQDk5
-Oj43OTo6Nzo7Ojs7OTg5Ojk4Njc7NjYxNDc2NjQ3NTU1NTU1Njw0ND05NTY0NTY1
-OzY1NDk1NDc3NDIxMTc3NTc5NzQzNDYzMzQ1RTs8ODUyNDQ1NjQ2NDY4ODU0MzU3
-NzMzMzUxNDY5Ojc2OjczNDY0Mzc3ODczMzEyMzcyMjM1NTU1MDE2ODc1NTU1NDQz
-MjEyMjM1ODk1NTU0NjI4MjY5NDYzMzIyNDAxLzA3NTQzMTI1NjU0MjMzNDQ1NjUy
-MzUzMDM2ODg4OjcyMzQzNDU2Nzc1NDY3NTc1NTE0NzU4NDc6Nzg2NTQ1NDU2Nzc3
-NjQzNTIzNDc1NDY3NTY4MzU2Njg1NTc0NDMzNzc0NTQzNDY3NTc4NTUzNTQ0NDc1
-MjU6OTY0MzMxLzM2NDc3MzQzPDU2NjY0Njo5OTY1NTIxNDQ1ODY2NDQzNDI2NTM3
-ODQ0NzQ0Nzg7OTU1NDUzNTc1NDQyLzMzMjIzODM0MzU5PTIyMzU5Njc1NDIzNDM2
-MjQzMjQ1Oj06NDM1MzE1MzMyMjQ0MjQ0NTQ1NTg5NTY2NTk0NDg1MzQ2NTUzMzQ2
-NzUyNTYyNTM1NDc0NTU2NDU0NTQ0MzQxNDIzMjc0Mzg4NTQzNTY0MzU0NTM0NDQ0
-OTQzNDQ1MjI0NjIyMjEzNDQ4NjY4NDM1NjAyMTQ0NjY0MjMzNjc2NTY3OTY1ODg4
-NjY2NTU2NTU0MjA0NTMzMzQ0NTU2ODQ0NTQ1Mzg1NTQ1NDQ2NTUyMzU3NDIyNTUy
-NDc1NTc1NjQzMDAyMzM2NDQ1NDc1MjU0MTU3NTY0Mjc1NDUxNDQzNzczMjU2Nzg1
-NTQ1NDU3NDg3NDIxMTAyMzQ1NTEzNTU4ODY4ODo3NTQ0Nzg1NjQ3OjM1NzI0MzQy
-Mzk2N0M3Nzc1NTU1NDQ4PTg4NjQ4Ojc0NDY6OjY3NTc2MzI2NDk4NzYzNDMyNjEy
-NDI1NTMyNDM2NzMyNTY1Nzc1ODQ0NzQ2NDUzNDU3NzY0NTY1MzA0NDQ1Ozk3NTg5
-Ojk4ODU2NzQ2ODo3NjY1NTY5ODc4OTo2NjQ1ODo5PUM4OTc6Ojc4ODc2NTU1OD47
-ODc2OTk6NzUzNTU2Nzo3Njc4NzY3Njg3ODs6Ojg2ODg4ODg1NDQ1NTc3Nzc2Nzg3
-NjU3Nzo7OTc2NzY5Njg6NTU5NzY5Ozk3NDU0Njc5ODU3OTs4NjU3ODs6NTc5Nzg3
-ODk7Ozc4Ojk3OTg4Ojk6OTg6Ojs6OTc2Nzk5OTY3NTc1NTc3ODc4NTY0Nzw3NjU1
-NTY2Njs2ODg7ODQ3NTU2Nzo4Nzk2Njg3ODgyNDY1NDQ2NTM2NzY1ODg1NTc6Nzk3
-OTg4ODg2Nzg2Nzc5Njc3Njc2NTg3Nzc2NjY1NTM1NzY2ODY6Ozs4ODg4ODc3NDY1
-Nzg5PDo5OTc2OTs9PDo5ODk5Ojg4PUk4Ojo2Nzw7Nzk6OTs7OTk7PUI+Ozo7OTg4
-Ojs5PD05OTg6Oz06QDs5PUVDQD09PTo5ODk7Oz1DPDo7ODg9PD09Pj5AQkNBQkFB
-RUU8R5jAy9TZ3uHj5ufo6UI9QUA8Ozo/PUE6PTg6OTo3OT48Nzo6ODk+Ozg4ODc2
-NTo3NzY4MzM1NjU1OTIyMTEuMzEyMzM2NTI0NDU5Nzs3NTY2Njk0NzUzMzQ1Njc3
-NzU1NTU4Ojg4NTU2OTY1ODg1MzEzNTMzNDY2Nzg2NDQ1ODc3OTg0Njk1Njc1NjMx
-MjI2MzE0NjUxMjU7NjU0NTU1NTc2NjM0MjMzNzQwNDY3NTc1Nzg8Ojc3Nzk0MzQ1
-ODk3NzQyMzQ4NTM1Njc3MzMwMjMyNDIxMzM4MzU0NjQyNTg1NTU3OTc3NjczNDU3
-NTI1MzIxNDg1Njk3NjM0NzU3NTU1MzMzNzY1NTUwNDMzMDY1NzYzLzE0NDc1NTU0
-MzQ2NDQ5NjEyOTQyMTY0MzMxMzY0MzU1NzU1NTk3NDQ1MjMyMzU1NjU2NTc3ODg5
-Njg2NjUyMzUzMjU3NzU2NDIzMzAzNTQ2NTMzMjM0NDU1NTU3ODg0NTg2MDI0MjIw
-MDMxMzQ2NDU1MDEzMjc3NzQ2NTM0MzMyNTIyMjI1NzUyMzQ1MjEyMjI1Njo3NDU0
-NDY1NTQzNDk2NTM3OTM2NDIwMTIzMjQ2NDYyMzY1NTMyMzMyMzQ0NDM0MzY4NjMy
-MjUyMjA5NDc1NTQxNjU1NzMzNTc5NzUzNDUzMzU0NDIzMjM1ODQyNTU2NDMzNTQ0
-NTU3NDM0MzI0NDY1NzQ6OjUyNDc2Mzg3NDQzMzM2NTMwMzQ0MzM1MzY2NjQ1NDQ3
-Nzc5OTQ1Mzc1NTM0MzU1MzU4NzIzMzM3Nzg5Nzg1NDY3NjMyMzU4NjQ3NjU4NTQ3
-NTk0MjU1Njg4MzY2ODo2NjM0MjI1NDEzNDQ0NTEyNjY3NDIzMzg3NjY2NjQ2MjM1
-Nzg0NTY0NTc0NTQ1ODY0NzQ3NzY0MzQ7NjY1QTU2NDcxMTEwMjI1NTU3NTQ0PjY4
-OjU2MzY0NDQ1NzQ1NTQ2MzczMzMzNTM0MzIxMzUzMjUzNDUzNDYzNjU3Ozk0NTIz
-MjM4OjQ2OTk0NDMyNjQzNjg2NTQ6ODo6NTcyNDQ1Nzc3NzM3ODc4MzY2Nzc3NTY4
-Pjk5ODg+PDs3ODo3NjU1NTU0Njc4Ojg2Nzo3Nzc3NjU5OzU5OTg2Nzg5OTg5Ojw6
-PDo7Nzg2NDU2ODg3Njk4OTY2Njk4ODc4OTo9OTk6NzU4Njk5NzU1NTU1Nzo6OTk5
-OTc4O0Q6NjU6ODY7ODk2OTY3Njg4Ojg4Nzs6Ozs4ODk4OTc2Nzg6ODc6Ozg3ODYz
-NDg4OTY0NTc2NTg3NjU1Nzg2ODs4NTg4NzY3NjY7NzQ1Nzo2NjY2ODg2NTQyNzc7
-ODM1Njc1NjY6Nzc3Nzg5OTY2Njg4ODk3NjY0Njc0NTU2NTU2Nzo5ODg3ODY2NzY4
-NDU1NjY6OjY3NzY3NTc4NzU0Njc2NTg5Ojk1NTc6OjY5Ojw3NTg4OTk3OTg5OTk1
-Nzc5Ozk6Ojg5Nzo6Ojo5Ojo4Nzc4PDs6PD46ODY4Ozc5Ojg6PkA8Ojo8PD0/PT07
-Ojo9Ojo8Ojw6Ojg6OTc+RTxBQkJBRURCPT9Mj8DL1Nrd4uTm5ujpOjc9PTw7Njg8
-Pzs7Oz05Nzg4Ozo6OEI7OTk7Ojk8OTUzNjk1NDE2MzM0MzI2NjUzMTAxMzE1NzQ0
-Njc1Ozc1ODc1OTUzMzUwNDczMTQ1NzU0MzMyNTc4ODg2ODg3NTU2MTM0NDUyNDM0
-ODY2NzY1NTU2NjQ0NTY0NDY4NDM0NDY3NTE0MjMzMzQyNjc3NjMzMzIwMzQ4NzU1
-NTc5NTIwMjU3Nzc5NjM1NTc3MjQ0MzI0NTMyMzM1MzU1NDE2PTgyMjU5NzU0NTMz
-NTUzMzQyMTQ0NDY1MzQ3NDU5NjY1MjU0ODQ0NDY2NDg3NDg2OTgzMzIzMjU6OT01
-NzMyNjQzMzU0NzQ0NDUzMzY3NDM0NTU4NjU2NzQ2NjI0PTM3NTc1NDU4NzQ1MzY1
-MzM0MzM5Nzg2MzY2NzU2NzY0NDQzNjc3NTY3NjMxNTg0MzQ1Nzc6OTk2NTQ1NTU0
-NjU1NzQzMzU4OjM0NTMzNjQ3NDMzNTQ0Mjg0NTI1NjMyNTM2NTM0Nzg1NzIzNTo2
-MzIyMjIzNTk4NDo0MjMyNzM+OTs5NTY3NTIzMTEyMzg5NDU3NjIzNzc0NDIyMjQ4
-NjU0MjI1NDMwMzQ0NDI1NDY0MzU4NjUyMTQzNjY2NDY3NjM0NTY0NDQ0NDg0MjIy
-NTQ1MzIzMzU3OTczNjIzNjQ0NzczMTU3NTIyMjI1OjU2NjY4OTY2NDU2MjE1MjIz
-MzI0NDMxMDU2MjI5NzY1MjM3NjU2Njc1NTQ1ODU6NDU3ODU0NDMzNTU2Nzc2MzU1
-ODo3Njg3NDM0Njc3NjYzNzc0NzY1Njc2NTM1NTY0Mjc3ODk3NDM2Njk2MjEzMzEy
-NTY1Njc0NTQxMzQ0NDM0NjY1MjQ1NDY2NDU1MzUzMDIzMzc3NTQzNzk5Nzc5ODQ2
-NDM3MzIyMjU0MTI1NDIyNTU0MzY9NTk2ODc0ODc1NjQ4NzY3ODU1Njg5NDMyMzU1
-MjE0Njc3NjQzNDYyMjU1ODc3NDMzNTQ2ODU2Nzk3OTo4NTQyNTQ0Njc2NzM0NDc2
-NjY1MzU4Nzc1NTY2NDQ3ODk7ODo4OjtBQTc2PDs4Nzo5OTo4OTg5Nzk8ODg8PDo4
-Nzk2ODk5NTk7OTc4OTY1Njo7PDk4Ojs5Nzk6Ozc5ODg2ODo2ODk5OTk6Ojk7PDc5
-ODs4OTc3NjY2Njk3Nzg1OTo6PDo5Nzs7OTlBSDs5NTo6NDY6ODk4OTc3Nzc5OTg3
-OTc4Ojg5NTk8ODc4Nzc6OjY3ODs7OTk3OTk6Nzk5NzY1Nzo0NzY0NDU3ODY3NzYz
-NDo7Njg2Nzg2NTc2NTc4NDY2Ozk5Ojk7Ozc3NjY0MzI3Nzg6OTg4ODY4OTc4Ojo4
-NzQ2NTI1NDM2NTM2NTY3NTg3NzY3NTUzNDY5PDg5NzU2NTY3NzY2OTg1NzM1Njk8
-NzY3Nzo6Ojk5ODk6NzU3NzQ3ODk6ODU3OTo5Ozk4NjY6ODs8Ozg4OTo3Nzs1OT9C
-PTo6PDs7OTs4OTo6OTg4NTo5OTk+PD49PDw7PTw9PD06Pzo7OD1BPzxCQ0RBQD4+
-QU6KvsvU2d/h5OXm6OlBQjk8PDs4QTs5Pjs8Ozs4Njg6ODo9Ojo1Nzo5ODo7Ozw3
-NzQ1NjQ1MTMzNDUzMzUyMTI0MzEzNTQ2NzY1Nzk0NjQ0MzQzNTQ1MzQyNTk3NDEz
-MjY0OTs1NTU1MjI2NS8yNTMyNDk1MTQzNDYzMjQyNDc2NDQ2NjQzNzc1Nzo4NzQy
-MjMxMDAxNjY2NTUxMzM0NkQzMTQ0Mzc1ODo3NzQyNDY0NjY4MTM0NTY0MzQ4NjU1
-MDI1NTQzMzQ0NTE1NjY6NDU1NDMwMjQyMzQ0NTU5ODY0MjM0NjM2MzQxMjQxMjM0
-NDQ2OjY5Njc2OTk1MzI1PTY0NTc0NTQ+MzY1NjQwMjMzNDU0NTY3NTI1MzM0NTY0
-Njc3ODg4OTQ3MzY0MTQ3NzY1NDUyNjUzODg7NDU5NDM0NTY2MzI1MzQ1Njc1Njc0
-NDUzNDI1NTc2NDQ0NTQ2NzQ3NTE1NTQ2ODg5ODU0Mzc3NjU1NDIzMzMzNjM0NDY0
-MzQ0NzYzNTQxNDY2NzQzNTU3ODQ7PTQyNDQyNDM0NzMzNjM0NTQ5NDI1NjQzNDI1
-MzMyNTUzNTQyMjU0NTM0NDU1MzM1NTQyMjMzMzY0NDI0NDIyNDMyMTQ4NTM3PTs2
-Nzg5NTM0ODUzNDEzMzE0MzQ0MjEyNDMyNTY0MzM0MTQ3NTQ1NDQ0MzY1NDE0MjMz
-MjIzMzQ1NjQ1Nzg1NDI2NjU1MjU4NzU2NjIyMzM0NDg4NDY1NzU1NTQ0NzY4NDU0
-NzUxMTI1MjQ1NjEyMzUyMzM3NjU1NjU2Nzg1NDc2NTM1NzU0NzY0MzMzNDY1NTY5
-NjY1MzM1NTQzNDQ9MjM0NTQ1MzEyMzIzMjY3NTQ1MzMyNDY2NTU3NjY0MTI2NjIy
-NTU0MzY0MzU0NjU1NDo3NTY3OTY0NjYzMzQzMjUyNTM1OTk3NDQ0Njo3Slk6NTY4
-NDs5NTUzNjU3MzU1NjU0NTU1NjMyNDEyMjMxMzU3NTU0Njc3NjU3NjU1NDc0NDc2
-ODc5NTk4ODY2NTc1NzU3NDMzNTc1NjU2ODYzNTY3ODc3NjU5OTc7Ozg5OTk5Ojs6
-Nzg2NzU3ODg4Nzs7ODg3RDg2Nzg4Ojo6ODc4OTo2Ojo6NDQ4Nzg4Njg5Njo5Ojc3
-OzY5Ojk2ODo4ODc6OTc5Ozo5ODg6ODk4OTs6ODo3Ojk5PDo3ODk3OD06NzY3ODg3
-Ozo7Nzg3NjU5ODg3ODc3NzU3ODg7Ozo5Ojk4Ojc5OTc0Njg3Njg5ODk2ODk3ODg5
-ODk4NjU3OTc4NjY1MzM0NDU3NjY3NTQ1NTc3NTU1Nzg2NTI1NjM0Nzg3NjU4NTU4
-NzQzNjY2Njc4Nzc3OTk4ODk5NzY2NTo6NjU0NTQ0NDc5Nzc3NjQ3Nzo4NjM3Nzg2
-OTg5Nzg2NTY3NjU1NjQ2OTw4ODY3NTY2NTk3ODc3ODg2Nzc6ODc4Ozk3ODk4OTg8
-PDk4OTo4ODY4NTU2Pj08Oz45OTg5PDs4OTw+Pjo7Ojs7OTw9PDk7OkFCPD08REg8
-Nj08PDs5PDs/QD89PD09REFAPT9APEJHS4S7y9Ta3uHj5+fo6Ts9Ozw+Oz8+PTw7
-PDo4PTo/Ojs7Qjw7OTU3OT04Ozg2OzczNjg4OTQ0NTc7Njc1NDYzNDU1NjY1NjY1
-Nzc2MjU1NjM1ODY2MjU0Njc3Nzc2MzM3NzUyMzQ0NjUzODU0NTY1Ozc1NDUzMDEy
-MDMzMzY2OUE5ODQ0NDU0Nzg4NjQ2NjUxMjIwMTI3NTUzNDY1NTY1NjMxLzExNDU1
-OTg1ODY0MjIyMzQ0NTczNTQ1NDM2NDQ0MzMxMjg1NjY0NTc0NTU1NjI2MzIyNDY0
-MTIzMjMyMDMyNTMzNDIzMzMzNDU0NDMzNjozNDUzMzIyMjc4NjQzMjUzNTM0Nzg0
-MjE0NjgzMjU4NTM2NDY2NjY2NDU0NDMzNDU1NTY2NjQ1MzY2MzU2NjQ0NTc1NDY3
-Ozc4NjIyMDQzNDY4NTY5NjQ0NTQ0NTMzMjI0NDk5ODc2NjY2NzU2MjY3NTE3NzI5
-ODg1NDczNTY3NjU0NDUzMTM0Mzg3NTgzMzQ1NjY1MzQ0NjUxMzI0NTU3NjMxNDY2
-MzE0LzM0MzQ0NTQ0MzUyMzY5ODY2NTY3NTY3NjY1NTEyMTQ1ODQ0NDUzMjEvMjE1
-NTg2NDQ1NTI2ODk0NTc2MzU0NTU8ODQ0OjUzMzMxNjQ0MzAyMjEyMzIyMzM2ODc0
-ODc3ODU0NTM2ODc0NDU2NDQ1NDIyMzI1NTQ2ODY2MzQ2Njg4MzU1NjY2NTM4NTQ0
-MTU1NjU2NjY0MzU4NjQ0NTc2MjQ4NzQ2Njg2MzE0NTU1NjI1NTQ1NjQ0NDc3NjY1
-MzY3NjExNDU2NDI2NjU0MzQ4NjY2ODM0NDY2NDc1MzQ1NTQ0NDQzNDU3NDM0NTc1
-NDQ2NDMxMzY2NDYzMzIyMzU1MjM2NzY2NDMzMzM3NjU3NTQ1MjEzNjU0NzIzMjIz
-Nzg3MzU8OTc2NjY1MzQ1NDU0OjY3NzUyNDQ0Mzc1NDM0NDIyMjM1MzU4NDMyMzMy
-MTUzNTU1NzY3Ojg1MzI1Nzc3OTM3ODY1NDU3NDUzNDI1NTc2NjY3NTg1NTc1NDc3
-NTc3ODU3ODY1NTI1Nzc6OTg5Ozw5Ojs6Njs3ODc2NzY2OTw7PDtCOjo6PTg5ODc8
-Ozs8Ozo6OTg4ODk2NzU2NjY2PTo2Nzg6OTk7Pjk6ODg3NDU2OTg5ODc6Nzc5ODs3
-Nzg4ODg3OTk5PDg3OTg5OTg3OTw5OzlFQzo5QEA7Ojo5Nzc5Ojo4NjY2ODw8Ozg5
-Ojk6OTc2Njc2NTc3OTg5Nzc3OTc2NTY3ODc4ODg2NTYzMjUzMDEyNTk4Nzg4Ojc2
-NDU1ODY5Njc2NTY4Njc1OzY2NDg2MzM0NDU1OTc3NzY2ODo3OTc4Njg5PTY1NzY3
-OTg3NjQ0NTk5Nzc1Nzc1ODY2Nzg0NDU1NTY5NjY1NTY3NTU2Nzc5Njg4Nzc0NDc6
-ODo4ODo4Njg5Ojo9Qj06ODc2NTk6Ozo7ODo4Njg6PDY2OTk6Ojk6PD86Ojg5Ojo6
-PTs5ODo8PTs7Oz4/Pz4/PDo7OTc9PD08Oj5APjo7OjtEPz87Oj08RUdBPjxCREFH
-irrK09je4eTl5+npQDtAOzs8Oj9BQ0A4Nzg6OjY5Ozo5Ozk4Njg3Ojg6Ozs4NzUz
-NzY1OzU4Ojg6OTU5NTg1ODc4NjMzNTU2OTc1MjM1MzQ4ODYzNDUzODg1ODgyNDUy
-Mi4yNTY1NTM0NDY3Njo6NTc2MjY0MDU3MzU2Nzc2NTY1NjU3NTIxMzM0MzM3NTIx
-MTEzNDM1NjQ2NjU1NDY1NjQxMjM3OTU2NTk4NTM0NDQyMzM0NzczMjM0NTczMjcz
-MTEyQTgyNDQ0MTEyMzU0NTEzNTMyMjIwMTIxMDEsMzMzNDAxMS80NjIyNTYxMzIw
-NTE0NzQyNDI3NjU3OjQzNzc0NDMyMzU0NTQ2Njg3ODQzMzE1NzU0NDU0NDc3NTQ2
-NTQ0ODg3NDY2Nzg3NDM0MjY1NzU1NjU0NTU1MjM1NTQ0MzI1Njc0MzI3OTc1NDc2
-NzUzNDc0Nzg1NjM0ODY4NDY1NzQ4NjM2NDU2Nzg0NDY2NTY2NDgzNjU4ODc4Njg4
-NjQ0MzEzPTo4NjUzNTg0NzU1NDQ2NzU1NDU0NDUyMzM1Mzc2ODUzMzM2NDU1NDY2
-NDY4OTc3NDM0NDM1ODc3NzQzMzMyMzQ1NDQ0MzY1NjQ0NzQyNDMzMjM2NDUzMjY2
-Njg2MjM0NTY2NTU0MjIzNDQyMjM1NTU0MjI2Njc1MzIxNDM0NDQ0NDQ3OTAwMjM0
-NTQ0NjYyMzc2NTU1Njo3NTIzMjczNDU4NzIyNDY1NTQ0ODU0NTQ2NDU0NDMyNTU4
-NjU0MjQxMzM1MjI0Nzc4NDE0NTMxMzEwNDMxMzc0MzM5OTY1MTU2NTM3NTU1NzU0
-NTQ+NzU0NDI0MzUyNDg1NDM1NTM2NDM1NzU1NDMzMzg2MzU2NzM0NDQzMDI2NTY1
-NDQ1MjU5OTk3NjIyMjI2MjIzMTMzNzczNzQzMzQ2ODY5Ojk0Njc2Mzg3NDU1NDU0
-MzQxMjIzMjMzMzQ1MjM2MzEyMzIyMjMzNDIzNDQyMzU3OTg3ODU1NTU2ODY0Njk8
-NTU1NTY1ODQ4ODg5OTc2NTY3NzQzNTQ0Nzg3NTY1Njg3Mzc2MjM2Njc7Ozg5OTQy
-NTc3NDU4ODY2ODs6ODg4OzY1OTc2Njk2Nzc7Ozk4ODg5ODk2NTYzNjc5NzQ4NzU4
-Oj46NzU2NjY2NzU2NTY3Nzo+ODY3OTk5NjQzODc3NTg4Njc3PDs3OTw7Ojo4OjhA
-Njo6Njc1NTk6OTg4ODg4NjY2Ojs6ODg6Ozs5Ozc3OTc1Nzc4NTc1NTc1NTc4NDg5
-Nzg6Nzc2NzY1NjUzNDU2OTY1NzY1MjM1Njk/NzYzNDU3NzY1OTg3Nzg6ODs8Nzg6
-NTc0NTc3NzY2NTc3NTY4ODU3OTg1Mjk6OTk2NzU1Njg3OTc3NTlDOTg6NzY3NzY3
-NTM0NTQ2Nzc3Nzc7ODo4Nzc4OTg4ODk8PDw4ODg5Ojw6PDs9PDs+PDo7Njg5Ojw7
-ODc6Ozk5OTo6PT05Njk6PDs7PDo7Ozk2NjU4OztAOzo9Ozs9Pj5APDg2Ozs7OTg8
-PT08PTs6OT09Pjw/QD9CU0NAQUE/OUWHuMbR2N7h5Obm6ehFRkU9ODw7OD87PTo6
-OjxAPDs8NzY2Nzg3NjY1OjY5QD04ODQ1NzY1ODo/Ojc2Ojg3NTc1NTs9Q0A7Njg6
-NTY6NTk5NDc2NjAyNDU2ODY+QTY1MzQ0NTE0NjYzMzE0Nzo4Njk7ODc0MTU2NTY2
-NDQyMTMyNDQyNjg1NzUwMjQ1MzMzODczNDc2NDE0NTU1NDQ0NDMxNTUzMjU1NTY0
-MjU0NDUzMzM1NTc1NjQzNjU3ODUzNTY3NjU3NTQ1NzEwLzAxMzEwMDA0MzMyMTUz
-MzI0MzIxMTIzMzMwMS8wMjU2NTMwMDM1NDExMjQ0MzQ0NDQ0ODc1NjY5MzIzNTI0
-NDMzMzQ6NjMzNDQyMzIyNTQ0NTQyMzQxNjY3NjQ5OTc2MzUzMjI4NzY4NjU1NjIw
-MDE1NTQ0MTgxNDc1OTg4NzQ2NTQ3NzM0NDMyODU0MjI0NjQuMDUyMjU2MTY0NTQ3
-MjU0PDc4OTk1MzY0NDw4NTQ4Ojk0ODcyNDE0NDQ4OjUxNTY0NDU1NTY0NDY1Nzc0
-NjU2Mzg1NDU0NjQzNDM3OTUzMjM1Nzo1NTg0NTk5OTM0ODU3QDc2NzU1ODEzOTc0
-NTQyNjQ5NzY0NDU2MzMyMjY0MzM2NTc2NTU0MzI1Njg1NzY2NTMzNDM2NDM0ODQy
-NTMzMzQ1NDk1MjY0NTU3NTg6OTMyMjQzMTY1NzYzNTcyNTY2Nzk2NTUzNDU0MzQ1
-NjM0NjM0MjQzMzMyMzIzNjU2NjU1MzQ2NzY3NDEzNDU0NDc1NTQyNjMzMzIzMTIy
-NjU3NTYzNTU3NTY4NjM0NjQ3NjYzNDQzNDMxNDY4NjQ3NjQyNTQ0MzY4NDUzNTQz
-Nzc2ODc1MzE0NDM1NDU2NDA3MjAyNDU1NTQ1NDQ1NTU0NDQ1NDQ1MjI0MjQ0ODg1
-NDU0NjQ2NDY2NTY0MjIzODc0NzMzNTQ1MzMyMjIzMjM0NDQ0NjQyMDQ1NTQ1MzIz
-MTU4NzM1NTQ0OTY4NjQ3NjU0Nzc1NjU1NTU2Njc4NDU4ODU3ODY1NDU1NTU4NjQ2
-Nzc0NzM0NTk5Nzc3ODg5Nzk5NzY3ODY2NzU0NDQ1Nzg4RDg7Ozc2NzU2NzY4OTU1
-OUA3SkI3Njk1NDY4NTQ1NTQzNzk3Nzc8ODc6NzY3ODo5Nzc2NjY1PDo5NjU6Ojg5
-ODk5Ojo5OTY3Nzc8PDw5Ojk6PDo4NjU0Njg3Nzk6ODc4Ozo3ODc5Nzg5Ozg3Nzo6
-ODg3OTg4NzQ1NjQ1NTc3ODc1Nzc3Njc4NjY2ODg2NjY1Nzc2NDk5NjU2NDMzNzk5
-ODY4NDU1Nzg4ODc4PDo3NzY4NjY2NTc1NjQ3ODc2MzQ2NjU1NDc2NTc0NTY2OTo4
-Nzc3Nzc2ODY3NjY0Ojw4Nzc1ODg4NTY1NDg0NTc2NjY0MzY6Ojg5NjU2Njc4Ozc2
-ODk4Nzo5Ojk5PTs8Ojk9PDo5ODc5Ozk8OTo5Ojo5PT87PDk4Ojk4Nzw7OTo5ODY3
-OTk6PT07PD0+PTw8PD87Nzk3ODY7Ozs7Pj4/PT9BPTw8PTs8PUA/QUNHRDk8S4u3
-xtHY3N/j5efo6UVKQ0BDQDw7PDw8Ozs9O0E+PEI4Njg4ODc4Nzs4OTs8PD04NzY5
-Njo3Nzk3OTY5Ozc4NTM2OTo6Ozk2MjQ3ODk3NjY4ODU3ODQ3ODQyNDM0NDg4ODg7
-ODgzNDIzMTU0NTczNjc2Njg1NzczNTY2NzI1NTI0NzUyNTQ3MzUxMTE2OTs1Nzk1
-NDU3NjUzNTMxMjQzNDQ2NDMyNDIzMzEwMjQ1NDQ0NTQzNDI2NDc1MzU0NzQzNTIx
-MjE1NTQzNTQ2NDEyNTQ0NTM0NTU0MjMxNDIwMzAyMjU3NDQyMzY1NDM0NDQ1NDIz
-NS8yMTU2NjU1OTg2Nzc2NDQxMzI1NTU3NTY0NTU0NTY4NTEyMzM1Njg0MzIxMzU0
-NDc1NDM3ODQ0MzQyNDg2OTY2NTg0MzMzMTY2NjQzNTU1MjQ0NDQ0MjQ1NzczNDQ3
-NjY4MjM2NzoyNDMzMDQ2Njc2OzczMTQzNDM3OTo2NjY0NTM0NjY1NTc3NjczMjM1
-NDI0OjM0MzYzMjM1NzY3NTY0NzY0NDU1NTUzNDw0MzU4MjY2NDMzMzIxLzQ5ODM0
-MzgzMTA0NTY1NDU2NDQ0Nzc5Ojg0MzMzNTUzNTU0MzIzMzo4ODUzMTM3MzM0NTY2
-NTUzNjU1NjY0MTI0NDQzNjQ1NTQzMjMzNDMzNDY1Mjc1ODg0NTM2NzQ1NDIxNDQz
-NDQyOTg2MjAzNTM1OTU0NDM0NDM2NTIyNzM0NTM0NDUzNDQ0NjQ0Nzg0NjY1MjEz
-NDQ4ODIxMzM1Njg2NDIyMzIzMzY3NzU1NDU1NzY1NTIzNTc5ODY1NTM0NDM0NjU2
-NTUzNTYyNzY2NjQ0NTQzNjc1NTQzMTU2ODs2NzY1MjMyMzI1NDMyNDQ1NzU1NjUz
-NDY4OTg2NDc2MjE0NDQ1Nzg0NjU1Ojk4NzU1NTQ4ODY2NTIzNDM1NjM6ODQyMzQ1
-NDUyMjIxNDY0MjM1NzY1NDIzMjU1MzI1NjY0NzM0NjM4NzY2NjQyNjc0MzEyMjA0
-NTQ0NTY5NjY2NTQ1NDU0Njc2Nzc2NTUzNjg3ODg7OTo5Nzg1Njk4OTg3Ozk5ODU0
-NTg2NDY5OzpAOTY3NDY3ODY2Ojw5Ozk8PTk5ODc+Ojw3NDY1NjY2NTU2NTQ1Ozw6
-OTs5Ojc2OTo6OTk5OTw6ODU4Ojo4NTY3PDw4Njg4Njg1ODc7PDg4QD1COjs6OTc0
-Mzg4QT04NTk8NzY2OTk2Njc8Ojg3NzY2Njc4ODg5OTo2NDQ3Nzg4OTo1Njc4Njc3
-NjU3OTg2NDg6NzU0NDQ4NjY4Njg2Njg4NzU3NjY2NjY2OTc0MzY2NjY0NjQ1NDQz
-MzU3Nzc3NjY3NzM1Mzc3NTY2Nzw5Njg2NTMzNjc4OjU2NDk2NjY2NTc3OTg4ODc2
-PDs2NDc2Njg4NzU3MzQ2NzQ2NzY2Nzg4ODY3ODs6NzY3ODs5Nzc5Nzg3OTk4ODk7
-Pzo7Ozs9PT45OTw5OTk9PTo4OTk8Pz88PT8+Oj07Ojo7Ojo5OTo5ODo7OTo7ODs8
-PUA5OTw+PUE/O0BBRkJDPk1FNkFJkLzI0djd4OPl5ujpQ0VCQD0+Oz0+Pj44Ojg7
-Ozw4NTU4Ozk5ODk5OTo5Ozs7Ojs5OzU1ODg5OTg3OjU2NTQzMjM4NjY3ODc2ODU1
-NTM0Nzg7NjY3NDM5NDU2MjQzNTc3ODY5NjM1ODU1PDo1NjQ3NjM6Pjo1NDQ0Nzc0
-NTQ2NTY2OTkyNDQ0MzQzMjA2NDQ4NTUzNDQ3Njc1NjY1NDQ0NjY4NTU1NDQyMzU2
-NzY1NDU0MzIzNjQxMzQzNDU0NDQyNDMzMzI0MzU2NzUzNDMzNjY1NDMzMzc2MjUy
-MDI0MTIzMjMyNDc2NzczNDQ2NTUyMjIyNTU1NDU1Njg5OjY5ODQ0MjQyNDIzMjM0
-NTQxNDQ2Nzc3NTQzMzE3ODc2MjEyMTQzNDMzNTU2Njg2Nzg0NjU4NTU2NjY5NjU0
-NTc4ODc0MDIyMzIzMzo8NTc2NDU0NDIzNjQ2MzM0PjY1NTY0MjQ4NjU1NjU4Njc0
-NDc6NTM1NDU3Nzg4ODg3PDg2ODc0OTYzMTQ0MzM1OjM0OTYzMzU5OjY0NjM0NTI4
-OjY3OTY7PDExMTUyMDU2Njs1MjM2NTU4NjM1Ny8xNDc2NjIyNDUzMzU1NTU0MzQ0
-Njg2OjY0NDYxMTAwNTc0MzU3NjY1MjIvMDQyMzU0NTIzNTUyMzI0NjM0NTM1Ni8y
-NDQ0MjQ0Njc3Njc0MzY0NDo0NTQ2NTIwNzQ2NjQ1NTQyNDU0MzM0NTY2MjE0MzM5
-MjM3NTQyNDY2NTU4NTYyMzQ0MTIyMjQ4NzM1NDEzNjU2NTU1NTMxNTc3MzI1NTM0
-NTQyNTY1NDQ0MzU2NzU2NjQxMzMzNTM1NDUzMTYyMjU2NDM1MzM1NDU2MzM0NDc2
-NDU2NTU3MjY1Njc0MzQyMzI0NDM0NTM3OzY5ODY0MzE1NjczNTk7NzU0NTg2MzU0
-NDQ0MjU2NTU6NTIzMjQ0NTU0NDQxMjQ0ODQzNDM1NTI1NDUyMzU0NzQzNDQyMzQ1
-MzY1NTAzMzI0NTU0MjczODc3NDQ1NTExNTc3Nzc3NTIzODYzMzY1NDczNDQzNTc2
-NTc2Nzs4Nzc1Njg1NzY3NzY1NjY3OTk3ODo6OTc3OTs6NTU4Nzc3Njg4Ojo6OTc4
-Nzk4OTY2OTs4OTo2NTU1Njc3NjY2OTs5Nzk6Ojk6OTU2Ozo5Pzg5ODU5OTY4OTs4
-PTg2Njc4Ozg2Nzk4NTY3NzU5ODk3OTc4NDc4OTc2Ojs4NTc6Nzc1Njc4OjY4OTo3
-NzU4Nzc3NzQ2NjU2NTQ1Njc1ODQ0Njk3NjY2ODg6Njc5OTY6NTU0Nzk5Ojk4ODU4
-Nzc4NzU1NTc3Nzc4ODg6NzY1NDk5OTY0ODc4PDc4OTg4OzY5NjM2ODg2NTQ3NjY2
-ODg2NzQ0MzY4NjY2NjQ4ODg1NjY2ODk4OTk3Ojk2NTo4ODk6OTk5ODc3NDMyNDY4
-Njk6Nzo9PD08PDo2ODc4OTk4Nzg6Ojo5Ozo6Pjw6PDo2OTY5Pjw5ODc6Ojs8PD48
-PEBAPT0+Ozs7Ozk6Ojs4Njk9PTk7Pz46OT48PT8+Pjw9PT09PkJAT1NFRUyUvMjS
-19zg4+Xm6OlGRT87P0M+QT9EPTk1NTg3ODg4Nzk6OTo4OTs4Mzg4OTg4Ozs7OjQ5
-PDY4Ojk3NDEvMTI0ODYyMTM1OTg1OTY3NzY2NTY4Ojg1ODg1NjU2NjYzNDU2OTg2
-Nzc2NjI1NjEyMzQ0NDY4Ozs2OTQ1NjM0Njk1Mzg4NzE1NDQ0ODY3NDU3NjQ1NDQz
-MzI0NTQzNjY1NjQzMjM0MDIzNDQ2NzQ1NDM2NTMzMTM1NDU0MjQyMjQ1NDQ3NjIx
-MzQ0NDU1NDQzMjE0NzYzNDEyMjE0OTY0MjAwMjMzNDY6ODU1MzQzMzM4NjM0NDEz
-Nzc1NzQ1PTg3NzY3NTI2NjY0MzM3NTY2NjU1NTQyNTM1MjU2OjY1NjUzMjMzMzU4
-Nzs4NDY4Ojs4ODYzNDU1NDY1ODU3NzMzMzQ0NjU6Nzc0NDQ0NDY4NzYyNTE1NTMz
-Mzs1NDQ5Njc1NjQzNzY0NTY7NTY2NTgzNzs2NzQ1MzY3Nzc4Nzg7NzM1NzU2NDM1
-MjM0MzMyNzg0MzQ0MzMzNDQ3MjY1NjY3NzIzOzo1NDMwNTU1MjQ1NTU0NTQ1MzQ0
-MzY2ODQ5NDM1NTQ2NzY0NjQ1OjY5NDM0Njc3Njk0MzEyMjEzMzUzMjU1MTM1MzMv
-MDIyNTIzMzc1MzQxMTI1MjA0NjU2NzU2NzY2MzU0NDQ0QzQyNDQ8PzY1NTczNDU0
-NTo0MTI0NDQ2NjQ1NzM2NTc2NDM2NTg3NjM0NjQ0NTc2NzMzNjMzMTMyMjQ1MjM2
-NzYzMjI0MzQzNDY1Mzg1NDMxNDY2Njc5NjM0MjM0MjU1MTU1NDU0NDk1Mzk3NDEx
-NDg2MDQ2NTg1MjQ1NTQ1NTM1NDM4OTI0NTIyLzA2Nzg2NTc0NzIxMjAzNDU0Ojk5
-ODY2NjY2NzY3MzQzOj41MzQ3Njc0NDY0NUE2NDU0NTY0NzM3NDQ3NzQ0NTI1MzU0
-NTY2NTMzNDYzNDMzODU0NTU1NDU3NzU0NDY0NDY2NDQzMjY0NDY5ODo3ODU2NTQ0
-NTY1Njk5NjY2Njc0NjY1NTY3NTM1ODc1Njc6Njc3NzY2NTY3OTc0NjY3NDU4OTk4
-Nzc5Nzg4Njg5Mzg5OTg3ODY3Nzg5Ojo4NzY5NjU2Nzo8OTg1NDQ4ODk3Nzg1NzU5
-Njg5OTo4ODg5ODhAOzY1NDc5OTs4OTc6OTU1Njc3OTg2Ozg2Nzc1NTg5OjY3PDg2
-Ozk3OTo5ODo1NTY1NTc4Nzs6OTc6Ozk2OjU2NjQyNDU2NTU3NDY2OTg1NTU0NTQ0
-NDk5Nzc2NzYyNDs4NjY1Njo6Ozc3NzU2Nzo6NzY1NTU4NjY6OTY7NjY1NDg2Nzg3
-NTM3OTY3Nzk2Njo5NjU3ODc4OzUzNDI1Njc0NDU2Nzg6ODc5Njc1MjEyNDU1NTQ0
-OTc3ODk7ODY2OTs9Ojs6ODY6ODo4Nzg/PTk5Ojo4OTo7NzY3Ozo6OTs5Ozw5OTo6
-PT4+PDg4Oj08PDo7Ojk8Ojk6Oz07Oz0+PDs7Pjw7Ojo7PTg4Nzc2ODc0ODc3Ojs7
-Ozw7Oj49Pz5BQT9CREVISkdKUI+7ydLY3eHk5ubo6UlHPj0/Pzo8PTs6PDk4OTg5
-Ozc6PTs6Ojo5ODU3NTM0ODo7OTk/Ozg1NjY2NDM0NDc0NTc4ODc2NjU3ODY0NDMy
-MTU1NTM2Nzs9Ojo3NTY4NjU2NzQ0PDM1Njc2MjQ1ODQzMzM1NjU0MzMzNTYzMjM1
-NTc0NDg2MzQ1NjYzNTUzNDU1MzU0NDU2NTU0NjgzMzUzMjIwMDM4NjMzNTc2NDUz
-MjM0NTEzMDM0NTQ2MjQzNTY5NTEzNzY0MzU3NzM1NjQ1NDUyNDc1NTUxMjMxLjE0
-NTMzNDMzNDY2NDUzNTIyMTEzNzM3NDU0NjY1Ozc3NDU3NDY0ODY2MjY2NjYxNTU0
-MzUzMzU0MzUyNDY1NzU2NTM0NjQ4NTc3Ozs3Nzg5NDY0NTg3NjQzNDQxNDY2MzIz
-MzM0NDQ2NzQ1NDI2NTk2NTc1NzQ2MjQ0NTU1NzY6ODU2PjQ0Mi8yNTg0MzU3OjY1
-NTc3NzUzMjMzNTc0Njo2OzY6Ozw4ODY2MjU2NTc1MjExND41NDQzNjQyNTQ0NzQ3
-NjQzNjY1NDQ0Nzc2MzY1NTMyNDMxMjUwMzIyNDIzNDU4ODU2Nzc4NTc2NzQ1ODk7
-NzY1MzU1NDUzMzMyNzQ2OTg4NDIxMzIzMTExMjAzMjEzNDQ3MjIzNjYyODU2NTM0
-NDM3NDY2NTIyMjEzMjIxMzY0Qjc6NDM2ODc1NjY3NTIzNzM0ODU3NzU0NzQ3MzIy
-MDQzNjM0MzQzNDY0ODc5ODUzNDMyNTQ1NTU3NDU0MjEzMjIzMzMyMzQzNDM2ODcz
-MjI2NzUyNDMyNDQ1NDMzNjM0NDQ1NTI0NDQ2NTM0NDg4MzM0Njc2NjU4NzY0MTIz
-NTU1MjM5NjQ1ODg3NjM1MzUyNTg3NTQ1NTY4ODk1NDQ0MjQ1NDY2NDg4Njk3NjQ3
-NzYzMjQxMjMyMzU1Njk4Njc2OjkzMTAzNTM0Njg3NDQzMzEzMjE0Mzk3NDU2NjY0
-Njg5ODY6NTM0NDQ1ODk4Nzo5NTQ2NDQ1NjU3Ojc6OTc1NTU2NzU0ODg7Njo4NTg2
-NTU1Njo4NjY2ODU3NjU2NTY1NDU5NzU1NTc4NTY3Ozs7OjtBOjc5OTU4ODk4Nzg2
-OTY2NTY7PTg1ODc3Ozg4Nzk1Njo3NTY2Nzg4Nzo2NTc3NjpJOzg6Nzc8NTY3Njg1
-Nzc2NTk6ODQ1ODg4Njo5OTk3Njg7Ozo5Ojk4Ozk5Nzo5Ozs4Njk5OTs7OTo5Ojw4
-Njc1NDU0NTU1ODc1NTg1Njg3OTgzNTc7Njg4NzY1NTc2Nzc0Njc3NDQ3NTU1MzU1
-Njc5Nzg6OTg2Njc6QTk2NzY5NjU1Ojc4OTY3ODc4Ojs3Njg4ODc3NjY0Njw3NTQ0
-NjMyNDc4NzY1Njg3Njc5ODY1NDY3Njc3ODk3Nzg4Ozg4OTg3OTc6Ozs5OTk6Ozo7
-OTk5Ozs4Nzg5ODc3Oj05ODg5OTo6ODo4ODc4Ojo5OTs9PDo5OTk5ODk7Pj08Pj0+
-Pjs8Ojs5Njc3NTc5PDk4NzY4PDk3ODs8PTs9OTk8PD4+QENNQERFQ0ZPkLzJ0tnd
-4ePl5ufpRT8/Pj89PD87ODo7PT04Ozs7Oj09PDo3OTk3NDg5OTk6OTs8Oj06PDQ5
-OTQ2ODU5NTg4ODs4Mzg3NTU2Njo5ODo4Njg5Nzg3ODc1NjY1Nzk6NDY4NzQzMjQ2
-NTY6NDM0NDQ1NTMxMzQ0MzU0MjM1MzA0Nzc3ODo6ODU1NTQyMzEwMTQ2NTQ1NzI0
-NjY2MzU1NTgyNDM1MTE2NzQ1MjU1NDg0MzUzNjI0NTMzNjY0NTcxMjIxNDY2OTQz
-MjM0MTQ1NDQ1Mi81NjU0NDIyMjI1MjI1Mjg3NzQzNTc1NzQzMzU0MjU2Njc1NDg4
-Nzg4NjQzNTU2MzQ0NjIzNTQ0MjczMzQwMDEzNDY2NzUzNjY0NTY3NjM1Njc0NDQ2
-Mzg2MjQ1NTc1NjY0MzIyMzQ1NTQ1NzQ1MzQ1NDQ2NTU1NTc3NTQzNzg0QDs0MjMz
-NDY2MjM1NDI5NzIxNTM0NDM4ODk5NDM0NTg0NDEyNTc5NDQ2ODc5Ojo5Nzg3NjM5
-MzY1NTExLzExPDM3NjQ3NDE2NTEzNTU1MjQ0NDQ0NTQ1NTM2NDQ1NTMzNTU0MjEy
-NjU0MjM1NTY5ODI0ODc4ODc4NTc3Nzc2ODk2NDU1NjUyMjQ1NzY4NjYzMS8xNjMz
-MzQ1NDQzNDEyNDQ0NzMzNTY1NDU1NDQ0NDIvNDI1NjY3NjY3MjEwNTM0MzMzNTc0
-NTU2Nzc0NTY0NTU2OjU2NTo4NDMxNDg4NDY1PDc0MzA1OTk2ODU2NDIyMTQyNDQ1
-MzU1NTQ0NDYyMTIzMDEyMjExMzIxMzc1NDMxMjUyMzMzNTQ0MjUwMzQzNTQ2NTIw
-MjU1NjUyNDc2NjU2NDI0MzQ1MzQzMjQ5Njk0NjQ2ODo3NzU3NTI1MzU2NzU5NjU1
-Nzk4NTQ0MzM2OTY2NDM1NDU0Njc2NzU3NzQyNjUxMjQ1NjQ0NjY4MzM0MzU0NTQ0
-NDU0NTQ1MDExMjU2Nzk4Njc3MjM2NTU3NjY3NTQ2ODM2NTg6ODg4ODY1ODc2Nzgz
-NDMzNzo5NjU2NzU2NzU3NzY3Ojc4Nzc5NDQ2Njk5NzY3Nzc6ODo3ODc2NzU5NzY5
-NTU2Nzc4NDg5OTk3Oz41Njk2OTU3OTo4OTc6PTo4Nzo5OTs2ODc4ODk5Njc4Ojg1
-Njg3ODc4OTk9PEVOSUM8OEM4ODpDOzk5ODg6Pjw4Njc5OT05Ozo6Ojk7ODo4Nzs4
-Ojs6Nzg8PTg7PTo7ODY2Nzo6NzY6ODQ5ODg3Ojk2NTUzNDY3NTc4OTg2Nzg4Ozc3
-Njk6NjU1OTk2NTY0NjY0NDM0NDQ1Mjg5NjU1NDU0NTY3ODk4NjQ3Nzg3NTg4NDI0
-Nzc1NTQ1NTU1Njg1NjY2OTg5ODk4NjQ1NTc6Ojk3ODc3OTc3ODc5Nzo4ODk3Nzc2
-NzY2NDc4Ojg5OTg4OTg6Ojo2OTo9Ozk6OTg5Ojk7ODc4Njg2Nzg3NzU4ODc4Ojs4
-Oj08PkE9ODg5Oj06PDs8Pj48Pj89QD06Ojg2Nzc4OTo8NzY3PTk6Ozo0ODg2OT49
-Pjs7OTo9PUE/QEI+Q0RCPkqHu8nS2d3h5Obm6OhHQkA/Pj09PUA9PDk9PDQ0Pjw9
-Nz07Ozg4ODc4ODs7ODk2NTw+PTo8ODw8Pjc1NDk8Nzc2NjQ6ODk5Nzg9Rjk6Nzg6
-NzpANjU2ODY1NzY3ODY1NDQzNDMwLzMyMzU3NjU0NTQ5NDUzNDY1MjI0NjY1NDI1
-NjczNTU1NjQzMzQ1NDA0NjMzMzY2Mzg4MzQ0NTExMzI2NzM1MjI0NTQ1MzIzNTQz
-MjU1NjY1NjUzNjUyMzE1NjUzNDUzNDI0MzIyMjQ1MzMyNjQ1NTQ0MzYyMjM2NDI0
-NDU1OTY1MjY1Nzc2Nzc2NTQ0NDIwMTEzNDU3ODU1NDIxNDY3NzU1NzY3Njc1NTQ2
-NTM3NTg0NTM1NTAyMTQ1NTU1NjYzNDAxNTQ0NTQ0MjQ1MzMzNTQ1ODc4NTUyMzc2
-NzUzMzQzMjY3NjU1NDQzNDZAPTc0NjM2NTYyNDQyMzs2NjM2NTY2Njc4ODg1OjY3
-Njg4NTM2ODU2ODY3NTc3ODQ3QDs0ODk3NDc1NDQzMTQyNjQzMjM1NjEyNzg4NDQ6
-ODY2ODQzMzI2Njc2NTc1OTc1NTM0OTY1NDIyNTMyNDc0Nzc1ODg3NzQ2OTs4NzU1
-Nzo3OTM1NTU1NjU1MzYzMTk3NjQ1ODkyMjM1NzUyMzEzNjU0NTUzLjAyNTU0NTQ1
-NDQ0NDQ4MzMyMzU0NjY0NTM0NDE0NTQ0NDc2NDU2Njc4NjY1NzU1NDU1NTUzNTMy
-MzUzNDMyMjQ3NjY0NTMzMjM2NzM1MTIyNDI2Nzg1MjM2MzMzMjIzNTIyNTQ0NDY2
-MzY3Njg2NjMxMjI0NDMyNDI0NDIyMzUzNDM1NDQxMzQyMjM3NjYzNTI2MzY3NjQ0
-Mzc2NDQ2NDI1NTY1NTU2MjIzODUzNTQ0NTUzNTU0NDc1NDExODgzMzUzNj85NjY0
-NDM3NjQ0NjY0NDM1NTMzNDM0MjMzMjQ1MzM0MzM0NDU2NTM0ODk2NTQzMDEyNDM1
-NTc4NTU1MzQ1Nzc1NTUzMzQ1MzQ0NTY3NDY3MzY5ODQzNjc0NDM2NzY1ODw4Nzg5
-NjY3OTk2NTU2OEQ2QTg0Nzc2NjY3OTk6Ozs4Nzg5OTk7Ojk5ODY1NjQ1Nzc5PDc0
-NDg6ODc3OTk5NzU3NTU2ODc0NDQ1Njk1NjY4Nzg2ODc3QlVTVFBAOzg5PEU7Ojg0
-Nzg3OTQ4Nzg6Ojk5ODg7ODs5ODo6Qzk2NTg5Ozo6Ozo8Ozs3ODs5Ojg4Ojo8OTg5
-Ojs5Njc3ODk4Njc1ODo4NjQ1NTc4Njc3ODc2MzU4OTg4NjU3NzU0NDQ1Njk4NjY3
-NzU1NDMzNDY3Njc1ODc1MzQ2Nzo2NTg5NjM3NDY3Njc3NTg9Qjc2Nzg4NzY4NzM2
-NjY1NzY5OTczNDY1Nzk6OTw3Njg4ODc2OTY3ODg2Njc2NDo9Pjo9Pjo4OTs9PDo5
-PDw6Nzg5OzY1NzY6Ozk6ODg4OTo6Nzg7PT49PD89OTo4Ojw8OT5BPTw8OTo5PDo6
-PDw5PDw8PTs7Oj89PDo5Ojo2Oz8+Nzo6PD4/QEI9Pj1AQEFDRUNDS4m8ydPX3eHj
-5efo6ENEQkJDPDo+QkJIPzw8OTU6PDw6Ojs9QDo1ODk0ODk3PDs2ODg5ODg3ODk8
-ODg5Pjk3Nzo6ODY3OTk6Ojw7Nzc4NTIxMTY6NjU1MzY4ODc2NTQyMjAyMzQzNDAw
-MjU3NjY2Nzg1NTY0MDI2NjM1Njg0NDI1NDQ1NjU1NjY2NjQ2ODc0NTQ3MzIyMzEy
-NjIxNTU0NDUyNDMyNDMzMDAyMjIyNTQ1MjIwMzEyNDU1NjMwMTQ6OTUyNDUzNjM0
-MTMzMzEzMzU0MjA0NjQ2NTEzMjM1OTk1NTY4OTUzMzc0NDY5NDM0NTM1NzoyMjI0
-Nzc6OzYzNTU1NDQ3ODozNDk7NjAyMzY2NjY0NDU1MzUzNjMyMjU1Njg5ODczNzQ1
-Nzg1NDUxNDQxNDMzNTY3NTc2NjQzMjQyNDQyNDU1NTU3MzY2NjYyNzQ0NTg4NTY1
-NTU3Nzg2NzYyNDY0NDY5Nzg6NDI3MzQxMTU1Njg0Mzk0NDY2NTY2NzY5OTpDNzM0
-NTU4Ojg0MzY1NzQyNTU4OzY5QzU1NjY2MzQ0MjIxMTY0Njk5ODo4NTc2NTY3NTQz
-MzQ0Nzc1NjY1NjQ2NTQ1NjM3OTg1MzU8NzU0MjU1NDU2NTU7NTk2NDU1Nzc4NTM3
-NjY3MzM0MTQ1MjU1MjMxMzMzMzMxMzU1NjU2MjI0PTI0NDQ1OUA2NTQzMzI0NTIz
-NDMyNDM1NzU1MzQ2NDU2MzMyMjQxLzM1NTY0NzY2MzI1NTY0MjM0MjQ2NTU2NjQ1
-NzY3NTg4NjM5NzQzMjg2NDUzMjEyLy4yNjUzNjc0NDEyMTEzNzcyNDQ1Nzc1MjY2
-NDQ2NjM0MzY3ODg2NzY1NDQ1NTUyMzY4ODU0NjIyMTEyNjQ4NjM0MjEyNjg4NTc1
-MjU4ODU3ODYzMjQ2NTUzMjU1NzQ3NDIyMTMwMzY1Nzg4NzU1ODU0MjYzMTIyMDQ2
-NjUyMzU0MzMzNTg3ODUzNDU0NDM0NDQ0NDU2NDQ2Nzc5Nzc3NTU0NTU2NDMyMzY1
-NTc2NDU6ODU1NjcxMTc5Njc2NjU0Njk6NzU0NDY3NTc5PTs2Nzk3Njc3Nzo6NzU6
-QTc5OTo5O1dQPTk2ODc2OTY0ODg6PDo4Nzk2Njg4ODk3Njc3Njo4Njc8ODg4OTg1
-Njc1NjY3Nzc5OUE2Ojo3NkFJPjQ3NzY3OTk4PDo4ODk3NzY0OTc4PDk5Qjk4Njc5
-Ozg4Oz07Ojo7Ozk7Ojo0OTg5Nzk6OTo5OTg2NjQzNT09ODg5ODc4NTU1NjY2ODo4
-Nzg1NDY3Njc2NDY1NzU1NDM2NTY3Nzc2ODg3NTc2NDU4NTczNjU2NDc3NjU0NTY2
-NzYzNTc3ODc0NTg4Nzc2ODU0NDk1NDc0NDQ0Nzg1NTM1NzY2Nzg1Njc1NzY1Njk5
-OTw6Oz47OTk7ODo5PDs7Ozg4ODc3Ojo9Ozo3ODg5OTo6NzY7Ojs8PDk5OTk6OTg8
-PDw8PDtCQTw9PT08PDg5OTs6PkA9PkE/Ojg7PTo7OT5BPTw5Njg5OTc3Ozs5Pj1B
-Pz08PEE9QUFDQkFFQkBKkL3J0tne4uPm5+noQEFCQUI8ODpCPkNCPzw8Ozo6PDw4
-OTk4NjI2NzM5Ozk1ODk3OTo5OTg2ODc1Nzg7Ozg4NzQ2OjY0NTc0NTU2OTc2NzY3
-MzQ0OTUzMzQ0NjY0Njs2NDI0NTc1NDM0NjYzNzMzNDQyNTMzMjc3NzQ2NTc0NTQw
-MjIyMjMzMzU5OTU3ODUzNjQzMjM0MjIzNDMyMTM1MzIwMC8xMTM3NDI0NzQ2NjI0
-NTI2MjU0MzI0MTEzMzQ3NzYzNTIyMzM1ODUzNTQyNTQyLzEyNjU2NzYwMTY4NTE0
-MzY2NTg3NDUzNzMzMzU2NDMzMjQ1MzQxNDc8MzQxNjU2ODM1NDk2ODMyMzQ0NTU3
-Mzc2NTc0NDkzNDQ4NTc2NTU1Nzc1Nzc3NDY1NjY1MzMzMzU1NTQ2Nzg2OTU1NzUw
-MjIzNTM1MzUzNTM3Njc1NTQ1NDQ1Njg2NjU4Njc6NzczNDo0Nzk2Nzc2Ozg3NTI1
-MjM0MDE0NTIyMzAyNzc4OTU0NjM0OjU0NTY0MzU0MTIyNzU1NzY7OzIyNTU2NjMz
-MTI1NDM3MjIzNzY1NTU0NjM1MzE0NDMzMzMyMzIyNTMzMjM1MjU3NTQ4NDU0MjYz
-NjM0NTI1NTU0MjQ0NDQzNTY2MzMyNDI0NzMxMzU0NDM0NDI1NjQyMzQ1ODk1NDU2
-Njs6NzU2NTQ1NjYwMjc0NjU3NDYyMTMwMTMzNDc3Nzc0MzU1MzIxMjAwLzM2NjU2
-Njc1NzQ1NTM0NDIzMjE1MzY2Nzg1NzQ0MzQ1NTg4NjUwMzU2NTk2NTUxMjMxNDMz
-NTM0NTU0NjQ0MjIzMTM0MzY1NjU0OTU1ODU1NDc3NjQ2NDYyNTYzNDMxNTU0MzQ1
-NTQyNDY3ODU2NjY2NDI0MTQ3NDM0MjIzNDQ3NjUyNTk0NDIzNTY0NTg3NDU4NDU2
-NDYxNTQyMjY3NDQzMzQ1NTI0NjM1NDQ1NTYzMzM1PDM1NTU1NDMxMjU0NTU2NTQ4
-NTQ1NjQzNDk8NTQyNjY3NzUyNTU2NDQ0MzM1Njg1NDQxMjE2NTU2Njk3OTUzNDc3
-NzY2NjU2Mzc3Njo4Nzg3Nzg8Ojg4NTM2ODo3Nzc6WUhFOzY4NzwzODM1NTU2Nzg5
-ODg1Njg5ODg2Nzc4OTk2OD05Nzk3NzY6OTg5Nzg1OTc3NTg3NjY1NTY0Njc4NjY1
-NzY8Qj09Ozk3NDM1ODc7ODc8Ojg3OTc8PD45Njo7PDw6Ojk6Nzg8ODg4Nzg5OTo6
-OTQ1NDQ6Ojk4Nzg2NTQ0NTU2Nzg2NzY3NDQ3NDY1NjU4NTc4NjY2NTQ1NDI0NzI5
-Ojc5ODg0NTU1NTg3NjU2Nzg4MzU1NDM0NTc1NTk2Njc2Njk3Nz05NzY0Njo3MzI1
-Nzc4Nzc1Njg4ODc5NjU0NjY4OTc4ODc3NzY5Ojk5Ozs7QTs4Ojk5OTY3ODk6PDg4
-Nzs6Pzw7Ozo7Ozs4Njo6Pjo7Ozo4OTg7Ojw8QDo9Pzo6PDs2OTo5ODk6PDw6Ojk6
-PDo4Oz48Nzs7ODg5ODg4OTk7PTw8PTs6Oz4+PTxAPkBFRUE9Q0uPvMnT2d7h4+bn
-6OlAPUNBPT49O0FDPjw6Ojs9Pz08PDg3OTc3Ojo9OzY4ODY5OTk4OTo2NTU4OTg2
-Nzk7OTc0ODo4ODU3NDI1NTk4Nzg2MzU2NDU1MzY3NDUzMzQ0NTAyMzg1NzUzMjI2
-NjQyMzQ0NDUzNDQ1MjM1NDg0NTU0NDQ1NjgzNTU2Njg4NjMzNzk2NjYwMjEwMzAy
-MTA0NDY3NDQzMy80Mzo3NzExMzQzNDMzMDAxMjM1NDQ2NjIzMzM1NzY1MzIyNDM2
-OTYzMjEyMzQ1MjM0MjQ0MjExMjIxMDI0NDY3NjM1NTQzNDMzMzUyNTQ0NDY4NDQz
-MzU4Njg4NzQ1NjM1Ojo3NjU5NzY2NDQ1NTY5NjczMjM1Nzk4NTY0NzY1PTg5ODY1
-NTc1NjY1NTY0NjU4NTg2Nzo3NzY2NjQzMjM0NjU2MjM3NTc3MzQ1NDIzNTEyMzs3
-OTk3NzczNjY1OTU0NDQ1MzY1NTU2NzQxNTM0NDQyNDU4OTQ2NTUzNTMyNzg2ODM2
-NjY1NDU1MzM2NTQ3NztTNjY1NjU0MzItMTQ2MzMzMTAxNzcxLzAzMjQyNTIyMzE0
-MTMvMjE0MzQwMjI5NTU2NTQ0MzQzNDM0NTU3NjY0NjM4NDM3Nzg4NjQ1MzExMTEz
-MzU2MzczNjM1NDIzNDYzMTM0NjU1MzU1NTc4NzQ0Nzc3NjY3NDQ5OjY2NTI0MjIz
-NDU2NDQzNjUxMzIwMi81MjQ0Nzk0MjU1NjY1NDYzMTIzNDEzNjg2NTM2ODM1NjY3
-NDY1NTQzMjM1MjEyNzc2MzQ1NTY2NDQyNTUzMzI0NDU1NDQxMzY0NzU1MzU1NTU0
-MzQ4ODQ1Ojg0MjY0NjY0OTQ0ODY0NTo4OzY2NTc2NjY1NTY0NTQ5ODQ0NTMyMzQz
-MTA1NDAwNDY1NTU1NDM1Nzc3NzU2NjU0NjM1NDU1NjY2NDQ1NTc1ODc1NDQyNDMz
-NTY0NjY3NjU3NDQyMTc4NTM0NTc0NjM0NTc4NTMzMDU0NTY1MTE0NjM0MzM0MzAx
-NTUyNzUxMjM3OzYzNjU3Nzc2NTQ0NjU1ODQyNTU3NjY1Nzg3OTg3NTc3NzUzODs4
-NjY1ODo4ODY2NDM0MzY1NDIzNjc0Nzc2ODk3NTc3Nzg5NjY4Njc7PDk1NTQ1Nzs6
-NDg3OTc3NzY5OTc4Ojo4NTg7ODM3NTQ2NTQ0MjU6ODk6ODY3PDo6PDs5ODs6Ojs9
-QTc5Ojo4Nzg6ODc2NTY4ODo4Ozc3NTQ3Ojw4NjY2NTg4NjQ0NTo5OTk5NjQ4Njg8
-ODY2NDQ0MzQ1NzI1Nzo5OTc2MzE2NDg0NTY1NDk3NTU3ODg2OTc2NTY1NDU0Nzc0
-MjQ0OTY1NjU1NTk5Njo2NDc3ODM0ODc3NzU3Nzg4NjU3NjY7Ozg1Njc4Ojg7Ojc3
-Ojw6Ojo5Ozs6Ojg4Ozk5PDs6ODg3Njk7OTw7PDs6Ozo9PTs6Ozo5PD08ODk4PDs9
-Oz09Pj8+PDxAQUM7Ozw5OTs6Oj06Ojw7PDs7Ozw7OTg3NztAPT88PDs7PDw+Pjw/
-QEFAPj1WRkM/QDxAT5m+ytPZ3uLk5ujp6T5APEJBPz9AQUFEPTc2Oj5BPD0/OTc3
-NzY9Ojs5ODk6Ojo5OTU3Ozg0NDQ6ODc4NzU1NTU2QDs8NjQ0Ozk5Ozg2NDY4NDQ1
-NDQ2NTMzNDY2NjQzNjg4Nzg1Nzs5MzU0Njc1MTIzMzM1NTk2NzU0MzYzMzI6NTE0
-NDg3NjQzMzUwLjA0Nzc3NTczMzQ0MjI2NjU1NjY3Njc4NDIzNjUzNTU1NDUyMzMz
-NDQ0ODQzMzU2MzIyNTM0Ojo2MzUzMjM0NzIyMS8zNTc3ODUxMjIzMzIzNzM4NDIz
-NDQ0NDQ0NDg2NTU1MjEzMDE0NzQyMzQ0NTg3ODo5NTQzNDU7NzQ2OTg5NTU1MjM3
-Njg3OTMzMzU1NjQ0NDU1NTQ2OTg4Njg2NDo5NzY2ODY4NTY4ODQ3Njc1NTk2NTMz
-NTc1NDQ2NTUzNDY2NzUzMjIyMjQ1PTU1NTk8ODc1NTI1NTQzNzM1NTQ2MzU1NjMy
-NDU2NDY1NjU2NzM2OTQxNjQzNTk2ODM0MTM0MzU0MTQ0MzIyPT00NzM0NDY2MjYz
-NzY2NzczMTEwMS4yMDE0NjY3NTIzMjY1MzIxMjQ1NTMzNTc0MTQ1ODI0MzY0MjY2
-NDc1NDU0MzQ4NjY1NDMzOTc2MzMyMjY1MzM2NTI0NTQ0NTs0ODY0NTY1NDY0MzIz
-MjY1NTM2ODY0NDIyMzQ1NDY4NzUzNTU0MzMxNTc1NTU0NzEzNDQ0MzMyMzU1NjU2
-OTY0MjQ0NDQzNTg0NjYxMzY1NTI3MjQ0MzQyMzMyMzI0NTM0MzM2ODcyNzY3NTY1
-MjEzMzQ1NTU3NTY0NzQyNDU4NDQ0NTUxNDEyMTIzNTQzNTY0NTU2NjU0NDUxMjU1
-NjY2NTU1Njc0MzY7Nzg2NzY0NTI0ODU3NDIzNjM1MzY4NDQ0MTAzMTY1NjQ2NDIz
-NDU2NDU3ODU0NjU1NTY2NjQ1NjcyMTQzNDUzMzU2NTMyNTU3NTY0NTY2NjM1MzU1
-Njc2ODU3NjY3NzU0NDQ4OTg2NDQzNTQzMjQ0MzM0NDU1NTU1NTU2NDQ1NzQzNjY2
-NDIxNDQ1Njc5ODw7Ozo6Njw4OTc2ODg2NTg0NTU1OTk5OTk7Pzc5Nzc4NjU2Njg6
-OTk3NzgzNjQ3NTY7OTo5Ozg6OTg3OjQ2NTc4NzU3NjU0Mjc3OTg5ODk8ODc3ODY2
-NTY1NTU0Njs7Nzg4Nzk5PTg4Ojk5Ojs+OTg6ODo6OTg2ODg1Njg3NTY2Njg4Ojg2
-NTU0Njc1OTk7OTg5ODc1Nzg5ODc4OTg6NzY1NTY3NzQ0OTU0MzQ2NjU1ODY0NDM3
-NzY5Nzc3NDY1MjY3NTY3NTg4ODY3Njg7OTU1NzU3NjY3Ojw/PTk4OTo4Ozo7Ojg2
-Nzc4ODk1Nzc4Nzg7ODs8ODU3PDg5Njk6ODo6ODc6OTs6Ozo3ODg8OTs7Njc2Nzw8
-PDk4ODw6PD09OTw8Ozo7Ojg5Oz07Ojs9PTo7Ozo6PDo7PTo5Ojk8Pj09PkA9Ojo+
-Pzs7Ojk7PDc7PTxAPTo3Nzg3Nzg7Ozk8P0BAP0hAQT5AREJPl77K09re4uTn5+np
-PkE+Qj1AP0FCQz9AOjg/PDs8PTw7Ozc5Nzs8Ojg3ODo7Ozs6Njc2ODU0PzY0NTcz
-NzI0NTQ1Njc5Njs2OTg1Nzk2NDQzNTYzMjE0NTIzNTI1NzY1Ojo1MjExNjc4NjI2
-NDMxNTIyMjM3NzY3OTU1NTc2MTM3NjQyNTY7NDMzMzQ3MzI4NjM2Ojk2NTQ1Mzc4
-NTg3NzIzNTIzMzQzMDI3MzM1MzM0MzIvMTI1NDM0NTQyMTIyNjUyNzU2MjIxMTM5
-MjEzMTE1NTMxNDQxMTQ1NjU3NDg1NDIyNjUyMzY1MzQyNTYyNDczNDcyMjI0Njc1
-MTM1MjM3NzMyNTc2NDY1NjU1NDMzNDQ0NTQ1NzY5NzQ0MjIzNDQ0MzU4NjY4NzY1
-NTU0NDc8NTc4NzU2NjUzNjU2NTQ0NTQzMDAwMy8yMzg3NDQzNjM0OTYyNTQ3MzQz
-QTk1N0E6NjU5Nzw1NTI4NDM1NTY3NjMyNTU1NDUxNzY2ODM1NTUzMTIzMjM1NDU2
-MzUyMjQ3ODg1NTMzNDQ1MzMzNTY3OjczNTE0NTY0NjYzNDMzMjM2NjY0NTY0MjQy
-NTU1ODYzNTIzNTc5NTQ0NDQ0NTU1NjQ2NDQ4MzI1NjQ1MjU1NTY0NDQ5NDEyMzIz
-NDE2NDM1MjMzNDQzNTQ1MzI0NDM0NTMzMzMxMjQ0NDQxNzQzNjM1ODQ2NDc1Njk0
-MzU5NTEuMTQ2NTQyMTQxNDM1Njg4NzU1NDY4NjQzNDM1NDEyMzU3NjQ2NzU3NTU0
-MzIxMjY1NDUzMzQ1NjQ3NTQ1Njc3NDY2ODIyMTQ1NTM1NDY5NzY1MzM0MjMyMTI0
-MzMzMjMzMzUyNjs3MzQ0NDU1NTU0NzU2NzY2NTM0Njc2NTY3NTc0MjMyMzo4NDU2
-MzI7NTQxMzQ3NTU1Mzc1NTU0NDM1NjM0NDQ4NzY2NDQ0NTIzMzYyNTg1NTQ1NTMy
-NDU2NjU2Njc2NjUyMDU1NjU1NDU4NTI1NDM0Njc0NDQyNDU1NTA1NTY0NDMwMzI0
-Njc1Mzc2NDQ3ODU2OTQ3MzIzNTc2NjY0NDQ1NjY2Nzc2Nzc2NTg3NzY2NTc5NjQ2
-Njk3ODY2NTU3Ozo6OTo3Njk6Ozc3Njg4ODc2MzQ1Njc4OTo8PTo6Ozs7OTs6ODc3
-ODg2NTU3NzY0NzU3Nzg6OTc3NzY3NTQ2NTg5NTU2NTg5Ojg4OT08ODo7Ozk3OTs7
-PDo6Oj04Nzc4OTk4NTY3NTY5Nzk6NTk5Njc7Nzk4Njc4OTc4ODU1OTo6ODk4Ojo4
-NjY1NjY0NTUyNDQ2Njk4NDQzNzg2Nzg4NzU0NDU1NTY0NDc3MzY0NDg5NzUzNTo5
-Ojg4NjM1Nzg3NTU3NjY0Njc3OTU5OTk4OTg5NT04NjU5ODk7OTc3Nzc3Njc4Nzg4
-OTo4Njc3Ozk3ODg6Nzg4Ozk3Njc4OTo4OTw5OT88PT08PEBDPDg6PDxAOjg6PDo5
-PD09PTo5OTo9PkA5OTo7PDo7Ojc5Oz06OTk+Pjo+Qz8+Oj1AOj88ODs2Njg4Pjw9
-Pj4+QUA+QEBDRVKbvsrT2d7h5Ofm6ehCQT9AP0A9Qj48QkE4Oj47Ojw5Ojs7Njs9
-Ojo5NzdCOjxAOTgzNzc4NzY3NTY4NjU2NzM2NzgzNDM2Nzk2NzY1Nzg3NTg1Njc2
-NTQxNTY4Nzg0NjEzNDIyMjY1NTQzNTQzNDM2NzYzNTI0OT81NTg2MzQyMjIyNTU0
-NTk4ODc1NjQ3MzE0NTMyPz01NTU1MzU3NzM1NjQ4NDY0MjMyMTIxMjQ2MzMyMjI0
-MTI1NTM3NTM0MjA2MjEyNTY0NDY3MzY0MzEyMTExMjYyMjQzMzQzNDMzNjUzMzQz
-NTQ0MzUyMjM1MjIyNjcyMTIzMzY2NTIyNjc2NDQ0NzQzNDc1NTU1MjMzMzQ2Njc1
-NzU4NjY1MzQ0NTY1NTU1NDY4Nzc7NzU1NDQ1NTY5OjU1NTk1MzMyNTQ4NjMzNjUx
-MzIxNTYzNDQ0NTY0NjQ1NTU5NjMzMj08NjY2Nz46ODg2NzMyMTU1MjE0MjQ0NzYz
-MjY4NjYzNjY3MzIyMjE0Mzk2NDY1MzM0MzA0NTQyNDM1NjU3NTcyMjMzNTY3ODc2
-NjM0NzY0NTc4NTMzNTU4OTQzMzQ2NDQyNTU3Njc2Nzg0ODg3ODQ4ODQyMTIzMTQ0
-MjQyMzU3MzQ1MzI2MzEyMzQ2NDIyMjM1MzY3NTMzNTQ0NTMxMzEyMjM1NzU1OTQ1
-NDMxMTQ2MzMzNjU0NDIyMjI0NTQzODMzMTIxNDQxMzE0NTE0NDk2NTY2NDUzMzUz
-MzY1NTY2NDQ3NjQyNTM2NDU1Nzc2NTQzMjIxNTg0NTU3Njc0NTQxNTYxNDQ5ODg1
-NjU3NDQ0NjY2NTQ0NDQ0NDIzMjM3Nzk2MzAzMzExMjI1OTU1NTQ0NTU2NzU0MzU0
-MTE0MjU1NjI0MzM1NDU1NTM0NzcyMjQ2NDw2NTY2NDIzOzc1NDM0NDc2NzU4OTc4
-OTk1OTQxMzQ0NDYxNDU1NTUyMzI1MzIwMjIyNjY1OTUyMzU7MzIzMzQ1NDY4NDY2
-Nzg2MzQ0MTIwNTI1ODU0NjUzNDU0NTQ1NDI3NTc4ODY1Nzg4ODU2ODQ1NTY3ODk1
-ODo4OTg2Mzg7NjYzNjc3NTY3NjU3NzY1NDY0NjY3Oz00ODc7OTs3NDc4NTk5NjY3
-OjcyODg2Nzc3Nzk4OTk4OTs4NjY2NzQ2OTg3NTU0ODg2Nzo2NTY3ODc3OTo5Nzg3
-ODc3ODlANzo4ODs5NTk3ODk5ODk6Ozo5OTs6Nzg6PDo5Nzg5NjU0Mzc4OTk5Ojw6
-Ozo4NTU1NTg2NzY3ODY3ODo3Njg3Njc4Nzg2NzY1NjU0Nzg0Mjg3NzQzNjk2Nzg3
-NTY1NDQxNDU0NTU1NTk2Njc5Njc3Ojk2NTY4OTk4Nzc2ODo6PDs6Ojs1NDc5OTg3
-NTc6PTo2Njk5OkA8OTc5OTk3OTo6Ojg4ODc3Nzk6Ojo5OzczNzg3Nzg7PDk4NzlB
-QD45Ojc9Pj8+Ozo7Ojo9PTg3NTk6Ozw9PD05ODs5OTs8OTo4Ozk8QDw5OTs5OTk6
-PD07PT4+QT89Pjo8Nzc4ODs9PzxBQT03O0BAPENHQkVOXp/Ay9Pa3uHk5+jo6UVE
-R0U9Pjw+PTg6Ozg7Ozg6ODo2Njk5PD86ODs4ODo6OTs0NDc3Ojs8ODc5Njs9ODc3
-NjU6Njo5Ozc4NzczNTUzNDQ2NzQ1NTQyNDM1MzU5Njc0MjAyNjU2MzU0MjQyMjY0
-NDM0NzQyNTM3NzM0NTM1MzQ0MzMzMS84OjY0MzQ1NTc0OTQ0NTIzNTQ2NzY2NjQz
-MTM5NTY0NDIyMDQzMTMzMzU0MjAyNDMxMzMzNDIxMzIxMjM0Mi8yMTY3NjMyNDQx
-LjIxNTM3NDQ1MzQyMjU0NTQ1NTU1NTQyNTU1NjUzNDU0NjQ2NDU3NTU2NjQzMzY2
-ODY5OzY1NTMyNjU1OTc0MTMzNjU2NjU0NDU3NTM1NjI3NjQzMzQ0NTY2NjY3ODky
-NDg2NjU4ODQzNzc2ODU1NzY1NDc3OTU0MzIyMTU1NDM3NTc3NzY4Nzg0NTMxNDY0
-MzMxNjg3NTQ2NTQzMTEzMjEyMzM2OTYyNDQ0NDMyODc0NzQ0ODY1RjszNTUzMjQ1
-NDM1NDIyMjMzNDMyNTk3NDU0NjY4NzI0NjM0NDw4NTE1NjU1MzQ2Njc2MTM1MjUz
-NDQ5Ozk1NTI0OTc1MzU1NzMyNDI1NjYzNzQ0NjQ0NTU1NDMzNTc4NDU3MzQ0MTIz
-MTc2NDU2MjM0MzQzMTIyMjM2MjQ3NjUyMTMyMTA0NjUzMDIyMTIxMTI1NTMzNDMz
-MjUzMTMyMzM1MjQzNTk5MTM1NTY2NjU2MzIyMzU5NTMzMzM2NDEyMTQzNjQ1NzY3
-NjMzNDU2ODg4NjYzMjYzNDMzNjU0NDU3NTgyMzU0NDY3MzU0MzY4NTM2NTU2NTc0
-NTQ3NjIxMTMzMTIxMTQ1NTUzNjY1Ni8zNDE1NzQ0MzI1Njk1NjY3Njg1NTU6NjY0
-MTA0NDc2NDMyNDU2NzM3NDg1NDc4ODc4NzU2NDM0MjczOzMzNzg1NjU0NDEyLzAx
-MjQ0NzYzMjU3NjU2NTUzNzU1MzQ3OTY2Nzc1NTY2NzU1NTM1NTMxNDYzODU0NTM0
-ODo4NjQzMjIyMjI0Njg6MzU1NDY4NjU6NzY3NjU2NDg4NDc3NDU3OTc2NjU2Nzg1
-NjU2OTk6OTM2ODg3ODk2Nzs4Njc3Nzc2Ojo5NTc4Nzg1OTg5Ojg3ODk5ODY0NTg9
-Ojc0ODs3NDY0NTY5NDc1ODg3Nzo5ODg5Nzk6OTw6OTk4OTc2Ozo1NjY4OTk5ODo6
-PDk6OTk6PDo5OjY5ODg7ODk5ODY4PDw6Ojg5ODk5NzM0NTc5Ojg5Ojc5NzY3ODk3
-ODg5NzY3OTg5NjU1Nzs8Ozk1Njc3Njc2Nzg2NTg1Njg5ODY3ODc2NTg5ODc1NDc5
-NzY5QDk3NzkzNTg5Ojo6Ozs3NTg4ODk9OTk4Nzk6ODo5Ozg4Ozo6Ojs6OTk4ODY4
-ODs8Ojg6ODo6ODo5ODc2ODc5ODk4OT47PT47PDs8OTs5ODg3Ozk5ODY5OTg4Ojw7
-Pj88PTw7ODk5Oj45OTk6PDs7ODg4ODw+PDs4ODg8OTw7Pzk7QDo4PD09QT0/P0BB
-QEFAPFBCW0hapMDM1drf4eTn5+npRUJCQkU7PDk5Ozw7QD47Nzg6Ojs3ODs8ODgz
-MjQ3Njg3OT89OTg4NTU3NTY2ODY0NDM0NTQ2NjY6Nzc3NTc4NTQ2Mzc3NDM3NTQ1
-NDMyNDQ5NjMzLjAyMzQ1NjY3NTM0MzQ2NTg5NjY3NDQ2NTUzMjQ1NTU5NzYyNTU5
-OTg5Nzo6MTQ2NDU1MzIzMzM0NTQ0NDM2OTY0NDIxMDE1NDMxNDQ2Mzc1NTUyMzI2
-NDIzMjExMTQ0MjU2MzM1NDU2NTc0NDQyMDEzNjQ1MjQzMi8yMzQ0NDEyNDQzNTU1
-NDQzNTcyMTY3NDY0NDY0NTg0OTQ3NzIzMTc2NzU1NTQzNzc4NjIvMzQzMzU0MzI6
-ODY1NTs8NDg3ODg3NDQ0MzU1MzM1MzQ2NTU0NTc0MjQ4NjQ1MjU1NDI0NDI0NTEy
-MjQ2NTQyLzM2Njk4Nzg2Nzc3Nzg0MzU1NTc1NjY1Nzc3NjIxNTMyMzM0MzAxNDQ4
-NTQyNDMzNDIyMzMzNTMvMzIzMjU1MTEzMjUzNDQ0MjIzMzY1NDQ4NDM0NTY2MzU0
-MzQ0NzY3ODQ1MjEyMjEzNTk2Njg1NjY0NzQ3OTUzNDAyMC81NDMzNTQyMTI1MzY3
-ODo4NTg4NTU1MTEyMy8yNDEyMzQyNDQ5NjY0OzY2NDQ0MzczMTQ0N0A3NjM0NTM1
-NDM0ODQ2NzU2NzQ2MzU0NDQyNDc3MzU0NTYzNDMyMjM2NDI0NDQ0NDM1NjY1NDUz
-MzMyNDMxMDA0NTcyMDMzMjEzMTIzNjE3NzM4NTI0NTUzMjU0NTQ3MzU7Njg1NTMy
-NDU3MzQyNjc2NTU1Njg8ODc3OTc2NDU2NDU4MzQzMzQyMTIxNDQ7NDQzMzEzMzEx
-MDE0NDc3NjQ1NDU0NjYzMzY3ODYzMjE0NjUxNzYyNDczMzM1Njc5Nzg3NzczNjc3
-ODQzMzIzNTI1ODU4NzQ1MzQ1NjIxMzIzNTU2NzMyMjM0MjM0MDY0NTU2ODc5NjU3
-ODg2NjY2Nzc1NTY3NTU2NTQ1MjMyNTU6Ojc3NDM1MzQzNDc2OTc2NjU1NTY1NzY1
-NDQ2NzY1MzU0Nzg2Nzg3NTY2NjQ4Njg4Njg5ODc2NjQ2Nzc4Njc3Nzk5NjY2Njk5
-ODUzNjc4ODs4ODY3ODk6Nzc2NTU1ODo3Nzk3ODk4MjM2Nzg3Nzk6OTc2NTo8ODc1
-NjY4OTg2N0A8Nzs6Ojg4NzU9NjY1OTw6ODg8Ojk5OTY3NTY4Ozg4OTU4ODk6ODg2
-Nzc3OTU4NzY4ODY2NTc4NTg5OTg6Ojg1NjY2NjY3ODY1NDM0Pzo6Ozk2Ozk6Nzc2
-Njc3Njc4ODM0ODU3OTIzNjk5OTc3NzYyNTg8Nzs3Ozs3Njk4Ojw5Ozg1Nzs5Njg3
-Njk3Nzo6ODY4OTU3Ojk4ODg5OTg2OTk3ODg1ODg6Nzk4NzU5NjU1OTg2ODs8PTo8
-ODw3OTk4Ojs1Ozs3OTs5Ozo4Njg6Ozs3Ojg5Ozw7Oz05Ojg1Nzo8Ojk6PTo4OTo8
-Ozg6Ojs8QEA8Pz88PDo4ODs6Ozo8Ozw9PUBJR0FFRFikwcvU2t7i5Obo6OhHQkA8
-Qj0/PTg5PTs+Nzg5PD06PDg7PDk4PTc4MzMxMTk6PDk1ODk3NjY4NjQ2Njs1Nzk2
-NzMzMzg2ODw3NzQ3NDQ0NTU0NzU1NDI0NzM1NjQ3NTIxLzM1NDMzNTY1NTU1NzYy
-NjY3NzUzMzUxNjU1NTQ0NDY0NjM1NzQ6Njc4ODcyLzI0NDQ1NjMyNTM0NTEzNTc4
-OTg0MzY0NDQ2NTQ2NTU3Njk3ODUxMjU1NzUyMTE0ODc3NDc3MzQ4OTQyNTQyNDM0
-NDM3NjY0MTIyLzI0NDQzMzIyNDM0NzY1NDIyNjc3NTY5NjU3NTM1Nzg5ODQ4NTg0
-NDY0NTg2NjM1ODQ4Ojk0NTQxMjAxNDc4NzQzNzQ1NTg5Ojg4NDY3MzEzNzMyMzQ0
-Nzo6NjIxMjQ3Njc5NDU1ODY1NjM0MTIzNDEzMzAwMDA1NTg4OTc3NTUzNDU1NDY1
-NDQ3NDM5NDY2NjYxNDI2NzQyMS4yMjQ1NTY1NTQ2ODQzNDMyNjo3NDMxMTA0NjM1
-MzIzMzI0MjU0MzM0MjQ0NTIzMjU4NTM2NTk2NTMzNjY0MjIxNDYzNDg2NTs+ODM1
-Njk1NDQ1MzM0Mjk0NzQ0MDA1Njc2NTQ2OTc4NDM2NTM1NjQxNTM1ODQzMzI1Nzw4
-NTg0NTQyMTMyMzQ0NDU4PDc0NTQ2NTg3NDM0NjEyNDg2NDU0NTI1MTI0NDU1NDQy
-MjM0MTIxMjM1MzE0NjI1MzUzMzU1MjIwMDU2NDE1NzMyNTY3NTY2NTQyMzU0NTU2
-NzUyNzc1NzY0NjU1NTU1NDQ2NjY2NjUzNTQ0NTQyNDc3NjQ0MzU3ODY1Njc1MzQ3
-ODU6ODUyMjQyNTQzM0Y5OTY1NTI1MjE2NDM2NDc2Ojc0MzM1NDI0MzU3NDUzMzY3
-OTc2NTM0NTQ1Nzg4Njc1NTMyMzU3NjQ0NDQ0MzI1NjY1NDY3NTU5NzY2NjQ0NTY0
-NzMzMzM1NDc2NTU0NTY2NTQ1NDI1NDI2NTUzNjQ4NDQyMzM0NDQ4NjU1Njc3MjQy
-MzUzMTM3Mzc6Nzc3NDU1NTU1NDY2NjQ3NTY0OTc1NDw4NjQ1NzU2NDY3OTc1NDY0
-Nzk3Nzc1NzQ2NTU4NjY4NTU2NDc4ODg2MzY1Nzc5Nzc3NTY3OTs4NDc2NDQ4ODY4
-OTg4ODk6OTY1NTk3ODk4Ojk1MjM2Ozk7Ozw4NjU6Nzo3Njc2ODg5ODo7Ojk5OTg3
-Ozo5OjUzODg6ODk4ODg3ODk8Ojg4NTc2MzI4Njg4OTg2Nzk3Njc3NjY3ODg3Nzc3
-NjU2NTc4Nzc1NDU5Njc4ODY5Ozo5ODc2NTY4NzY4OTk1NjYyMzM0NTg2Njg2ODc2
-MzY4Nzg3ODc2Njc6ODs6PT04Ojg2Mzk2ODk3ODg3Ojc7PTk4ODo+Ojk3OTg3Nzc3
-ODk4Ojs7ODc4NzY2NTc7Ozk3Ojo5PDg4O0A8Oz07Ozo7PUI/OTY4Ozk3Ozk5Ojo6
-OTo6ODg7OTk5OD08NDk9Ozs4Ojw6OTs8OTk7Oz4+Ozo8PD45Ojk2ODxEPjo3PURC
-S0M/PT9DUZ3By9PZ3uLk5ufp6T07PD45QEFAPkA/PD1CQ0E8QEA9OD0/PT8+OToz
-MTMzNDw7PTozNjg1MzU2NjU8OTw4Nzo3NDg0NDY6NjQ2NzY3NTYyNDYzNDU2Ojc4
-NTMyMDExMzM0MDg2NTYzMjEzNzMzMzQ0NTg3Nzc2NTMzNDQ0NTU0MjQ0NzQzNDg1
-NDIzNDc2MzM4NDY1MzU1NTQ1NDQ3NDU0NTY1NjU2OjM0NDY0MzQ0Nzo2MzU1NjQ1
-NTQzMzU2NDAzMzQyMzYzMDM1MzMzNTQ1MjMzLzAxMjEyMzEwMDAyMjAyMjM0Njg1
-Njo0NTM1NjQ0NDU1Njc1Nzg1NDQ2Njc2NDU3Njg3NTM0NTQzNTU3MzMwMzQ1NTY1
-Nzc0Njk5ODQ1NTY4NDE2NjY3ODUyNDQ3NTc0MjE0NTQ2NzY3Mzk4ODY2NjU4MzEx
-NDU3MjQ0MzM0Mzc3NzU1NTc2NTIzNDY3NjYzMjY2PTc3ODk3NTQzNDMzNjIyNDU0
-NTk2MzQ1NjUyMzE1QjczLjAzMzMzNTIzNDY3NjMzMS8yNTI0NTU0MDEyNDQ1NDMx
-NTQzNjQzMzk2NzozNDc1NTQ1OTY2OTY4ODc3NDUvMjc1MzUyNDEzNjQ1Nzc1NDM0
-NjY2MzIzMi0xMzMwMzg1MzUxMjM1NjUyNDc1Mzc5NDY0NTY0ODU1NDgzMzM2NTc0
-NzU0OTYzNjQ2Nzg3NDE0NDU4NjUzMzUyMzU4NjEwMzI0MzEzMjIzMjIxMDEzNTU1
-MjIzNDY1NjI0MzMzMTIyNjU0NTU1NDYzMTU1MzIzNDU1NDQ2MzQ3NjM2NjY1NjUz
-MjMxNDc3Nzg3NTU2NjUzMjM0NDU2Nzc4Nzc3NDU1MjM1NTQxMzU1Nzo0NDMxMjMy
-MzM0NjQ0NzU3NTQ0MzQ2NTQ1NDEyNDUzNTg4NzM1NTU3NzY1Mzc1MjQ1MzE1MzMy
-NDMxNTYzNTU1MjM1NTQ0NTQ0NTI3Nzc0NTQ0MzU2ODc3NTM0NDYwNDQ1NTU0MjM0
-NDU2Nzc1MDc4NTEzNjQ2NjM2MzIyMjQ6ODc2Njk7NjU3NTY3NzU3NzM1NDU2NTY3
-NjU2NTQ1NjU0NTY1ODY3NjY0NDY1MzU3Ojc4ODc0MjY5ODc7ODQ2NzY3ODc2Nzc2
-ODg5OTc5Njo2ODY5OzczNTY2NjY4NjU1ODc2Nzk5NjU4Njc2ODw4NjY4ODg7PD1B
-Ozo8OzQ3NzU2NDY7Ojg8ODY6Ozk4Nzo4PDo2NjY4ODc6ODc3PDk5Njg6NjY7NjU4
-ODU3NzY5NjQ2MjU3NTY2NzU2Oj49OTU1Njc1NTg5ODg1Mjg4OTU3ODc3Nzs1NzY1
-NjY3OTk3Nzg1Mzc2NDQ1NzY1NDU2Njg1OTw4ODo8OTY2NTk2ODY2ODs4OT07NzY1
-Nzk5OTg3Ozo5Njg4ODk6OTk3NzY5OTg4Ojk1Nzs5Ojg5NjY5Oz47PDo6PDg3ODs+
-Pj08Pj08Pzw7Pzk5Ozs7PDs2Pjo5Ojg4OTtAPTg2ODk6OTg7PDk5OTs6ODc5O0A9
-O0A7ODo5PD0+PD08Ozg6O0E+QDo5QUNLSD9CPkVUo8DM1Nne4eXn5+joOz5AOzg3
-PEA8PDo7Pj5BQD05QkNBPTs6Ozo5NTY2Nzw3Njc0ODg1Njk3MzY3Ozw4Njc6Nzgz
-ODc5OTg3OTlHODU3ODw6OTg0NDM2NDc0NDM2NTQxNjc2Nzc2NDMzMDEwMzQzMjQ3
-NDU0NDU4ODYyMTQ5OTM2NTU2Njk2NjMyMzMyMjI2MTM0MzQ1NTQ1NDEzMzU2NzU0
-ODY5ODU0NTMzNDM1NDIxMzMyMzY4NDc2NjUxMzMyMDIzNDU1NDU1Nzs0NDQ2MzM0
-MTEzMDMxMTIvMjQyMjUyMzMwMTEyNDUzMTQ1NTIyMDUzNTU0NDY0Nzc6Nzk1MjE0
-MjM1NDQ0NDQ0MDAzMzMxMTMzMzI0NTIzODQ2NTg3NTQ0NzU0OzY2NjY1MzY1NTQ1
-ODIyMzM2ODU2NjYzNjY3ODg4NjY6NDU1NzQ3NDU2NTY2NDY4OTY1MzAzMzEzNjY0
-Njc2NDU1NjY1NjYzMzIwMzU2NTQ2MzY2NTU1MzMzNjU5OzYzMzQ1MzMzNDU0NTU1
-NUBAMjE0MzIyMzQzNzQ0MzY1Nks6NDQ1MzU2NTY2NjQ0NDQyMzY1NjY6Nzg4NDQ1
-NTUzNTIzMzQzMzUyNDY2MjE0ODY5NjM0NDY3NjEyNjM1OTU1NjU1NTc3MzE0NjQ1
-NjY3RDo4MzQ2NTM0NjU1NDYzMjUzNTMxMTU4NzMzNDIxMjM0NDozNzg4NDMzNTg3
-NjMzMzM0MzMzMjAyMDU0NDEzNDQ3NDY3Mzg3NDIzMzI0NTgzMzMyNTU1NzU2MzMz
-MTMyMzU1MzEzNjQ1MzI0NDM1MzU0NTM1MTM3Nzc3OTg2Njc1ODY0MjM2Nzc2ODY4
-Nzc0NTU2NjU2OTU5NjUzMzU0NzYzNTQ1NTM1ODc2NDYzMDQ0NTg3MjE1NzcxLzQz
-NjQ2NDUzNjU1NDY0MjEyMjMyMzI0MzQ0NDQzNjQ5NjYzMzQ2MjM2NDU3ODY1NTQz
-MzM1Njc1Njc1NDU2NDY2NDUyMjQzNDQ0NDQ1MzQ0NTc0MjM0NjMyNDU1NDMzNDY0
-ODg3NzQ2ODY2OjU3MzMzNDY3ODg2ODc1NTY0NTc3Nzg2NTQ1NzY5NTc1NTU4Ojs6
-OTk1Njg5OTc4OTY2NTc4NDc2NzQ2NTY0ODc2Njc4ODY4QEI4Njc1Njg2NjU2NTU1
-NzY3OTQ1NTg5ODc3Nzc4OTY4Njc5O0I+QTc4Ojg3Njc4Nzc4O0M5NzY1NTc5OTY2
-ODY2OTg4ODc3NDc5ODk8Ojk4ODU4Nzc3Ojs4Njc0Ojs2NTY2Njg2NzY2Ojc4NjUz
-NjU2NjQ1Nzg1NTY2NDY3Nzg1NDU2Nzk0NjY2ODc2MzQzNDk4Njc2Nzc3NTg3NTc2
-OTc0NjY0Njk5NjY3ODc0NjY5OTg3OTg4Nzg6OTc2Nz86OTo5Nzs2ODk7PDo9PDo6
-ODU4Nzg6OTY3Ozk7Ojs/Pzs4OTc4ODo7Ojc8Pzo7OTo7OTw+Pjg6PT47PTs+Ojs+
-PDs+Pj09PTk5PTs5Ozs8OTk7PD5CPz4+Ojs4OD0/PDs8OT89P0JBPz8/QDs9OkBF
-PkNETFKcv8vU2t7j5Obn6Oo7PEA7Oz06PUE/ODo9QDw+QDs6Pj89Ojg6Ojk5Ojk6
-Ojw6OUE8OTU4Nzc4Njg1Ojo6NDY4Nzg2PDw4PTg4NzQ1NjI2Nzk1NjU3NjY1NTQ5
-ODM5NTMyNDQzMzMzNjU2NDY3ODY1NTY3NDU2Nzk2NTA0NTQzMzU2MzQ0NTMyMzEx
-NjU3MjQzMzY3NDU0MzQ0NTIzNTc1NTU3ODY1NzY5NzM0NDU1NTY1ODUxMzg2NTc2
-MzMxLzI0Mzc2MzgzMzAzNjU2NDM1NDI1MjM1Nzg0NjQ3NTY2NDEwMzYzNDI0NDM3
-NTY0NDI2MzQxMjM0NDU1Njc4NDUzNTE1NDg2NDc0NDM0MjY3Mzo3ODU2NDQ1NTY2
-NTY2ODg8ODc2MzQ2NjY0NjM5NjQ0NDU1MzU1NTQ2NTY0OD81Ozg7OTQyNDU0NTU0
-MDY1MjQ1NDg1NDU1NDM2ODY1NTUzNjU1NDQ1MzM3NTU1Njg4NDIxMzc2NTQ0NjU2
-NzQ1MzI0Nzg4NzU3ODY1NDIyNzYyODU1NDU3MzAyMzIyNjQ1NjkxNTQ0PDc0NTEy
-NTY2NDM1NDMzMzU3NjY2ODc3OT02Nzc0NjY2NzY1MzQ0MzM1NzQyNDQ1MzE0NzEy
-MzMyNTUzNTU0NDIzNDM0MjU3Ozg4NzQ0NTo7NzU0MjUyMDQ2NTQ0NjY1NTg1MjQ0
-NDM4NzQxMTYzNTQ1NTU2NzYzMjQzMjU2NDUwMjIzNDg4ODY4Mzc+NC8yMjc3MzMx
-NTMxMjI5NTIyNTg1MzQ2MTQzNTMzNDU1NTc0NTQzMTU2NzM3NzQ0MTIyMzY1MzQ0
-NTU3NTY0NDMwMDQ3Nzc3NTY1MzM3NTQ2NTQ0Mjg3NDU2Njc2MTMzNDUyMTI0NzM0
-NTU0NDM0NTY3MzU0NTQxNDMyNDY5ODk1MjQ8NzQzMjIyNDIyNDM2NDU4NjY4NjMy
-NDU2MzMyNDQzNDUzNTQ2NTg5Nzg3MzM2NDQ1NjQ1NDIzNTY4MzIyMzQzNDQzNTUz
-NDMzNDo2NjU1MzM1MjQ2Njg3Mjc3Njc6NTU3NjY1ODU1NTM0NjQ0NjU2NjY2Nzk4
-NjU1NTU6Qzc0MTIzNjU4OTc1NDc4OTc4ODo3Ozs4ODg4ODQ1MzU2NTY2NDU6ODY3
-Ojc6NzY2PDY7PTc3NTo3NTY3NDQ4OTg3ODU5OTo1Nzk1NDY2ODc1PDk3NzpKX0M6
-OTc4Nzo3Njg4Ojo7Ozs7Ojg7Nzc6ODY3Ojo4ODY1NTU1OTk4OTk4Ozw5OTc2NzY1
-Nzw9Ojo4Nzc3NjY1NTY2NzU3ODc4NjY3NTU3NjY2ODc3ODY1NDI0NjY3Nzc2OTk7
-Nzg0NzU1NTYzNDk3NjQ2ODc4Ojo4OTk4Ojo2NDg3OTc4ODg0Njc3OTg6PDw6ODc5
-ODk6OTc4Ozw6OTs5Nzk4Njc4Ojk7OTg1Njc3Ojk7Ozk8OTg3Nzo9PTo6OTk7Oz49
-PUA8Ozk9PDo8PT08PEA9QD48PD8+Ojk7PDo4OTo5PDs+Pzk8QTs6OTs4OTw/Oj09
-Ozk4ODg3ODo6PTxBPj0/QUNDQD08PT0/RURLXJ2/y9TZ3eHj5efp6kdBPT06Ozs7
-QkFBOTs8PUA7OTo8PT49Ojs7OT47OTc8Ozo4PkI3Ojc7OTk3Pzg7OTY2MzQ1NDY2
-NjI0NzU2ODczMzU1MzIzNzU1NDU1N0I1MzM1NTY0NDMyMTM2NTY3NzYzNDQ2NDQ0
-MjM2MjU0NTEyMjUzNTMzMjI1NzY3NTUyMzUzMjYzNDQ0NDE0MjI0NDQ0NDU0NzU1
-NTMyNDQ0MjU2NDIyMzU0NTUzNjQ1NDg3OjQxMjU2NDQzMTM0NDQ0NTY0NjQzODo2
-Nzc2ODc4Pjc0MzI1NjE1NzQ0MjM1NzY1NjMzMjQ2MzMxMDMzNjQ1NjQzNTIzMzEz
-NDY1NDM0Nzc1MzM1NTo5NDY2Njc5NTU3Nzo2NTY1NDc2MjU0Nzo3ODY1NDM0NTMx
-MzY1NTU1NDMyNTY4NjU3ODY0NDU0NTQxMjQ3NDMyNDc6NTU0NTc8NjU4PTk7NzU2
-NDM1ODY1NjQ2OTk1MzM0NDUzNjQ0MzM3NDM2Mzo2NzY3Nzk1NTAzMzQzNDQzMzEz
-NTM1MjU3MzMzNjU2MjM0OTc1MzEwMzg4NjcyMTM2MzM1NTY2MzQ2MzU2Pjo1NjUz
-NzI1OTg6NjY1NTY4NDQ0MjI1MzM1NDM0NDQ4ODY0NTQyMTEzMjQyMTI2Rzw0NzQ3
-NzQyMTM2MTU4NDY2MzY1NDc1NzY3NTU0MzQ0NDE0NDc1NTc4NTQ0NTY0NjQ0MzE3
-ODY1NTk0MzI2MzU2ODc2NDo2NDY5OTw2MTQzMTM1NjU0MzUyNTc0NjUzMzIzNTY2
-NjQzMjQ0MzEyNDU0NDI1NDczNDU2NzQzMzUyMjMyNjc3NTY5OTY0MzE0NDUzMjcz
-MzMzNTEzMTQ0NDIxMDIwMjU0Njc1NTg1NDI1NDY0NTUzMzMzMjQ2NDQ0Njc0NTMz
-NDtMOjM2MzI1NDY2Nzg3MzU2Njc3Njg3NDYzNTIzNTc4NDY2MzIyNTc5NTU3NjQ2
-NzU3ODY2MzI4NTM0NDQyNDQ1NTMzNTY1NDQ3NDUzNjY2NTEyNTU2MzU4NjY0NTQ3
-NjYyNDg3OTU1NjUzNDM1NDQzNDQ2ODg3Njc5Njg4NDo5NTY3ODk2Nzg4ODs9Njg3
-Nzg3Nj05NTo3Nzc1Nzc2NzU3OTc6OTY2ODY2NzhAOjU2ODc3ODo1NTQzNDc5NzY3
-NDc2Ozk0NDY0MzU0Njw7OTo5OTc6NTo+Ozk3Nzc1Nzg4NjhBRzo3OTY2Nzo5Ojs6
-Ozk4ODc4Ojo4OTs4NzY4Ozc2NzY7OTc3OTw5Ojg4PTw3ODU0Njg1Nzc2NDU2NzY1
-NTY1Njg1NjQ0MzY0MzQ4NjU1NjY0MjczNTYzODk1NjY3Nzg3NjY6ODg4NjU2OT05
-OjU4Nzc5Nzg3Nzk5PDs3Nzc8OzY4ODo9PTc4Ojo7Pjw7ODw5Ojo6ODg5ODc2NTY2
-NDU3Nzk5Ozg2OTc5Ozs8ODY3OTk7ODs8PDo8Ojo6Pzo9OTs7PDs4PDo8Ozw8OTk4
-ODs+PTk9Pzo5Ozw8Nzg5Njg4Njo+PkE6OTo6OTw5PD5BPTs+PkBAQEJCOTo+QUdP
-REpwpMDL09re4ePm5+jpQUQ/PUJBQUE+QD49ODw+Ozs9Ozs7PDw9ODw+Qj5BPjk3
-OTw4OTU3NTo5NzRHOjYzMzY2NDk2NDUyODk2NjMyMTAyNTU5PDk3ODU2NjY4NzM0
-NDQ0NDUyMjMyMjM4NDY1ODY0NjQ0MzM1ODQ1NTM0MTEzMzM0MzE2NTY4Njk4NTc2
-NDIyNTUzNzU1OTQ0NDQ3NjM0NDU0MzY2NTU1NDMzMjI0MzEzMzIzNDEzMzUxMzUz
-NDc2NzQ0MjU5NjY2NTI0ODM0NDY1NDc3NTQ0NDYzMTM1NDM0NzQ0MzMzNDQ0NTYz
-NTY6NTAyMTExMjY2OjU2NDMzNjQ1NTU1Njc2NDMzNTU0Nzk3NzQzNjQzNjg2NTQ1
-Ozo5Njg2Njk3Mzc5NjU4ODQxMjM1NTM0MDQ0MjIzMy8yMzQ0NjU1ODc2MzY2NjY3
-MzU9MzM1NDozMjE0MzQ0MzY0NTIxMjQzNjUzMi40ODQ1MTM2MzQ0MzQ5MzQ0Nzg2
-Ojc3NzU1MzU0NTMyNjo0MzQ0OTQzNzI0NTE0MTIxMzI2NTY1NzU1NDQxMzQyMjc1
-NDM1MjQ0NDI1NDQzMTEwMTo2NTY1NjYxODc1Njc2NTQ1NDQ4OTcyMjI0NzQ1MzU1
-NDE0Njk2NjcyNTQ0NDMxMjQ5Pzo1MzM2NjIxMjg6NTY1NTM0NTQ0ODk3NTc5NDQz
-NDI2NTU0NDMzMjMyMTI0NjY0NTU0NDUzMzI0NjU1NTMzNDUxMjc2NDMyNDY1Pjo4
-NzU3NjY2NTU1NzkzNDI2NzQzMzEyMzI0PDM0OTQyMjY0NTQyMDExNTU0NDI0NTQy
-MTU2NDU4NzM1NzY0NDU0MzIzNTMyNDUzMzY1NTMzNTY3NTY3NTg4NDQ3NTk4ODY6
-NzY0NjM4NzY2OTYyMzU4NTU1NTI2NjUzNDQ1MzQwMjI2NjY3Mi8yNjU0NTY1NDM1
-NTU5NDY1NTczNDU0Nzc5NjY0NDM2NTQzNzY2NTU3NTYzMTMyMDM3ODY0NDM0NDMz
-MjI0MzM1NTQzMTQ1MjU3Njc2NDQ1NTY0NDMyNjQzNDUzNDQ2NDQ2NzUyMjQ0NDg7
-ODc4NTc0Njo6ODc4OTg6ODg6Ojs5OTc5NjY2ODk3NzY5Njc3Nzo4PDk6OD08OTo2
-NjQ0Njc3OTY2OTc1ODk5Njg1NzY0NTI2PDU2NjQ0NTQ1NDU3ODk5OTo3NzY2ODY1
-ODk6OTg2NTo4Ojo5ODs9Pzw3ODY6ODo7OTg3Nzg7Ojw7OTY7Ozk5OTg4Pjo4ODg1
-Nzk4NTY7Ojc5OTc3NzU2ODY3Ojc2NTY4ODU4ODUxNDQ2NTM1NTY3Njc1NzY4NDY1
-MzUyMzk3OTc3Nzc4PDk3NTQ1NDY2Njg3ODk4OTY3OTk4OTY5ODg5Pjo2ODc5Nzg3
-ODg6Ozk+Ojo7Ozs5Ojs6ODk4ODg3ODc4OD08NzY3NDc6Ozc1Ojc5OTk6QEE9PD47
-PT05Nzk3Oj08Oz05Njg7Pj48Nzk5Ojo8Oz86PUA6QD88OTs9Pzs6Oj06PD0/QD49
-Pj09Ojo8Ojw9QEI+Pj08Pj45PT4+QkpGRVigwMrU2t/h5Obn6OlDQUZFP0BCQkE/
-Qj02PD88PDs6PDs3Ojg3OT06Ojw4ODk8Ozw7Ojc2MzM3PWBDODc0Nzo6Njc2NDQ1
-NDg3OTc1Ojo3OTU0Nzk2OTU4ODQ1NTY0MzI1NTMxMzM0NTQ3NTU0MzY2NDU1NDY3
-NDI2Njc2NDQ2NjY4NjY1NjY2MzY4NjU5OTU0NDI1NTM0NDQ2NTMzMjM0ODQ1NTU0
-NTY2NTU0NDU0NDYzNDQ2NjYzMjMzODU3NzY1NDQyNTY0Ozc0MTMxMzgyMTIvLzEy
-MzQ1MzMxNDM0NDU2MjMzMzY3NzU4NTY2NTQzNjAzNjUzNjY1NzYzMzQ2NTQ5NzU2
-NTQ3NDUzNjQ0Njk4NzQ0MTM1NDMzNzU1ODk1NTM0NjU+QDgzNDU2NTQ3NDUyNDY0
-MzM0NTk3NDQyNTM0MzQ1NDg0Nzc3NzU2NTc2MjM0NTIyNjI1NjU2NjYzMTQ0NTY1
-M0A2NjY2MzM0MjMzNDU0NDUzMzQ1NjU3NjQwMjYyNTU0NTQzOTI1NTczNjc1NDMz
-NTA1NTk0NDY0MzM2NTI0NjQ3NTU5NTY4NjQyNDU1NDE0NTczMTIxMDIyNTY0NTY4
-NzI1NjU1MzQzMjQyNDQyMzM1NTY1MjY2NzU1NzY2NDs3NDMvNDE4NTY6PUI3MzE0
-NDQyMTM4NTM2MzQzMzU4NTQ0MjI0NjQ0NTU2NjEzMTQxMTI2MTIzNDc2NTc0NDQ2
-NTQzMDE0MzMxNTQyMzM0Njk3NDU6ODY4NTU0Njc0NjY0NjY3NzU2NjM0NjY1ODU6
-NDQ2NDU0MzIzNTc3NDQwMTIzNjM1Nzc0MzQ1MzU0NTY0MjQ2NDY2NjUzMjIwNDY0
-MjIzNDQ2OjcyNTYzNzU2Ozc2NDU1NTk2MzQ0NTQzNDY1NTUzMzIzMzQ1NTM3OTY4
-NjgzMDE2NzU0MzQ1NzU0ODQ1NDI1NzQ1NzI4OTc0MTE0MzQ1NzgzMzUyMjU1NDU3
-NzY1NTY2NTc1NTAyNjc2Njg3NjQzNTQ0MjY1NTc1NDI0MzI1NTMxNTY0NTcxMTQy
-NDQ1MzEyMzYxODQ1MzQ3NzI0NjQ0ODs4Nzg6NjU4NzpBOTc3ODo4Nzg0NjY6Nzk5
-Nzo7OTo4OTc3Njo1NTU4NzU4ODczNDIzNDM2Nz06Ojk6OTg3PDk4NjU3ODk3NTk5
-OTc4ODY2NjQ2NzY1OTU5Njc5Ozg1MzU7OTY4Ojg7Njk4Ozw4Ojg9PDk6PDs5OTg4
-ODY6Njc7ODY4PDk7ODg8Ojc3ODg5Nzk0Njc5Nzk4ODc3OTg6ODg2ODo2NjU2NTY2
-Nzg1Njk6NzU3Njc0NTY2Mzg2NTU1OTg3NDU2MzQ1Njc4NDI5NjczNzk4Njk4NTo6
-Ojk4OTc5Nzk7OTo4Nzg8OTo5ODo8OTc2NzU3Nzo6Ojg5NzY2ODs6NzY4ODk3Njw9
-Ozo3Njc2NTg7OTk3Ojk6Ojo9QUBAPz07Ojo6Nzs8Ozw6Ozk8Ojo6OTo8OTw8OjpA
-Ozo5PT1AP0E7Nzo6Oz9BOzs5Ojo+PTo7PD4+PT09PDw9PkA9Ojw+Pj8+Pj1FQT0/
-U5q/y9PZ3uHj5ufo6UJDQ0NFQz8+RUZBQEBAOzo6Ozs4Nz06Ojs4Ojo9PT47Ojk0
-Nzc7OjQ6NzU6OjgxMzg1OjY2NjU3NTU1Nzc3NTU1NzU2OTQxMjI0Njg3NTE0MzYz
-NzMwMzIyNTQ0NDU0NDUzMzY3ODo5NzQ1NDU1NDU2MzMzODQxMzY1NjU2ODczMjY6
-OTU2NDc3MzMyNjU0MTE1NDQzMjIzNDQ1NzY1ODY0ODg1NDIzNTUyNDQzODM1NDM1
-NDYzNDU2MzQ0MzM0NjYyOjIwMTIxLzMzMzQ0OTkxMzY3MzU0MjQ2NjY3NjY1NTIz
-NTg3NTEzNjMxNDY0Njk1NTY4NjY0NjMzNDc4ODE0NjQ2Nzc2MzQzNzc1Njc3ODQ1
-MTQ1MjM5OzgzMzQ0NDQ2NzY3PTQ0MzM0NzU1NDQ0NTMyMzIxMjQ1NTc4NjQ3NjM2
-MzQ3NTY5Nzc2Mzg2NTY1NDMxMzI0NTQ3OTUxMzU0NjQyNDU1NjU0NTU0NDI0Nzcz
-NTY3NDY3NjQ0NTc0NTU3ODY0MTIzNTo2MzQzMTY0NTc5MzY3ODQ0NjYzNDM1NDU4
-NDU1NzQzMzQzNjg3NjQ0MjQ0NDU0RDU1NTwzNjUxNTY6NjY3NjMyNTQ1MzMzNDY2
-NzU0MTM1NDY0MTM2MzU4NTUzODYyMTU5OTIxMTM1MjM0Nzc2NjQ0NTQ0NTczMjE0
-NzYzNjQyNTQxNjQwMjIyMTU3NzYxMjQ1NTE0MzI1MzIzOjk3NTIzNjY4NTY1NzQ0
-NTM1NTQ4OTo3NTU3NjQ2NzU3Njc1NjY0Njg4Njc2NTQ2ODc5NzY3ODg2NDY0NDY3
-NTMyMjM2ODs1OjUzNDc1NDMyNjQ1NDQ0NTM4NDI3Njg1Njc2NjY4OTY0NjEyNDM1
-NDQ0NzU2NjIzOTY1NDQ3NDM2NjQ1NTU2NjUzMjQ3MzIvMTI1ODU0NjU1NDU1ODU4
-OjY2NjY3Njg1NjQ2NjU0NDIzMzU2NDQ0NjU0NjY1MzU1NjY0MzQ1NDQ0NjQ0NDQ0
-NjY3NDUwMjA0MzMzMzUyMjM0NDQ1NjUyNjMzNDM1MzIwMjU0NTQ1NTc1NDM1MzM1
-NDU4QTs9N0c6NjY2OTk2Njc1NjU2ODY3ODg2Njc5NTdANzg1MzU2NTY1NzQ1OTY1
-Njg4Nzc3Ojg4Nzg7ODk3OTk1NTo8NDU1NzM0NjQ3NDQ2OTYzODc4OUA8Ojg3Nzg2
-Nzs5OTo3OTY9PTg3ODU4Njg5NzQ0NDY1NTc5Ojs5Njc4OTg6ODk7OTU1NTY4NTg2
-ODg2Nzc2ODo4Nzk4Nzg5NTY3Nzc2ODc3ODg0NjY5ODc3ODYzNjQ2Mzg2NzU1Nzc3
-ODc2OTU0NjU0Njg3ODk3Nzs7Ozs5PDs4ODc3NTY5PTg2NDU1Nzs7Ozk5OTw2NDY1
-OTc6ODY4Nzc2NzY2ODk4Nzk6OTc3ODc4Nzk3Ojg3Nzg+Pjk4OTo4OTw+PkA/PTk6
-Pjs7PDxAPT0+Ozk4Ozk+OTw6Ojg6PD0+PTw7PT1APD08PDxBOjs6ODs7Ojo+PT0+
-Pz07PDw7PDw7OjtBPD4+RUM+PkNEPEBVmb/M1drd4OTm5unpQ0NEQkVHREQ9QkJD
-QzpAPDo6Ojk9Pjo5OTg6PzY3ODw3ODw7Ozs3NTc3ODU4NTo1ODs5Pzs6NzU3Njg1
-Njo5Njg1NDQ1NjIxNTQzNzU1NzQzNz02NTUyMS8yNjY0Njc0MjI0NTQ0Mzk7Nzg1
-MzM3NjYyMjEyNjQyMTEyMzA0ODMzMjE1NTY0NDU2NzUzMzM0NDQ1MTE0MTIzNDU0
-MzQzNDU0NjUyNDY0NDQzMzQ0NzM0NzM2NDQzNjk3NTM0MjMzNDc0MzI0NDMxNDU0
-MzQ0NTY1NzE3NTMzNDY4Njc3NDc0MjM1NzY0NjY3ODQ3NjY2Njg3Nzc1ODU3NDI2
-NDM0NDIyMzM0NDQ0NTQzNDU1NDY2Njc4MjU2OTo3MjM3MzczMzQ2NzY0Njg3OTc1
-NTQyMjI1NTk3NjQ1NTozNjM1NjQ0NTM1MjIyMTI1NTY1NDcyMjY0MTUzMjExLzI0
-Njk5MjM2OTUzNzA0NjczMjQzNjdJRDU2Nzk3MzM0NTc1NjU2NzY1NDI1NTo3NTc4
-NTI1NzYzNzkyMjc1NDI0NTQ0NDQzMjQ1NTM0NTU0NTU2NTU0OTYzNDY2NDZFNTY2
-Njc1NTQ0MzY0NDIyMzMzOTs1NzMzMzM5NjMyMzc2NjU1NzY2NTg1MjU0NDo5NDQ0
-NTg4NTU0Mjg2NTU2NTU0MzQ2NzQ1NDQyLjQ0NTQ0NjY2MzQyMTMzMzQ3MjMzNDEz
-MzQ0NTc2ODg4NTMzMjM0MzQ3NzU0NjMyNTM1NjU2NDU0NzY1NDI0NTg6NzQ0MzMz
-MzQ0MjQ0NTMxMzQzMjM2OTU6ODU2NTU2Nzc2NTY4NTc3NTQzMTIzMzQ1Njc2NjUz
-Mzc0NTU1NjY2NzM0NDY4ODk3OjczMzQyMjM0MzQvMjQ2NzU2Nzc3NDU2Njk0NDMz
-NTQyNDIyMjQ0NTI2NjY1NDQ2NDQ1NjY3NTY3NjM0NTU1Nzk1NDQ0NjIwMzU0MzUz
-NTM1NjY4NjQ4ODM0MjI1MzAzMzU2NDQ2NzU0NDUzMjI1MTQzMzIyNTM0NTQ1ODQz
-MzI4NTY0MjIwMDMzNDQ4NDQ1NjU2NTQ3NTU2PTg5Njk4Ojk2OTk5NDU0NTc3Njc1
-Nzg2Njc1Nz03NjY2NDQ1NTU3NjY0NjczNzc3NjY2NTU2Qz47OTc1NTg4ODk4ND04
-OTs4OTg6NzY6Ojo4ODg5Ojg7Oz09Nzc2Njg5Ojg7OzpGOTU2NTU2NzU3ODc3Njg5
-Ojo6OTc1Njg4NzY2Nzw7Ojk3ODY0NDQ0Nzc4ODg1NDQ0NDc4ODc0NDQ3Nzo7NjU1
-Nzg3ODc1NDY2Njk0NTU5NDQxMzM4OTg5OTU3ODg4Njo6NDM0Njc4NzY2ODo4ODg5
-Nzk3OTg4Ojw3Nzg1NTk7Ojk3NjU1NTo5NTg6OTo5Njg5PT04ODk6PTk7Pj44ODg5
-OTo4ODs6PDg3Ojw5Nzk5Ojs5PD4+PUBEQD89Pjw8ODY7Oz09Pjo8PDo8Oz48PD49
-PkA8Ojs8PkE8Pz49PDw5Nzg5OTs8OTpDPjs+Oz08PT49PUE/PD9APT1CRUJDRFGd
-wMzU2t/i5Obo6eg9OTw+Pj1CREM/REI/Q0I7Pjw4OTY2Ojk3Oz07Pjo8Oj5BOTo4
-ODg5NTU3Njc3Ozc4Nzo4Ozo6NjU4Ojg2NDQ3NTc1MzU0Nzc5NTMyMjY4OTY5NjMz
-MTA0NTQ1NDQ5RzYxMjIyNDU3Njg3RjU1NjQ1NDUzMzQ0NTUzMzMyMjIxNDAyMjU3
-NTYzNDYzNDMyMzQwMjQ1NTQyMjY0NDU3NDM1NjQ1NTc4NjU1NDMxLzIyNzcyMjQ0
-MzIyNTU1NDY1NDM0MDIyMTUyMzg1NDQ1NDc2NDQ2MjY3NTYzMzM3NjM0NTUyMjU1
-NzU2NDU4ODc3MzQ0NTIyNDc0NDc2NTIzNDI0MzU3NTU1NDY1MzM1NTQzNTUyMzc5
-Nzc5NTY5ODs5NDY3NDY1NTQ3OTg2NDMyMjIzNjQxMzM2NzQ1ODg0NTIxMTMyMDIz
-NTI0MzM3MjU1NzMxMzEyMTY0MTEwMTM1NjUzMzIzMjQyMjEyMzM0MjQ6NjY0NzQy
-MzM1NTUyOTU0MjMyMjM1MjQ2ODo8Njc0MjU2NTU1NTU5NTU1ODQzNDMzMjI2NjQy
-MjU0MzQyMzU0MzQ0NTUyMjQyMzQ0NDU0NDI1ODU1NDM0MzQzNzQ3MzAzNzQ0NDY1
-NTUyNTc2NjQ4NTc1NDQzMzM0NTYzNDU2NjY2NzQ1NjM3MzY2NzYzMzY5NjU1MzEy
-NjU2NzQyMTIzMjIyMjM0MjM1NTQzNTc3MzY1NDU2NzQ2MjEwMjU1MTQ2NzU0NDI1
-NDMyMzQxMzQ0NDU1MjU2NTc5NzU1NDc4ODY1NTU3NjM0NTY2NTg5Ozc3NzY1NDU0
-OTc3NjI0NDM0NjQ4ODMxNzQ3ODMzMzAxMzM2NjMzNjcyNjY2ODY2Njc2NTU2MzUy
-NDQzNzY0MzQ1NDU0NjQ4NDQ1NTc1MzMyODczNDQzNzQ0NTg4NTg3NjY4NzU2NzQ1
-NTo5NTY4Njg1MzY3NDIzNjYyMjQ1NTUzODc1NjY1NTU1MzIyMzM0MzMzNTY2NTY1
-MzQ0NDk3NDY2MzMyMjIwMDQzNTQ0Nzc3ODk0Mzg2NTczMzc1NTY0ODYyNDY2NzY1
-NjY4NzY1NDc4NDY3OjUyNTc4NjQ2Nzc0Njc3NjY2Njc3OTY2NTczNjw7Nzc5Nzc1
-Njc2NTY2Ojc2Ojg2NzY2ODk3Nzc3PDc1Nzc8ODY1Nzk5ODo3NTk3PEE5NzU5NzY2
-Nzc4ODg2OUA7NzU4Njg6Nzc3Ozg2OTo4ODk2NTg4NTk7OTg4Ozo7Ojo3OTg2NDQ2
-Nzc4Njo4ODc4NzY4OTk8OTUzODg3NDU1ODk2NzY1NjY3NTU0NDMzOjY3NzQ1Njs4
-OTg6Ojc3ODc3Nzc2Nzk2ODg2NjY1ODo7Ojw4ODQ2Nzg7OTk5OTk4Ojg4NzY1ODc6
-OTg7OTk5OTk2ODs2ODg5PDs5PDs6Nzg7NjIxNzk9PDs6ODs6Ojs4Ozw8Oj0/PDw6
-Ojs9PDs8PDw+PT07Oj05ODU3ODk6Ojs7Ozk6ODs+PkQ7QD0/Qjs/PTw5OD5DPkFC
-PD0/Ozs8Pjs8Ojw5PT1AQ0FEQ0FJVaHCzNXb3uHk5+fp6jo4Ozo6Ojo/REM/PUA8
-Pz48Ojo8Ozs6Nzg8Pjw8PTgyNjY4Ojg2ODc6NzU0NDczNDg2OTc7ODk7OTg1NDM4
-ODQvMjUxNDk8Njc7NTMzMjE4OTQyNDg4NjM2MDEzMzhENTI0NDc1NjU1NjU5NTEx
-MjU2Nzc2OTUzMjMzMzIyMzI0MzYzMDU0NDY0Mzc4NTU1NDEzNTQ1NTM1MzY2MzQ5
-NTY3NTM0MzU2NjU1MjQ0NDQzMjMzNDU1NTQzNTY7MTEzMjQ3NDMwNjk4NDc0MzE0
-Mzc0MzIzNTUzMjc2NzM0MzQwMzEzNjc1NzYzNTY2NTM1NTMyNDI0NTY2NjU1NTQ1
-NjY1NTM0NTU4NTg2NTU0NTU1NzQyMjY5NzIzNDI0NDY1MjQ4NTUzNjIzNzQ2NTMz
-NTUyNTEzMTM2NTM1NDUzNDk1NTM1NDc6NTUzNzIzNDY1NTQ0NDM0NzQxMDAyMTI2
-NDQ1NDc3MjMzMjM2NTM2NjM1MzM3NjQ1NjUzNDQ1NjUzMi8wMzQ2NDM1NjU0NDU0
-NjM0NTY2Nzo1MjI0MjQ0NDQ0MTVDNTQzMjQzNzo3NjQyMDI0Nzc0NTk0MzQyNTs4
-NzU0NDU0NjIxLzQzNTU1NDM1NDM0MDEyMjIxMjM1ODUyNDY2NDczNTU1NzY0Njg0
-MzMzMjU2NjY3NTI2Njo4NDQ3NTYyMjE0NDQzNTQ3NzM0MzM0MjEvMjc1NjUzNzc0
-NTc2NjQyNTIyLzMzMzMzMzM0ODU0NTU0NDQyMjQyNDQ3OTU0NDM0NzY2Mzc0NTY3
-NTY2NTY1MzY2ODo6NzU1Nzc4Njc1NTQ0NDY2NzY3OTY2NDQ1NDUzNTU1NDI0NDU1
-MzMzNDc2NDU1ODQyMjUzNjY0MzYzMzUzNTMyNTc3NTo0NTM0NjQzMTQzMzM1Njg3
-NzMyNTY1NjQ2Njc3Ojc5OTU2NzczNDc1NjQ0MzQzNDU1ODc4OTQ1NjUyMjYyMjQy
-NjY2NzU0MzIyMTEyNzk2MjQ1NjY4NjQzMjIzMzM2NjQ0NDM0MzM2MzU0NDQ0MzU2
-MzAyNTQ1NDM3NTY2Mzg4NTg2NjM4OTg3NzQ1NTU3NjY3OTg6Pjo4Nzk3NjU1NTY2
-OTU2NDQ0NDU2NTU1NTY4Pjo1NTo7Ozk4NzY3Nzc3NjQ1Nzc5ODk3PDw4Ojg2NzY2
-ODU2OD04NTU3Nzc9Nzc4Nz05OTs3NzY2Njc6OTg8PTk6ODY3Nzk5OTc5NjQ3ODg1
-NDY5NjY4Nzo6OTs6Ojs3ODY5ODc3NTY3NTU3OTk6NzU3ODc4OTo4Nzo4NjMyNjo1
-MzY3Nzg2ODk6Ojc3NTk6ODk2Nzk5ODU4NjU3ODc3NzQ0Njc2NDUzNjk3Nzk6Ojk4
-OTo3Nzg2Njg7PTk4Nzc1NDU6Oz45OTs5Ojw7Ozg4OT0+Ozk3OTc5ODk3OTo6ODk1
-Nzk5Ozw9RDs6OTw7QD48PTtAPDs5Ojs8PDo6OTw9PDs8Ojo7ODc6OTg7PDw+PT48
-Ozo8O0A6OTg7Ozk9PTw8PDs8Ojs4ODpBQT48Ojw9PD4+PDw9Pj09QkA9RlVXncDM
-09rf4uPm5+jpPUA9Oz5BPTg9REREPkBCOT1AQD08PTs6OTk8OTc2NTM2ODY8OTY5
-Ozg7OTc3ODo6Ozs7Njo4ODk5NjQ2Njo5MzA0MzIyNDQ3ODQ0MzUxMTEzMjE2NDg4
-NzU1MjI1MzMxMjUzNjQ3NDIzNTY3MzQ1MzMzMTM2ODQ5MzU1NTQ2NjM0NTQzMTI0
-NDU3MzY0MzIxNDQ1NDU4OTk2NDY1Mzc0NDM1NTM1NjUzMzMyNDM3Njg1NTM1NzUy
-NDQyOUExMjM1NjY1OTk5OTUzMjQ2NTcyMzMzNjI0MzI0OTY3NjU0NzY0MTI0NjY1
-MjU0MjQ0MzE0NjQ1Nzc1NTk2NTY2NjU5Njc4NjQzNDM5Ojg3NDY0NTQ2Njc1NTU1
-MzUzNjU3NjY0MjY1MzQ2Nzg2NTY1NDU1NjM5My8xMzE0MzM1NjU3NDY0MjE2NTM0
-MzQ0NzY2NDMxNDQzNTMwNDM0NjMzMzUzPDU2NjY1NTQzMzIyNDk2MjI0OTY2NTIy
-NTYyNjQyMjMzOTIyNDQ0NTMxLzMyNDIzNDUyNjQ2NTU1NDU0OTY4NzQ0MjIyMTQ5
-NDU2NTg2Mzc3NjUzNTU3NjI1NTg1NDg5MzQ2ODY0NDQzMTU1NDAxNzQ0MzIxMTM1
-NDMzMTE2NDMxMzQ1NTg0NzUzNDQ2NTU2NDMxMjY4NTU0NDI1NTUyNTUzNDU1MzIz
-NDY1NTQzNDU0OTMzMjMyMzMyMzM2MTM0NDY0MTMzMzU5NTY0MjMzNDMzMjQ3OTY2
-MzIxMDQ0MzQ1NTMzNTY2MzY1OTQzMjg4NTQ1NDQ0NDY2MzQ0NDM0NTU2NTc2NDMy
-MjI3ODc2Njc3MjM2NjQ1NjUyMjUyMjM0MjU3Nzk3NzM1NjY0NTQzNTU4MzUyNTEw
-Nzk3Nzo4ODU0Mzg1NDM0MzU0NTY3NjQ1ODMxMjQzMzI2NzQ0NTU0NjY0NDM2MjM1
-NTM0NDQ1NjY1NTc1NTY4OTc1MjQ3NDY2NzU2NzU1MDMzMzY4NDMyNDM2ODg0NDU0
-NDc4NzI2NzYzNDQzNDU3MzU3NTI0NTIyMjM2ODU3ODY0NTE4OTc5NzY0Njg5OTo4
-NzY3Nzc3Nzk1NTY3OzhAODg1NTQ1NDQ2ODc3NTc0NTU2NTc3Ojg5ODY4Ojg5OzY3
-NTMyMzMxNDUyNjc0ODY2NTY3Njg4Nzg4NjU2OTY3NTc3ODs7ODc1Ojk5Ozs6Nzg4
-NzY4OTo4NjY3NTY7MzY4NTQ5PDk6NjU2Nzg1Njg3Ojc2Ojo7Ojg4ODc3OTc5ODY3
-ODc3Ojw6OTY2NjY4Ozk5NzU0Mzc0NTg2NzY3NjY1NDY4PTo6OTw4ODc2Nzc4Ozw4
-NjU2NTY3OTY2NzY3ODw5NzY5ODg4NzU1Nzc1NTU3NTc4ODc2NTc4OTs4OTs8Ozs5
-ODo6Nzg9Ojo7Ojs4OTc6Nzg1Nzg4OD07Pz47OTg9PTs9Ozs6OTw8Ozw5PDs7OTs9
-OTs4OTs4OD08OT07Oz86PTw+PT48Ozo7PDw7Ojw7PTk8QUQ/OTc7PT03Nzs/OTo+
-Pz4+QD0+PUA9Pj5CQD4+PURCRVKewcvT2d7h5Obm6OpGRElDPEBFSUFCOkFDQUBE
-PEFCPEFCOzY5Ojs7ODY0MzM3OTo5ODY5Ojc3Njg5ODU4NTQ2ND8zMjMyMzU2NDQ1
-NDIxNjc2ODQ0NTc2Nzg5NTUxNTE1Njk0NDM0NTY3NjM0MjIzNDM2NTQ1NjUzNzQz
-NTY1NDM2NDY1NDI1NTU1MjQ1NTYyMzUxNDc4NTg2NTIyMzQ1NjY1NTU0NTc1ODY0
-NDM1NzQ1NTM1NDUzMzM0NDYyNDMxMzU0MTEyMTQzMzIzNTQ1NTc2NTY2NDIyPjM3
-NDk3MzU8PTEzNTU3NzU3NDQ5OTg3NTc2MzU2MzczMjMxMDU0NDk6NjU1MzU0Njw1
-MzQ3NDIzNTc2ODgzNTQ1NTY3Nzo1MzI2MzM1NzUyMjI0NTY1ODc4NTQ0NDY1MjIy
-MjMzNTM2NDgzMTQyMzExMzQ4NzY1NTg4Nzg0NDY0NTUyMjQ0NDI0NDM0NTUzNDQy
-MzM0Njc0MzQ1NTQ3NTEyNDY2NTc0MjEyMzMxMzMzMzU2Nzc1NzUzNDExMTM0MzM1
-NjQ0MzU1MTM0MzM1NzY2NDQ3MjY1MjczNjU1MzUzMzg5NTY3NjU0ODU2NTU3NjU1
-NUA5NDIxMjU1NjQ0NDUzMjQ0NDQ0NTMyNTQzNTQ0MzMyMzU4NDM3MjI3NjU2MzM0
-Ojc2NTE1Nzc3NjU3NjQ0NTc1NTU0NDY0NDg2NTQzMzI4NjY2MzIyNDQ1NDE2NTQ0
-MzQzNDU0NTY2NjUyMi8xMzE0Njk4MzIyNDc2NjY3NTMyNDY3Nzc2Njc3Nzc0NDY0
-MjY0NjU3NTU2NjU1MzQ0NDU3Njc2NDI0NjY3NTU2ODk1MjU6ODI0NTc2MzIyMjU1
-NDU0Nzo1NjM0NTU2NTMzNDQ2NjY0MzQ2Nzk3Nzc3NDY0NDQ1NDo1MzY0NTQ1MzQ0
-Njk1NDMyMzU1NzY6MjI0MzQzNjU2NTc2NjUyNzU0NDQ0MzMyMjI3NzM0NTU4NTMz
-NjU1MzQ2MzU1MjQzMjAwMzQ0NTk3NTY2NzY1NDQ3NjMzMzQ0NTU2NTM2NzM2NDU2
-NTQyNDY4NjUyMTU2NzY2NTk6NTc2OTU3ODs5ODk2Nzc4NTc5OkE2NTMzNjY4NTQ4
-ODY2Njg1MjI3NDg2OTk3ODk4NTY3Nzc0NTc4MzQ3NDM3ODY3NDQ0NjU5Ojc3NTQ0
-MzQ0PzY3OTc4ODY2Nzc4ODg5ODw4NzY2OTg6OTk8ODU3Nzo5NzY5OjU5Ojc2Nzc7
-OTo3NTg6Ojk2Njk6OTc1Nzg2NzczMzQ0Njg4ODk4OTk2Njc4Njg5NTY3OTU2Njc1
-NjY4Nzo3NDM2Nzk0OjY5OTg2NDg4OTk4Ozg3NTc3Nzc2Njc2ODc3ODY2ODg4Ozw5
-ODg3ODU3NDY3NTU2NT08OTg3ODg4OTc4Ojo6Ozw6OTc6OTo6OT06ODg4Nzc4OTo9
-PDw5ODw6Ojs6PD85ODs8OzY3Ojo7Ozw8Ozw7Ojo5Oz09Oz08Ozs9Ojo5Ozw9PjhA
-Pjs7Pj4+PTs/Pjs+PT07PD08QDo+QD05Ozs+Ojs9PTo+QT4+PztCQUBEUqLBy9TZ
-3uHk5ejp6URDRUQ/QT1CREI8QTw8QUJBPT09Ozw4ODw9OTo5NzQ0Njw5OTw7OjY3
-NTY3PDk4OzU0NzM0MzYyMDIxNjc2MzM0NjU5NTU1NzMzNDQ0ODg3Nzg2NjM3NzUx
-MzM0NjMwNDY0NTQzNDM1MjMzMjU0NDY5NzYzMjU1NTU0MjQ3NzU2NjY1NDczMDQ0
-MjU0NDU1MzQ1NDMyNTk0NzU6NjY2NzMyMTI2NTU1MzMyMzY1NTMyLzExMzIyMzAz
-MDM0MjA0NTc2NDMzMzQ3ODU2NTY0MzM0MzQ0OTo0MzQzNTQ2Nzc3Ojo5Ozg5NTMy
-NDg4ODg3NDw1NDY5Nzc2NTY0NDY2OTQ1MzU3NDQ1Nzc1Njk0NDU0Mzc3NzQ2MTMz
-MjY4NDIxMjUxNzgzMzY1NTY1OTY1NTYzMzc2MzI4NzQzNDUzMzQ0NDY2MjQ1NDU1
-NTQ1NzY3NzU0MTAwLzM1NjMwMjMyNDM2NjY2NzYzNDE0NDIxMzY3Njc2NTY1ODMx
-NDQ1MjI0MzU0NDY0Mzg3ODY2NjQ2MTY0NDU3NzY0MzIxMzU0NTo7NTQ2NTIzNDQ0
-NDQzMTY5NTIzMzI0MzM2MzY1Njc1NDM0ODs4NTQ1NzY0MjE0NDUzNDE1NDE1MjIx
-MTQ0MzQ1NzI1NTUyMzIyMzUzMjM1MjMzNTQzMzU1NDQ0NDo5NjQyNTU1NTMyNDUy
-MjY2NzU2NjU3NTUxNTQ0MzE1MzUyMjQxMDY0NjQ2NDIyRjoyMTQ3ODY0NjU0NDU1
-Nzs0Njo2NTY3NTUzMzU1Njc3ODc2NDY2NDM1NTY1NTY3NTk7ODIzMjU2Nzg5ODg7
-ODc3MzQ3OTs1NDczNDY3Njk1Mzg2NjY2NTU1NzU2NTIzMzU3NTY1Njk2NjY3NjI0
-NTUzNjQ0NjQyNzg2OTQxNDQ0NDIyNDM2NTk2MzM0MzI0MjQ1NDU4ODU1NTY5NTQ3
-ODYzMzQ2NTU0NTQ1MjI0NTc1Njc2NzQxMjUyMjIyNTQ0MjEzNDYzNDM2NzU1NjQ0
-MzY1NTU1NDc1NjY0MzM0Mzg1MTI1NjQwMTU1NDc3NTc4NzU2NDU1OTc3NjU1NTY3
-NzY3NTQ2Njc3Nzc2Nzc7OjM1Mzg3NTg7ODg2NjU4MzY3NzQyMzo8PjY1NTc2Njg3
-NjQ0Nzs2OTg4Mzc2Njc2NzY3Nzg3NzY0NzY6Nzs6OTc3PDk3NjU4ODo3NTc1OTo3
-NjQ5ODk6Pzc7ODc5ODk6OTc4Nzc3OTg4Ojw4ODY1ODc5ODo8Njg3Nzg1NDw5Nzg2
-NTY2Njg6OTc2NzU1ODg4OTY5NTQ2MzY3Njc0Njc2NTc1NjgxMzc1NzQ0OTo3OTg+
-ODc3NDY2NjQ0NTU3NjU2Nzk3ODk3OTg2Njk3Njc7NzQ1NDc5PDs5ODc3Njc0ODU4
-PTo4Nzc3Nzk5OTo7OTo2NzU2ODk7ODs9ODk6OT08Ozg5Ojs7Ozw+QD4+Pjs8Ozo7
-Ojo5Pjw6OTs8Ojk9Pzk6NTk7PDs9QDw7PD1CPj88Ojw+QD8/Ojs8Oz48Ozw9OTc2
-Njo6OT09P0BCPTw/Q0JCQkxUm8HL09ne4ePl5+npSUFFRkBCQ0ZAQkFBOTc9QTw7
-Ojg4Ozo4ODw9Ozg3ODU5PDs3OTY4NTQ3OTo4Njc3Nzk1ODg1MzQ1NDI0MjM1Mjg2
-Ojc0MjU2Njg2MzM0Nzc8ODg1NjkzMzQzMTI4OTo1NjY2Njg3NzQ3NDI1MjU3NDU0
-OTg4NjU6OTY5NzM0OTg1NDM2MzMzNjYzMS8yMTQzMjQ2NTY2NzQyNDM0NTQ2NTY1
-NDU0MTM0MzMxNDczMzIzMzI0MjIwMTMwMTIzMjUyLi4yNzYzNDQ0NDQ2Nzk4ODUz
-Mjk0NTU0MzMzMTU0MzY3Njc2NDQ3NTU2NjU3NzU2Njk5Njg1NDUzNDQ2NDU5NjQ3
-Njk6OjU2MzQ1NjQyNjY2NzYzMjM1NTU3Njg0MzMyMjQxNjYzMzY2NDIyMDY2NDQy
-MzczNTY3NTY1NTU1NTMzMjQxMTEzMzQ0NDM4NDM0NDM0MzMzNDMyMjYyMjEzODMx
-MTY3NDY2Njc0NTY5NTMzNzMxNDU2NTM2NTUxMjIxMzM1MjEzNDgxNDQ1Ozw7NzY3
-NTUyNDczMTI0MzQzNDQzNjIyMjE0NzU1NTUzNjQ1NTYzMzAwMTU2NjU3OTUzMTIy
-NjM1NDMzMzMyMTM0NDU0NjsyNjIxNTU2NDg1NjI3NDEyMzUzMjAxMDEyNDM1NDQz
-MDM0MTQ2ODYzMzQ1NTMyMTI2NjVNUTQ3Nzg3NT02NDMxMzMyMTQ0NDIzNDU0MjIy
-NDc0MjQ2NjY/NDIzMjM1NDU1MzIxMzU2ODs1MjQzNjU0NTQ3NjU1NTQzNTU2Njc0
-NTM1NDg2MzY1NTU5OTg2NTY3NTg3NjY3Nzk2NTM1Njk7ODc2NzQ0NzY0NTU1ODM1
-NDM2NzY2ODU3NTU0NzY1NTU0ODg2ODg0NTYyNDk4NTMzNzU1NTU0MzUzMzI0Ojc2
-NTAyNzc0NDM0NDY1NjU0MzM0ODg3NzU3NzQ0NzY0NTEyMjQyMzMzNTc3NTc0MzQ0
-MzQvMDI0MzQ1Mzc1MzEzMzIyMzQ2NjY1NjQyMzMzMzU2NjQxLzAyLzMyMzIxMzMx
-NDY3NTI0NTY3ODQ0NDU3Njc1NDU2Njc2NTUzNTc2NDU6Nzg3MzU4Njk0NTk6NjY3
-Nzg2OD03NTg6NjQ0NTc5ODg2Nzc5OTo5OjY4ODg1NjQ4ODc1NzU1NTQ1Njc2Nzc2
-ODc3Mzc2NzY5Ojk3OjY1Njc2NDc2NDc5OTc2ODk4Ojc4OTk6Ojg5ODY5OTw4ODg8
-PDk1NTk2NTc4ODg2Nzg3NTc4Ojc5OTY2NzU4PDo4ODY1NDQ1Nzo5ODg3Ojk4NzU4
-MzY0MzY2MzY0Nzk3NzY1NDc3Njk5ODg3OjY3NTc3NzY2NTY0NjU1ODc4ODs4Njc6
-Njc5Ojs7Nzg3Nzg4PDo4OTk6NTY5Nzc5Ojg6OTs8Nzc3ODk3NjU4Nzc4Ojg8Pjo4
-ODk6OTw5ODs6OTg7PTw7PDg4Ojk5PTs8Oz09Ozc5OTg3OTs8PTs7Oj47QDs9QT08
-PDg6PDs7ODg6Oj08Pj0+PDo+PD09PDw9Ozs8PD4+Pj1APD4/P0E/SE+YwMvT2t3g
-4+bm6Oo+Pj1BP0NHQT9BQj07Ojo6PTw7PkE5OTk7ODc6Nzg6Ojc0NTk3Ozs4OD8+
-NTc8OTg0NDQ1Njk+OTk1NzI1NDQ3Ojo6ODc6OTU0ODkyMzQ2Njs7Mzg3NTczNjMz
-MjU2ODU0NTQ0MzMzNDY0NDU5NDIzNTU4Njg4OTIzNDQ0NDUyNjQzMjIxNTUxMzQ0
-NDI0Mzg3MjI0MzU0NjQ0NjUzNTQ0MC80NDExNjQ2NTQ2ODQxMzIyMTM1MzEwMjM1
-NDExMTEyMzIxMjY0MjU1NDY0Ozk1NDUxMzQyMzU1MTQ2NzU2NDQ5OTc1NjU1NzU0
-MzYzMjM0NjUyMjQzMzMvMjI0NTQ2NjY1MjY3NTQ0NTc3Nzg0NTQyNDI0NTQzNjc1
-NTQzNDQyMjQzMjI0NTQ0NDQyMzIyMzQyMTE1MzIzMzMyNjI0NTU1Mzk4NTEyMTM1
-NDU3NTUxNDUzNTQzNDU0NjMxMTc4ODczODQyMzU3NjYzNTQ1NDQyMzI0NTU1NDM0
-Nzg2MjI0MzIxNDIyNTIyNDI5NjYxMzY+NTQ0NDIyNTQyMDI0MzEzMjQxNzMzMTQw
-NTc2MzI0MjU1NzI0NTc3NDUzNDQzMy4wMDU2NzQ0MzIxNDU3ODU1NjQ1MzQ1MzY2
-NDg1NTU0NzQ1MzM0NjMyMTU0NTQ0NTU1MzIxNjg1ODMzMzMzNDQzMTAzMjY3MjM3
-NDQ3Njc3MzI0MzE1NTMyNTU1MzY2MjQxMzIyMTAzMjMxMjQ0NTQ0MjIzNDU4NzQ2
-NzQ1Njg1MDE0NjQ2MzQ1NTU1MzY1Njg5Njc6OTY5ODU4NzU3NjY3ODk2OTg3NDU2
-ODU1MzU2Ojc2OTg0NDQ1NDY2NDU5NjMzNTU1NjM0NDU0Mjc3ODUzMzQ2MzIzNzQy
-NDU4MjM2ODg3Njk2ODU5ODU2ODc3NzU3ODU0NDUzNjY1NDA0MzI0NjY0MzY0NDM0
-NjY1MzMyNjQ1MjQyMjQyMzY3NTMzMzU0MzY0NjY1NDI0NjYzNDQ0NTQ1NTQzNDY2
-MjExMTIxNDU0Njc4NDE0NjMwLzMxMjExNDY0NDU9ODQ3MzM1NDM3NzU2NDQ5ODg0
-Njg6Njc4NzU0MjQ7Ozk6OTU4OTc2Njc3NTc2PTYzNDw6NTc5OTU2Nzg0NTY4Njc4
-ODc5NTk0NDMzNTszNzc2NDQzNDY2Ojo4ODc0ODs2Njg5ODg1NjM0NDg5OjY4MjU5
-NzY1Nzk3OTc3Ojw7Ozs+OTc2ODo4ODk6OTU2ODg5NzY2ODk3ODM1NjY1NTM2NzQ0
-NjY5Njc0Njg1NjY2NTg3Njc2ODg4Njc4NDM1NjU4Ozk5OjU0NTc5ODo4NTQ3OzY4
-NzU2Nzk6NzY6ODc2NzU1Ojg4Nzc5OTc5Ozw6Ojg1Nzg4ODg3OTs8OTk2NTc3Nzc5
-Ozs7OTw5NjY3ODs6OTY4ODo6ODo8Oz49OTk4PTo5ODg2OTs8PTw7ODg5ODw7PTw9
-Pjs5Ojk9QDw6Oz09PD45Oz47Ozw8PTw/Pzw6ODo+Pz0+QD4+PDs9PTo7QT06QDw6
-Pj07P0A+REE+Oz1CQUpDTJzBzNTZ3uHj5ebo6kJAP0FDQkI+PTw9PDo5ODg6PDg5
-ODw9PTs4PDk4Nzs5ODo2ODw4PDg3Ojg1NTY2MzQ2OTk2NzxDNzY3NTM0NTY6OTY7
-Ojs6OTM4MjIyNDM1MzQ3NTI1Njc1NDYxNjY3NDIxMjA0NDM1OTM0NTQxMjQ0NDIy
-NTUzMjY0MzQzMzMxMjE0MzU2NTM1NDQ2NDQzMzM1MzAyMzs0NjQ0My8zNjU2NzMy
-NTQyMTUyNDQ0NjU0MjQzMzMyMzMzNDYyMjAxNDEyNDI0NTU2Mzc5ODY1NTIxMzg0
-MjU1MjQ0ODg5NTY1MTY6NDMzNDg3NjMyMTQ2NjY6NTU0NDQ3MzQyMzQ0NjU3ODU0
-NTU1NTQ0NTg3Njc5NzQyMDI1NDc5ODMzMTE3Nzc0MzQ1NTMxNTY1NDMxMzQzMzQ6
-MzQ1MT00NDQ3NjUyODY6OTc1Njg1NjQ2NDI2NTI0MjU1MzM1NTQzMzM1Mzc0NTIy
-MzU1MjM1Nzk5Nzk3NjMyNDU1NDU2NDMzMjQzMzI1NzU0NjQzNzE0NTQ3NDQ2MzU1
-NDUzNTUzMzUzMjQzMjEzNTM2MzM0MzUzNTU1NjQ1NTUxNDI2NDU0MTIxMjIyMzE0
-MjMzMDIyNjc0NDc2Njg4ODIzNTQ3MzIyNTY3NzU0MzM2NTIxMzg1NTc3OTk2NTUz
-NTg3ODYzMjU0MzY3MzIwNDc0MzM0MTIzNTIxMzEyNTQ0MTMzMTEyNDQyLzIyNDcz
-MDIyMTIzMTI2NTcyNDMyMjMyMzQ5NzY3NTM0MDQ0MzM0NTUyNTU0NDY3MzY1NTY0
-Nzg3ODY0NTM1NDMyNTg4NDc4ODc2ODg2ODc5NTQ2ODk6NzY2NjQ3NTU5OTU3NjQ0
-NTk3NjY1NDM3MzM2NDQ2NTM1Njg4NjU1ODU0NDc6NzQ0NzQ1NzU1NDI2NzQ3NzQ0
-NDQ1MzY2Njc1NzUyNDAwMzA1NzM0NDMyNDQzMjY2MzU1NTczMjk2NTU2MzQ4NjY1
-NzgyMzU0MzU3NDQ2MjIzNTM0Mjo3MzQzMjI2NjUzMjM0Njg1NTMzNDM0OTQ1NzI0
-NDc4QWBFNDc2MzMyNDU2ODYzNTg1NjU2ODM0ODc4Njc2MzY3ODc0Njk3Njc3NjY1
-ODc7Ojg3ODU4ODk4NjQzNDU4ODk3NjU3ODQ7ODg3OTY4NjU3NDM2ODk1MzU2NjY2
-ODU2Njg3MzU4NTY3ODk5ODg7Ozs7Nzk2NTk5NDc3NTY3ODo3ODg5ODg2Ojg3OTc1
-NzY3Nzo4ODc6PDg3ODU4NjY4ODM3OTY4OTc3ODY2ODg0MzY4Njk2NTc1Njo3NjIz
-NTM2Ojc3PTc1Njc1Nzo7OTc5ODg5OTY1NjY5ODg7ODg4Nzg2NjU4NjU5NDU2NDg8
-Oj06PDk5Nzg6Nzc5OTg6Ojs4Nzc2ODk6Ojw3ODc2OTo4ODk3OTg6PT48Pjk5PDk8
-Ojg9ODU0OTk3Nz47Ozo6ODg5ODY4Ozw8PTo4Njk8Pz48Ozw9OUBAPDs6Oz09QD5A
-Pzs9PT09PDw9Ozs+QDo3Pjo8Pz44OTs6PD47Oj1CQUFHQEFFZlpcpsHM1Nnf4+Tm
-5unoRkxFRkFBRUFCQTw5OT8+Ozw9PDo1Ojw4OTg4Ojg7Ozg1NjY5Ozk3Njc3NTg3
-ODg5NTc3NDU3OUM5OTg0MDM0Mzc2Ojg4NjQ2NjM2NDY5ODM3NDMwMzMzNzY3ODU0
-NzY0MTQ2ODMzMjQzMTU1NDUyMjU1NjU3NjU5NzYyNjQ1NTMxMjQzMzU0MzMwMDA1
-NjQ0NDI3OTI0MzM0MzI1NzQ1NjQ1MzIzNzQ6NDYzMjExNDQ1MjIwMTM1NTIzMTIz
-NDMyMjEyMzMvMjQ3NTY1NTU0NDIyNzQzNTQ1NTk8Njo2NTg4Njo4Njk2NDMyMjQ1
-Mzc4NzM1NTQ1ODY0ODI1MjIzNjQ0MzQ0MzIyNTU4NTY2NTg1ODQ2NjUzNTM0NDU1
-MzM2NTEzMjE0NTY2NDIzNDYzMzM1OTY4MzU2NzM0MzQ1NTY0ODk2NDc2MzQ0NDU0
-NjU1NTQxNjM1NTg1MzI2NDI1NTM1NDI1NTg5NTg4OTc0NDEwMzY4NjY4NTIzMzMz
-NTQzMTU1MzM2NTc0NTY4NjY3NjMyMjc1NjU1NzY1Mzg3NTY2MjIyNDQzNDIyNDc0
-NzIvMjQ0NDMyNjIxNDU2NjQ0MTE0MjQxMzEzMzQ2ODU1MzU0NDU1NDY1NTQ2NDUx
-MTIyMjI1NzY1NDUyMjU0NTc1NTg3NTM0NzU0MjM0NTM1NTgzMjg3NTg4MzQzNTU0
-NTIyMjY0NDU0MzM0NDAzMDEvLzAzNDQ0MzYzNDc3NDAvLzAvMjMyMzQ3NDY2MzI0
-NTY0NDQwNDY3NTYzMjIyNDY1NjQ0NjY1NjYzNDU1Nzc4NzY0Nzc0MzQ2NTQ1NDQ1
-ODc3OTY2Njc0NjUyNDc3MzY1NDQ3NTc0NTc4Ojk4NjgzNTQ0MzY0NTU1NjM1OTg1
-MzU2NjQ0MjU1NzQzNTQ5NjQ0NjY2OTY5NzQ0MzQ1NTQ1NzQ0MzI0NzU0NzQ0NDMy
-NjI1NDMvMjMyMjU2NjQ1OTU2NDQ0MjM1NjYzNDU1NjU1NDMzNDM1MzY0MzY0NTQ0
-MzU2NTM2NDQzMTU3NjY3NzQzMzU1NzU2NjY1ODU1NTM1NjUyNDM1NTU0Njg1NjQ2
-Njc2Nzg2Ojs5NjU4Ojk4NzYzNDY2NTo3NTY4ODc9OTg2ODg6NjQ0Nzg2Nzk4Nzo3
-OTc2ODY4NzY2NTc0NTY0ODYzNTY1NTs4Ojg4OTk6NzU4OTg3NTg3Njc4Njc3Nzg4
-Ojg3Nzg4ODc5OTo4Njg4Nzk6PTs5OTo3MjQ4ODg4ODg4PDc5ODc4Ojg2Nzg3Nzg4
-NjU3ODY4OTc1NDQ2Nzc5Ojs3NzY1NTo5Ozc2Nzc3NTU4Njk3ODY3Ozk2NjQzODo5
-Ojc5Nzc5Ojk5ODg3ODY5Njg3ODk8Njg2ODU5OTg5OTc6PDs2Njg5Ozs7Ojg4PDs3
-ODs4Ojs8Ojg6PT07Ojs6Ojs5Ojs5OTc6PDg2Ojk8QDo6PTo7PUA6ODk4NTk9Pzk5
-O0BAOj5APjw/QD9BPDo8ODk7PDw5OTs+QDs/Pzw4ODw+PD48Ozg8Pjs7PUA8Ojo5
-OzY8QEJERENBPkRcU1OiwczU2t7i5Obn6elCRUZERUJEQj5CRkU/QkJAPT46Ojc4
-ODs6ODw3OTk4PjgzODo3Nzo7Ozs+OzY2Ojg0Mjg2NDU1NTo4NjQ2MTM1NjU5MzU0
-OTg4NDU3Njc4NjU1ODo4NzY2ODU2NzUyNTY4NTQ1NjY0NjQ0MzU3MTQ1NTQxMzo3
-NTI0NTQ0NTI2ODc1MzUzNDUxMTIxMS8zNzM0NjMyMjU1MzY1NTUzMjM1MzQ1NDQz
-NTY0NjIyMTQ1NjU2NjU0NjQ1MzA0NDIwNTY1MzU0MzQzMzU1NTQ3NjQzNjE1Nzc4
-NTc4NzYzNTM2Njo3ODU3NDUyMzY1NzU2Mzg3NzY3NTM1NDU3NDU4NjY0NTQ2MzIz
-Mzc2Njg2NTc2ODo2OTQ0MjU2NjU0NDI7OzY2ODY1Mi8zNDc3Ojg0NjY3NTQzMzU5
-NDYzMjEwNEY1NjMzMzMzMzMxMDUzNTczNjg2MzExNDU4NTQ1MzYxMTIyMzA0NzU0
-NTQ1NTU2NTQ0NDQ/NTU3NTU1NzU0NDc0NDIyNTI0NDU1Njg4ODQ1Njc2NTEwNzQu
-MzIyMzQ1OTM2NTU2MjIyNTQyMTM6NTUzNjY1NDQ0NTQ1NTQzNjcyMzQ3NTQzNTU2
-NzMyMDI1MzMzNTY5NTU1NDMzMTQyMjM0MzUzNTQ0MjMzNDMyNDQxMjU2NTU4NzMy
-MjQyMzA0NjM1NjU4NjQ2ODg3NTY3NTM2NTUyMjU3NjYzMjQ2MDIzMjQwNTQxMjc1
-MjM1NjY1NDAxMjMzMzMzNDQzMjQwMzQvNDc3NjQzMzc1NzMzMDI2MzU2NTg3Njc3
-NjYzMjc2NDQ3Nzo1NDQ4OTY3NTM1Njg0Nzc6Ojg2NDM0NjQyNDU0NDY1NTU1MzU2
-ODc2ODg2Njc1MjQ0NTYxMjQzNzU1NDU0NDU3NDk2NzY2MzAyNDQ2ODM1ODM2Njg2
-NTIzMjIyODc1NTc3NTM2MzIzNTc1NTQ1NDg0NDg1NTAxNzY2MzQ2NDQ1NjQ2NTQ0
-MjM1NTQ0MjMzNDQ1MjIzMDQ3NDQzNDU4NTIzNzg0NDM1NDU1MTIzNTI1MzY0NDQ2
-NDQ2ODQ1NzQ5NjczNDAxMzMzNDY2OTY5Njc0NDM0OT44OTc0Nzc7NjYzNDY4NTU4
-ODc8ODU6Ozw3NTU5Oz05Njc5ODg3ODU3NTM1Njc3NTY2NzY4OTg2ODc1NDQ2Nzc2
-NDc3PTo4Ozg3NzU4Njk4Ojg3Nzc4OTo2NTc4PTo1NTg3Nzg5OTo4Nzg6Njo5Njg3
-ODU1NTg3NzU2Ojo3NDU7OTg1Nzg2Mjc4OTc4OTk5NjU2OTU3OTk5Nzc4NjY5ODk3
-NjQ2NjU5ODg5NzgxMzY3NTQ0ODk5ODg5Nzc3Nzc4ODY3NjY2MzU3OTg2NDY1NTQ4
-ODY4Njk4PT06Nzg4NzY3Oj48PDs4Ojs6Ojo5Pz86PEE9PTw6ODc7OTs6ODg7QD89
-Ojs5Ozw6Pzo4Oj09P0A+Pz08PTs9PUA7PD4/PDo8Ozw9O0E5Njc6PTw9Ozg+QDxA
-QT5BPTw8OztAQD45PTw9PUA9Oz45OkE6ODw/Q0dKRUJFQkJETJzBzNTa3uLk5ufo
-6kFDQ0FCRUFARkBCRUI9QEM+PDs8PDs5PT08OTc5PTg3OTg8Ozs8ODk9OTg6Nzw6
-OTY5OTY4OTY4NjU2PDQxMzEyMjQzMzI3NTg2Njg3NjUzMzQ4NzMxNDY3NDQ2MzE1
-MzU4NTQ5NDQyMjQ1Mzc4NDU3NDYxNTQzNjUyMjMzNjczNzU2NTMwLzIyNjMxMjAx
-MTM5OTMyNDQ5NDI1MzM0Mjc3NTU0LzM0MzMxMjIyMzAzNDMzMTIyMTE0NDM3Njc1
-MzM3MzU3MjQzNjQ1NjQ2NTQyNDk0NTEyODc2NjM0Nzg6Njg3NjY4NTg5NDczMzM0
-Njk2NjI4NzU1Nzc2NjU0NDU0ODc1NTM3NTY0NDU2Nzc4NTY0NzQ1NTMwNTU1Mzo3
-NjU2OTM4ODU1NjU1NzY1NTg3Nzc1MzIzNDUxMTExNzU1MzQzMjQ1NDU1NDYzNjY0
-OTQ1NTU1MzEyMzc1MzQyMTIzMzQ2MTAyNTIxMzM1MTI1NTIzNTU2Njc0MjQ1NTU0
-MjQ3NTc2NTc2NzQ1NzQyMjI1MzM1Mzk0MTU1MzEzNTM0NzM0NTM0NDQ2NTYyMzQ1
-NDM0MzYzMzU0MzU0MjMzNDI0NjY2NDg3MzY0OTQzMzM0MzI0NDU0NDUxNDMyNTU1
-ODc3Nzc2NzU1NDUxMzMyMzU2NTQ2NTM1NTU2NTU0NTIzNTU3NjY2OjU3NjczNTcz
-MzA1MzQ1NTk3MzQzMzU2OzczMzQ2NTIzNDU0MTE0NDY0NjIzNTU2NDQzNDUyMzUz
-MzUxNDAvODQ1NjUzNjUzNzg1MTQ4NDI0NDQ3NjQ2NzY3Njg3NjY1NjQ0ODU1MjQ2
-Nzk3OTk0NzkzMzQzMjIyNjY0NTY2ODg7NTY1Njc3MzcyNDU1MzY2NzM1NjY3Nzg2
-Nzk2NTU1NDQzMTI1ODg1MzIzNDM1NDU1NDQyMjMzMTAzNDY0MjE0MjUyNDQ2NTU1
-NDU4Ojc1MzY6ODY2NTQ1NzY2NTU3ODU0NDg3ODY2ODQzNTY2Mzc4NjczNDg5NzQy
-NzU1NjMyMzU0NTIyMDEyNTIzMzMyNDY3Njc4ODQ3OTY1NDI2NzYzMzY1NjUzNDY1
-NzY1MjU7Njo4Nzo3NjY1MzY5NzU1NTU8OTc2Njg4NDY0NjY3ODc3Nzs5Ojo6OTQ0
-MTE1Njg4NTY3OD01NDMzMzQ3Nzk1MjQ1Nzk4MzQ3Nzk4Ozg3Njg2Njc4OTM5OTo6
-ODg4Ojs3Njk5OTg3ODg5NjM2ODg2NTg0NTY3NTo4NTk2NTY4NDc1NTY2MzMzODg5
-PDk5NjY3NDk4Ojg3ODk4NDU2OTs3Nzk2NzU6Nzc2Nzg7Ozg3ODg0Njc1NjY5Ozw5
-Nzc2Ojc1NTQ3Nzc5NjY6OTg3Nzk2Nzk1NDc2NjY3OTc3ODk4Njk7Ojk3Ozg4Nzc4
-ODo8Ojo7Ozg8Ojo9Pzk3Ojo6ODY4QD09PT86Ojo4PUE4Ojs8QD09QTw8Pz09PkE7
-PDg9Oj09PDo8Pjs6PDs8PTw8QkA9PkA7PkJCQD04Ojw+Pjo5Ozk6QDw7Pj1MQT1B
-PTw9QUU9RUZDQUNPm8HL1Nnf4eTl5+joP0JFRkRBPDpBPjw8QUE9Pj49PT47Ozw8
-Oj05Ojo3Ozc2Nzs6PT0/Ozc7Oj06OztCQzs4ODY1NTg5OTs2MzIuMzU0NjQzMzIz
-NzIxMzY1NTQzNDU1NzUxNTk3NTU3NzQzNTU3NzQ2MjQ1MzM1NTY3NjQ3OjQ0NTEy
-NDMzMzQ2NDQ1NDM1MzIzNDMzNzQ0MzExMzU1NTI0NjUzMzI1NDU1NDUyMjQzMzI0
-MzI0MzMyMDExLzA0MjAxNDM5ODk1ODUxMzMxMjI0MjY0NTIyNjM0MTY3NzQ1NDQ3
-NDQ0MTE4Ojg3NzY5Njg3ODM1NTI1MzU1NTM0NDM2MzU0NDU0NTc2NjQ1NjM0NTY3
-NDQ1OTQzNzg3NjY1Njk0NTc0NDQ3ODQ3NTU0NTMyNzc0NjQzNjg2MjAzNjc2NTM1
-MzIxNDQ3Nz02MjI0NzU3Nzw2MzM0MzMwNDU3OTMxMzMzNjMzMjI0NDQ1NTMxNDQ2
-NDAxNzU1MjEyMzUzNDMzNjU0NDY1NjQ1NDc3NzY1MjIzNTY1NDY0ODY2NDMxMTQ1
-MjMzMjI2NTQvMTIzMzIxMzM1MjUzMTc1MzI0Nzc1MzM0MzQzMTMyNTQ3Szk3NjQ3
-NjUyMjc3Mi8uLzIyMzIzNTczNTQ0NDU1NzY1NTU1NTQzNDUxMzAyNDQ0NzYzMTIy
-MzU2NzY0NDc0MTMzNzkzMzI2MzU0MjM0NTQ0NDMzNDc2MzA0MDE1ODMyNTQ2OTc0
-MjM1MTM0MzQ1NDY3NjQ2MzY4NTUyMzE1NDIzNjMxNTM0NDM0NDMyNDI2MzU1NjMx
-MzU1ODY1NjY3NTc5NzY5NTM2NDE1NDI1NjY2NTg1NDYyMjU3NjEyNDIzNDc2Njo3
-NzMyMjU0MTcyMTU3NTU3MzU3NTQzMzM1MjQzMzQyMzU1NzQ0NTQ2NDM0NDM0NTMy
-NjYxMDIwNTc1NzM0NDM0OTk2NjQ1NTUzNjc2ODY1NTY3Nzc2NTU2NDM0NjM1ODg4
-ODM2NjU2NTUyMzY2NTg0MzU0NzU1NjY0NTY3Njc0NTI0MzMzNDU1NjQzNDMxMjQ1
-Njc2NDM5NjUxNDc4Ojg1Mzg1MzMyNDo6ODg0OTc2Nzg6Ojg2NDY0Nzo1NDQ0Nzw7
-Nzg5NzY5OTQ1ODQ0NDI0Njc2NTQ1NDI1NDc5Ozk4NzY0NTExMzY1NzU2NDUzNTM1
-OTM1NzY1RUA4NjU3NTo5OjY1NDw2OTY4Ojc5ODU1NTY4NTY3NzU0NzU5NTY3MzQ2
-NTQ4NT88NzY4OjY2ODY2PDQ1NTY2NTU4NzQ5OTk7OTc6NzY4Oz0/ODg2NTQ1ODg8
-Pjw8Ojc2NzY0NjQ3OTw+NTM3ODY4ODc1Ozk4Ojc4Ojg5Ozc1NTc5Ozk6Ojw5PDU3
-Nzc4OTo4Njg4ODc4OTg4NTU2ODY4OTs5Ozo5OTc1Nzs8OTc5PEA8PDs6PDw5Nzs8
-OTs6Ozk5Ojw7PT1APDg5Oj05OTs6Pzw+QDxCOzw5Ozs7Ojs7PDo6Oj4/Ojc5Pj8/
-PT4/PTs+QEM9Ojg3OTw9PUA+PUE8QEE/Rk8+Pjw9QEFBP0ydwMvU2t7i5OXn6OhB
-Q0JHREA8Oz0+P0A+QURBQkBAPjw7PTw5OTc6Ozk7Oz86Ojo6Ozs6Nzg/Ojc5ODk8
-PDk6NjU3NTU7Pzc0MzQ0ODc3NTY3NDQ2NzQ1NDU1NTM0MTMzNTUzNTY2Nzc0NDM7
-MzY3NDMzNTU0MjM1NTEyNzY1OTQ1MjIxNDMzMzM3NzU6NDEyNTQ1NjQ0Nzc2NzY2
-NDQ0Njg2MTc2Nzc0ODU4NTY0MzU1MjQzNzYyNDM1MjE1ODI1NzM1Njc5NjY0NDUz
-NTY0NDQzNTQ2MjQyNDQ0NDU2NTg2NDY5NzU3NjY5NDMzMjY1NTYzNDg2NjY1NTQz
-NDI2MzMzNDY2NTc2ODY1NDIzNT06NTc0NDc4NjU0NDEwNTQ0MjIxMzQ0Njg4NjY1
-Njg0Nzk0NjYzNjU1MzI0Pzc0NDc1NDYwMzQ4Nzs1ODUzNjU0NTU4NTk3NDY1MjQ2
-NjU3MzU0MzMyNjEyMzEzNjQ2OjY0MzM0NDU0MjY0MzQzMzQyNjMzMjEyMzQ1NTc2
-Njg1NDYzMjI2MjI0NDY7Nzc0NDIyMDI1MTMyNDMyMDM1NTIyMjMwMjY1NjY3NzMx
-OTk6NzU0NDM2MzY0MjIzM0pjVDk0NDEzNDU4ODY1MjMzMzExMzExNDMzNTQ0MzY1
-NTQ0NzQ1NjMzNzIzNTQ0Njc2NDc3MzMxNzYzNDIxMzIzMzU2Nzc8OjQyNDY1MzU3
-MjEzODY4NTU1NDQ1NDIzNzcyMzU1ODk3MjIwNDc2OTo2NDU2NTY0NDIyMTc0MzIz
-MjIxMjEyNDU2NTUyNDU0NTg1ODc0NzY0PTg2NjMxNTQ0NTY3Njc3NjQ1NjM2NjY1
-NjQzNjY0Njk5OTs4NzU0NTc2NTU1Nzc2NTQ3OTQ3NTU2NDEzNzY0MTM2NTk2NDM1
-NTQ2MzAxMjM0MjQ0NTc1NTQ1NzY0Nzc1NTUyMzQ1NDI0MzE1MjMyNDUyNTU1Njg3
-NjYyMTY4NTc1NjY1NTI0NDY3NjQ1OTc1ODMyMzQ3OTIyMjEzNDI0NDMzMzQ3NjU0
-MjA2NjUxMTQyLzAyNTc1NzU1QTU0NDU3NzU0NDQzNDU0NTg4ODYzNjQ1NjY3Nzo4
-NTc1NzMzNDY0NTQ2NDQ0NTY1NDU1Ojs7NjQ2NzU3OTk0NjY2NTk8NjY3MzY5OTc4
-Njc2NzQ1Njg3NzQ0NTc4NjY0Njg4NjY1MjY4Nz07OjY3ODY5OTg5OTk7NjY6NzQ2
-OjY2ODU2Njg7ODU1NTg4ODY8OjY2NzU0NTU5ODk3Nzg5ODc2Nzk3NzY4NzY4ODc2
-NTk4NzY1Nj5CP0E9PTk2NTg5ODk6Njk5Ojk5Nzk5Njk5Nzk3NjQ1NzU2ODk6NzY7
-PDY1Mzc3Nzg5PDo6OTg6Ozs4OTs3Njk5OTc4Ozg6Nzc2ODg4Njs0NjcyNjY6PDo3
-Ozw4OTc7Ojs8Nzo7Pz8+Ozk5Nzg6PTg5NzY2OTg6OD09Pj89QDs9OTg4OTg7Ojs7
-PTs7PT07Oz08Ojs6OTg7PDs7ODw8PDs5Ojs7Pjs8Ozw7Oz05OTo6PDk7PDw8PDw+
-VVxJQj08QUNFTpm/y9Ta3uHj5ufp6D09RERFQjo7PT9CREE9QEVBPD88QD04Oz07
-Oz09Oj09Ozo6Pjs4Nzc5ODc5Ojk7PDk5OTs6ODY1MjI1NjQ0MzY3NDU0NDY5Njo4
-Njc2NTU3NzQ1MzQ1NDk4Njc1NDY2NjU0OTQ0NDUyMjQ0MTQ1ODQ0NTg3ODY1NTY0
-MjMzNTU2NTY0MjQzNDUzMzQ2NjQzNTQ1NTY2MjM1NzY2NjQ2NzIyNzY0NDIzMjQ3
-NzUzMzEzMTEzMjM0NTUzNDQ2NjQ0Njc2ODQ0NTQ0NTQzMjEzNDQ0NDUyNzY3NjYz
-NzU4ODc5NzU1NTY1MTU1NTc1NDM1NzY4NzU1NDU1Njg3NjY1NjUzNDU0PDc3NDY1
-ODg4NTQyMjE0NDMuLjM0MzQ3NjU2Njc3NjM3NjU3OTY5OTczNDM0NDY6NjQ2OTc0
-MzQ1ODY1Njg0MzE0NjY4NDUyMzQ1Nzc1NTYzMjQ1OTMzMjIzNjY0NTU4NTY0NjQz
-MTE0MjMyMzUzNDExMzIzMzQ4NDUyNTQyNTQzOzkwNDQ1NTY2NjY1NDM0MzIvMjIy
-MTIyMzMwMTM0NDIvMTM0NDIzNDIyMzQ0NjM1NjMzMjMyNjczNTUxMTozMTY3NzQ1
-NDU1Nzc2NDc4NTIwMjExMjAxMzEyNTY4NTMzMzUzMjIyMTI0NTc1NTU2NDI2NjU2
-NDQ4MzMzNjY0MzQ2MzMzMjUyNDEyMTM4NDU0NTQyMjIzMzAzMjIzMzMzMjQ1MDMz
-MjU2Nzg2NDI0MzUzNTMxMjIzMzI0NTEyMjMzNDQ0ODg4OTk0ODU2NTQ2MzQ0Mzc+
-NTg2ODQ2NjQ0MTMzNjQ0MzM0Njc2NTIzMzE2NjQ1NTU5NzU4NzY0MzM1NDM2NzYz
-MjI2NDY2NjIxMDI0NjY0MzY4ODY1NDY2MzQzMzIzMjMxNDU2NzU0NjczMjM1NDY0
-NjM2NTQ1NjQ0NDAxMjMzMjQ0NjYzODY4NTI7OTk4Njc3ODUzNTU1MzQ2Nzc0NTY0
-MjQ1NDQ1NDQ1NTQzMzIzNDQ1NDMyMzIzMjY1MzUzMzc0NTQyMzI1NjIzODMyNDIz
-Nzc4NzY0NjM1NDMyNTc7Nzk3NTY2Njg3Nzk2NTQ0OEI7ODY6NTU0Mzc4NjY3OTo2
-Njc4NjY3Pjo4NjQ2ODk3Njc3Nzg3ODo4NTU1NDU1NTk2NTI1OTc3NjU2Njg2ODY3
-NTQ1Nzg2Nzc4ODY6Ozk8OTg4Ozo6Ojc0NTc4NjQ0NTc1NTU2Nzc1ODk7OTo6NzYz
-Njk5Ozo5Njg5NzU1Nzk3Nzs3MzU4NzY4NjY1MzYzNTEzMzE1Njg1Nzc3ODc4OTc2
-PDk5PDg2Nzg3NzY2OTk5Njc8PDk6OTk2NTQ4NzY1ODo4OTk3Nzk6ODs6ODc5NzY2
-NTMyMjQ0Nzo3Nzc3Njc3Nzk2Nzc2OTk6Ojk4OTk5OTs6NzY4Ozs8Ozs7Ojs8PDk3
-Nzg5OTk5PTw8Uz85PDw6ODo+QTs7Pj09PDg7Ozs9OTg3PkE+Pzs9PkBBPjo7Ozw6
-Nzo+Pjw5Ojo6OTw+PDs4Oj46Oj0+Q0lNc2tYQkA8PkVMl8HL1Nne4eTl5+joQEZD
-Q0A+RENAQkVEQz4/QT47QD05PDs5Pjw6OD03PDo5OTw/QTs7Nzc2Nzs9ODg5RzY1
-MzU1Njc2NzY3OTg5ODQzMzU5NDQyMTMzMzQ0NTU0NjQ0NjYzNzQ4NzY4ODg5NjY1
-MTMzNTU1NzgxMjY2NTMwNTM1NTQ2MjE0NDQ1MjU0NjUzMjQ1NTUxNTIzMDEyMzM2
-NDQ1NDc3NDIzNDMyNDc1NTQ1NjQ0Nzc5Ojc1My8xNDQ1NDU3NDQ2NjQzNTc1NjU1
-MjM3NTQzNTI1NDU0NDM2NDg2Nzk0NTY3NjUzNTs2ODs5ODc0NDIxMjY2NTc3NjYx
-MjQ3NjMzNDY3NTU2ODU1NTg0NDc2NjY0Njg0NTY1NDIwLispMTUzNjUzNDM0RjQ0
-NDQ0NzQ3NDQ3MjQ3Nzc2NjU1OTgzNzcxMjM0NTU1NTU3OTY2NjY4MjEzMzQ1MjQy
-[~970 lines of base64-encoded binary file data removed by this diff; contents omitted]
-Ojc2ODo4OTc0Nzg2NjY3Ozk5ODY3ODc1ODo5Ozs8Ozo5Ozs4OUM8PDc7PDo7PD49
-PD07Oz06PT85Oj08OkA/PTo7Pzs4OT4/Pj49Ojo8PTpBRUBCREVGRnSuw8zV29/j
-5Ofo6elERENCQkNDQz0/RENFQ0Q/Pzs+OztARERGQTs7Nzk5Njk5PD8+Pzk2Nzs2
-ODg7ODc3Nz0/ODc4PTY2NTQ2NTc1Nzc5Nz05ODY2NjQ0NTk5NzY6Njc6NzU2Njg3
-NjYyNTY1Njc1NDU2NTYzNDU2Nzg3NTU4NzY3MzY2NTc4MDIyMzc2NjhROjQ1NTIz
-MjI0MzA0MzEuMDQ1NjMyNDM1NTM1MzIzMzUzMjQwMTQzNDU3MjI0PTU1NTc1NDQ2
-MzQxMjEwMjY1NTMyMi4yMjIzNDIxMjE2NjY2NTMzNjM2MjM0Njc3Ojg1NTQzNjM0
-NDU2NzU0NDM4NDU1Njk3NDQ0MjU1OjgzNjU2NTQ0MzQ0NDU0NDAzMzQ0NTc5NDI8
-PDM3NDYzNzc3Njg4ODc1NTc1NDc3NTY2Njc2MzQ2NDM2MzYzNDcxNDU2MzMzNTYx
-NTUyNDUyMTI0NTY0NzQ0NDQ2NjQ0Njc4NjQ4MzEyNDU1NDY1NTc1NTI0OTQzMzM0
-NjUzNjMyMjY4NTM1NjY1NzQ5NDMyNDM2NTAxMzQ0NjU2NTUyNTMwMDQ0MzY1NDM2
-MjQyNTY0LzIzNjg2NjU1ODg0NDMyNTQ1NTY1NTg4NTU1NjI1NzY1NDU3MzIyNTU0
-NDU1NjM0NjQ0NzM1MzM1ODI4NzU1NTU1NTQ3NzU2MjM0Mzg1MzM0NTUzMjQ1NTY0
-NjUyNjIzNjY1NDg0MzI0NDc3NjM0MzY4OTk2MjIxMDU3ODc4NDY1NTQ1NDUzMjIz
-MzI1MzI2NDIyNDY2NDY3OzUzNjU0MzM1ODQyMjQ2MzQ3NDQ4Nzc1NTQ0NTYzNDEv
-MjI1NDQ3MzQ0NTc1MzMyNDIzNDQyNS8uNTc7NzY1NjczNDY2Nzg4NjU0MzUyNjY2
-MjMzNzc4NzU3NDQ0NDM0OTc0MjUuNDc0NjYzNDQ1MzMyMzM0MzIxNzk0Njc5NzQ0
-MzQ0MzI1NTY4MjI0NjY1MzMyLzM5NDM0NzU2Nzc4ODc3NDY2NzYyNDIyMjM5ODc1
-NzQ2NjI0MTExMjMyNDQxMjQzMjUzNjUzNDQzMzM2NjQ0NDI2ODg1NTU1NTQ0NDU0
-MzQ0MjU2MzI1NjQzNTEzNTMwNDU1NjY2NDY7NDU2NTQ1NjM2NTQzMzQ2Njc1PD85
-Nzg3NTQ1Njc1NDc0NzQ2NzY5ODY1Ojg5NDM3ODQ2NjU0NTM4Nj43NTY3NTQ0NTg3
-NTU3NTc2ODk5ODg6NTY6Nzk2NTk5NzU0Nzo5OTY3OTc3OTg4OT09ODU2NTU0Njg5
-NzY4OTg4Njg4Nzc2Nzk8ODY4Njo6Ozc3Nzg6ODs5ODo7PDo7ODc5NjU1NDQ5OTU4
-Nzk3Nzk3NTg3OTc2NDU1OTk2ODo6OTc4Ojo5NzU3Ojo4Njc3Nzc4Njc5Nzk6Ojc4
-ODw6ODg5Ojs7Ozw8Pjs6Nzc1Nzo5Oz84OTk2Nzc4OTk6Nzc2ODo5Oj46OTg3Nzo7
-OTc5OUA4Oz05Ojk7ODs9Ojs9Ozc6Pj47PDw7ODw8PDo6Ozg8PUFCPT49PDg4QD49
-PDo5Ojw7REM7QEBER0REV6XDzdXa4OHl5ujp6UJCQEVESUZCOj5DRkRDQ0FFQTtB
-PUJDPjw+Ozc6Ojw5Nzc7Ojo4Nzg6ODk7Ojk5NDQ0NTU0NDg3ODk5NDU2NDc3NjY6
-OTo7OTQ5OTU4NTc8OTc2NTY4MzY1NTIzMzk0NTc5Oj43NTU0NDQzNjY4NDEyNjUz
-NzM4Njc2MzM2NTMzNDY0Njg1MjAzMDI0NDIxMjExMjQxMDQxMzY3MzQ1NjQ1MzMy
-MjIzNDc3OjY3NDQzNDI0MzM0NTQyNTQ1NzU1Njc3MzU0MzIwMjQxMTIyNTI0Mi4x
-NDUyMTQzNzQ0MjQ2Nzk1NTU1MzYzNTMxMjM2NjQ0MzUyMzU2Nzc2ODI2MzU3ODg0
-NTYzNjM2NDUzMzU0NTY1MzQ4Njc1MTE+NjU1ODYyNTY0Njo3ODc3OTg3NzY5NTU0
-Njc0NTU2MzQ2OTUzMjQzMTEzNDQzMzQ2MzQyMjQ0MjI0MTMzNDE1NjY1NDU2NjQ0
-NDQyNTMyMzMzNDQ2NjU4NDM4NTIyMzQzMzQuMjQ1Njg4NTM1NTY1NDgyMjQyNDM0
-NDU0MzU1NzY0MzIzMzQ1NzQyMTMxMzI1NTQ2NTQ1NTc3Ojk1NDM2NjQzNTEzNjc2
-NTUzNDMyNDMzNjYzMjEyNDQ0MzMyMjM0MzEzNjIzNDM3NTMzNDY2NDI0NTU1NDc3
-ODs5NjYyMjE3NTQ0NDY1NzcyMzMxNDI0Ly8xNDIwNTQ0NDU1NTY0MjQ3NTQ3NzM1
-NTU1NTEwMTAwMjU1NTQ7NjQzNDY1NDMyMzY0LzQxNDQ0NjU2NzY2NjU0NDQzMjQ2
-NjQ0NTQyMTY0MzY2NTk2NTYzNjU3NTg3ODQzMjAzMjE1Mjc3NjMxMTMyNjU1Njgy
-Mzc5NzU2NjMyNjU4NDc4Njc3NTUyMjU1MzM2Njc6Njo2NDY2MjQ0NTU0NDk2NDU5
-NDI0NTQzMzM1NDU1NDMyNDQ2Njc1MzM2NzU0NjY3NTY1NjQ2NjY1NjM2NTk1MTM1
-ODQzMzU2NDQ2NDU0NDQyMzMzNDQ2NzQ3NjYyMzM0Nzc0NTY2ODk0MzU0MjM0MjE1
-NDQ1MTI2MjM1NDY3NjY1NjQ1NzY0NDMyNTU6ODc2NDM2NTY7ODQ0NDM0NTc1NDUy
-MjM1NTU0NDY2NTU0NTU3NDQwNTk2ODc3Nzc2MjY0NTc3NTU0NjY0MzM2NjU3ODk5
-NjQ2NDg3NTQ0NjU4ODU2NjY4NTU1NzY2NzY1NTk1ODg3Nzo1NTY3NDM3OTc1Mzc4
-Nzk3ODk5OTc3ODczNTk6ODc2NzQ2ODo8Ojc1NTk6ODc2Nzo3Njg4NjY3OTg4Njg4
-OTk7OTk5OTk9Ozk4Nzg4Nzg/PDo3OTY8Oz02NjY2Ozk4Ojk3NjY3NzY1ODk6Ojc3
-NzU1Njc4OTc3ODk3ODc5Nzg4NTc4NzU8Ojg6NjY4ODc3ODo8Ozc3NjQzNTY6OTg5
-ODc3Ojw9Ojc6Ojc4ODo5Ozw9OTk7Ozs8Ojg7PEE8QTo6OTo6Ojk5OT4/Qz08Pjs6
-OTtCPj09Pzk7Ozk4PDw/Pjw6Nj0+QT09Ojc7Ozw+P0NFT1dJRENWp8PN1dvf4uXl
-5+rqQUFCRUNERkdIP0NGQTw8Pz1CQUI9QUE8OT49PTo3Nzg6OTo6OTc1PD08OTs5
-OTg2MzU2NDQ5Njg6Ojo/OjY3Nzg1Nzc3ODg2NzczNTY6NTc0MzMwMjE0NDIyNDc1
-NDI1NDU1NjY0NTg0MjQ2NjIyMTIzMTQ1NzM4MjY2NTM0MDMzNjY1OTk0NzQ2MzIw
-Nzc2NTUxNDQwMDMzNDUyMjU4NzU0MzUzNjM0NzY1MzY0NTMzMDEzMDAwNjIvMjUz
-NTU2NzQxMjMxMzIwMDI2NTY3NDQzNjQyNDMyMDM1NDY2NTQyNjMyNDI0MzQwNDIy
-MzQ0NDo3Njc1MjMyMTAzNDY1MzU0MjQ0MzUzNTI1MzQ0NTg5NjU0MDY2OTY1NjlB
-NTU2OTgyNzM0OTYzNzY1NzQzNTQ3NjUzODU2NjI0NTIyNDIzNDQzMjI0Ojg0OTU2
-MzY0MjEzMjcyMC81NTY0NTc1MTQ0NDU3NzQ0NTMzNTg2NjY2MzQ0MjMyMzMyNDU2
-NjQ1MjQyNTc3NjQ2Njc2NDQzNTUyNDg0My8vMzIyMjU2MzQyNjUzNDY1MjM0MzUz
-Mzc4NTU2NDQ1NjU0NzU2ODg2MzUzMzUzNDIzNjQ0NDM2NjQ0MzY0NDE0MzQyMTIy
-NzMzNDM1NDU1NDMzNjQ2MTQ1NDU1MzQ0Qj8zNjU1MjEyNzYyMzU2NTUyNDc2NDEy
-MjM0NTIzNDMzNjk0MjU0NDU0Mzc1NjYzMzg2NDgxLy4xMzU1NjY0MzU3Nzk5ODg3
-NDUyMTMzNDQ1NTs3OTMyNTc0NTIxNDU1NTg0NDEyMzMzNTY0NTY1MzY3NzU2NDQ3
-ODY4Nzk2NzU3ODgzMzQ1NjUzMTQ2NDk3NTY0MzM3NDQzMTY4ODs2Nzg0Mzg1OTk2
-NjUxMjQ3NTY3NDMzNTQ6NzUyNDgzMTQyNTg0Nzg3NDMzNjQzMjExMjM1NjQ0NDQ0
-MzQ2NTU0NDMzNDc2OTczNTM2MjAzMzM1NDY0NDQ2NzQ2NTU+ODY3NzUzNTUyMzEx
-NjUyNDYzNjY0OjQ0NTQ1NDE1NzMxNDU2NTM2NjU0NjY7NzQzNTY1NTQ1NTMyNTM0
-MjEyMzQ2MjM0NjM0ODMzMTQ2Ozc0ODc3NzUxNDY1OTc0MzI0NTM1MTY1Njc4NjQ1
-NTQ0NzQ3NTc3NTMzNDQ1NDIyNTU3NDg3ODYzNTc3NzQ0NDM2NDQ1NjY2NTc2Nzg3
-ODc5Nzg4Njg8PTk6OTU4NzU1Nzg4Nzc5OTs7Ozk4NzU2NTY3ODk7Ojo3ODc5Njk4
-Njc5Ojc5Ojo9PDo6ODw7OTo5Ojo6Ojw4Ojc3Ojk4OTk5OUM/NjQ1NTc4OTs5Njg3
-NjYzNTU3ODg1Njc1Nzk6Ojg3ODo7Nzc8OzY5Ozg4NzY2Mzg1Nzg6PDs4OTo7OTo4
-NTk5OTg6NzY4NzQ5OTk4Ozk4OTk5ODU1ODg9Ojg3OTs4ODc4NTk7PTs3Ojs6Pjo9
-PDc5OTs8PTs+PDs9QERBQEBDQj49P0A/PD04OTs6Oj06OjtBQj06PUFDPD84OT08
-OkA/Pjs7PUFISkRCR1mrxM3V29/i5ebm6elCRUhGR0ZHR0dEQkJERT5CRD5CQD07
-Ojw9O0BCPzg4Oz04OTs4Ojw6ODg2NDY2OTk0Njo1ODU2NDY3Ojc5PTo3PDc3Nzc3
-NzQ6NjczNjk3Njc2NDQ3Njc2MjMzMTU1NDQ1NDQ0NDc6NTg6NjU1NTQzMjQ1NjE3
-NTQ0NTQ4NzIyMDQzMjIzNzY1MzQ2NTQ0OTUzMTEzNDQ2MjQ0NTU2NTc1NzMyMjU0
-NjUzMTE3NjUyMDAvLzIyMjAxMTAyMzc0NTg2NjQxMDE0NzYyNDc1MzMzNTQ1Nzg0
-NDQ0MTQ1NDc2MDA2NjIxMjM0NDUzNDY0MzU2Njg5NzUyMzQ0NDU0NDU3NDQ0NTU3
-NjU1MzQzMzc1NzU0NTU1NTk5ODY3Nj06NDM2ODYzMjQ0NTQ0NTU0NTY1NDU1NTQ0
-MzMzNDI1MzIxNzM0PDc2NDQ0NTU2ODU4OzYzMjM0NDMxMTIzMTM0NTQyNDM1MzU0
-MzM1NDU1Nzk3NzM1NDc1NDMzMzU3Nzc4OTQzNDM0MzQ1NTU1NDU0NTY2NDU2Nzg4
-NDU2NjQzMzQ0NDYzNzQ2OjcyMzQ0MzIzMjQ2NDU0NDQ0MzQ2NTQ1ODY2MzQ1NDEz
-NDI1MzM0NTc2Njc0MzQ0MjQ1ODQ3MzIzNDQzNjM0NDY2NDI1MzUwNDYzMzQyNDU0
-NDQ0NzUzMjMzMzM1PTY1NTY0NTY1MzA2OTY3NzMzMjMzNTY3MzQ0MzU0NTY1NDY0
-MzU6NTU2NDQyNTAwMy8wMzQ1Nzc2NTU1NjMvMDI1NTY1NTU0NTM2OTEyNDQ0NTQ2
-OjkzNDMzNTU0NDQ0NDM1Nzg0NjQ0MzQ3NzY5ODk4NDUyNTQ0NDM2NkA2NTU0NDU0
-NTIzMjM0NDQ3NTQ2NTU0NDY3NzU2NDc3NDEyMzM2NzY3MjQ1MTI2ODY3ODc0MjI1
-MzQ3NTU0NTY2Nzc0NDMyNDg0NDQ0NTI3NjQ0NDY1NTc3NzQ1MzY3NjM3MzI0NDU2
-NjQ0NTQ0MzYzNDY4Nzg7ODMxMjQxNjczMzU0NjMwNDc2NDY1Mz1CNzQzNDY2Njg3
-NTMzNDk4NjQ1MjU2NTY0NzczNjU3NzY2NDUwMzU1Mzk3NTI1NjQyNDcyMzEyNDMz
-MzEzNDY2ODg2NTY1OTQ0NTM3NTQ0MzQ2NTM3NjY3Njc3NTo1NTU4NTM2Njc0NDQ1
-NzU0MzU2ODMzNTg3Nzg4PDo4Njc4Nzc5ODg3Ozk4NjY4ODQ3NjM1NzU5QzY0NTc3
-Ojs1NTc3NzU2Nzo7Pjk4OTk3NTc2ODo3ODs7OzY7PTo3OkA5ODg8Ozk6PD08OTY5
-ODY3Nzk6Nzo7PTw5NzgxNTk6Ojo7OTc1OTc1Njc3Njg6Ojo6ODY4OTo6Ojc5Ojk1
-MzQ4Nzo6Ojg+OTk3Ojk4Nzk2NzY7ODg5OTg6Ojg6OzU1Oz87Ozk8PEA7ODY8QEA+
-Ojs6ODc4Nzo7Nzc5OTg7OTw5Ojw8Ojs8Nz89Pz89PTo9QjY5Pz0+Oj1AQTw9Oz5B
-Oj4+OTk6Ozw7Ozw7PEBCQUNCQTs9PT5APURDQTw8QUNITkFCXKjDzdTb3+Ll5ujp
-6kNHRUVDRkNFRURGQkNBPjxAQD4+PT5AOTxAPTw8Ojg5Ozs6OTk3Nzg1ODk1NTw5
-PD05NzQ6NzU2NDY2Nj46OTQ3NzM4Nzg4PD05Njw6OTc5Ojk1NDM0NDIxMTAzNDY1
-NDU1NDA1NTc1NjQ3NDQ2MzMvMjM4Nzk3MzYzODY4NDMzMzMzNDQ0MzI0NTY3Nzcz
-ODM4Mi8zMTM2MzQ0NDYzNTg4Nzg2MzQzMzQ0NzU0NzQxMjI1NDMyMTAxNjQ1MTI0
-NTEyMTEzNDU1Njc5Nzc3NTU0NTEzMzMzMzYzMTU1NjMxNTU3NzU2NDQ0NDU3NjU1
-MzMzMzM2OTcxMzQzNDM1Njc0NDY1MzMyNTQ2ODUzNTY2MzAyNjU1NjU2NjU1NzM1
-NTc1NDQ1NTU2NzUyMjM1NDU1NTM0NTY2NDI2NTY2NTc3Nzc1Nzg2NDIzNTI0NzY2
-NDU3NTIwMzUzNDI0MTM0NTMzNDYyNDUyMzU2NTQ4NTc4MzM3NTc2NTEzNDY2NjU2
-MjQ1NTUxMjMyNDY3NzY0NjMzNDY1NzQ0NDc2NDI0NTIyNTUzMzU0NDU0MjIzNTEw
-MTM0NDU1MjMyNDI0NTg0NTQ0Nzc3NzM0NzQyMzY2NjU0NTc4MzM4Ojs2NjM3Nzc1
-NTQ0MzU0NjY2NTM1NjY1MzIzNTY1MjQ2NTIzNjY2MjIxNjMyODo1NjQ0MzQ2NDU2
-ODg1NjQwMTUyMjIyNDY2NTY2NjQ0NDg4NDc4NDMwMjYzMjYzNDMxMzg2MzYyNTA1
-NjQzMzc1MjM0MzM0NDMyNDQ1NDI2ODY6NjQ3MzMzMzQzMjIwNDc3NjMzNTUzMjM1
-NDM0NDI1NTQyMjIwMzMyNDMzNDM2NDU2Njc1OTM2NjMzNjU0MzQzMzQyMzM0NzQ0
-NjU6Njg0NjU5MzQ0NjM0NTU1MzQ2NDU2Nzg2NjQ4NTY1NjUxNDIyNTQ0NTg2ODQ0
-MzQ1NzU3Nzg3NDY2NTQxMjQzNzEzNDQ2NDQ3NTQ3ODg3NTg3ODg4OTM0MjQyNjc1
-NDQ2NTYzMzM1NTMzNjg2NDQ2NDQ0NjY2MzQ0NjQ1Nzc5NzYzNTcxNTQ3NjUzODQz
-NTM4ODU0MzQzNDQ1NDc5NTQ0MDI0NTg2NTQ0MzU1NDM0MjM0MzU0Mzc5PDg2NTU1
-OTQzNTQ0ODo4NjY2NTI0NjU0NjY1NDY3NjQ0MjU2NjY2Njc1OzY5Nzc4ODY3Nzc3
-NTc3NjU3NTM2NzY2OTc2ODc2OTg2NDQ1NjY5Nzc3NzY1ODU2Njk5ODk6Nzk5ODg5
-Ozc5Ozs6ODo7P0E6ODQ3OTk5ODk4Pjw4Nzk4Nzk3Njk5ODg+Pjk1ODo7OTo4Ozw8
-OTY5ODY2Nzk5OTg4NTU5ODc4NDU1MzU6OTUzNjo5Oj85OTk3Nzk4OTw3Njg4Nzk4
-Ojg5ODc4Nzk7Pjo6Ojo5Ojk4O0A8Ozo6OEA5ODg2Nzo7PT08ODc5Ojo9PTc3PDo5
-Ojw/Pzk9OTk5PkA8PTw7PT0+QD49Pzw8Oz03Ozs+PDw9Pj9AQT8+P0A/QEBCQjo5
-Oz0+P0JBQ0VLUlFZpMPM1drf4eTn5+nqQ0VFR0BCR0JGRkJCPkBFRUBCQD8+QTs8
-OTo/OTo4NTY0Ojo4Ojo6Nzs3Ojk5Njc7Pjo5Njc4ODc1Njo4ODw4NDg6NjM0NzY4
-Ozg6OTc0Ozk1NDY3Nzc0Njk2MzM2MzY3NjQxMTM2NjY1NTM0NTU0MzYzNDUzNzc0
-NjU0NDU2NzgzNDU0MzUxMzc2NjY1ODYzMzQ0MTE0MC8zNTg2NjYzNTQ1NzQ1NzU2
-MTU1MzQzNjY5NjU3Njg1NzU2MTM0MzEzMjQyMzU1ODY3NTc1Nzc5NzYzMzQyMjQ0
-NDMyNDQ0MTIzMzY4OTczNDQ2NjM0NDIyNjY4NjUyNzYzMjYzMjU2Njc1ODo4Njc3
-MzQ2NzgzMzY0NDc1NjY2NzU1NDg2MzIyNDU0OTM4NTY5NjYzNDQ3MzY0NTM0NDQ3
-MjU0MjQ0MTU2NDMxMTM1NTU0MjE2MjQzNzQyMTE0MzUzMTUyMjQyMy8xNjQ0NTQ0
-NDE3NDQ1NTc2NDUzNzg2NTM1MzUzNDUzMjQ1NDc1NjI0MzU2NjY0NTg0ODQzNDU1
-NDQ0NDAyMzM0MTQzMjExNDI0NjIxMzM0MzQ3NjQ1NTUzNTU0MjQ0NTc3NzMxMzY2
-NzU1NzczMzg1NjUyMzA1NTQ0NTM1NjU1Ojc2NTU1NDY1NjQ2NTIxMzE2NDU1LzEy
-NTg4NTUzNDQ2NDQzNzo2NjQxNDU0NTY3NzU0MzQyLTM0MzQyMzM3NzU1MTQ0NTk2
-NTY3MzQ0NTg1NTY0NjY0NTQ0NDU1MzY2ODgzNTI0NzQ0MTU0NDEyNTIyNTc0NDUz
-Njc2NDIvMjIzODUzNzY1NTQ0OTc3NDUxMjQ0MzMzMzIzNDQyNS8yNTMvMjM9MjMz
-Ozs4NDYzODQzNjMzNjMzNDM3NDE0NDI0NDYzMzIxNzc2NDM1NTQ0MTY4NjY2NTYz
-NDU3NzQ1NTQyNTU0NTM0Njg6ODY1MjU0NDUzNDQ6NzY1NTM0ODg3Nzc2NjMwMjYy
-MDI1ODg5NzY4NTc2Njg4ODYyMzc5NzU0NTQ0MDM0NDQ1NjMxMzY1NzU0NTQ1NjY3
-NTU0Njc1NzU3MTAzNDozMzQ1NTU1Nzc4NTU1NTQ1MjIzMTM8Njc2NTMzMDQ1NDY5
-ODQ2NDU0NjQyMzQyMzM2NTc9PDU2NTU1NDIzMzU2OTc2NzU0NTk5NTU2NTU3NDI1
-NDIzMzQ3MzQ1ODc0NzY4ODg2Nzc1NjU2NDQ4OTU2Nzc3OTo1NTQzNjY3OTY2NTYz
-NTg0NjU2NDM1Nzg7OTo8PDc0NDY2NTg2OTc3PDk4OTg4Pz04OTY2Njo6OTk3OTk6
-Njc1NjY3Ojk5OTc3Ojk5Njo8ODY4Ozo7Njc3Njc3NjY3NjQ3Nzc5Ojg2ODc4OjY0
-NDo5Nzg2Ojo5Ozg4ODg5OTg5OTk4ODo7ODg6ODY3O0BAPzs7Ojo5ODk6PDo5Ojg3
-Njg3NDM0ODo8Ojo2Ojs6OTs7OTk8Pjw7PTw8ODY4OTs7Ozs8QEJBPz0+P0E+Pz5A
-Pj0/QT9EQzxAPD4+Pj5CQUNDOz0/QD07Ozk6QENEPkZMVGKmwcvU2d7i5efn6ulG
-QkNCREJDQENERT48Qj9APDg8Ojg7PDk8Ojw7Pzc3NjQ5OTg1MzM2PTo2OjQzNjo6
-Ozc3NjY1NDQ6OTo2NjY2Njg1NjY1Njs3NjY0NDY3OTc2NzU3Njk3Njc0NjMzNzU0
-OTk4NDYyMTc1NTI1NDU2NTQ2OTUyNjY0ODc6ODY3NjY3NzU0NDcyMjQ5ODo4OTc4
-NDEyMTExMzQzNDc5NzUxMzM0MjY3NzYzNjQ0NDcwMzk1NTY2Nzk3Njc0MjMzNDMz
-NDM0Nzc1NTQ1ODU2NTQ2NTYzMDE0NDUyNjIzNDY0NTE0MzM1NTQzMjQ3NTQ1NDc4
-Njg3NjMxNDYzMzY3NjUzMjY6NzY2MzY4OTg5OTg2MjQ3NjY0PDY2NTc1NDIwMjIz
-MjU3NzY0MTM0NDc2NDU2NDc1MzM1NTQ0MzIyMjMyNTY1NDQyMzo8NzUxMzQzMTIz
-NjQ0MjQ2NDQ0MjE0NzMyMzMyNDY3MjI1NjU1NTIzNjY2MjI2NzY3Njc0MjYzMzQ2
-NjEzOjMyNDUzMzE2NzQ0NTQzMzIyMzQ1NzY1Nzc0NTQ3NDM0MzIyNjUyNDY2MzI1
-MzYzNjYyNjMzMjIzNTU3NjUyNjU1NTU4NzQ1MzQ2NjU0MzU2MzQyNDIxMjUyNDY3
-ODU0NTQ0NTQ3NjMzNjI1NjU2MzUyMTQ2NTg4OTg1NDUyMzUzNTg2NTQ0NzYzNDQ3
-NjIxMzI3MjQ1NTM0NDY0NDMzMzQ1Ojc2MjI0NzY5NDY2MjAyMzU0MzEyNTQxMzYz
-NzYzNDQ0MzMxMTE3NDQzMjM0NjQyMzQ2NDIzMzE0NDMzNDY1MzI1MzIyNTY2MjM0
-Njg1MzU0MzI0NTg5NTQ0MzQzNjY2MjU4Ozk2NDM2NjU1MzU2NTQ1NzU1NDQvMDIz
-ODgxNTczMTU1ODQ2NDgzNjo2NjU0NDY0NDM2Njg4NjQxNDU1NTI3NzU2NDQ1NjU0
-NDY1MzI1OTc1MzM1MzQ1NzM0NjMvMTExMzY3NTQ2NDU0NTc1NTU2Njc4NDc1NDc0
-Njk1NTQ1MjM1My8yMjI0NDQ0MjM1Njc7ODU3NTQ1MzY3OjU3Njc5NTIzNTQ1NjU2
-NjU0MjE1NjY3NTg3NTUzNDU0MzU1NjU0NjU3NDQ1MzU1MzY1NDQ0NTQ0NzQ0NTQx
-NDEyNDQ1NDU3NjQ3NTQ2NDU3ODc4NzQ7ODU2NzU2NzY3ODc3NDY2NTY0NjY0Njg5
-NjY1NTY3NzY1OTc5NjU0Nzc4OTk3Njg7NTQ1NTU3NzU2NjY2Ojs9Nzk4Nzg7OjQ4
-Ojk4Oz08Ojo4RDo7ODk5OTk3Oj07ODo5Njk5Ojk4OTs3Nz07ODg5Nzc3Ojg2OTk4
-Njg3OTg2NTM2NTk4Ojo5Ojk2Nzc2ODY8OTg2NzczNTU0MzM1OjY3ODs3NTY4Ozo6
-ODc6Ozw5ODk4ODs5PDw6Ojo5NzU2OTg7ODk3MzM1Njg5ODg6Ozg3ODc5Ozs/Pj5A
-Oj09QT4+QEVBQ0RDQEBBQT05OTs6PD88QkFBQEQ/Q0NDQUBBPDtBQz5AP0JCPzo9
-Qz0+O0FBQT9KbqrAy9PZ3+Hk5ufp6kJIREM/Q0ZLQD5BOz47Ojw+Oj4+RD47OTtA
-OjxBPzs5Ojc4NzYzOj47OTs+Ozo4OTk4Njo6PTcyNzI3Ojs3ODQ3NTY3NzY3Nzk1
-NDUyNTc3NDEzMzU0NzU3OTkzMzY0NzUzOzU0NjQ1NjI2OTg4MzQ3NDI4MzU0MjI0
-NjY0Mjc4MjMzMzMzNTM2NDY1NDg4NzYyNzMyMjMyLzM2NDU3NTMyNzIzNDI2NTQ4
-MTU0NTYzNDU1NTY1NTU2MzQxNDU1MzMwNDU2MzM1NDU0NzU0NTQ5NDQ0MzM1NTU0
-NjU1NDU3NjQ2NDMxMzQyNTUyNjUzMzY2NTY3MzEyNTU0NDI1NTMzNDUzNTY0NzY2
-Njg2NTM3NDU1Njg3NTU3NDU3ODM0MjIzMzM0MzQzMjIzNDQ2MjQ3NTg0NDExMjU1
-NTQ0Njg3NTMyMzY1ODY5MjI0NTQxMTExMzU1MTU5NzQyNDUyNzY0OTM0NDQ2NDIy
-NTU1Njw2NDQ1MzU6NjU2NjUzNTg2NTU1NDY4MzIwNDYyNTQ2MzU0MzIyMTIyNDM4
-NDU4NzQ2NDQ2MzY1MzM2MzQ0NDM3MzI0MzM2MjI2ODg3MzEzODUyNDU1NDU1MzM1
-NDQ1NTY0NDM0NjM1NDY1MzAyNDM1MzQ1NTo1NzY3ODU1NjY1NjU0MDM2NjU1NjY2
-NTU3ODg2NTM1MjYzNTYzNTYzMzMxMjQyNTI3ODY0MzM5OTs1MjU1NTUyMDI0NjU3
-NDY3NzQ2NDUxMDI0MjQxMzU1NTc4NzUzNDQzMzM2NTQzMzIyNDM3NDUzMzEyMTE0
-MjM0MzIzMzIzNDMyNTU4OjczMjMzNTU3NjU1NTY4NDI1NzY0MzQ1Ojk1NDc4NDQ2
-Njg4NjM0NTY0NzQ1NTc2NzM0MzU2NDIyODYzNzY2NDo4NjYyNDUzNDU1Njc1NTU2
-NTEyNTU2MjEzNTU4Njg2NTY0NDU0MjEzMjg1MjEyNzY4Njg2NzgyMzQzMzQ3ODY3
-NDI0NTU1NTc1NTQ2NzQ4NzU0NTg2NDY2Njg3NjIvNDMyNTU3NDQ0NDMyMTQ0Nzg4
-NTc3NjE0MjM2NjU2NDY2OjU3NzUzNzU1MzQ3NDY1NDQwMTY0MTI0Nzk3NDg3NjU2
-NDIwMTQ2NjE2Njg0MjI0NDU0NDc3NzM0MzE1MjM3NjQ0Nzk1Nzg4NjYzMzMzNzU7
-NzU1ODQ2NjY0NTo2NjU1ODQ2OTg2ODk2Nzc9Njg0NDg8Njk4NzU2Njg1Nzc0NzU5
-OTk3NDo6Ozc5ODQ1NTk4ODg0NTk4Njg7Pzw7PD07Ojs8OTw6Ojo5Ozo4ODY4OTg7
-PTs+Ozg3Nzg3OTc4NTU4OTo6Ozk2Nzg5ODk6OTc1NzI0Nzs4OD03ODg3Nzk6ODs5
-Nzs4NzU2NzU4ODU3Nzg5Ojk4ODY4Oj05ODg4Ojk5ODo5Ojg2Ojs8PTo4Nzc5Ozo3
-NTMyNTY0OTs5Ojk4NzY2OTk+Pz8/QDw7QD8+Oz9AQj4+QEI6PT08PD07Ojs9OzxE
-RD07PDs+PTo9Pj08Ojo+PUJEPj1BP0BEQ0RCPz8+PEJuqsHJ1Nnd4ePn5+nqSUk+
-QDs9Pj1IRkVFQz9APTxDQUJBPkA+Pj49PTs9PDk3OTY4Ozw9Ozk4Njg2ODQzNDc2
-OTs3NjQ4NTg4MzQ5PTc1Njo2NTQ1Njg1NTY1NzY0NDAzNDU3OTw3Njg5NDIyNDI0
-OTo2NTI4NTQ2MzM6OTY4NzYyMjU3MjQ0NDYzMDI1NjU2NTUzMjQ1ODQ1NTY5NTU1
-NDQzMzIzMDI1MzM2ODYyNzYyMjQzNDYxMzQzNTY0NDY2NTUyMjEzNDMzNTQ2PDQ0
-NTc3OzU2NDQ0NDQ1Nzc2MjU0NTMyMzQ0NTU1MzMyNjUzMTU0NjU2Njc0NDc0MzM1
-NDQ1NDI1MDAuMDE2MjQyMjU1NDc3OTk2MzU0MDQ0MTM0NTY1NDYzNjU0NTU0MzQ3
-OTU0MDY3NDQyMjk0MzUzMzI2NTc1MzMzNzY0NTIzNDQ0NDY0NDI0NDIyMTUzMzUw
-MTM0NDc1MjgzMzMwMTI1NDM1NjM2ODU1NDExMzUyNDozNDYzMTYzMTU1NjQzNDM0
-MzEwNDM0NDY4Njc3NDQxNDAyNTMyNjc0NDM0NDg0NTM1NDMzNjc5OTQ1NDY3MzAu
-MS8zMzU1NTQ0NjU2NjY2NDQ1NjI2MzAzMzM0MTI2NDE0MzY0ODg0LjEyNDEzMzM1
-NDQ0NTc5NDQ0NjQ0MzQ3NTU2NDU1NzU0NTMyNjY4NDI0NDM1ODk2NTU1NDUyMzQy
-Nzc4NjU3NDU3NzQ2MzQ0NTQ0NDYxMzM1NTk1MzUxNTM0MzUyNDY1MzU1NTU2NzY0
-Nzg0NTI0NzM3MzUzNzY2NTMzNDEzNDI1NjU2NTY3NTc1NjY2NzM1NzU0MzMyNDY2
-Nzk3NTM2NzQyNDQ1NDM1Nzg7NzY3MzA0NTUzNDY2NzU1MzQzMzU1MzIyMzQ2NjU1
-NjU2MzozMjM0OjMzNDIzNDI1NjM1MzU1MzYzMzM3Pjk4NjMyMzEyMzQ2OTU0MzUz
-NTQ1NDQ0NDM4NTY2NTQzNDY1ODgzNDQzNjU1OTk3NjMzNDEyNTU2NDIxMjQ2NDU1
-NjMwMTQ1NTQ1PzU0NDQ1NTIzNTI0NTQ0NTU1NDQ0NjY0NDQ2NzUzNDI0MjM1NDUx
-MjEzNTc4MzQxMjMyNDY1NzY2NzUzNTY1NDc5NTIzNDlCOjc2MjU1NDY0NDc1Nzsz
-Mzg8NTU2NTU3Nzs8NTU1NTQ3NTQ3Njg0NDY4Njc5PDc0NTc3NzQwLzM1NjU1ODQx
-Nj02NDIzNTY0NjU5NTQzODc5OTg4Njc4ODU3Nzo6ODw5ODY2ODc4Nzk6NTg6OD09
-PDo8PTs4OTo7Oz09Ojk3OTk3OTg3Ozg7OTk2ODY2ODc1ODc4ODk8OTk3Ojc2NTY3
-NzYzNDk5ODY5PDs5Ozk7NzY2Ozk3Nzc1ODc5OTg3ODc3ODM7Ozo7PDg6Nzc1Nzg4
-NzM2Ojc7Ojo5NDM6Pjw7PTw4ODk2Njc6ODo6O0I9OTs8OTg3Ozw8QD07PTw9PTs+
-PDw8Pj5BPz49Oz89Pzs8OkA9O0FCQTw9PDo6Nz48Ojs7Pzw9Oz07PkBFRkJBOzg7
-QEVBQD5ARImswcrT2t7g5Obn6OlHREFDPT4+QT5CRUREQUE/QD5DQTo+PDs5Pz86
-Ozo6OTY4OTc5Pjw6OTg2Njg2ODg0NTU1PTk4NzQ3NDc2NDc4NjIyNjc4Ojc3MzIz
-NTMxNjY1NDQ2NzQ1NjU1NTg4NTQ1OTg0NTU5OTc0NTQ1MTEyNzc4NjY2MzQ0Mjc3
-NDQ2NjMzNTY4NDIxMTEzNDc2NDY0Nzc3NTM3NjQyMTAxNTQzNjc2NzY8MzY3ODY0
-NDQzMzI0MjQ0OzY0NTUzMTIzNTg4NTY1MzQ1NjIyMzQ1NTU2MzU0MzMzNjU2MjM0
-MjUzNjY2NDUzNjc2NDY0MzQ0NTU1NTQ1NzY0NDQwOzUyNDU0MDE2NDQ3NTQ2MzI1
-NDU1MzI2Nzs5NjU4NzUyMjI1NDQzMTExNDQzNTQ2NTc1NTY0MzMzMjM1MzQ1NjQz
-NDIwMzQ0NDM0MjQzMzM2MzIyLzMyMjEzNjYyNDQ3NDMwMDIyOEA2PDgzNTY0MzIy
-MzUyMzMxNTU0ODYyMDEyMzg3OTg1NTY1MzEzMzQ1Ojg5NzM0NTIyMDI0MzM2OzEx
-MzM0NTQ0NjQ0MzU1NTY2NzQ1NjUzNTg0NDU1NDU1MjMzMjU3NTQ3NTU0MzE0OTI0
-NjQ1NTQ0MzQ2NjE3NDI0MTIyNTM4NTAzNTY2MjQ1NjQzMzIzNjUzMzI0NzM0NDQ2
-NTQzNTU0MzU0MTM2Nzk0NDU0Mzg2NjY0NDM0NTU1NDQ1Njg4NTEzNzU2ODg1NTI0
-MzQwNDYzMzU0NTk0Mzg1MTI3Ozc2NTY2NjQ2NTQ6NzY4NTM0NDQ0NDQyNDI0NDQ2
-Mzc2NTUzNDM0NjQ2NzQyNDU0NTU1NjUzODU0MzY0MDMyMzEzNDIzNDY3NjY4OjQ2
-NzM0NTU0ODU2NzYzOzc2NTU0MTMzNDQ0NDI2NTc0NTQ0MzE0NjM2NDU1NTU0MzM1
-NTU0MjM3ODYzNTM1MzU1NzM1ODgxNTc1NDY1MzQ1NDIxMzc1NDo2MzM3NTQzNDc4
-NDE0NjczNTQ0MzEzNTY2NzU0MjYyNDk0MzUzNjUzNzQyMTg3MjY0NTU0Nzc2NTQz
-MTQyMzQ2OTU5NTIzNTQzNDMzNjU2NDU3NzY0NDY1ODgyMzU1MzU2NTM0MzE1ODU2
-NTQyMzI2NDczNTU1NTUxMzg2NDM0NzY4Ozc1MzQ2Nzc3PTc0NjQ1OTk3NjU4NzU2
-ODc4NjQzNjI2NTQ0NDQ0NjQ2OzY2Ojc0MTU3ODc5NTQzNDU2Njc5Ozk6PDY1ODk2
-MDIzODo5OTY2NjQ5Ojk3Pzk5OTk5OTs9OjY5PDo5Ozs7ODk8PDk4Ojs6NTk6Ojo3
-NjU3OTw4Ojk4NTU3OjY3OzxAOTk0NDg2Njk4ODw5ODg4OTc4Nzc1Njo4PDs6OT05
-Njg4Ozo4ODk3NjY1Nzc4Ojo4ODg1NTc5Nzc3Ojc2Ojo3Nzg7ODo5Ojk6PDk7Ojk5
-PTs/PTo4OTs6OTk5PD89Pjw6PD5BPzw5PDs7PT49QEA5PDo8PjpAQD4+PTs8Ozw/
-Oj0+Pjw8PDw9PT46PkA+QEJCQDo6Pj8/RUU/QkJGj6LBzNPa3uHk5ufp6klMQ0FD
-QEI/PkJIQUJBQkJBQjw8Ozo6Oz06Oj87Ozk7OTc3NDY5Nzc4OTo4OTw9Nzo5Nzo8
-Ozw5OzI2OTo4NTc4NTc1OTQzMzM2Njc2NjY0NTQ3Ojg0NjQ1NTc6OTc4MjM3ODM1
-OTQ3Ozg3Nzk4NDQ0NTo3ODc3NTc2MzE1MzU1NTg3ODIzNTQzMzY3MzM1NDc4NjY0
-MzYzMjc0MzM3OTw2NjQ0Nj44NjY5NjU1NDQzMjMzMzQ3NDU2NDQ4NjQyMTc2MjQz
-Njc0NTc2NDQ1MzIzNTQyMDIyNzM1NTYyNzw3Nzg1ODg2Njg1MzIzMzM2NzYzNTQy
-MzY0MzM0NjQ2NjYzMjQ3NjYyNDQ2NDc1Njc1NTUyOzc4Nzg4NTM1ODQyNDQzNjUz
-MzI0NDU1MjQ0MjE2NDI1NDMyMzU2NDIyNDUzNDU0NjU3NDEzNDMyMTMzMTM0MjAy
-Njc0NTUyMzQzNDM1ODM2NjU3NjU1MzQ0OTQ0NDg8OTQ1NDIyMzs1NDQ1QDYzNDQ4
-NTUzMjU2NTM0ODU0NDQ2MzU0Nzk5MjM0MjM0MzQ2NDY3NTUzNDU4ODU1MzI2ODQw
-Mzc1MzQ1NDQ1NzU1NTY0Nzc2NjY2NDQ1NTQxLzI3NDE0ODg0NjQzMzQxNDQ0NTQ3
-NjY1NDU4ODMvMjMyMjQyMjIzNTMzNTU1ODQ2ODY2NDQ1MjI1MzQ0Njc4Nzc1MjU0
-NDMwNDQ1NTU3NDM0MTAzNDQzNjQxMjU1NzY0NTQ4NzY0NDQzNTYzMzY1NTY0MjI0
-NTk3NjQ2NDY1NTEyMjMzMjU3NDMzMzc2MzQ1NDQ1NTY1MzQzNDIwMTU1NDU0NDM1
-NTU4NTQ3Njg1NTM1NjQyMjM2Nzc3NzU3ODkzNTQ1NjY1NTY2NDM1NTYzNDM4OTQ0
-NTY0NDIyNTU3NTU1MjMyMzIyMjQ2NDUzNjk2NzY0MjU2NTEzNTQyMjM2NjQ0NTIz
-NjUzNTU0NDMyNTU0MzQ1NTQ3NDU2NDQzMTEzMjM0NjY2MzIzMjQ6MjU0NDU1OjY5
-NDc2NDU0NjUxMjE1NjU0NjU1NDQ0NDY1NDY1MzIzOjg0NDc0NjgyNTU1NDU0NjU0
-Nzk3NjY3NDU1ODU3NTM0NzUyMjc3Nzk2NDIyMTI1NDU2NDQyMzIyNTM0NDQ2NjU5
-ODQ0MzM2Nzg2Nzc4ODg5NzQzNTIyNDM3ODU0NTY6OTc0MjIyMzM2Nzc2NzQ2OTo2
-NjI3OTU1ODs4Nzo5ODc7ODc4PDo5Njg4Nzg3ODg8ODc3PDY5ODs8Oj05Ozk4OTg8
-Oj05ODk5PTs4Ojg5OTk5Ojw7Ozk1Nz04NjU0Nzc4NzY1Nzc0NDU6ODY1ODQ5OTY2
-Ojk4Nzc7Nzc1NTY3Njc1NzU4ODg6OTY2NDU4ODg5Njk3NjA1Nzk2PDs2Nzg5PDs4
-Nzk7Pjw4ODs5ODg3Njc4ODs5Ojg2Nzk5Ozc5PD0/Ozs7Ojg5Ojw5ODc5OTw9Pzk1
-NzlAQT9CRT09PTo4PDw8QDs9Ojo9QEFDRUE+PD08Oz5APz49PkRDQD8+PUBCQUNE
-RTpDRT5losDL1dnf4uTm5+jqRkZBPT0+Q0JGRkVGR0NAQkM/PDk7QENDPjw7Oj05
-Ojo9Ojs2Njk5NzU5Nzk9Pz87Oj08PT1AOTc2Mjc3ODUzMTU4Nzg4NDQ5NTM2NDI2
-NDg3NThBNTQzNzQ2Njc4MzQ2ODU4Nzo3Ojc2NzY4Nzo1MzU3NzU1MzUzMTUzNjY2
-NjI1OTM0NzYzNjk2NTQ1NTU1Njg3NTg2MjQzMzUzNTc2ODk6NTQ3NzY1NTc1NTQ2
-NTY2ODQxMjE1MzY4Mzg0MTEzMjQyLjQ1NDc0MzQzNDIyMzIzNjY0NTUzMzY0NDQz
-NDQ0NTg5OjU1NTMyNTc1NTIzNDc2NzY2NDY2NTU4ODc2MjEzMTQyNDY0MzQ0NTY1
-NDYzNjk0PDw1NjUzNTIzNDM0NDQ1NzQzNjUzNTU0NzE1NjY0NTo2MzUzMzE3NzUy
-LzIyMjEzMzEzNDMyMjEwMzMzNDQzMzIxMzU0NjQyNDU5NzY0MzI0NjY3NDQyMjUz
-NTEzNjc4Nzc4NzM1ODYzMzU2NDQ4Ojg2NDY0NDQ0NDY3NjcyMjY2NDU/NTY4NDMz
-NDU1NDU2NTQ3NDU2NTU4NzQzNDIzMzcyNTc0MzU3ODk1NTQzMzQ0NTU1NTM3NzU0
-MTQ1NjU1NzU3OzM1MjMwNDU2MzQ1NzY1NDQyNDY5MzMyNDM0NDQxLzIzNjQ4NjY4
-Nzg3NDM3NjYzMjQ0NTU1NTQ0NTM6NjQ2NDIzNDM1MTA1MzQ0NjU0Mzc1ODQ1MDIz
-MzU1NzY0NDM0NDc1NDQzNTw2ODM0MjU3NjQ1NzU2MzY5OTE0MTI1NDQ3MzM0NDQ3
-OTg2NjM1MjI1NDQzNjQyMTc1NDMzNDQ0NDU1NTQyMjI2NDQ0NTg0NTQ1Njc4NzU0
-ODY2Mzc1MzU3Nzc1NDI4Njc5NzM0NDEyNDQ2NzQ0NTc1NjU3ODo0NTU0Njk3MzU0
-NDU1NjIzNjY1NDIxMjIxMjE1NTU2Ojc0NTU0NDU0NTY3Njk5Nzg3NzI1MjQzNTc0
-NTUzNTM1NDM3NzQ4NTY1NTU0NTU3NDQ1NDU0NDY2NDU2NjY1OTc4NDQ0MTE0NTU3
-NTU4OjY2NjY2NDYyNjQ1NjQzNDI0MzQxMzEzMDIyMzU1NTMxMjMyMjM0Nzc4NjQx
-NDQ0NTQ0NDY2NTQyNTMvMjM0MjU1Nzc2ODc1NzM1NjY2NDU2NDU1Nzg4NzY4Ojc2
-NjY1NTY6PDc2NjY2NzY0NTY3NzY7Ozc0NDg0MzU0OT06NDQ0NDQ3ODk4Ojk6OT0+
-Ozc2Nzs2OTk1NTY3ODk6Ozo6Ojo4ODs5ODg7Pj07PD47PDw+Pjk6Ozk2NTY5OT0+
-PTs5NTY3Njc2NTk4Mzc4Ozg/PTg3Nzc2NTc5OTg3NTc1Njg5NjU3OzU6Ozk3NTUz
-MzIzOTk6ODs8ODY2Nzk7Ojk5OTg2Ojk4Ojk4Ojk1NzY4Ozk4ODo4NzY1Nzo6ODc6
-ODs8QT88Ojo6PTg2ODk3OTs7Pz87Ojk5Oj1APz0/OjhCQz4+PTw7Ozk8Ojw/PkBD
-Pz4+Q0E5OT1CQj88Qj0+PT9BQT5CRUREQT8+QVqiws7W29/i5efn6upFPzs+Pj89
-RUVHR0VDPz4+QEZBQz5DPzw4PD8+Ozs6Ojs5PDs9PDk7QT0+Ojk9ODY3ODY2OzU2
-MzQ3Nzc2NTc3Njk5ODc2NzQ3NDEzMzc3NDc5OTg5ODk4Njc4NDIzNjc1Njc9OTg3
-NjY4NDYzNzcyMTE1NDc3NTQ0NzU2NDU0NTg2NzY0NDY5Njg8NzYzNzY1Nzg4NzU1
-NTQ0Nzc2Njk6OTg1NTY3NTIzMi8yNDczNjU1NDY2QDg2MDEyMTAwNjM2NDY5NDIy
-Nzc1NTQ0MzI3NDMxMzQ1MjI1MjM0NjMzNTMzNTU0MzQ1NDQ2NTYyNTI1NDQ3NzIz
-Nzc5ODk4NzY2NjY2MjM0Njc2Njc3NjY0NjUzNjQ3Njo3NjU0NDQyMzY0MTM0Nzg4
-NTQ0NDY2MjAyMjIyMzgxMjg5NTU7NTQyMzM1NjU0NDMxMDE1NTY1MzU2NDQyMjIz
-MzQzMjU0MzQ6NzY0OTQzMzU0NDQ2NzQzMzQ0ODM0NDc2NTQzNDU1NTQ0MTY1NDg2
-NTQzMzEyNDU2NDc6NzU3NzQ1Njc4MzM0NDUxMzM1MjE1NTc2NDU0NTY1NjMzNjY0
-NzY4Njg3NTE2MjU2MzI0Mjg2OjY1NDo5ODc4ODg1NjQ1NjQ3NTQyMzM0MTU0NDc1
-NDMxMzMyMjAxNDc1NTc3NTc1NjQyMzc1NDc2MzUyNDY0NDYyMzEyMTQ3NTo1NjU1
-NTUyMjQyMDAwMzUzNTU0NDI1NzY1MTM1NDU0MTIzMzMyMTQzNTM3ODg4NjI0NTg3
-ODY3MzY0NTY5NzMvMTExMTM0NTIyNTY2NjUyMjAzMjM0NTU0NjQzNDMzNTQzNTU1
-ODo3NDIyMzM1MjI1MjEzMzQ3NDQ1NTUyMjAwMDE2NDQzMjMzMzUyNTg0NTM0NTM1
-MDQ2ODo4NzY5NTM0NDUzMzY6NjQ3Njc2MzM1MzY1MzU0MzMzMjMyMzU2Nzc4NTQ1
-NTI0NjQ2NDI0NTg4NTQyNTM2Njc4MTMzNTs1NDM1NTMzMzEzMzM2NzY0MzMyMzg5
-NDU1NTY2NDM4NDI0NDMzMzQyMzU0MzU1NzU3ODQzMjU1NTQ1NDQ0NTU1NDIzNjUy
-MzQ0NDw3NDYxMDM0MzQ2MzU2NDM1NDIzNDg3ODcxMzY3NTQzMDIzMjQ4NTU1NTU3
-NDU2NDQ0NDY3NjY4NjEzNzc3Ojk4ODY6Pj05NjY2MzU3MzU1NTU1NTY3OjY7ODU3
-Njc2NTM2NjY0Njs5NzY5ODc3Nzg3OUA8ODc1OTg5Nzs6ODg4Ojc2Nzo6OTg3OTw6
-OT07Ozw5Ojk5Ojo6Ozg2NjU4OTk3OTpAPTs2Nzg4NTQ2ODk7ODs6Ozs5NzE4NTY1
-NjY6ODw8Ozo7OjU2NDU3NTMzNDs8OzY1MjQ2Nzk3ODg8Nzg5ODY2Njk4ODc3Nzw7
-Njs7NTg3OTY6PD06Ozo4NjY4Ozo8Ozs6PTs7Njc5OTw9PTs4Ozk7PTw7OTw5QTs6
-PD0+PD07PT89PT08QDtAQkBEQz09PUBBPj8+QD49PT4+PUA8PD04PUJAP05DPkBI
-Sj88Tp/Dztbb4OPl5ujp6kQ+PEFAQEZCRUpDRUdCPz1APkJAPDo8Oz09PDs+PDg9
-OTs6PT46Oz09Ozs6ODY0OTg3NTg5OTo5OTw5Nzk7PDs5ODY3NjM4Njc4NzQ0NjY0
-Nzg3NTs8Njk1Mzk2MzA2NjU5OTY0NzU1NzU0NDc0NzQyMDg4Nzk3ODY3Nzg1NDg+
-ODY4NTY2NjExNDU0MjY1MjM1NjU4NTU4Nzg3Nzc2OjQ1Njc1NDU1NTY1ODY4NTQ2
-MjQ3NTIxNDYxMjIxLzMzMDMzMzUyNTMyMzU2NTI0NTQ5MjMzMjI2NTY1NDU0NTQ0
-MzQzMzM2MDMyNzY0MjQ3NjU1NDIxNTU1Njg2NTc5NzQyMjMzMzM1Njg4OTU2NzQy
-NDQ1NDI1NjU1NDY0Njc1NDQ4Nzk3NjUzNTQ2NDg0NDQ0NTIxMTMzNDY0MzMzNDU1
-MzMzMzQyMzUzNzAxNzU1NTQ0MzUyMzQyNTA3NTU0MzM1PDw3NzI0NTUzMzU1Njc1
-MzMzMTQ4MzU3NDIzLzAxMjI0MzAxMjQxMC80NTU1NjQ0NDI0NTU8NjQ1NjY1NjUz
-MzQ0NTM1NDE0NzU2NTM2Njk0NDY0NDU2NTU2OTY6NzI1MjExNDUzNjczMzI0Njo4
-ODY2NjU3ODc0OTs1NDI0NDI0MzMzNDIyMzYzNDQ2NjQzNTU1ODk3NDQxMDIyMTQz
-Mzc0MzU1NDc2NDU0MDI0NDQ3MzUzMzI1ODY0NDQ1NDI0NjU1MzMzNDQyNDM3NDg1
-NjYzMjMyMjIwMDIzNzk1ODc4ODY3NTc0NDM2NzU3NTY2MTIwMjM1MjQ1NjMxNDc2
-NTMwMTM0MzEyNzc7OTc2NzQ1MzY1NTU2NDUyNTY2OTU3MzMyNDUyMDIyNDQ2NDQ0
-MzYzMjE1NDY4NDM1NTY0NDU2NTI2NTI3PzU2NDY0MzEzNDUxNDc2NDQ3Nzc0MjIx
-MzQyMzUzMzMyNTIwMTU1NTYyMjI0NTc1NTQ1NDQ4NjQ3Nzc1MjU0NDIzMzI0NzY3
-NzU0NjYzMjQ0MjI0NTY2MzI0MTI1NzQ1MjIyNTU4NzIwNzc3NDI0NTQzMjU3NzY3
-NzY1Mzw4NTY5NzY0NDU3ODU3MzU1Nzk4ODg3ODQ4NjQyNTY4ODs3NTY0NzQ0NzM0
-MjMzMTQ0NjU1MTU1MzM1NDY1Nzc1NDMzMzM3NTY2NTY1Njc3NTk4NzU3Pzk0NTcz
-Nzc4NTc3OTY1NTM2NjY4NDU0NTg1Nzc2ODk3ODc0Njg8OTc2MzM2NTk3OTk5Ojk2
-NjU2NzM0Njc4ODs3Nzc3PDc4ODs8OTs6Ojs9ODk5Ozw5QDo8ODc3OEA8ODc3ODo3
-Njs6OTg8PTo5OTo5NjM3PD08PTg5NTg6ODU2Ozo7Ojk2Ozo5NTI3NzUyNTc5Ojg5
-Ozk4Njk6OTs8OTk9PTk5MzU2Nzc5ODc1ODo6Ozo7PT08Ozo6ODk3Ozg3Ojg6Ojo7
-PTs5ODo4OTo4Ojg7Pjk3OT5FQD5BQTs7ODo7PDo9Ojs6Ozw7PkFEQD49Pz09QkNB
-REE+QkI/P0A+QkE7NzpBP0A9QD1BQT8/RURRnsLO193h4+Xn6OnpQkU9QEBDTEdB
-Qz9FRD5ARDw5PkFBQD5CPzs6Pzs+QDs7Pj07NjU5OTk5Ozg2NTg2NzY4OTw6QDg1
-NTk6Nzc5NjQ4Nzc3NTc2Mjc4PDg2NjU1Mzc7PDo5ODg5NTQ1NTI1OTk3NzM2ODY0
-MzU2NjMzODQ1MjU1Nzc3Nzo2NTY4OTs4NzQ1Njc2NjM1MzE3MzcyMzQ0MjI0NTs7
-OTUzNTM1MzU1NTY1MDM0NTc0NDMzNjQ3NTc4NjM1NDYzMzQ4NTMyNDQxMzQ0MzU1
-NTY4NjU1MzQ0NDYzMjM1NzMzMzQ2MzU2NTQ2NzU1Mjc2Mzg1MzQzMjMyLzM1NDQz
-NjU2NTU2NTgzNDU1NDY0MzM1NzU1NTU2MzMzNTQ0NzQ0MzQzOTk6OT8zODc2Njc2
-NzMyNDM0NjQ4ODE2MjI0MzExNDc2NjY3NDI0NC8yNjUzNDUwNzMyMDAxMTU0MzQz
-ODMxMjI1NDc6Njk2NTM1NDIyMzc6NjIzMzIzNTY1MzMzMTEzMTAyNTIwMTMzNTMz
-MzgzNDc6NTMyNDM1OTc2NTc1Mzg3NjQ2Njc6MzEyNTQyMjE0NDM0Njg2NTg3NjI0
-OTU0NTM1NTIyMjQyMDcyNDU3NTIzNDUzMzc0NTIzMTIzMjM2ODUzNDQ0NzIzNTU0
-NDY2NTU0NTQ1Nzc2ODczNDQxMjM0NDE1NDY0NjMyMjQyMjU0MzM2NjMzNDg2ODU2
-MjMyMjU3NjEuMjEyMjU4NTQ0NzU4NDUzNDQzMzMxMTEzMTMzMzM0NDU2ODc3NTM0
-MzU2MzI2Ojs3Nzk0MzU2MjIzMDAwMDQ0MzEwMTI0MzMzMjg0ODY1NTQ0MzU0Mz01
-MzU4OTg6NzY2MjI0MzIyMDI3NTY7NTQyMDIyNTY4ODg2NjU1NTY2MzY1NTQ6NzY3
-OjY4MzU2NjQxMzM0MjU0NzQ1NDY2Njg0MTU2NTUzNTY1NDQ1NDYzNDU0MTQ1NTYy
-MDIxNDQwMzQ1MzMzMjQzNDUwNDY1NzUzNDQ1NDQ3NzU1MjU0NDMxMzU0NjQzMzY2
-MzIyNTU0MTQ2NDkzMjQ2NjM0NDMzNjQ0NTUzNjUyMzQ1NTY3NjY4NjU2NDQ4NjY0
-ODc1ODY1NTQ2NDY2OTg6NDQzNTU1NTQzMjI1ODY0NDEzNDY4Nzc1ODs2MzQ1NTY3
-ODY1MjU2NDU1MjY4NTY3ODU4ODU2NDM2Njg4NTg3OTc4Ozc3NTMzNTU6ODQ6NzY1
-NzUzNjs1ODc2ODg2NDY0NDQ3OTpHSjk2NjI2NzU0NzU4ODY4Ojc3NTY6PT05Nzg9
-Ojk5Nzc2Ozk4Ojs5Nzg8PT43NTg4ODg6Ojg6Ojw7Ozs5Ojc2NjY4Ozc4NTU2ODY1
-NjU0Nzc5OTU4OTg3Nzo+PTgzNjM1ODg7PDg2Nzo3Ojo6OTo2ODg5ODc2Nzg5Njg2
-ODY8Ojk9Ojk3PDw6ODk7QUA8NjU5Ojk5Njc6PUJAPjs7ODk4PTxAQEM9PT06Pjo6
-PkA/PUA+Ojo8PD49PT8+PDw9Pzs9Pzs9OTs+QkE+PD4+PDw8Ozo7Oj9CPT1ESEJB
-QGypw87W3OHj5Ojo6upISkBDQURARERBPEBEQUVDQjs4QUFAPERBPD9BPD86Ozo9
-PDU6Ozg3NjY5ODQ1ODg2Nzg4OTw5ODg3NTc1OTY2MzY3NzQ2NjY4OTc3PTk3OTU1
-NDk3NjU4ND01Ojg4Njo6OjU0NjU4NTQ1NTY1NTY2NTU4NzQ0ODY3MzM0NTU0NTY1
-MjMyNTQ1MjM0MjI0MzQ1NDYzMzEyMjQyNTU1NjUyMjUzMzY4NTYzNTY0NTY2NDY6
-NjY4Nzc1MzE0MzQzNDY0NTYxMzQ1NDMxNTQ2Nzg0OTgyMDA1MzMzNDg0NTUzMzY4
-ODYzNTw3NTMyMzc3NzIyMzc5NTMzNDU1NTY2NzQzNDM0MzQ1NDM0NzM0NTg0NTIx
-MjIzMzM2NjY2MzQ2ODc5PjMyNDQ0NDY1NTY2MzQ2NTM0NDU1MjQyNDYxMy8wNDI2
-NjQ0NDU0ODc4NzQ0NTIyMTMyMTQ3ODEyODUyNDM4MzM1Nzc7NjY3NjM1NjY2ODU2
-NjMzMzQzNDU2NDMwMi8wMTA2NTM0NTQ1NzY1NDMxMzU2NzI1NzQ1NzU1NDE0NTQ2
-Njg1MzU2NzUzNDQzMzM1NTQ1Mzo2NDU1NTU0NDMzMzEzNDQ4NTIzNzY0LzI0MjMz
-Mzg3MzY1NjQ0MjU2ODg2NTMyNTQ0NDUzNTc2NTg1NTQyMjQyMzExMjU2OTU0ODg4
-NTY2NTU1MTQ0NDM2NzM2Njg1NTQ1MzQ2MjQzNDY0NjkyMjQ1OTE1NTQ1MjM2NTY0
-NDQyNTM1NDI2MDAyMjQ1NDY1MzI2NDY1NDQ0MzI1MjQ1MTAwMjM1NjQ5NTY6NjE0
-NTU1ODYzNDIyNDQyMzUzNDIyNTY1NTY3NDY0NjY0NDQ1NTUzNDI0NDE0Pzk2MzQ0
-NDY1NjY1NDU3NjQ1NDM0Njc0NTQ0Njc2NTU1NjU2NTM0NjUyNjYzMzI1NjU3NjU1
-NDUvMzg2OjY3ODc2NTk6MzIuMjU2MzY4NTUyNDUwMjMyMjg2NDQ0Mzg2NzMxNjQ0
-NTU0OTk1NDU2NTc5MjE0NTc3NjEyNTQ2MjQ3ODQ2Njc1NTUxMjU1NjMyMjQ4Nzc0
-NzgzOTg8NTM0NDU0NTQyMjU1MzUyMjg3NTY4NjU2NTY1NzE2NjY3NDMyNDMxNDMz
-ODU0NjQ1NDU0NTg6OTQ0Njg3NTY3NzY2NTQ2NDM2NjQ2Njc2NjczNTQ1NTU1NzM1
-NTQ1NzU2Njc3NjQ2ODQyNDQ0NzM1NDc4Nzg6NzY2NjQ2NTQyNjc2NDc1NlROOTs2
-NDQ4Nzo3ODc4Nzc3OTk5OTs5Njg3Ojk7PTo6Nzg3Ojo5OTY3Pj48NzY2Njk7Ojo5
-ODc7ODo5Ojs7Ozo6OT88ODg4ODg6NTY2NTU2Njo7OTk4ODs6Ojg6ODU5PTo5ODg3
-Njg6Ojs6PTs7Ozg1Njg6Ojo4ODc5OTo3Njc5PDk3OTg3OTg3Nzk9Pjw6OTk5NzU2
-PDo9PUA/Qzo7OD09Pzs6OjpFQ0BAPTo/Pjs/PT4/QD9BP0FAPTw+Pjw9Pz89Pz04
-ODs6PDo6PTxAQUFAPjo5QT9BPkBEQz1KgbTCzNbb4OHk5ujp6kdHRUdDPj4/REU8
-Q0VFSUQ+QEA6QT9AQz87PT48OTs4Nzg3NTo9PDU6ODo6Ojg5Pzs3NTY4Ozg2NDU1
-NDUzMzg3NjczNzg4Nz44OTo1NDIzNjk4ODo5OTY9Nzc8Ozg1NDo4NTc0MjQzMjQ3
-NzY3NTY2OTk8Ojc3ODc2MzEyNzo5NzQ0MzIxMzM2MjIzNjczNTQzNDU2NTQ0NTU0
-NDMxMjM1NTQ0MzU2OTQ2NDM0MzQ1NzMzNTw2NTQyMTAzNDUxNTQ1NTc2NTQ1NTMy
-MjQyNzY0NDEyNDAwMTIxNDQyMjY0NTU0NDY0MjQ1MzIxMTIzMDQ0NzIyNjY4NjQz
-MTM0NDQ4NTU0NjU1OTUzNDQyMzU1NzMxNTEwMzU3OTQzNTU0ODc4MzAxNTU3MTE0
-NTQ3NTQ0NDE0NDs2MzM2MjM2Nzk5NTc2NDI0MjEzNDc0NTIxMjE1MzEyMzQ0ODY3
-OTc4NjQzMzMzNTc2Njk3NDQ0NTU3NjQ0MTQ0NjU1NDAwMzQzMTQwMzU1NDc2NDQw
-MzMyNDI0MTQzNDQ3ODQ4ODU3NzQzMjIzNTU3NjUxMTExMjQzNTQ0NTIzNDQ1NDQ1
-NjQ0NTQ0NjAwNDQyNzY0MjM1NDUyNDg2NTUzNTk4NTg1NTU1NzQxMjM3NjQzMjM1
-MzU1MjQ2NTU0MzA0NjUzNDMyMjI1MjM1MzY2NjQ0Nzo0NTMzNDE1NDIzMDAwMjUz
-NTU4Nzg5MzI0NTg4ODQ0Nzc1NDQ0MTI3NjkzMzM1MzUxNDUzMzM0NTY3OTg2NTUz
-NTQzMTEyNDExMTAzNDY4OjY1NDQzNDc4NjU0NDM1NzUzNTQzMjM1NDUzNDU0NTU3
-OTQ0NDMyNTc1NDM2NjI3NzY0NDQ2ODg3ODM0MzUyMjI3NDMxMTQzNDMxMTU1NDQz
-MzU0NzY1OzY3ODMzNjQzMzI2NTU1NTYyMTI2ODY0NDUzNTIyMzE0Nzc0Mzc2NTQ2
-MjEzNDU2NTY1NjcxMjM0NjU2NzUzMzMzMjM3NDUzMzI0NzU1NTgzNTg1NzQzNDU0
-MTMzNDMyNjcyMjc3NjMzNTU1OTU3NzU2Njg1NTg5ODk5NzU1MjEwNDY4OTc1NDU0
-MzMzMzIzNDMyMzAzMjEzNTU2NTQyMTQ2NDI0MjU0NDY4NDM2NjM0Nzc1MTQ0MzMx
-NDQ0MzQ1Nzc3Nzg1NDc0MzQ0Njw9OTQ1NTM0NjQ0Nzg3NzY2NDU2Nzk3NzQ0NTY5
-NjM0NjQ0OTY2ODY0NDUzOjc3NTc3NzgzNDg3Nzk2ODg4ODg3NTc6Ozc6OTo7Oj06
-OT05ODw7OTs1ODg9PTw6Ozs5OTg2OTg2ODc5ODo7OTc4NzY3Nzc6OzY0NzY6Nzk5
-NjUzMzc5NjUyNjg7PTs6PDs5Nzo6Nzk6OTg3Nzg7PD06PT06PTw4Njo5Ojk5QDk6
-ODo8Ojs9ODU4Ozg6PTw7OTo7OTs4Nzc3Ojo+PD06Ozw9OD8/OjpAQUA9PTg4OTg5
-Ojw+Pz09PUA/QEFEPz5BQj89QD9AP0E8Pj0+OTs7Pj1APT9EQkE9O0NDQ0JFQ0Vo
-sMLM1Nvf4uXm5+npR0NJSENCRERBQkBCRkFBQEVDQz86ODk8Ozs+Ojs7OTo5NTg3
-OTk5OTg8ODg7PDo3ODc0NTk8Ozc6OTY2NDU2NjY3Nzs6ODk3OzY2Njg3Ozk1NjQ1
-NzY1Nzo6OTg4Ojg1NTc2ODQ1NjY3NDMzNTc2NDYzMzU9OTc4ODk2NjU4OTQ1MzEy
-MjM0NTI0NTw2NTg0MzEvMTMzNjQzMTIyNDY5NTQ0NDQ1MjE0NjQ1NzY3NDM0MzY1
-Ojk1NDU2MTE0NTY0NTY0MzIzMzMxMDMzMjQxMzI0NTIzMjEzMTE1NDQzMTE0NTM1
-NDMyMjEyMTEwNDQ2MjU1ODg0NDM1NC4xMzUyNDQ2NjQ0NTIyMDE0MzQ1NTM2MzUz
-NDMzMzU4OTc1NDM2NjU0MTMyMzIzMjUyNDE6MzI1NTYzMTI0MjAxMzExMzg0NDQ0
-MjEzMjM2NzE0MjM0NTQ0MzQ1MTM1ODQ2NTI0NDUzNzY0NTY1NDQ2NjY0NTI0NTQy
-NDMzNDYyMDE0MzU4NjY2NDI1Njk1MzUzNTQ0MjUzMjM0ODo2NzQ1NDQ6NDMzNDE1
-NDM1NzUzNDMyNDM2NTQ3NTgzMzMzNDU2NDQ1NDMzMjMzNzM1NDMyMzU1MkE9MTM3
-Njc0NDM0NzU0NDQ0NjQ2NTYyNDQzNTQyMzAxMTM0MjQ1NDM0OjYyMTEyNDU0MzM1
-NTc2MjQ0NjQ1NTQ4Ojc2NTIzMzM3NjQzNDQ0MzYzNDc0NDc5ODc0NDY0NDQ2NjQ2
-NjU3OTY0NDIyMTQ2MzIzNDQ2MzEyMzU1NDUzNjQxMjQyNDM1MzM1NzUzMjI0MzU2
-NTU3NDU0NDY0NTc1MzI1MzY5NzY3OTU4OTk0NTU5NTQzNDM4OjY3NTEzNTQ1NzUy
-NTQyMDEyMzU3NTMyMzM1NjIzMTQ1NzYyMTIzNTc1NjQ2NDM0NTU1NDQ4NDQ0MzQ4
-Nzg1MjI0Nzg2NjUyNDM4ODUzNTIzNTYzNDU2NDMzNTM0MzI2NTY0MTEzMjQzNDUz
-NDEzNDQzNDMzMTMzMzMzNDU2NTIzNTIzMzU1OTU1NTU4NDIxNTY1MjM0NjM0Nzg5
-NjI0NTQ1NzY0NTM1ODk4NjQ4NzMyMjMzNDQxMzEuMDQyNDYzMjIyNTk7ODU2NTQ1
-NzU0NDMzNTY2NDQ1NTg3MzQ1MzMzMjU1Nzc6NzQ1Njc1NDU4NjQ1NTM1Njg3ODc3
-OjY2ODo5Nzk3NjY4NTc4OTc2NzU4NjU3NjU3NDQ1Nzw5NzQ0Njc0NDQ1NjU1OTk4
-OD05PDw6ODc2ODc5OTw5NjY8ODk7Oz1AOTs5NjU1OTg7Ozo6Ojs7OTg5ODc2Nzc5
-Ojo5ODY2ODc2Njk3Nzs4NzczNDk6PTo4NzY1Nzs6NTQ2NzU1ODg7NzU4OTs5Nzs6
-ODo9Ojo5NDU7PDs8Ozs9Ozc4Nzc4OTo7Ojg5PT86Oz05ODk8OjlBPTs6Njo6PTo7
-Pjo5PDc5Oz48PDw/Oz0+Pj0+Pj8+QT4+PDk5Ozs9Pj9AP0A+Q0FBOzg5QUNEQ0RA
-PTw6PT9DRENBP0NFREFBQUNBQENESWGtwMvU2d7i5Obo6elGTkxGQUFHRD5EQjk7
-PkJDQjw9QTs6PDo9Qz44OTo6NTg6ODo4Ojk4PDs4Nzc4PEE9Ojk3Ojw6ODg3Nzg3
-Ojc1ODU1NTM0ODY0NjY3NDQ2Njo1ODY6ODg3Ozk4Nzg2NDY2PDY6ODQ0NTUzNDQ2
-MzQ1NDUzNDY2Nzs4NzY2NzMzMzU2MzY1NzQzNTU2NzQ1Nzc2NTYzNDExNTc0NDE1
-Njg7OTU2NTYzNTY3NDU0ODQ1NDY2NTM0NzUzNDU2MzUyNTIzMjQ0MjQyLzI0MTMx
-NDQyNDM0MzEyLzI0NTQzMjEzNjU2NTYzMzY2MzExLjM2NjQ1MjIzNDUxMjQyMTIz
-NDQyMjM0NDUyMy8yNzQ0ODY4NzQ0MTMyNDMyNTY2NjQ0OzYzMjU2NTQzMzEzNjQz
-NTMxMjI2NDA0NDU0NDYyNTY0Nzk2NjQ3MzM1MzM2NTMzNzgzNTM1NzQzMDAzNDQz
-NzU0NDQ1NjM1MzE1NjY1MzIzMjM2NjU1NjUyNjE3NjEyMzM1NjIxMDQ0NTU2NjUx
-MTQ4NDQ6NTY1NjY2NzQ0MzI2NTc1NDI0NjY0NjY2NTM0MzY2ODs3OTY3NTc3NTQz
-MzUzMjExMzI0NjQxMDAyMzI0Nzk3NDIzNDMzNjk3MzI0NDIxNDQ1NDQ0MjE2NDEy
-NDAzNDY1NTQ2NDY2NzY0NDQ0ODAzNTc2MjIzMzk1MTQ3Nzg2NDY1Nzg2NjU2Ojg3
-NzU2NjUzNTQxMjUzODU0MjQ0MTAyNDY3OTg3NjU1NTExNDU0NTQxNTUzNTI0NTU0
-NjQxMzc0NDEzNTIyNTM0NDM0NTY1MjQyMjM2NDI0NDU4NDUyMjEyNTAyNjQ1Ojg2
-NjM1NjU1MzQ0NDU1NTU0MjM2NTYyNTMzNjQ0Mzg3ODMxNDo2Njg2NDU2NjY2NjU2
-NTYzNDk2NjIzNDg2NTQ0Njc1NTg3NjQ2NDIzMzIzNDU3OTY0MzMxMzUzMzMxMjU0
-NDM1NjMyMzQyMDQ4NjQyMzEwNDU1MzQ2MzQ0NTQzMzMzNTk2NTY2NTU1MzU0Nzc2
-NjU1Nzc4NjQ0NDU2NzY2NDY1NjUwNDU4ODU0NTY3NTQxNDU2NTk0Nzc1NjQ0NDUz
-MzIxMjMzNDY3NDU1NTY1NDc2NTg3NTI0NDI1NDY4OTc4NzU1Njc3NDczNDQ1MjM1
-ODc2NDY3NTU2NDc4NjYzNDc6PDU3NTY3ODk4Njg1NzU2Nzg5Ojk2OTs4Nzg0NjY1
-NDQ4NTU0NTU1NTU2Njg1Nzg1Ojk+PTg3NDU1NjY0NTc2Nzc3Njc5ODk6ODk5Pzs5
-Ozo4PDw3Njc9Ojc4OTo5Ozo5Nzg3Njk7ODU2NTc3NzY4ODc5OTk4Njs8Nzk5OTg6
-PDk5NDg6OTY2OTo5Ojc6Pz07NTc3OTY2Ojk8OTk6Njo2Ojg5PDo6ODk5Ojo9OjY5
-Ozo+PDo7OTU4Ojg6Ojo6ODg5OTo5Ojo9Ozk4ODU5OTg9PTpCQEE9QD9BP0A+PT5B
-PDk6Oj48Pzw7PT5BREA+PD0+PUBCQDw6PD0+QUBBPj5EQkJGQEFAQD0+PkRFUqfA
-ytPY3eLl5unp6klFQUFBPUFDP0E/OTpCQkJAQkFFQD4+QTw9OTc2Oz89PDo3Ozw4
-Njg5OTU2NjY9PD0/PDg3Nzk6Ozk0Nzc2OzY1NjM5ODY2OTY6NTY3NzY3ODc2NTQ3
-MzQ1OTw6OjQ1Nzs1MjU4NDQyMzM1MzMzMzU0NDg4ODQ1Njc5ODY1NDE0MzU3NDc6
-NTQyMDM6ODQ1NjQ4ODczMjMyNTY0NDM2NjU2MjU0MzM2OjU4ODY4NDMzNjQyMTIy
-NDU0MjM0NjQ0NjQ5MjI2NTQyMjI0NTUzMjIzMjIzMzc0NTc1MzU1NjU0MzUzNjg5
-NzQyMzEyMzM1MjU0NDQ1NjY0NDU0MzMxOjY0MzM1NDU0NjQ2NDY2Njg2MzM3NDU0
-MjIzMTM3NzQ0NjQzNzg0NTQzNDM0NTIyMDMyNTQyMjQyMDIzNTMxNDUyNDQ0ODoz
-MjIzNDU0NDc2NDQ1MjY2NTE5MTMyMzU3MzM2NzM1NDY2NTI3MzM0NjU0NDQ1NDM2
-NjQ5NjY3MzMxMzQ3NjU2MjUzNTQ2NTM0MzMzNDU1MzIyNjY0MjExNTU2NjUzNDU0
-NzM0NDc4NjU2ODc2NTU3MDU2NTY1NTQzMzM1NDM2MjMzMzMyMTQwMTQ1NTo2NzQz
-NTc3MzMyNjQ0Njo1NDM1NTY1MzQ0MzQ0MjQ1NDc0MzU0NDYyMjQ1NDQyMjIwNjY1
-NTU0NDQ5NDQ0NDMxNDM0NTk5Nzc2Njk4NTQ3NDY0MjMxMzU0NjU0MzA0OTk3ODYz
-MjQ0Njg2MzY4NjY2Njg0MzQ2MzQ2NTc0NDUzMjwzNTQxMjM1MjIyMzIzMzI0NDEx
-Njg4NjQ2NDQzNDM0MzM2MzIzMzg2NTQ1NzQ2ODc2NTQ1NDM1NDczNTo0NTc1NjM0
-NDQ0NDc2NjgzNDU1OTY2Nzg3NzYyMzQ0MzM4NjMzMzU0NDUzNDU2MzY4NjU2NTMz
-NzMyNDgzMzMzMS8uMjM0NjM1OTk1NTg3NjMzMDIxMTY2NTU2NzkyMjEzMzQ1NTQz
-NTY0Njc3MzQ4NTU1Njg3NTg3NjY2NDUzMjEwMDA2NTk3NzU0NTMyNjU2NDU1Njg2
-Nzg2MzU2NjQ4OzY0NDY3NTIyNTg2NjIzMzI1NzQ1OTMzNDM1NDUzNDU2NzU3NTQz
-NTQ0NTUzNTM0NDY1NDYzNTQ1NTUzMjQ2Njg3Ozo3Njg5OTc3NDI1NDo6Nzc2ODc4
-Nzc1NTU2OTc5NjY4Njk4ODc3NzY3Nzw2Njg1MzU2NTg1Nzk4Nzk2NzU3NzY4ODk6
-NzYzNDQ0NTY5Njg5Nzk7Ojk5ODs5Nzo5PTk7OTk2Ozs5NjczNjo6ODc6Ojw5OTk4
-Nzc3Nzc4ODg4ODg5Ojk7Nzg6Nzc4NT0/Ozs1Njw+Ozo6ODY0NTM1Nzc3OTY3ODg2
-ODc6OTc7Rzc4OTo8OTs6OTk4OTc3ODU3ODc5Ojk5PDg4PDY5OTo4ODc4NzQ1Pjk5
-Ozs7PDw9PDo5PT09PEA8Oz89Pj88PD8/PDw9REA+P0JAPUBDRD88PkFCOz86PkI+
-QDo7QUI/PT9AQ0JDQEE9PTs7PT9LncHK1Njd4eTm6OnqSENARkZBQEA/Qj06PkFC
-PkI/PD07Pj87PD47Ojo5PUNAOTY2Ozo9Nzo7Ojo9Ozo7Pzs3OTg1Njk6OTg1ODg4
-Njg3Ojo7NjY4NTk3NTw3NzY3NzQ1NDU3NDQ1ODc3ODMzNDU1NTU3NzU1NjU2ODQ1
-NTk2NTY2NDY0ODY5NzU1MzY5NzQ2NzQyNDU1MzAzOTc3NzY3NTM2NDQ2NzY0NzY2
-NTU3NTU1NDU0NzUzMTM1NjExMjU0MzEwMTU1OTQ1NDIyNEY0MzU2NDQ0MzEyNDQ1
-NTQ1MzMzMzc1MzpCNzY1NDYxMDU2ODg1MzAwMzY3MjI0NTczNDM1NjUyNTk3NTQ0
-NTQzMjU1NTQ0NTU0MzM3NzY3NjY2Ojg1MjAwMzQ3NjQzNTg3NzU3NzU4NTU1NDQ1
-NTQ0MzEwMzQ0MTExMS8zMjEzMjMyMzY0MjM2NTM0MzY6MzE1ODEzMzUzMzQyNzY0
-MTEzNTQ1NjM0NTQ0MjY0NTI2NDQ3NDY4NTM2NzQ2NTQ0MjM2ODU1MTEzNTM0NTgz
-Mzc4OzY0MzI0NzU1NTc2NDg2NjY5NzU5ODU2NTc3NjQ1NjM1NTQ0NzY1NTQ2NzQ0
-NDM0NTExNDMyMjEyMTQ4NzY3Njc0MjE3OTg2ODY0Mzc1NTY1NjU0NTM2MzQ1NTY3
-NDUyNTU0NjUyMjAyMzg2ODI1MzQzNjY0NDUzMzQ0NjQzMjIzMzMwMTI2OjU0NTY4
-NzU1NjMxMjIxNDAxMzMwMjQ1ODQ1NDI0LzA0NDg2NDYzNj45OjY2NTM0NDQ1NTQy
-NTk7OjM0MzIxMTEzMjQzMjE1NzY0MTI0NTU1NTc4NDYzNDI2NjMzODc0NTY0MzM2
-NTU0NTQ1NDQ0NDY4Nzg2NTQ2NzQ1MzM0Ozg4Nzc3NDExMjU3NDU2NjY2NjQ1MzM4
-NzQzNDg1NTY2MzMzNzg1NDU3OTc0MzU2NzQ0MjIzMjMxNDQ1Njc2NDIzNTU4NjU1
-NzY1MzU0NDY3MzU1Njc3MjIyNTMxNTc2Njg1NjM0NDMyMjU3NzY5ODc0NTU2NjEy
-NjQzMzMzNTUzMzk0MjIxNTQzNTIyMzY3NTc4MzQ3OTY0NTU0MzI1NDU2NDQzNDAx
-MDE3NjI0MzMzNjU3NjU2Nzc3NTU2NDQxMTU0NDQ1NDUyNTMyMjQ0NzU2ODUyMzI1
-NTg5OTk2NTU4NjQ1NjE2Njg6NTY4NzQ0NTY1ODc2NTY3OTU0NTY1NjQ0Nzc3OTg4
-Nzc3NTc2NTY2NDc1NjQ1OTk6ODc5OTk5Nzc4NjUzNjc6OTc4ODs8Ozw7Njc8Ozk8
-Nzc4ODo4OTg3NDU4OTk5PD44Nzs8Ozg5ODg1MzU7ODc5ODg5PTk8OTc4ODg5Nzg4
-Ojo6ODw7Ojc0MzU2ODM4ODs5Njg2NjY1NzQ3Nzs9QD07Oj5AODg4OTg3NzY4NTQ2
-Nzo9OTU4PT05ODk2NTc0NDY5Ojs5PDs2ODk5Ojw3PD07O0A9PDo9PDo9PD48PD0/
-Pz45Oz0+Pj49PD1BQEBAPDs6Nzw9Oz1APjo8PDxAPjw/QUA+QD0+Pj0/QlChwcvU
-2d7i5Ofn6ulFQ0RFSUo9RDk9Pj5ARkdDQT09PD05Pj46Pj47OTk7Ozs6Ozk5OTs5
-Njg6OjU6OTo4Nzc5ODw3Njk5PTo4ODg1MzY3Ozs7ODg2NTs5Nzc3Nzc3MzU1NDY4
-Njc3PDo1NjQxNjc1NjY2NTIyNTc2MzgzODc1NzMxNjQ1ODY4NTc4NjU4NjUzMzI5
-MzQxNzQ2NjQ0ODU2MzU5NDY2Njc4NzYzNzg0NTc3NzY3NDYzNTc1NzY1ODQ0Nzgy
-MjU2MzQ0MzI0NDQ0NDQyNDQ1ODU2MzUyNTQ1MDMzNDY2N0I2NDUzOjUzNjQ2NjMx
-NDIwNzIxMzc0NzY4NDQ1MzM0Njg3NzY2Nzc2MjMyMjQ2OTc4NDQ0NTc3OTY5ODY1
-MzQxNTU0NDIyNDEyNDU0NjY3NDQ3NjM1MjQ9NzcxMDE0MjIyMjUzMy8wMzMyNTM2
-MzU3ODU0MjU1MzMyMDMwNDIzMjEyMzM1NDQ2MzQyMjI0NDM0MzIzNjQ0MzI2NDY2
-NzczNjYzNjY4Nzc1Njc1NTMyNTg1MzM0MjQzNTU1NDU3NTU1NDU2NjQ1OzQ0NDg4
-ODY2NDY2NDQ2OTUzNjc3NDU1NjI2NDM2MzU3NTQyMTMzMzEzNDM1NDU3NDQyNTQy
-MDMzNTIzNDg3NzUyMzQxNTc0NDUzNDUyNDUyNDU1MzEyNTIxMzI1NDM0MjE1NTYy
-MjQ3NDMzNzYzMzMyMjM0MzM2MzM0NDQ0NDUyNDMzNTAxMjI2NjY1NDkyNjU2ODE1
-NTMyMzY3NjMzNjM1NDQ0MjI0NTY0MzQ1NTc2Mzc4MDI2NTU0NDM1MzI0NDIzMTIz
-NDQzNjQyNDMzNTQ0MzM1NDY1NjMyMzQ3NDM1ODM0NDY2NjU5NjU1NTY2NTMyMTQ1
-ODQyMjU0NDM2OTQ0NTc2MzI1NTU1NDQ2NjY0NDM1NTIzNDc2NDY2NjQ2NzYyMzM3
-OTc0MTAyMTU2NzUzMzU3ODMyNTg2NTUzMjM3ODYzNTcyMzQ3NTY3NTMyMzc1NTU1
-NjUzNzQ0MzI1NDQ1OTo4NjY1NTUzMzM1Nzg1NTUzNjIvMzY3NjQzMjc1NTQ0NDY1
-NTQzMDIzNDMzNDM5NjQzNDg1NTM0NTIxNTUyNTg2NjU1NTEzMjI3NzY0NTU4NDQ0
-MTM2MjA0MzUzMTMzMTYzMTQ3MjU3MzQ1NTc2NzU2NTU1MzQ0NTQzNDU3ODY2Nzc4
-ODY5NjU2Ozc0NDU6NzUzNTM2OTg5ODY1Nzc1Njg4NTM0NTU3Nzo7ODY4Ozw4Nzk4
-PDk3NzY6ODc5ODc4ODk5Ojk5Ojo8OTo6ODg4Ojc1NTU2OTo7Oz05OTs7Nzc3ODk5
-ODs8Ojo6Nzk5OTo6ODY2Nzc2NjU6OTg3Ojg4Nzg9ODk3OTc3MzQ2ODo3Nzg5Ozc4
-Ojk6Ozw9Pjk4OTk4NjY4OzY6ODo9ODg4Pjo2ODo7Ojk7NzU2OTk5Njg2Nzo5ODc5
-OTg5PDo5ODs9Pjs6REFCQUFAOj49PT09PTw/QUA7Ojo/PTs9OkA/PDs4QT4/P0JA
-REBBO0FAQEI/PUA9Oz5BQEFBTaLCzNXa3uHl5ufp6khERUM9Pj4+P0BDQ0VBPjw8
-PDw8QDo+QEJBOz48Oj83ODo7PD48ODM8Ozo6PTk1Njg7OTg2OTo3Njg5Ojg3PDo4
-ODo5OEE9OTg3Nzc2NjU4ODc2NTQ2NTg1NzQ2Nzk8PDQ2Njk3NDM3NjQyMzU2PTc2
-Njg6Njc3ODk4Nzc3NzczNDg8OjYzNzU5NDU1NTc2NjQ0MjI0NjU4MjYzNzc3NDQz
-MzQ1Njg3NTY1NjY2MzMzMzQ2Nzg1Njc1MzQ3Mzc3NDUyMzMzMjc2NTY4ODk3NTQ0
-NDI0MjE1MTE0Qzw1NDY0MTI1NTQ1Ojc1NDIyNjI0NTU3NjQ1NzM1NzM0Nzc4NTU0
-Nzg2MzU1NTY2NTUzNjU3NTQ0NjUzNjgyMjkzMTMzNTUzMjQyNDIzNTU3NTU0MjQ0
-MjY2NDMyNDA0NjM2NjMzNDM2NDIxMzQ1MjQ8NzEyNTU0MTY1MTEyNTEyMzE1MzM1
-NDU2MjU2NzIyNTY1MjQzMzQzNDk1MzM1NzM0NTg0NzU4NDY1NTQ0NDY3NDMyNDY1
-MzQ1MzM3OTg7NjU1Njg1NTc1NzIyNzUzMzY3NjY6Nzs3NjY3NTY3NzUzODY1NTU2
-NjYxNzIzMjIzNTY0MzU2NzI3OTk1NDU1NTYzMjU2NTMyMzY0MzQ1MzY0NTQzNDU2
-NjUyNDI1NzYzNDYyNDMwMTAyMjEyMzc4NDU0NTY0NTU1NjU0Njc3ODMzMDIxMjQ0
-NTMzMjE1MzY1NDU2NTQzNDY2MzU0NjIyMTI2NDU6OjYzNjQ1NTQ0MzI1MzIzNTYv
-MTY2Nzc3NDM1NTM3NjQzMjIzNzMxMTMyNjQzMzU2NjQ0MjMzNzI2NTc1NzQyMjQz
-NTYwMTo5NDM0MzM5MTQ2NjU0MzQ0MzI0NjQ1MzU1NTUzNTQ0NDM1NjQyMjQ1Njc1
-Nzc0NTAxMjIxNDY3Njg0NTQ2NDI1NjQ2NDIzMjAwNDMyODUyMTI0MzI6Nzc1Nzcx
-NDMxNjc6NDc4NzU1NTM1NTY0MzM0MjQzNDI0NTU2NTQ2Nzc2NDU3NDU0NzY2MzU0
-MzU3NDI0NDUyMzU1NDY1NTY3NzY0NzU2NDU1NTU3ODc2NzczMTM0MzIyMzIyODc0
-MTAyNjY3NDM0NDEzNDU2MzM0NDc0MzMzMzM0MjQ4MzMzMzMzMzQ0NTc4OTY3ODc2
-NjI0MzQ0NjY2OTc4NTY3Nzc3MzU5ODY2NzUzNjo4OTY2Njk1NTU2ODQyNDY1Njk2
-Njk4Nzk2NTI0Nzc1OTg7PDk5Ojg4Nzg1NzY3Ojc2NTg3ODg6OT03ODg6Oz06Njc6
-Njc7OTY4OTk6PDs4ODg5OT49ODc4OTc3Nzg6Ozs4OTg6Nzk5ODU3NjQ2MzM1ODg3
-Ojg2NDY3Nzs5Ozk7Ozc5Nzg2NTc3NTk/Ojg4Pjs5Njg8OTk5NzY5Ozo6NjU3Nzc2
-OD08Nzc2NjQ3OTg3ODs7PDw6ODc0Njk9PD06PDw9PTw/PD1CPTw8QUA+PUNDPj0+
-Pj1CQD89Pj48Ojw7Ozw5OUBBRUZCPz06Pj1CQUI7Pz09Pjk9RURGQ0hRocPO1tvf
-4ubo6OrpR0xKR0I6PEJEQj4+QD08Pj9AOUNBPT9BPz1BQTw7ODY6PT4+PDs5NjY4
-OD05OTc2PTw2NjU2Njg4ODg5ODs8Pzw6Ozo5OT08OTU3Nzo4NDU0NTc1Njg1OTc1
-ODs5Nzk1ODY0NDQ2ODQzNTY2NjQ3NDM2Njc2NjQ2OjY2ODQ1NDU1NDU3Ojg0NDMz
-NzY0NDQ3NTMzNjY2Nzc3NTU0NzU0NDM1MzI0MjU0NTU2ODUyMjQ1NDU3NzY4NDU4
-Njc4NDY0NTM2NTQ3MzU0MjU0NTQzMzUyMjc1NDMyMjMzNjQ3MzY3NDIyMTQ1Mzc2
-ODczODY0MzQzNDMzMzQ0NTo2NTc3NzUzMzUzODY2Njc5ODc3Njg5OTY0MTU2NjUy
-NDQyNTIzNDQ4NjU1NTQ2NTg2MzMzMzU2MjI0MzYzMTM2NTUzNzU2MjQyMzI0NTU2
-Mjg1MzE0NDc4NTMyMTQ7NDExLzIzMTUzMTU0NzczMDIzNjQzMzIzMzU0MjIyMzQ1
-NTQ2NTQ1NDQ0NjU1Nzg3NTQ2NzQ0Njc0MjM1MDAyMzQ1NzQ1NjU2NDM2NTYzNjY2
-NTI3MzY2NjU3NzY1NTc4NTY1NTY2NDYzNjg1NjUwMTAwMTU1NjY0MjE2NDM0NDI0
-NDY3NjMxMjczMjQyMDQzNDIzNTMzOTk4NTU3NDM2NjYzMzIzNTExMzMzMTAxNDM4
-QzY1NDM0NzU0NjU3MzMyMzIzNTMzMzUzMTM2NDE0NzY2NTQ0NjQ2OTg4NTQ3NjIy
-MjE0NjU0NDIzNTc2MzU3ODU0Nzc2NTY0NjY4OTc3PTc0NjQ0NDMzMzE0NjY5NjEy
-MzM4NTY0NjY1NTM2NTM2MjQ1NDIzMzQ2NDI3NjY5Njc3NzY0MzExNDY2NTM0Mzc2
-NDU0MTQ2MzU1MzM0NzMzMTI0MzQ0Njg1NzQzMzQzNDQzNDQ0MzQ2NDQ2ODU1NjY1
-NTQzNTIzNzc0MjIyMzc1NTUzNTc4Njc0MTAyMzQ1NzU2NzUyMjc1NTU0MzU2MjY1
-NjQ0NTc1NTM1ODUzMTMzNTUzNDMzNjU0PDY1NDQ1OTczMzU2MzY4NTU0MzQzNDU1
-NTc3NTU1NzU0NDMzNDY6OTg2NDI1Njg2OjczMzQzMzU2NTM0NTMyNTY0MTU1MzU2
-NjY1NTMzMDMxMjQ4NDk3OTc2Nzg5MzY0NTQzNDI0NjU3OTg5OTc2NzY1NzQ2NTQ4
-NTU3NTY1Nzc4ODY4OTY5Nzg2NjIzNDY2NjY4Ojs9NDY4ODg3OTc2NTU1ODc2NTU1
-Nzg2OUM6Nzg4OTs4OTk4Ojo/ODc6ODY2ODc4OTo5Ozk4Njg3NDM0NTc5OTw3Njk4
-Njc4NzY5Ojg5OTw4NTM3OjY4ODs6OTw5NjQzNjY5ODs5OTk4Ojs3OTk4ODU4ODc2
-OTk7Ojg4Nzg5OTs5NzY5Nzo7ODc3OTg4ODk6ODU4Njo3Ojo9ODw5PD07Ojw4ODs/
-Pj03Ojw+QTxBPT08PT06QEBAQD08Pz4/QTs5Ozs9PD88Pjw5OTs9QkNBPUBAPTs7
-PTw+PD89Pz08PEJBREFEQVSkws7X29/j5ufo6upHS0hISz0/QUM+P0E8ODg7Ojs7
-PT9BPDw9PDc9Ozw8Oz06Oz9APDg3ODQ1ODg6OTtHPzg3NzY2Nz04Njk4OTY8ODY0
-Njk6Ojw6NzY4NjYzNjM1MzI0NzQ1Ozc6MzY2PTo+PTw5NTU0MzU0NTQxMjY3MzU0
-Njc0NDM1NjMzNDUzNTY0NDMyMDQyMzEzMTI1RFMyMjI4Njc2Njc0NTIzMTc1NDc2
-NDU3NDMyNDU2NDUvMTI0NTY2NDM2NTY2Mjg1MjM0MTM1MzM1NTc1MTE2MzIyMjY3
-NzUyMzI3MjQxMzY3NDc1NDU0NTg2MTQzMzE0NTM0NDU1NzY1NTQ2NzQ0Nzc0NDQ2
-MzY2Njs5ODc2NDQ2NDYzMjY3MzU5OTc1MjI2NTU1MTEzNjU3NjUyNjU0NDU4Njc0
-MzQ1NTMyNTY0MzI0NzgyNDY2MzAyMzM0NTQyMDQwMDEyMjIyNDM0MTUyMDIyNjQ1
-ODU2NDQ0MjQxMTIyNjYzNDQ1NDI0MzU3NTYwODUyNTQ1Mzc0MjI1Njc1NTs2NTc7
-NTU2MzM0NDM4NTU0NjQ0MzMxMzY1NzMwMTM1NTUyMzU3NzQ0Njc2MzMzMjU1NjY2
-NTI0MzMzMzIyNDQ0NDM0NjIzNTIzNDIxMzYyNTIzNjQ0MjU0MzIzMzY0NTUzMzE0
-NDExMjU1NDU0MjU0MzIxNDUzMTE1MzU4NTYzNTQ0NjY1NDk4MzQ0MzU1NzUyMDI1
-OTc1MjUxMjY1MjIyNzc3ODc1ND05NDM4NDMzNTQ0MjEzNjEzMjM3NDM1NTZANzcz
-NTk5NzU1NjU2NTc2NjU3NjY2OTo5NzU0MzU4NTU0MzM1NzU0MjQ3NDMyNDM2NDI0
-NDUyNDM3NjY1NjQyNDQxMjA0MzQ1Njc2Njg1Njg2NjU4MzQ0MjIyNDU0NTM1Ojc1
-NzY5Njc0NDU1NTUyPDM0MzU3ODY1Njc3NTU1NTQ0NTU2NDU0Njk1MzU7PTY2Nzc0
-MTIyNjc1NDY2NzY3NjU1NjQ1NDg0MzQ2NTU1MzM0NDEyMDI0MjEyMjEwMjIzOTw8
-NjQ1NTQzNDc0NTM0NDMzMjQ2NTQ1ODk2NzY3NjUzNTQwMTM0NDY3NjY1NTgzNDMz
-NTYyMjU1MzI1NTQ2ODU0MjY4ODY0NTIyMDM1Njc1NzUzNTg2NDU0ODQ1Njg2NTc4
-NjU2NDQ2Njc3NjY3NjU1OTQ1NzQ1Njk3OTk3Nzc4NjQ0NzU1NTc2NTU1NTU1Njc1
-NjU3Njc3NTI3Ojk2NDM1NjU1Nzo5NzY5OTc5PDc4Nzg3OTY4PTs5NzY3ODk8Ojk4
-PTc3OTw8ODU3OTs1NzY5NjY0NTU2NTY4NDY3Oz06Nzs9Nzo4Njo5OT49OTY4OTg5
-OTk4OTo0Ojg6Nz09Ozo7Ojk4Nz88OTc2Ozo5Ozs2NzY4NTg8ODU2NTk3NTs6ODY0
-NTc5Ozk6NzU3ODo5Oj4+PDo5Qj42Ojo7ODg9Pjs7ODg4QTw9P0A5Ojw5PDs7Ojw/
-Pz48OjpBPj1AOzlAP0FCQkA9PD0+Oj47O0BBQ0NDPDtCREFAREFHdazDz9bc4OPl
-5+nq60RJQURDREVBQEA/PD47PDo9QEA9Ozk6O0A8Pjk8QTs9PDo8PTxAODo7Nzg7
-PTs6Ozw7Ozs5Nzk4OD46ODk3OTs4Ozk2NDU3OTo6Nzo3ODQ1NDc4MjQyNjc2NDQ1
-ODk6OD03NTMzNTEyNDg2NTc4NTU1MjMzOTw5MjMyNDMzNjUzNzIzNTU0Mzc2MjM1
-NDM1ODQ2ODMyNDU1OzM1NTY1NDQ3NDU3Nzg4NzY1MzU2MzYxMzQ1NTU1NDMzNTUz
-MjQ0NDQ6NTM0NjU1MjExNDU3NDIyMzU0NjU0MjQ0MjMzNDY1NjY1NTEzMzc4MzMy
-NDM0MzIzMzQ1NDI0NDUyMjQ1NTYzNTc1NzUzNjU3NzQ0NzMzMzQ0MjQyMjU2NzQ0
-NTw1Njc0NTQzMTQ2NTQyMjI2MzAzNDk2Njc3NjQ0NTY1NjI0NjQzMjEyNjQ0MzEv
-MDIyLzEyNDU2NDUxMjIzMjIyMDQ1MzQ1NDU2NjY3NjQxMTI2ODU0NDY0OTIzMjY3
-NDQ0MzIzNDY2NjczMzQ0Njc1ODg2NDU2Mzc2NDI0MzQ2NjY2NTQ1MzMzMzQ1NTU1
-NDUzNDQ0NTc3NjQ1NDUyMzM0NDU3NTM0Mzg2MzAzMzY0NjM0Njc2OTQ0NDExNDE1
-MzQzMjU3NTQzMjI0MzMyMDIzNDY5ODMyMDU0NTIzNjc0MzQ0NzQxMjI0MzQzODg1
-NjMyMzMzNTUzMzQ2NjY1NjUyNjczMjUzMzQ0OjUyMjQyMDAyNTY3ODQ1NDM0NTc2
-NTY0NDg9NjUzMjIzMzM1NzU3Ok82NTU0NTU1NTIyNDQ0NTY2NTMzNzY3NjQ1NTQ3
-NTMzMTM0NDY1NjY0MzU0NDY2NjU1NTQ2NDY0NTc3OTQzNjQzMzUxMTM2NjYzMTQz
-NDU0NDg1Njg1NTUyNDgyMzcyNDQ2NTY2NjY4ODc2NjMyMzQ0NDM0Mzk2ODc2NTU3
-NTY2NDM0MzMyMzQ1Njc2ODc1NDQ0Ojc1MTQ1NTY3NTQ1NjU0NTY3Ozg2OTY3NjYy
-NTY0NDM4NTIzNzc0MjE2NDU5Nz89NzM0MzQzNDQ1NzYzNDUyNTQyNDIyMzY2NjQ1
-Njc5NjQzMzIyMzQyNTg0NDg4NTQ1NzU0MzMyNDUyNDI1MzAxMzIzMzUzNTc4ODQ1
-NjY3NjY4NTM1ODY1NTQ2MzU0NDU1MzU4OTY4NTY0Mzc3Nz04NTg2NjU1NTQ1OTg5
-OTc4NjU2NjM2NTU3Nzc3MzQ4ODUzNjY2NTUzLzM2Nzk1MjM0Njc3NjU2NjQ1NzY5
-Ozk2NjY4Ozs5Nzo5OTg9PDg4Ozo5Ozo7Ojo6Njc6ODc2Njk8OTk4NTM0ODc5ODk7
-Ojo6OTg5Ojo6OTk+OTg1Ojg5Ojo7NTc3Nzg5Nzg0Njc5Ojo5ODY7Ozo3ODg2OTk4
-ODg5Ojg5Nzc3NTo3NzY1Nzg4OTg5NjU4Ojc2OTo7OjU7Ojw5PTw7RUA9Pj88ODk7
-Ozw4Oz08PDs7Ojo8PDs7Oz1AQDs8PUE7PT5APD4+Pj9BP0BBPkFBQkE/PkFFQT8+
-Q0FARz08QD07OT5AQkeJrcPO2N3h4+Xn6erqSEVOR0VFPTxARD08Ojk4QkFCQT08
-ODw7ODo7Pj1BQzw/Ojc9Pj84Nzo2NzY3Ozs5ODo5NzY4ODo5OTo5ODk3Njg3ODc1
-Njc2ODc5OTg0NjY2OT03Njk5NDY3Nzc3ODY2OTYyMjUzNDI2NTU2PDczODU1NDQ3
-ODQ3NjU1NDk0NzcyNDM1NjY1NDMzMzQzMzQ2ODg7NTI2ODU2OTQ0NTc0MzMyMzM0
-NDQ1Nzc5NzU1NjY1NDU4NDM1NDU0NzYzNDEzMzo2NDQ1NTUxMTIzNDU0MzQ6ODc0
-NjMzMzMzNDQzMzE0NTQzNzE1NTQ3MzE1NDM1MjMxMjMzNTEyNDU0NDc1NTUzMzQz
-NzU2NjU2NTUzNjc2Nzg4NDAxNTU2OjY1NTY4NzY0NDMzNDUzPDYzMTE2NTA0NjQx
-NDQ1NTM0NDU1NDUyNDU1MjQyMzU0NDIyMzAzNzU2NzY3NDEwLzIvNTQyNDQyMzI1
-Nzg1NDYyNDU3NDY3Njc2NDY2MzQ0NDIxMzI1ODY0OEM3NjMyMzI2NDc2NTU0NDY2
-MzMzMzU2NDQ1NDc4ODc0MjAzMzU0MzU0MjIzNjQ0NDQ3NzYyNDQyMjIzNTE1NTY0
-NTM0ODU0MzIzMzU2NjQ0MTM0NTY0MzY6NjMyNjQzMzM0NDM1MzUzNDMwMzc2NjQ1
-NjMxNDU3NzM2OTQzNDM0NDU1MjM1NjY2NjQxMTU3MzIzMjU1NDY+Nzs3NTY1MTY0
-MjU2NjEzMjYzMTUzNTM0NDQyNDU1NTU5NjI0MjQ3NDM0OTc2Ojg5NzQ0OTY3NDY1
-NTM4NzY2NTU3NjczNTQ0NjQ1ODY3NDQ2NDMxNDEzNTU1NC8zNDU1NzU1NjU2NTc2
-Nzg1NDM0MzUzMzQ2NDEyMzY4OTY2MzIzMjQyNjc0NTY2NTc2NzU1ODI2NTY2Nzc2
-NzQ2NTM0ODY0NTQxNDY1ODY1NjUzMzQ5OTg2NTIuNDQ3MzczMjc4NDQzMzQ0MzM4
-NDU1NDQ2NjQ1OTkzMzY3NTMyNzYzMjY0NTc1Njc3ODU0MzM0MzMyNDU4ODUyMzUz
-MjY1NzozNDQ1MzM0NTM0NzUxOTQxNTIxMTE0NDEyMzI3ODc1NDU3Njk7NzU1NDQ2
-OTU1NTMzNTQ0MzQ2NjU5NDIzMzM3OTk1ODQ2Njc4NTM0MzM2NTQ2NTU0NTY4Nzg3
-PD87Ozw5ODw6Nzg2Njo3NDczMzQ0NjU2Nzc2Njc3NDM3ODQ1Mjg2Nzc2Njg4OTg0
-Nzc3Ojc1Njg3NDY4Nzo4NzUzNTo5Ojg5ODo5PDo+Njg4Ojo2Nzc6OTc6Ojk7Ozg2
-Nzg0NDc5Nzg3Ozs5Njk6Ojc4Ojo7Ojg5Njg4Nzs5OTg5PTs9Ojc3OTg9Ojg7OTg6
-OTc5OTQ4Nz06NzY1NTc4Nzo4Nzc4OjY3PD03Nzc5Njg3OTk7Nzo2Nzo4OTo4NjY2
-NzQ2ODg4Nzk4Ojs5Ojo8Ozs8Ojk8Ozc5Oj1BPj05PTo3Ozw6Pj1CQj1BQD9BPzk3
-Oz49Oz1APUNDPjs+Pjs+Qj0/O0M/QUI+QUNBQUJEQjw5OTtBQ3Ctw87X3ODj5ufo
-6upHR0dKSEE8PDxCQkY+OjxBPTw6PDxBQjw5PTw6QDo7PT44OTs7OTg4NjgzODo5
-Ojk3Oz87ODk2Ojk3Ojs4Ozs4ODU1NjU3OTY8Pzc3NjY4Nzo4ODg8Nzo5ODc2NzQ1
-ODY3NTk3NTM0NjQzNDM1NTQ3ODg0MjMzMzUzNzc4NTU2NDQ1NzI0NDY0MjM1Nzg3
-NTU5ODg1NDc4NzUzMjY3NjYzNTQ2MjMzMzMyNjY3NTY1NzU1MzQ1NzQxNTYyNDMx
-NTMzMzQ0NzUzNDMzNTMxMjQ0NjM2NDM0MzA0NTI2NzU1NTI2NTM1NjIzNDIxMzI1
-NS8zNjI0Njc1NDg1NTQwMzQxMjYzMDM1NTM2NjY2NjUzMzQ2Nzc2MzEyMzI1NDQz
-MzIzMjMzMzc0NTU1NDEzNjk2MjM0NTYxMDAyMTM1MzQzMzI0MjI1NTMzMzQ1MzI0
-OzMzMjMxMDIyOjE0MzAxNjUyNDM0MzU0NDU2NTQ5ODU0NDQzNTY0NDY2MjI0NDQx
-MjEyMzI0QDczNDQ2OTIzNDQ0MjIyMzQzMjQyMzMzMzgzNTE2OTY4OTMzMzEyMzUy
-MzU0NDU2NzY2NjU0NDM0NDIxNDExMDE0NzU3ODY3NTIyMzY0NDY1MzQ0MzQyMTM1
-NDUzNDQ0NDY1MzQ1Mzc4Njc2NTg0NTMxMzczNTUzNTU3NDI2NzUxNTU3NjY0MjQ3
-NzEzNjo4NjM2NzY1NDI1MzU0NDY2MzU1NTc3NjU3MjY4NzY1NDQ1NDQyNTY2ODg0
-NDE0NjQ0NzU0NjIuMzQ0NDQ0MzU1NjYzNDY3NTQ2MzQ0NDM2NzY1NjY1NDIzNDYz
-MTU3NjIzNTM4PjY0NTQyMTQ1MTU3NjU0ODQxMzQyNzQ0Nzc2NDM2NTU2NjU3NDEy
-NDY0MzY2NTg1NjU1NzY1NzM2NTY3NTg3NDU0ODU0NjUyNDQ3OTU0MzE1Njk2NTc3
-OzYzMzQ1NDM3ODc1MzM1NDM3NTQ3NTQ0MTU0Nzc3ODc4NjU3MzQ4NjQ0NDc0Njg4
-NzUyMzM3NTc1MjU0NzY1MTUzNTM0NDU0NTE1NTg3NDQyNDM0NTQ2NjczMzU2NDU3
-NjU0MzE1OTY3NTQ1MzQ0Njg3ODY0NTE1NzU1Mzk2MzQzMzQ3MzMxNjMyNDQ1NDIy
-NTY2Ojk2ODczMzQ1MjY0NDQ1Njk5NjU4NzY3Nzc2Njc0NDEwMjYzNTc1NjY4Pjk1
-Njc3NTc4NzYzMzIyNDU1PDU0NTc2MzQ3ODw5ODc4ODo2ODU2NzU3NzU3Ojs2NDU3
-NTg8PDc7Nzg5OTs3ODc6Ozs7ODw9Ojk3ODg5ODs5Pj46OTc3ODk4ODg4NzM1ODk4
-Nzc5Ojc3OTs8OTk1NDM1OTs5OTg3PTg3ODk2NTg5ODk3ODg7NDE2ODY3OTU6OTY6
-Ojg3Nzo4Njk4Njc4Njs9OTc3NjYzNjU+PDo5OTg7Ojo5Ojs8OTk4OTw9Ojo6Pzw6
-OTg4Ojo4Nzo4PzxCQ0M7PkFCPTw+QUA6Nzw8PEFBQUE+PDo5PDw8Qj9CP0JERkE+
-QUA9QD9AQj5BQT1Bcq/Fztfd4OXl5+np6kJCQ0RIQ0I/QERISkVHQkFAOzw7PT4/
-Ojw9Pj84Oz0+Pjs9PDk4NTU4NDg1Njw6ODk5Pjs4Ozo6QDs5OTg3PDk3Nzo4OTs3
-NTk7OTY1NzY2NTk5OTg3NTc2Ojc3OTMxNTg2MzY3MzMzNDQ2ODY0MzQ3NTU2NDU1
-NThJPjU4OzQ0Nzc3NDY0NTg4NzUzNTQ4ODk1Nzc2MjU3NTIzMjUzNDc2NjY2NDI0
-NDIxNjY0NTQyMzIzNTQxMzIvMzMyNDU1NzU0NjYyMy8uMTQzMDAyMzMyMTIxMjAy
-MTM3ODg0NDQ3NzcyMzIzMTEzMzQ0NTI2NTU2Njc2NDc4NjY0NDMzNTU1NTU2Mzg0
-NDY1MzU2NDI0NDMxNTY2NjM2NDIxMDIzMzExMjI0NDU1NTMyNjEyMjI1MjQ2NjU1
-NTM0NjI0MzMwMTAvMTIyNDUzNzU0NTI2MjMyMzUyNTY3NTM0MjY0NDQyNTYyMTM0
-NTQ0OTU3NzU1NjY3NTU6NzM7MjAwMjQzNDAxMzY1NjQ0MzE1NDMzNDMzNTQzNDIy
-MzU2NTY0NjUzNjU0NDM4NzQ0NjQ0NDI3NjY1NjY3NTU3Njc2NDIyNTQxMjU2NDMz
-NTY4ODc2NDQ2MzU1NDg0NDg1Njc3MTEvMjMxMjI1MjQ1LzE2NzUzNTU1MzQzNDg0
-MzIyMzM1NDQ0NTQyNDc0NzU0MzA1NTMzNjM0MzY8NjQ1NDU1NjY0NTMxMzQzNjk0
-NjM1NTUzNjg2NzYyMzUzNDMyNzU1MjU0NDIyMTExNDY0NDYxMDMzNzM3NDY4NjYz
-NTU1NTg0MzM0Nzk2NjU1MzczMzIyMzI0MzQ0MzY1NDMzMjI2MzEzNDMzNTk2NzU2
-MzQ1NjcyMzc2MzY2ODUzODU3ODU2ODg1NjQyNDQ1NTc1Mzc4NTU3NjQ1MjQ1NTM1
-NTMyNjYzMzQyNDU5NjM0NTU2ODUzNTM1NjY2NTU1ODg4ODo2NDMzMjM2Njc1NjQy
-MDEzMzY1OTUxMzMzNzc2NDMxNTY3NTQ0NTc1NjU3MzM3ODUzNTM1NjU1NDIyMjY3
-NjIwMzc2NDU3NjY3ODM0MTAyMzM4NTU1MjI3NzU2NDM1NjY4NTU4ODg4NDMxMTMy
-NDM1NjQxMjQ1MzI0Mzg2NDQ5Nzw4OTU0NDQ0ODUzNDUzMjI0Mzc7NTU0NDM0NjY3
-NTU2NjY0NjUyMjU1MjQ1NDY1Nzk7Nzc1ODo6Ojo6NjM0NDY0NDQ1NTY2NzY2NTY5
-Nzg6OTc2OTg5Ojk7PDc1Njk0NjY5NjY4OTk7OTg2Nzk1NTc5ODs7PTs+Ozg5Ojc0
-NjU3Nzc5PDs8PD04Ojs1NjY4ODw4Ojk2NTk7Ojg3ODg5ODg3OTg1Njg4ODk2OTk3
-Nzk7ODo5Ojo7OTc2NTM0MjY6Ozk2NzQ1ODo2Nzo6ODg3ODY2OD07NDk4NjY1Njo9
-PDk3ODs7ODk8OTk7Ojw9PTw3OT8/QkA6OTw5OjtCPz87PDk+Q0NBQUFBPUBAOzo5
-Ozs7PEJCPz4/Pzw9Nzw9PkA+Q0NAPTw+REQ6OTk+QkNDP0RjscXP1tzf4uXo6Onr
-R0lHQ0JAPztER0hKSERAQj9AQEZBPkM5Nz5BPUFDPz89QTk8PT09OTc7Ozc1Nzc5
-Njk5Ojs8OTo8Ojg4PTg2NDc3NTU5Ojg3NzU3OjU0ODc6NjY1NTc4NjY4OzYzMzY3
-OTQzODg3NDc3NzU0NTc5NDc0NDY2NDc3Nzc1Nzg4Nzk2NzYzMzY1Njc3NjUzNjM3
-NDQ1NDU2NTY0NjU2NzY0NDU0MzI2NTc0NTU1Njg0NDY2NjUzMjc2NzY1NDMzNDQ0
-NDIxMjEwMTAzNTc0MjMzMDEyLzE0MzM2MzU0MTA0NDQ3MzM0NTU1ODY2NzQ1MDU1
-NTYzNDM2NjY1NDM5NjY4PDQ0ODQzNTQ0MjExMzM0ODg1NjQzNjg1MTUyMTIwMjEy
-MjMzNzU0MTEzNTw7NDUyMTMzMzUyNDY4NDMzMDUyNDQyMDA0Njc2MTE0NTYzMjEv
-MDIyMTQ2MzI4NDM0NTAzNDU1NDIyMzg5MzY2MzM1NDU2NTU0NDQzNDg1NDMzMzUy
-NjY2NzQzNDc1NjU0NDQ2NTU1Njg1NTM0MjM0NDM0OTg5NDY0NDM0NDY0NTQ1NTIx
-MzY1NTQ3Nzk5Nzc4NzQzODMzNDQ1NzQzNjU2NjY2MzM0MzQ0MjMzMTU1NjM0My8z
-MzMzMTUzMzE0NzQ2MzQ0NjU2NTY2ODg0MzY1MzQ0MjQ1MjM1NTU1NDQ0MTIxMTIy
-MjQ0NTY2NDI2Njg5NjU1MzA1NTQ1MzM0MzI2NTU0MzQ3OTc0MzI1NjUzNTg1MTY2
-NDU1NTo0MjMxMzM0MzQ1NzQ1MzQzMjc3Njc2OTc1MzY1NzY3ODQ1NDM0MzQzNDUz
-MzQ0MzY0MzIwMDE1NDMzNDY0NTI0NTU0NDIzNDMxNDU1ODc6OTc0ODc3NzU2Nzc3
-NTQzNjg1ODg3NDczMzIxMzQ2NjQ0NDQ0Njc1NTI1NDQ3OTg0NTY4NTU2NDU4ODc0
-NTQ4NTQ1NDU3Njc1MzI1NDEzNDQzNjU2NzUzNjYzMTQxMjY1NTQzMzQ2MzEzNDQz
-NjU4NzMwMjQ2NTk5NjQzNjY0NTc3NDMzNTQzNjY1NTM1NTQ1MjU0MjIxMjIvMzM1
-NDY0MzQ0MzI1NTU2NjY2NTE1NjMzNTMyMzc3NjYyMjMzNjU2NDM5NjYzMjMxMjMy
-NDQ1NDI0NTU0NDQ2NTc2NTQ0NDU3ODk4Njg5ODY2NjQ5ODc2ODc0Nzc6NzY7NDI2
-OTo5Nzg3NDU1NzY5NDQ2ODMzNjU1ODc5OTk3Nj05Nzg4ODo+NjUzMzU1NjU3Ozs6
-OTk5Ozs4ODk4Ojk2ODw9Ojw7PDs7PTo5Ozc5Ojk6PDg6QDw5NzY4PTo5NjY1Njc3
-Ozg6OTk5OTc1NjY3OTU3Nzc4ODY4ODg7Ojg2NzY0NTg5Nzg9Ojg+OTY2Nzk4OTU7
-Nzo4OTs3ODo3NjY6Nzg4NDY4Ozo7PD49OTo7Nzg4Nzk2ODY4OTk7PUBAQkM+Pj5C
-QEA9PD08P0FAQD5AQDw9QUJBPj9BOTg8OT9APj48Qj49PDk7PEFBPUNFQEE+P0BA
-ODo8PjxCQURCRWGxxc/X3eDj5ujo6ulFRkRCP0U+PkJERERCQj9HQEZAQEJCQTo6
-PD47PTs7OTs9Ozk5Nzg9PTk5Nzg4ODc6OTg8OTo/OTg7Ojk4Pjs2NDI3OjU1NjY6
-OTo4ODcyNzg4OTk3ODk1Nzg3Nzg4NzU3OTY5ODc5ODg5NTU1NTU2Nzk5NTU2NT46
-Njc1NTM2NTQ0NzU1OjU2ODc1NTE0MjwzNjQyNDU1NjY4OTk0NDU2NTMzNDc1MTI0
-NjI1NTs0NTM0NDQ2MzM0ODg2MjMzMjQ0MzEyNDQzNTdCNzY0MS8xNTIyMzQ0NDQz
-NDY3NjM2NDUyNDIwNDM3NTM2NTQzMzQyMzU3NzIzNTU2MjAxNDg3Nzg1NTQ1NzMy
-MzQ0NjY2NzQzNzQ0MzY4MzQxNDQyNjEzNDU4NDcyMzM4NDQ4NDY3MTM2NDQ4NDEz
-MjEzLzEyMjIzMzMyNDU3MzMzNC8wMDQzMzQzNDQ0MzQ2ODQyNC8tMjM0NzY3NTYy
-MjM3NDM0NjY3NzY2Nzc2ODMxMTM1NjU1NTY0MzU1NDY0ODc1MzIzNzYzMjQzNTU1
-NDQ1NzQyMzQzNDU4MjMyMjIyMzc9NjEzNjU0NTU1MzM0MzIzNDI0NDIxMjE0NTQz
-NTM0ODY2MzExMDAzLzI0NDczMDAwMzY2NDQ1NDMyMzQ1NDMzNjQ0MzQzNzY0MzU2
-NDAyODY3NTU3NjQ3NDI1MzI4NTAxMjMyMjEwMjExNDY0MzU4MzIyMzU2NjMxMDI1
-NDM2NzU1NDQ2NjU0NTQ3NzY0MzQ0NzU4NzQyMTM3MjY0MzY2ODc0ODY1NTk1NjU2
-NDc0NjY0NzQ0NjY1NTU0NDM1NjYzMzQyNDM2NjU0LzI3NTg0NTIzNjU0NTk0NTIz
-NDQ2Nzg2NjQ2NjQ1NzkzNDQ2NDYyNDM0NTU2NjU0NTY2MzMyMjQyMzU2NjYyMzU3
-NjQ2NTU2NjU1Njc3NzYzMjQ1Mjg4ODQ0NDMyNTQ0PDY2NDQ2MjY2NzMxMzQ0NjU4
-NTU0NTMzMS8wNDM1MzI0MzQ2NDQ1ODczMzQ1Nzc1NDQ3Nzg3NjM0MzI2NjQ0MjM0
-NTg4NjM0NDQ1MzU5MzQ1MjMyNDMwMjY4NTUyMjMyNjQ0NDY2NjU0NjQyNTU0NTIx
[... several thousand lines of base64-encoded binary file data (deleted in this diff) elided; the payload is not human-readable and carries no reviewable content ...]
-MzU3NjQzNjQ1OTYzMzM3ODQzNTU1MjQxMzU4ODE4Njc3NjQzMzIzNDI1ODc1NjM0
-NTU5NTYzNDU2NDU1MTA1NjM1NTQ3NTY2MzU2NTQyNjQ0NzQ2MzUyMjQ0MjExMjM0
-MzU3Njg5NzY2MzM1NTUxMzUyNDMzNDYyMzM2NjQyMjIyNTU0NDc1NDY2NjQ0ODc3
-NjU2MzU0NDQ3NjYyMjI0NDQ8NzQ0Njc2NzQ1MzIxMzQ0NDc4NjY0NzIzMzUzNTI0
-NTc0NTc1NTMzMzIzNjIzNTU0MzI1ODEyMzQ1Njc1NjEwMzQ0MzM0Mzg1ODcxMzY0
-MjI1MjQ0NTc0OTY2NDY1NDM0NzQ0NTQzNzY1NDQ0NjY1MjU1NjU2ODc7OTUzNTY4
-MzQ2NTMxMTI2NjU1NDYzMjQ3Ozc1OTk4Njg2NTY0NTU3NTU1NTUzNDEwLzAyMzU3
-NT05NjU1NTY4ODk6ODQzNzUzMTQ1MzY2MjQ2NTYyNjQ2NjQ1NzMyNDQyODk4NTM0
-NTMzNTQ1ODg2NDc1NjMzNjY5Nzc0MzI0MjIyMi81MTMyMjQyMTMzMzQyNTM1NzQz
-MzUzNDU0Njg0NDI0NDIzNjk7NDEyMTM1NDU0NTYzOzU1NTM4NjM6NTY0ODo6NTY3
-NTYyNTw3NTc0NDc4Njc1MzU1Njc7ODg3NjI1Nzg4NzQyNDU4QDU2Nzk2NjU5Ozc0
-ODg4NzY3ODg4NTg3ODc3Njw4NTc4ODc0NjY4NDc3NDo7Ozk4OT07Ojg4Njc6Ojg2
-Njg4ODw8Ozk2OTg2Njk5Nzc4NzY2NzU3OTo5PDo9Ozw5ODg1NTc4Nzc1OD86Ozs7
-ODk3NjQ3ODg2ODc0MzE4ODc5Njc3Oj48PDk2ODg3Njk6Njo7PEJcYkc9YWpGOjw+
-QUE9OjU6PDo6PDg8PDs7PTw8Njc6Oz1CPz88PDo8Oz5APTY5PTw4Nzg7PD0+Ozs7
-OjpAPEE6P0E9PD49P0BAPERDPT9FRUVCRkVEREJER0Y/Q0BBQUNIR16qusbQ2N3i
-5Obo6enqQEVISUZHQkVFPTpBR0VCQUNDQz88PTs4Pj09QT4+PDo5Ojo5OTs7PTs7
-Ozo9OTg7Ozo4Pjw9Pj42PDk6Ozw6PEJDQUE4Njc5OTs2ODo3Mzk7PDc2NzY3NjY4
-OTo4PTw8Ojg3PDg2NzY0Nzk4NTMyNDk0NTM3ODQ1NTU1NTo0NTY4PDg4ODk5Nzk3
-NzY3NTc4PDU2NjU1OTY2Ojk3ODY0OzU4Nzg5NzQ1ODYzMjQ0NDU1NDg0MzQ0MzQ2
-Nzg5ODU1NjYxMzYxMzU2NDI3NzQ2NjU2NDU1NTY2NTY0MzU1NDUzMzEwMzQzMzQz
-ODM2NTY2NTM0NDQ1MzI0NDUzMDI1ODc1NDMzMjA1NTU2MTEzNTU5ODg7Njc0MTQ0
-MjU0NjY2NDQ0NDAwMzI0NjQ2MzM0MzI2NzUyNTQ1NTI1Njc3ODk3NjQzMzM1NTMz
-NTc3Nzc2NzUzMi8xMzU7MzAxMzY0NDUyMTQzMzMyNDM2NTI0NTI1NTU0NDQ2NDIy
-NTQ1NjY1MzMzNDIzMD40Nzc2NDQ1MjM0NTU3NjU3Nzc0NDQzMTAzMzMyNTQzMzMx
-MzQ5NjQ0NzQzNTIxMzI1MjI0ODc1NDc3ODU1NjY3NDY6NTQ3OTg1NTY2NjM2MzQ0
-NDU1Njk1MzIzNjc3NTQzMzEzMzMzNzY1MzU1NTMzNDk4Nzc3MzQyMjQ1NjY3NjY0
-MzY5NzIyNDM0NTU0NDQyNDQ0MjQzNTU1NDUzNDQ0NDMxMjQzNTc3NkE4NDI1NzU0
-MzQyNDQyNjc1NjY1Nzc1NjU0NTY1ODU2NzQ0PzY0MjYzNzY1NDI1NzQ1MzQxMjQ3
-NTg2NjUyMTQyNDk1Mzo2NTM0MS40NTIzNDMyMzc3NzU2NTQ2NjM1NjQ0NDM3NjQ1
-NjQ1NDQ2NjUzMTIzNTs5ODk2NjY2MjU0NTI1NzY2NDU4OzY1NTU0Njg3MzQ1ODw5
-NTQ0Mzc0NTQ1NDM5NjU3NjQ1NDc2NTI0MzI3NjY1Njk5ODk7NTU0NTg4NjM1NzQ0
-ODk6NzY5NTU1NjU2NTY1MzY1NTU2OTc2NTI2MzI0NTU0Nzg2MzczNDI0NzY0MzAx
-My8xLy8xMjQzLzAzNDEwMjY1NTU2NTc1NTg1NTYzODc2NDQ0MjIzNTQ0NjU4ODQ2
-NTY3NDQ1Njk8NTQ2NTU1NDMzNTg2MzM0OTw8Ojg4NjU0NTU1NTg3NTU2NTY6OTc3
-Nzo4NTU4Oj05NzY5Ojg2NzU1NDc3OTg7Ojg3Nzg7ODc2Ojo4OTY0NTY2NTU2Njc1
-NTc1NTc0NjU3Nzc4ODk3ODo3OzY3ODk4Ojg6Pz44ODo6NDU2ODU1Njg4NTQ2Nzo7
-OT07Ojo7ODc3ODg4Ozs4MzY5NTg6OTk3ODc3OzU3Ojk2Nzo5OjY3Nzc4ODo7ODc6
-NzU2Mzk4Ojw7Oz1CRmlkST9dXEBCQz0+Pjs6OTo+Pjs7Ozk7PUBBOzw8Ojo8Pjo4
-Ojw7PDo6OTc6P0I9PD84OTo9P0A+Qz49Pzw7O0NCQUJAPD48QTw8Oz1BPz9BQUNC
-QUNCQUFESERBQ0NESkhATJu5yNHY3eHk5ujp6utFS0ZHRkRHTU1APDxEREc/REJE
-Ozk5PDtBPjs+PTs6Ozk5Ozw+OjY3ODo6PTs5Ojo4Ojk9Ozk4Ojk0OTk6Ozc3ODs9
-ODg9Ozg2Njk2OTc4ODo6OTk3Nzo5ODo6PTg7PTo7OjY0Nzk5NTY1NTY1ODQzNDMy
-NTg7NjU1NDk6NTA4Nzg6ODc2NTYzNTU2NTU1Njc3Njc0NjY3ODk4ODU2Njc3NTY2
-ODQ3ODY2Njc0NDg5Nzk4NDM1NTY1Nzc2Nzc2Njw2Njc1NTI1NDIxMzU0NTU2NTQz
-Ojs2NzY0Njg2MjQ1NzcyNTQzMjE0MzU2MjAwMTQ0NTQ1NjY5NTU0MjQyNDQ1NzQ0
-NDg5Nzk2MjI1MzE0NjQ3NDU5NjY2NTQ0NTc2NjY1NTQzMzQzMjM2MzU0NDIxMzMz
-MjYzNTY0NzQ0NDMzNjQ0ODM1MzY2MzIyNDUzMzQyNTM3MzM1NDUzMjIzMjIxMjQz
-MjE1MzU1NzQxMjUzMzUzMjI0MzY1MjUzOjE0NDI0MzQzMjU0NDo3NzQyMjEyMjEx
-MjQ0NDY2MzU0NDQzMjIyMTM0MjUzMy8zMzM3ODEzMTQzMzMyNDQ1NjYzNTQzNDQ2
-NTMzNjU0Mzc1NjY1MTEyNDI1MzQzNDU1NjQ0NTQ2MzQyNTQyMjU2Njc1MjY1NTYz
-MTg3Nzc0Njc0NDQ2NjY2MzUzMzIyMjQ0RTc2OzMzNTYzNjc3Nzo3NTI0Mzc1NjU2
-MjQyMzM1NTIyNDE0NjU0NTQ1NjQ1NDQzMjQ0MzQ0NTY3NzU1NTU2NzUzMzE3NzQ1
-NDIyMzQ0NDY0NDMzNjc1NDkzMjIzNTUzMzQ1NjgyMjg3OzU0NDMzMzUzNjU2OTQ0
-MzM2NDI0NDY4Njc1NDQ0NjQ1NDQ1NTU0NTY0MTI0NDU1MjMyNTQ1NDY2NTIzMjM0
-NDY2NjM0NDU1NTQ1NzY0MjEzNDIzNTc4NDMyNjgzMjQxNjQ0MzM0NTM1NTM0NDY1
-ODg2OTY0MzUxMjMwNjUxMzU1NTc1Nzk3NjU2NTY1NDI3NjY2MjQyNTY2MzM0NTQ2
-NjQ0MzIzMTAzNDI1NTIxNTU1NzgzMDM1NzU0Mi0uMjQ2NTUzNDM0NDM1NDY1Nzk0
-NTQzNTM2NjU3NzQ0MjUyMjU1MjM3ODQ2NjczNTYzNjc4NjY0NDc3NTQzNzY1NjY3
-ODk1NDc3ODc4Nzc1NjU4Njc5ODU1NTc3OjQ2NDQ3Ojo4ODc4NjY3NzU0NDY3ODk8
-Ozo4OTc3Nzk6PT07ODYzNDY5NDQ0NTc5ODk5Ozc0Njc2NTc2NzY2NjY6OTY4OTc5
-OTg4NTc6Ojc2ODk4Nzc4NjY4Nzc4PDw5Nzg3Nzo3Nzk6OTg3Njc4NDc6OTM0Njc4
-Njg7ODw7ODk3Ojg+OjY2Pjg3Nzk2NDY9PTs7Nzk6OTg2Ozs/Z2hNQmJRQT48PTc4
-OTs/OTo6Nzs7PT44Ozk4PDg5PDw6PDk2OT5APUE8Pj5CQTs7Ozs9OTtAPTw+QUQ+
-OTk8Qj09P0A9Oz0/PUJAQEFDPz07PD1CQkFEQTlHR0ZIR0tOREJImrjI0dje4eTm
-5+nr60NLREpIRUFFQj8/RUZGSEZFREVBRT9HQD48Pjw8Oz07NzQ6Ozk5OTw4PDo4
-OjY3Njg8Pzw8PDs7Oj46ODY4ODY7OTg8PUE8Ojo3NTUxNDc8Pjg8Njc2ODk6Ozw5
-Ojg4NTg6OzY1Njg6NzU1NTczNDc0NTU3ODg4NjY5OTg6OTc2Nzk2NTk5NjAzMzU0
-OTk6ODk5ODg6Nzc1NTY1ODo4Nzg2Nzg6ODY1Mzc0NTQ2NTY4NzYyNTM1NDczNDE2
-NzY0NTY1NjY3NzczMzM3NDQ0ODY0NDM5ODY1NDU1NDQ0MzU1NTc0MjQxMTAxMzQz
-MjIyMzU2NTUzNTU0NTMxMzcyMDIzNDM1MzI0NDM0MjE0NDY3NjUyMjY2NDQxMjQ0
-MjIxMjExNTQ2NTc1NTQyNDczNTM1NzM0NjM0MzU2NzYzNDQ0NDI0NDU0NDE0NTUz
-NDQyNjE2OTU3MzExMjM0LjI1NjY3NjY3NDg0NDI0NTkzNzI0NTY1NTU2NDM2NDMy
-MTExMDQ1NDIzODY2NjIyMzU0NjY1MTIyMzI2NDM2MzI1NDMzNTQxMTMzMTI5NTM2
-NjM0NTMxMzg0NjY1Njk2NDM2MzQzMzM1NzUyNTY1NTU0NDQzMzMyMTIzNjk0MDIz
-MzQyMzI2NzQ0NTc4Nzg3Nzg2NTQxNDU1MzQ2Njc3NDc3NzQ2NjYzMzQzNC8yNDI0
-NjQ4NjY0MjQ2NjM2NDc2MjU3NTY1MzQzNjQ2NzY1NzgzNDY2NjU1NTY2NDEyMzQ0
-NzgzNTc3NDY3NjU2NTU0OTU1NDE1MTUzNTQzNDU1NTQ0MTE0MzUyNDU2NTY3MzU0
-MzQ2NTY1NDU5NDUyMzIzNTg4NjM4NzY0NDU2NTQzMjM3NDM1NjI0MzU1NDUzMTQ0
-MjI1NTU2NTQ0MzExMzQ0NTU0NDQzMzI1NDc4NjM1MjM1NDY1NTIxMTMzMjI3NTY0
-NTI0NjU0MzExMTIyNjQ0NDQzNDU1NDM1NDMyMjIyMzMyMjMzMzY1NTU1NTU1NzY0
-Njc0Mjc4NjY2ODUzMDIzNDczNjQzNjU3NTQzNTQ1OTQzMjQ0NDc0Njc4Nzg3NTQ0
-NjU5NTIzMjc4Nzg3NzIzNDY1NTc2NDU1NTU2OzY1ODQyNDY5NTQ4Njk2NTMzMTQ1
-MzEzMjQ2NTc2NjUzNDc2NjUyNDgyMzg2NDU1Mzc4Ozg4NzU1Nzk4NTY1ODQzNjY3
-Nzo4Nzc2NTc3MzIzNTU4ODg4OTY2Nzc2NTc4NzU3ODc3Ojo7ODk1Mzc2MzY1NjY2
-NTk3NTY4OTo5OTo2NTQ3OTU3Ojs4Ojo6Ozg5Ozg4Ojs7Njg4PTg5Ojw5Ojo5Ozw4
-Nzk5OTY3Nzk4Njg6Nzk7PTo3OTY4ODg4OTs7Ozo9Ozo6Ozk7PTw6Ozc1Njc4ODo7
-Ojo7OTo7Qjw6NzxhalFFX01AP0M9O0E9ODc5OjtAQD04PkBAPDw6Ojs7Ozo8Ozg2
-OTw+QkRBQjw6Ojs6Ozo5Ojo+Oj9DRUA6OTg7PT9BRT48PD49QTs+Qj9BPT88QkE/
-Q0ZGQkNIQ0BESUhIQEiausnR2t7h5Obo6uvrRkVCREJCQEM8Oj9FQ0dKQkVERERB
-PUFCP0M/Pjw8Ojo9Ozg3PTs5PDo5Ozs5OTY3ODw+Oj1BOjo8PTw8Ozs/PTo/Ozw7
-Ozs3Ojk3ODo2Nzc6PUA8ODo7Nzc4Nzs6ODU2OD07Ozg3NzY5Nzg7OzczNTc1MzU3
-NTU1Nzc7Njg7OTg3NjY3Ojw5ODY1Njc8NzY4Nzc3NzU1NjY0NjU1ODY1Nzk3Nzk5
-NjY0NDU0NDU0NDU1NjQzNjU2Nzc1NTQ3NjQ1ODU1OTo2NDQ0MzM3Njg1NTY1NjU1
-NDc0NDY1NzQ3NzIzMzMzNTU2NDIyMzQ1MjAyMjIyNjY1MzU0MTIyNDY1NTY0NDY2
-Ly8xNTM2NDM1NTczMzQ3MzM1NDUzMjMyMzYyMzc1NDU2NTQyMTAzNTUyNDQ0LjIz
-MTUyMTMyNDMzNTc0NDQzNTg1MzMzNDM0NTM2NTQ0MzQxLTM0NjU1Mjc2MzIzNzQ0
-MjM3NDQ2NzU4MzQzMjM1NTg1NjY1NDI0MzMyMzY0NDU5NDQzMzExNDI1NDQ0NTMz
-MzMyNTY0MjQ2MjQ2NTM3NDU2NzQ1MzMxMC8xMzIyMDU0MzQ1MzQ0NTIxMzIyMzQz
-MzU1NTMzODY1NDQ2NjU0NDQ1NTI4NDMzMTQxMjU1MzU0NzU2MzQ1NDM0MzU1ODUz
-NjQzMzQ2Nzg4OTc1MTAwMTEzNjQ2NDQ2NDM0NjY0NDMzNjU1NjY1MjU1NTY1MjU2
-Nzc2NjY0NTQ2NjU0OTs4OzUzMzE0NTU1NjU1NzQ2NTU0MzQ2NTc3NDMzNTM6MzEz
-MjQxMzQzMzQ3NTU0NDQ2NTU1NjU6NTY1MzM2NjY7NjI1NzY1NTMzNTY4NTY2NzY5
-MDIyNDU2NjY0NTM3NjIxNTc0NTI0NDQ0NTQ1OD01MDI3MjU0Ojk4NTMyNjMxNTc1
-NDI0MjIyMjM1NTM2NTYzMzU3NjM0NDMyNDAyMjMzNTEzMzY1NDUxMTM4NzQ2MzU0
-NTM1MTQ0NTY0NjUzMjQ0NTQ2NzY1NDQzNTQzMjU0MjQzNDUxMi4yNTY2NzM1NDM3
-MjAyMzY2MjQ3NjY2Ojk0NDY2Njc2NjQ0MTI3ODU0NTU5Nzg4Njg4NDQ0NDQxMjAy
-NDY1NTY1NzU2NDU0NTk1NTY1NTY3NTY2MzU1NTY0NTc4NTY1NDM3Njg4Nzc3NzY1
-NTQ0Mjc6ODc0NTQ0Nzo4NzY2ODY1NTc3OTc1ODU2Nzc1NTU1Njc4ODg7OTo1NzY3
-OTY3ODg5NTQ1Njg1Nzg5Ozg5NTc5OTU0Ojk6Ozo6ODo8NzY0ODk5Nzc7ODk6OTo4
-Ozg2Njg5OTo5OTo6Ojs7Ojo8OTo6PTs4OTg3NTU1Njc0OTo9QDk4OTk4Njc6Oj06
-ODc2OTo6Ozk4Nzc6Ojo3NzU6PEI7Ozw7Ozw6NTc6Ojo7PVFfVlNwSDw6Ozs/PTw6
-Ojg6OTs7OkNEQD49PkI9Nzk4NjlAPjc7Oz4+PTk+QT89PD08PDg4OTk7PEBBQUE4
-ODo7PUBBPj47PTw8PEBCQDtDQURCPEJFSUVHQkVDPEBDQ0NBRIu6yNLa3uHl5ujp
-6+tFQkFFQ0NIQkVEQUBCSUNDQ0NDQ0E+Q0BBPjw+OTo5Ojo9OTo7ODg6Ozs1NzY3
-OTpBP0E7PDs5OjY1Ojo/Ozs/OTw5OTs3Ojc4OTw7Pzs5OTo8PTg4MzY3NDQ2NjY1
-Njk4OTw6OjQ1NzY4Ojg3NjY3ODc6ODU0Nzo2Ozw8Njs2ODk2NTc3ODI0NjU1Nzc6
-ODU2NzU0MzI0NzY2Nzc2MzQ3MDY5ODg1NTU1NDMzNTY2NTQyMTMyNDMzMzU2NTI0
-NjY2NDU1NjU3ODU1NjY1NTQzMjMzMzQ0NDQ0NjY3NTU2NDEyMzc2NzU0MzQ0NzU4
-NDMwMjQzNzIyMjMxLy40NTUwMC82NTM0MjE1OTc2NjU2MzY0NTU1NzM0MjQzMjI2
-MjY1MjQ2NDUyLzIyMDAzMTM0Nzc4NTIzMzMzMjAyMjg0NTY1NzI0MjQ0MzIzMzQ0
-NDQzMjEzMjAxNDQ5NzY4NTAzNTE0OTw1MzQ2NTQ7ODMyNDQ2NzU2NjYzMzU1MzIy
-NjkzNDM1NjQzMjM0NDMzMjI0NTU1MzIzNDQ1NDI0MDUxNTYyMzU0MzU1OzY1NDEy
-MzMzNTI0MjI1MzQzMjAyMzEwNDQzMzIxNzMxMjExNDMzNDc1NjYzMjMyMjU0NDMv
-MDU2MzMzNDY1MzU0NTMyMS8xMTQzNDQ1MTE1NzY3NjM0NDQzMzMzMzQ2NTQ1NDM1
-NjY5MjM1MzM0NDM2NTU3NDY4MzI3NjcxMTM2NDM1NTg3NzY1NjY2NDU1MzM0MzU0
-NTQ0NzM0MzI1NDUzNjcwMzY0MzQzMTIzMzU0NTY3NzQ0NTM1Nzg4NjQ1NTc9NjQ1
-MzQ2ODU1NDQ0NTY3NzY0Njk2MjM1ODMzNTM0NjY3ODc3NTM0MzE0MzMzNTY1MjUz
-MjQyMzM4NjQ4NjU1NTQzMjQzNzc2NTQ1NDc3NzY2NjI1Nzg3NzYzMzMzNDY4NTU1
-MzY0NDY1NjU0NjAzMzUzMjU0MzQ2NTg3OTY4MzQ2NjQ0NjU3NzQ0MzM2OTU0NDY2
-NTQzNDU2NDM1MzQzNTY4Nzg3NTQ0ODQ4NjY2OTY2NTQ1NTg4Nzc4NjQ0NTIzMTMz
-NTg3NzY4NjMzMzMzNTY1NjQzMzMyMzMzNDU3NzU2NzUzMTI0MzMzNTU4NDQ3Njg5
-OTY4NjUzNjMxNTQ0NTQ0Nzg0NTQyOzs3Nzc2NjU4NzM2NjY5NjMzNjg6OTk4NzQ0
-NTU1NjUzNjc3NzYzNTc6OTk3NTc2NzY0Nzc4OTYxLjM1Mjc7Ojk3ODc3ODk8OTc4
-Oz8/Ozg4ODo6Nzo4Ojk8Ojk5Ozg4ODc5Njk2NDU4ODo7Njk3ODg6Ojg6Nzg6ODY1
-NTc7PDY2ODo5Ojo6ODc3Nzk5Nzk6OTc2PTg5Ojk2OTg5Ozc3Ozw5OTk6Ojs4PDw6
-PTw6OTo9PUFBW2JlZl9EQTs7OUA7PT07ODs9PTpCQz9BPT8+QkFEQjw5O0NAPjw7
-Ozo+Ozk5PTg4PT09Ozk4Ojs8Qj5BQ0RCPDtAPD1APTs7QT48QkBGRURIPz5DQkNF
-REVIQ0dIRkVFQj9DabbJ0dne4eXm6enr60VDR0hBQURFQkRHQz5AQkFGQz08PkJA
-Ozc6OTk6OTs4Ojo2PDk4OTo4ODg2NzU1PDc9Pzw3ODw6Pjk3Njc5Ojg3NjY2Ojcz
-Nzk7Pj06PDo8PD07OTc4OTc1NTU2OTY0NTc1NTU0NTg4NzU1NzU1NjQ4ODo3Njc0
-NTU1Njg4OTk4ODY3OTg3NjY0NTg4ODc6OzY5OTg1NTY1NjYzNzYzNDI1Nzc1Njg4
-NTQzMzU2Nzg3NzAxNDM1NTI1NjY1MzI0NjU0MjEyMjM1MzMzNTY3NjUwMjQ0MTI1
-NDU1MzU4NjQ0NDI3NDQ1NDY0NDc0NTYzNTExMTQyMDAxNDMxMzUzMzIxMjU0Njk2
-NDI1NjYyMzQ0NDQ0MzM0NTg0MjMxMjQzNDQyMjUyMjA0MDIzMjE0NDQzNTk1NTEz
-NDczMzY0MDMyNTUzNTMvPDgyMjM2NjQ1NjUzNDcyNTU0NDc1MjU1MzU0MzQ2ODk1
-NTMzNjU3NDIzMzM0MjQyMTQ0MjIzMTMzMjU0MjU1NTQyMjAzMDI2NjQ0NjYyMzQy
-NDUyMTM2Njc0MzY2MzI3NTQ1NDIvNDU1NTQzNjMxNDQ2NTc4NDM2NDQ1NTM2NTY1
-NDgzMTMxMDI0NDQ1NTMzMTMzMDM1NDQ4ODY3NDQ4NjQzNDQ1NDMyMjMzNDM0NjMz
-NTQ1NDYxNDEzNDQ2Njg5Nzk2NTQ3NTQ1NjY3NTMyMzYzOjc1NDY1NDU2NTUzMjQz
-NDc4ODU1ODU3NDUyMzQ0MjExMTU1ODU1NzU0MzQzNDQ0NDQ0MDAxNjQzNDY1MjUw
-MzMxMzU0NjYzNTQ0NTQ2NjI1NDQ2NzUzNDUzMzI0NDY1NjY1ODc0NjY2NTU4NjQ0
-NDUzNTU2OTc4OTg4NDI1NTQwNTc3ODY1MTEzNDU3NDY4OTY0MzIyNDU1NjU5NjE1
-NTc1NTU4ODIzNjU0MzU0NTY0NjU3NDY1ODY2NzY0Njc1NTQ2NDYzMzU3MzY2Njg3
-NjQyMzM5ODU0Njk2NDQ0MzM0NTc1MzMzNzM1NjY0NjQ0NDU4NjQzNTQ2NDU0Nzg4
-NTk2NDY0NDM2NTU2NjM0NTY3NDI1MjQ1NTU0MjAwNDQyMTMxNDIzNDY4MTIvNDMz
-MjMzNTU2NDQ2NzIzMzQyNDIwMzU1MjMzMjQzNDY1NTY0NTMyMjMzNjUzNjc3Nzk5
-PDg1NjQ0MjIyNDU1NDU0MzU4ODY2ODQ2NzQ0Mzc4NTY3NTY0Nzk6ODg3Nzg0ODQ1
-NTY0ODM0Njc4OTk4NzY3ODo3NTc3OTw8Ozo8ODc6Ozo6OTs8NzU5PTo4ODM5Nzo8
-ODo3Njs7OTczMjU3OT06OTk4OTk5OTg3ODc1NzQ3Nzo6Ojk6OTc3ODg4OTs6ODs4
-Ojk5Ojk6OTk7ODw5PT07OjY2Njg3Nzg7Ojw5OD0+PD5ddGReTUA/PD07OTg7Ojs8
-QD08Ojk6PT5BPjs8PUI9OTw+P0JBPD07Ojo7OTo3PTc4Ozw7ODc4OD1BQ0RCQEM9
-Ojw8Pz0/Pzo8QkE+QkJFQkZGQ0ZHQkBERkZFQ0VDRUlCRUNlscjS2d7i5Obo6evr
-S01DRUxFRklEQUFJQz9CQEVCRUA9PT1CPEE5ODg5Nzs5OTc3Ojs8OTw6OTs5Oz45
-OTY2NzY3Ojk/Ojw5NTQ4ODU1OjY6NTc3Ozw8QD47OTg5Nzo7OTc3NTc5OTg3Njg1
-MDQ2ODU1NjY0MjQ1Nzw5NzY5NTc1Nzc0MzQ0Njo6OTY3Njs4ODY3OjY1OTg4NjY2
-Nzk3Njc3Nzk2NTY2OTYzNTI1NjY2ODU2NTY2NzY2OjY3NzcyNTg0MzQ0Nzc2Nzc4
-NjQ2NDY1NTI0ODQzNDY3NTc0NzM1MzM2NDM0MzI4NTIyNDU1NDMyMzY2NTM0MzIw
-MTMzMDM0LzAzNTM0MzEzOTMyNzY0NTY6ODYzNjQ0MjU1MzE1MTk1NjU2NzY2NDQ3
-NDM1NTczMTY0NDQ2NjU1Njk4MzMzMjc0MjI0NTE1NDU0NjUzNDQ2MzY0MjUxMzAx
-MzQ3NjQ4OjU2NTc3NTg4NDI0NTM0NTU1ODg0NDQxMzIzMjAzMzY7OTMyMjIyMTM0
-NDQ1MzM0NTM0NTQ0Njg2NjQ0MzEwMDM1Njk2NTUzNDU0MzQ0NTM0NjY0Njg3NzY3
-NTU1NTUzMzcyNDMzMTIyNTY1NDg2MjQ1PjQ0MzE0NTY2MzU4NjQ0Mzk3MjU1Nzg0
-Njg0MzU1NjY0MzY1NTM1NTU3Njc2MzU0Mzk1NDM0NDQzNjQ1NjY4Nzc1NTY2NTM0
-NDU2MzU2MzIxNjY2NzUzNDM2NjY4NTU6NjY0NzY3NTY0MzM3NTUzMzU1NzU0NjUz
-MDIzMjY0MDE1NDU0MzM0MzI1NDU2Mjc0MTM2NTQzMzI2MzMzNDM0MTMzNTM1NjU3
-NzQyNDE0Nzo1Mzc4NTYzMzMyMjI1NzUyMzYzOTc4NjgzNDI0NDQyNTU2NjU1ODQ1
-ODU0NTYyNDM2MzMyMjYzMjU5Nzc1MzI0NDM1NzY1NDQzNDQ1ODc2NDU1NTQ3NzU2
-NTY2Nzk3NTY0MjEyMzE1Njc7Njg5NzY0NDM0NzQ1NjY1NjU4OjIxMjQ1Njc4NDY1
-MTEzNTY0MjY2NDQ1MzY0MzU2NDM3NjIzNDY3MzQ1NDU3NjM0MzU1NTc4NDY7ODg4
-Njc3NDY0NzQzMzI0NDQ0NDgzNTY4ODg1NTU0NjQ0NzUzNzY3NDIwMjU0NjQ0MzY2
-NTYzNDQ5Njc1MzY2MzY4ODc0NjU3OTg1NDY1NTc1Nzc2MzQ4NTY2NDU2NTQ3OTU2
-Nzg6Njc4NjY2ODtEODc4Ojw8Njc2NjU3Nzc6NTU2NTo3MzU3NjM4Nzk9Ozg6NzU0
-ODg2NjU2OTk5OTY4Njk3OTo1NjY3OTg4Nzk5OTo5NzY5ODk5Ojk4PDw6ODk4Ozo4
-Ojc2ODU3Ojo4ODo6OTc1Ojg5Ojw+QD87OTk4NTg1Njk2Nzo9PTw/NzY3OTo5OTY4
-ODc6Ojo6QGSBfXJJPT04OD45PDs+Pj1CPz1AQjs4Ojo7Ojc8PT89Ojo9PTw7OTc4
-ODc3OTw3OTk6Ojw5ODk6PUFFRD1APjo5Ozs6PkJDRUI7PD9BQEE+P0VFSENEPUJG
-S0FGS0dGRUFCRFiux9DZ3uHl5+jo6utFSkVFR0NBREVCREBEQ0U/P0FCPjs9QUA9
-Oz1DPT86PD04NTg4PDo8Pzs6OTs6Ojk5Nzc6ODY/PT8/PDk2ODc7PDo5PDk0Nzg6
-Ozk8PTw7Nzg1ODk5Ojs5ODc4ODk4ODY1NDU1Nzg1NTM0MzM2Nzo2Nzg3ODo5OTc4
-Njk3MzM1OTw1OTc3NkA2NDM1NTQ1Njc6ODk3Nzg4OTo3NDY1Njc0OTY0NDQ0NTc2
-NTU3ODY2Nzc4NzQ2NTQ0MjM2NTMzNDM5OTY3NTU0NjQ0NjQ0MzQ3NjU1NTQ0NTU1
-NDIyMTQ1NDU0MC8zMTIyMzY1MjI0My8tMTQxNDY1MjMzNjQ0NjYzNjY1NTQzMzAx
-MzY0NjQ4MjI0NDk2MzM0OTQzNzQ0MjEzMTI0MjExMzMzMjU1NTY7ODQ1MjI2NTM0
-NDo1NzI1MzM2NTg1NDM1NjU0MjUzNDMzMjo1ODc0NDU0NDM0NzY2ODU0MzQ1MjU0
-NDM1MzQ2ODM1NzM5ODQyNDQ0NDI0NzQ0Njc7NTU1NjUyMjY0NzY1NDMzNTUvMzM2
-NzY1NzQyMzY1NDU2NzU4Nzc2Njk3NTQ1NDc0OTMzNTQzNDMzMTM1NDQyNDM1NjU1
-NTMyMzUyMTI0NzY0NjY1Nzc3NTU1NTg2MzM1NjU2ODY3NDU2NTI3NzY2NzY0MzM1
-MzU3NTU0NjQzMzIzNzQ0Njg2NjQ2MzI0MjU2NTY0NTMxNDU1NDU1NjQyMzY5NjQ4
-MzUyMjQ1NTU1NTU1NDY0NTM0Njc1Mzc1NDMzMjI1NTY0MzY4NTQ1ODc1NDQ0MjAy
-NTYzNTY3NTU1MzEwNTU1MzQyMTM1NDQ1NTU2NDM0NTU2MzY2NTU0NjM0MjEyNTMy
-Mzk2MzIzNjkzNjg3ODc2NjU0NDM1NjQ1NDU2NDY2NDMyNjo4Nzc4NjQ0MzE2NTg0
-ODQyMzMyMjIxNDM1NDU1MzU0NDMzNTY0MzU1NTY4ODQzNzY1NjMyNzY0Nzg1MzU0
-MjUxNTY4NjU2NjY5ODYzMzQ1Njo3Nzc2NDQ1MzQzMzU0NjQ0NTQ0NDMzNjg1NDYy
-NjY1MzIyMzMxNTcyNTY0NDI3Nzo4NjY1NTQ1MzMzNjU1NzQxMjQzMzM3Nzc4NTU1
-MzM0MzM2Njg1NzU1MjY1NTY3NDU4NjU1NDU2MzM0NDMzNjc3NTU1ODo1NDQ3ODQ3
-ODk4ODc1NDQ1NzQ3OTU1NDQ0NjYzNjc3ODg3NTY1Njo9SDw5NzY3OTg8ODg4PDg3
-Nzk4NzY6Nzo5OTU2Ojk6Oj03Ozs5Ojw6NjQ4ODY2ODs5ODk8Ojo8Ozo6NzU6OD08
-Ozo3ODo6OTw5OTk4OTk7Ozk7PDg3Nzk5Ozc4Nzk6ODc6OTw7PT44ODo7Ojs6OTk7
-Ozo2Nzg5Nj06NTk8PT0/PD87Oj08Ojs3ODs7PjxCXY6TaD46Ozs9Oz47PD5EQT48
-Pj4/PDg6Ojs+PDk8Ozg3Nzk5Ojk1Ojc1NTY5OTo6Ojk6PT89PTk7PDs/QDxBOzY3
-OTw7PkBEQj48Pz9GQz47PkVHREA8P0VKS0lNS0hHOzw+VK7G0dne4uXm6enq7ERK
-SEVISUdMR0E+QUBFQj89Pz08PDtARUE8PTw7PTw+Ojs8Ozw8OztBPjc4Pjo8ODg5
-Ojc4Ojs5Ozk6OjU3Ojc7ODo4ODk/PDpEPDc2Njc5OjY4Nzc4Ojs3NjU1NjY1NjM5
-NjU1OT02MzY1Njk2NjkzNDg6NzY2NTU5NTU2NDM2ODs7OTo6Ozg2NTM1NzY3OTk1
-Nzc4Nzc1Mzc2NjY2NDY4NjY1NjI0NDU2NTU2NzM0Njk2NDY0MzUzMjI1NDU1Nzo1
-Nzg1MzAzNDU2NDQ2NjY2Ojs3NTQ2MjEzMi40NTI1MzQ0LzAyMTQzMjE0MjEyMjAz
-NDMyNzY0NTQzNDQ4NjUxNTY5Nzg0NTk3MzU1NjU3Mjg6NzM3NDIzMzMzMjUzNDQ0
-MzIxMTM0NDE1MzQ4NzU0NDIyNDk3MzY6OTYxNTIzMj00MzIzMzY0NDM0MjQyNDUz
-MzMyMjQ4NjMyMjEzNTc3NTcyMDE1MjU1NDU2NTI1MTQzMjU3NDUyNTQ1MjU0NjI1
-NDU1NDQ1NDU2NjIyMjUyMzU5MTQ0Nzc1Njc0MjEyMjc4RDU3Ozg4OTg5OjQ3MzM0
-NTg4NjczMzI0ODQwMjQzMjcyMjMzNTQ0NDU3NTQxMzUyNTI2NTI2NTk0MzM0NDY1
-NjU0NTg2Nzo3NDg2NzU0MjI0MzI1NTc0MzM1MzEyNDExNTQzMjY0NDU1Nzc1NjQz
-MzY2NTY4NTQwMjMxMzMyMzE0MzY2NjM0MjQ2NjMzNDc1NjQ0NDczMTEwMTQ1MzQz
-MjMzMjEwNDQ1NDc3NjY2NDQzMTE1NzYyMjM0MjM1NjM+NTU0NDU1NjUzNDQ1NjY3
-NzY1NTYzODk0NDU0NTU1MzUzNDU1NjczMzM1NTk4NTM2NzY2Njg0NDUzMzU2NDU1
-NTE0NTM0NjY0NzU0NjU0MjMzMzQ1NTQzNDQ1MzMzMzIyNTM0MjI1NDU1ODEyMjI2
-MjEyNjU2ODU2NTc1NTE0NDU1NTU0NjU1Mzc2NTY2NTM4NDY0NjU1NTU2Nzg3MDI2
-NDMyNDU0NDU0MzM1MjMzNDMzMjM0NjUyNTU0MzYzNTU1NTU0Njc1NTg5OTgyMTAw
-MTQyMzQ2ODQ1NTExMzQzNzQzNDY3NzQyMjQ2ODY3Njo2NzY2NDI2NTU3NTY3NTQ5
-NDU0MzEyMzc1NTc1NzY9Nzg4NjQ1Nzg4Njc3Njg0MzU3OTc4Nzc5Nzc4NzY0NTY2
-ODc2ODY2NkE5OjY1NTc3NjU0MzM2NjU3Nzg3Njc3OTo6Ozs7Ojg6Ozs6Njs6Ojo8
-OzY0NDM3NDc2OTk5Nzc7OTg6Ozk6QD45Ojo6Ozo4Ojs7Ozs8PDo3OTk4Nzc3Nzk3
-ODY6OTg3Njo7ODo4Ojk6Ozg7PDw7Oj46Ojw5Nzo8PDs9OjY4Oz09QDxEQj5BPDs7
-PDo7Pj9niYZjPz09Pjw+PEA/Ojw/QT88Ozw/PD49PDs9Ozo3Nzg6PkJAOTk6Njg1
-Nzc2ODg6PTo6OT4+Pzw7Ozo+Pz5BPzs8Ojw/RUM8QEA/QUBFQTtAQUI+QEFDSUhL
-SEdIR0ZCQkRYrsfS2t3i5Ofo6evsTk1JRUpLR0ZFRkE9Pz09QD5CQT48PDw/OztC
-QUE7PDg5Ojs7QDw7PDw4Ozk6Ozc7ODo3NTo7OjxBOjg9NzY0Nzw8ODc6PD5BPj84
-NDU4OTc4NTQ8Ozg5NzY1Nzc3Ozw7OTc2NjU3Ojw6NzY2Njg3NDQ5Ojg2MzI1ODc7
-Ojg3NTE4ODc2Njg4ODMzNTQ1ODU4NzI0NTM0NDY3NTc2Nzg3NTQ4NTc3NTY1ODU0
-Njc3OjU2NjY4ODM1Njc2NjY2ODg4NDQ4MzIzNTU3NjQ0NTUyMjU2PUY0NDEzMTU0
-NTg0NTc1NjYzMTMyMjMyMTQ0NDY3NzE1MzEyMS8uMjMzNTg1NDQyNDU0Njo1Njg3
-ODU2NDQ3NjY2NzMyMzMwMDMzMjQzMzUzNDg4NDYzMzY2NDQzNDQ0MTEzODU3NjY2
-NDQ0MzM1MjI0NDEyMjI0NDc0NDY1NTQxNDc0ODc1NjE0NTQ2MzUzMjAwMzMyMjI0
-NDQ1NTc6Ojg1MzM4ODUxMzQxNjc1NzQ2NjY1ODY1NTg3MzQ1Mzg0NTY2NDEzNjQ1
-NTQ0MjAyMjI0Njg6OTU3NjU1NTY1NjU4NDc0NjU0NTMzNzQzNTYzMzM1MTAyMzY2
-NjIxNTc1NTMyNDQ0PTMyNDQ2NDM1MjY2Nzg2NTY4NDQ1NDU4Nzk3NzgzMzY3MjQ1
-MjEyMjMzNDIyMjE3MzI0MjQ1NzU1NTc2ODg2NzY3NDI0MzU3NTY1NDo1ODU0NDIz
-Mzc0NDU3MzUyNzM1MzUzMTUzMjQ1NDQ0NDQzMzU0NTg0Nzc2NTU1NTI2OjM1NDQ1
-MjM1NjQyNDIyMjMyNzYyNTY3ODY1NjY0NDc3NDU0MzU0Njc5OTY1NDMzMzc1Njc2
-NTE0NTYzNTI0NDQ3NTI0ODQ2NzU1NjQ3NjQ0MjIxMzIyMjExNTc3NjY7NzM2NDQ0
-NzMzNDU1NTQyMDIyMjQ0NTMyMzQzNjU2NjMzMjQ0NTU0NTc3OTc2NjY3Njc3OTU0
-NDQ0LzI2NjY1ODQ1ODU0NTU1NTY1NTM1MzIzNDg2NTU3Nzg3MDQ5NDYzMjM1NDQx
-NDU0MzUyMzI2NzUzMzM1NTU1MzAyNDQ1NDMyMjIyNjUzNDc0NjU0NzY0NDY7Ozg3
-NjQ5NTk5NDU1OTk3OTg2NzY2NTM0NTU1NDQwMjY1MzE0NjY0Njg3Ojc2ODY6ODg4
-NjM2Nzg0NzY3MzU2NTY3MzU0NTY0Nzc1Njc0NDQ0NDU1NTc1NjUyMDAxMzU1Njg3
-NjQ6Ojc2Njc4Pj08ODs6Ozs6Ozo6OTY3NzU4OT45OTk5Ojk3ODk3Njc6Nzk6ODY7
-Ojo6Ojs6NjU3ODg5ODk4Njk6OTk7OTk4Ojk7ODY4ODc6PDs3OTo4Ojo3Ojs7OTg7
-ODY4ODk6Ozc4Oj0+PT07OTk3ODc4Pj5AOzs9P22Hklg+PUE9OT49OTo7OkE9PDo+
-Pj4+Ozo8QEA5Njc2Nzs8Oz88PDs8Nzg5Nzo6O0E9Nzs9PkFEPzw7Ojs6Pz9BP0M9
-Nzk9QUZBQD88QERBPz9HPj1FRUZIRUZQUkU/RUVAQ12wyNDY3eLk5+np6+tJSUVH
-R0VHSEFET0VCQUNDQkBERz1APjs8Pj8/PT08Qz5AOjs6ODo7PDs4NzYzOTc3ODg8
-PTc4NDY2Ojo7ODU4ODg5Nzc2Ojg6Nzc4Njg5Ojo3Njc7Ojs8OTY3Ojc5Ojk1MzM5
-PDk6ODk5NTc6PT05OTc2NjU2PDs6Ozs4NjU4OTg6PDY0NDY1NzQzMzM0Njk2PDg1
-NTU6OjY2NjU7OTk5NzU1NTM0NDU1MzU2NDc1NDI1Nzc2NjQ4OTg3NTY1NTc1NTc1
-MzQ0NjMzMDIwMDM1NjQ0Njk1NzU1NTc3NTY4NTM0NTQ6NDQ0NTY4NzI1NDk2NjY3
-NTU2MjMyMTU0NjY2NDc0MTMzNTQ0NjU3NDU1NzM0Njg2NTM1NDM1MjMyMjM0NDYz
-NTU1NTU1NzY2NjQyMTM0NTMyNDQ2NjAwNDUzNDc4MjIyMjQ0NDU0MzczMzI1MzIz
-NjY5OzYzNjQ0NDY2NTc4NjY2NDQ0MjU4NTU0Nzg3NDQzNTg2NDUzMzMxMjE1NjY1
-NTQ0MjExNTY0NDU5OTc5Mi8zNTU4NTc0OjQ1NjQxNTk0MzY1NTY1NTc3MTM1NDY0
-NDYzNjU0NDRAOjg3OD05NTM0MjI2OTY0MzIzNTMyNDM1Mzc2NDMzODc1NTM1MjU1
-ODUzNTQzNDEzMzU3NTU0NDU4NTM3NzQ2MzQ0MTU1NDMwMjIxNjIyODMwMjE0NDU4
-NzY5MzU4OjUxMzQ0MzU2NDY3Nzg3Njk1NTc1Nzc0MjIzMzQ2NDU0MTY2NDMyMzQ2
-NjY1NDM0MzMyNDYyNDI1NTU0NjQ2MzMyMDU0MTM0NTI0NTY4OzY1NzIyMTQ1NDMx
-MjU1NjY1NjQ3NzQ3MzQzMzAwMzA1MjEzNjY1Njc1Nzo2Nzc3NzU0NDU2Njo4NjY1
-MzA0NDU0MzQyNzU0MzQ1MzU2NjIxNDU1NjY0NDU2Njo1MzU2NDQxMTI2NTQyMjQ0
-MzMxNDk5OTg3NTQ2NjU1NDQzNzw5ODUzNTM3NDQ8NTQ0NTY0NTMzNDIxNDY4ODUy
-MjQ1NTQ1NTc3NTY4MzY4NTM0NjczNjc2NTEyNDUzMjIyNTY0NzM2NTM1NDQvMDIy
-MC80NjU0NDQ1NTY0ODQ2MzUxMjU3OTg5Nzc4Nzc3MjMzNDc4NzM0NjM1NTQyNDQ2
-NzIyNDc3MzQ1NDc5NzY1NDM0NTU0MzY3NzY3ODk1ODo8NzY2NjY4Nzk4ODc2OT0y
-NjM0MDQ2OTQzNzU0NTM3NDM1MjQ4NjU2NTQ0NTU3NjY2NTc4ODg4NTU2NjM1Njg2
-Nzg4OTg6Ojc2Nzg6OTo4ODY2Nzc5Ozo4OTo5OTk2Nzg4NzU4NDQzNjg5OjY3Ojk8
-PDc6Ozw4Njg7Ojo5Ojo5Nzk7Njc4Ojo5Ozk4NzY5OTo7OjU1OTo+Ozk5PDs6Nzo6
-PTs9VH6MV0hBPT49PT45Ojw8PD08PT05Oz09Ojs6Pj8+Ozw9Ojw8Oj0/PTs7ODk/
-QUVAPj87QD48PkJAOzg6PD09PjtCRT44ODw/QDw6PUE6PEBFP0VDQUFEQ0BAREdO
-R0JBQUA+WbHH0dne4uXn6evq7ENLS0hCRUZGREVDRUE/QkFCPj9DPj44QD1DQDs8
-Q0VEOjo4OTc6Pjg+Ozw7NDU1Nz88Njw6NDY3NTo7Oj0+PDk3ODo/Ojc3ODxCOjc4
-Nzs5NTY2NzY5PDo6OTc2Nzg3NzU6OTY1NzU7ODk3Nzg4OTg4NjU2NjYyODg4Mzg7
-Nzk8ODo8OzM0Njg4NTUzNjg3Nzc6NjY4Okk6NDM1Njg3OTg4NjQzMjc3NTY5NDgz
-NDY2NzQ1NDY5NTY2ODU2NDU3ODg0NjY0MTU0NDQ1MzY0NDUzMTM0NDUyNDQzMzMy
-MjU3NzU3Nzc0NjY2NjU0NTQ1NDY0Njg2NjU4NjMzMjIyNDQzNTMzNDQ2NTYyNDQ0
-NTMzNTIyNTU1NjUzNTQ1NDIzMDI0NTQ1MzQ2NjUzMzQzNDM2NTc3NzU0Njc2Nzo1
-NTQ1NjUzMTQyNTY1NjU0NDI0ODoyNTQyNDU4Nzc3NzQwNDUyNTQ2ODg3NjY2MzQ0
-MzY0NDg2NjQ1NTo1OTc2NDg2ODQ0NTU1NDEzNDM0NTYzNTU2NTUyNTIyMjM0NDY2
-ODI2NTI1NjY0NTM0NjYyNTM2NDU1NDMyMjQ1NTY4Mzc3ODc6PUg8NDEzNTc1NTg2
-NDExNTIxNzU1NDg0NjY0NTM2NjQzMjY8OTY1NTQyMzY0NDQ0NzUyMjU2MjU3ODI0
-MzUxMDQ1NDUyNTYzNjMyNDQxMDU3MzIxNDU0Nzc3NDY0MTI0NTQ0NDY2ODY4OTg1
-NDY1NjU1NjYzMzQ2NDIyNjIzMTEyMzM4NjY1NTMzNDMxMTAzMzI0NDM0MjIwNDU0
-MjM1Njk1NDQ3NzU1Ojg0ODQ0NTQ0Njg3MzI1NDQ0NDMzNTY0MzU2Njc1NTQ3NjQ4
-NDU0MzQ3ODg4NjQ3Nzc1NjQzMjI1NDQ2NjYzNTY1NDUxMzM0NzgzMjc2NDQ0MDM2
-NjYxMjY2Nzs2NTUzNDc1ODYyMTU3NzY1NDY0NDU2NjUyMTU1NDU0NzQ1NjYzODk2
-OTk4NTY0MzU5Njg2NjQ2Njc3ODQzNDM1MzI3Nzc0NjQ0Njc1NDUyNTc1NDU1NDQy
-NDAyNDQ0MzExMDEyMzQyNTU0MDQzMzIyNDU4NzY3NjU3MzQ2MzM0MjI0MzQ0MzE0
-Nz4zMzExMDIzMzMxMTEzNDU4ODc3NjQ1MTI1NDc4NTIyMzUzNjY1NTQ2MzQ1NTg3
-MzQ3OTo5Ojs4NzU1OTk2Njg5ODg3NTQ3PTQ4MjQ3ODc2OTYzNTk1MjMzMzY1MzQ3
-MzM1NjI1NDc5ODk2MjQ1NDUzNjc2ODU2NTY4Njg4NjQ2ODk5Ozk6NzU2Njg6NjY2
-OTk5OTo4NjY2ODc4Nzg2Nzg4Pzc5OTo4ODg0ODg6OTk4Ojk7ODc4Ozs4Nzo5Oz02
-Nzo8Pzo6QD47ODo7Pz46Ojs9Ojo+PTk5Oj9QfHpLQkJBQEA9Oz0+Pzs6Ozs+OTk9
-Pjs7Ozo3PUZAPEA7OTo3PDxBPTw5OEBBPj02ODY6PTs+PkA9Ozo/Pj4+PT5COzY5
-OUA+Qz9CR0FDQz4/RENBREJBRUVDRkdOREVAPkBdr8jR2t7i5ufo6urrTkhGQkRD
-REU+RENMPz49Pj1HQkRJQT5BSUFFPjo/P0FFQkI8Pj49Oz86OTU0ODo4ODs8Ozs3
-NDU4Njg8ODw8ODk8PTk7PTs9OTo6QDw4Oz05Ojo3NzY3OEA5Nzk4Njk8Nzw5ODk5
-Njc0Njs2NDk4Ozg4NTM0Njc2ODczNDY7OTU0Nzg3Ojw7PDU1NjY3NTU4MzM1MzVJ
-ODc2NjY1MzE1OTo5NjQ1NjY4Ojg1NjQzNjU0MzU4Njc4ODY2NDUzNDIyNDY0Nzo2
-MzQyNTc2MjU2MzEyNDM1NDM1MjQ1NDUyMDI2NDU5NDU3NDQxMjIxNTU1NDU0MzQ0
-Mzg2NjUxMi8yMjQ0NDMxMjU3MzI1NDU4NTIxNjYzNTU0NzUzMjQ1MzQ1NDIyMjM2
-Njc0NDQyMzQzNTU1MzE1NjQxMjU1NTQ0NDY0MzQ1NjMyMDQxMTMyNDRKQzQyNjU0
-My8yNjY1Nzc3MzIzMzI6NTM2ODg3MzIzNjU2PT0zNjUyNTYzMTc6ODg3NTU0NDU1
-MjI0NDU1NTU2NTIyMzY2OTMxNDQzMy8vLzIyNTM0MzM1NjU1Njc0NTg4ODg5NzU3
-NjQ0NDUzMDMyNDU0ODg1NjY1MTQ0MjMzNDQ3NT0zNDMzNjUzNDk3NDQzNTQ0NjY1
-NzUyMzY1NDQ1MjM2MjM0NzQzNTQ1MjM0NzU2NTI0NDEyMzUzNDIxNjU0NTc3NjY0
-NDg2NDMzNDM4ODI1NTg3NjY2NzY4NzQyNTU4NzY2NjQ1MjMzNDU2NjIyMjU0NTQ0
-MzUzNDIzNzUyNDMzMjMzNDY0NTc0NTMzNDE0NDg3NDM2NDY4NTg2OTU2NTY1ODQ2
-NDQ2NzU1Njc1ODMzNDkzOTY2NTQ2NjM2Nzg1NjUzMzU0ODg2ODU2MjY3NTU2NTU4
-NTAyNjY2Njg2NTE0MzQ1NDIxMjQ3NjY1MzY3ODU1NjczMzU3Njo2NjY3Nzc1MzU1
-NTU1MzQzNTczMjM2NTY3NTM1Nzg4OTU2NTQ1NDU1NTc3MTc4NDU2ODg3NjUxNDU0
-NzY1NTQzNTY1MjQzLzI2ODQ0NTc3NjUyNTUzNDQzMzM0NDQ1MzM2NzY1NTc2Njg3
-Njc2NDU0MzMyMzU1NTU1MjI2NDI0ODU2NjQyMzU5NjUzMzU0MTE0OTUxMjQ2NTIz
-NTQ3NjU2MjA0NzMxNDU0MjQ2NDU1NDI0Nzc4NTY5Ozc2NTg4ODg2Njc5ODc0ODk3
-NTk7Ojc2NzY0NDY2MzQ3NzY2NDI3NjU3OTQzMzQ1PDk8OTc4NTc3NjQ2NTg3Njg3
-OTY4NDU0NTMzNjs3OTg0NTU3NTQ3Njc3Ojs6OTk3NTk8Ozk3Ozc3ODo5ODg5Ozg2
-NTU1Nzk5Nzk6Ozg4PDo7ODo5Nzw6PDg6ODk7Ojg8Ozo/Pjs8PD06OT48OjxAPj5A
-PmKYd0M/QEE9Ojo3Nz07PEE7Ozo6Oz8/PkE9QUZQT0hCQzw+Ozs+QTw7Pzs6Ozo9
-Qjw7Oj02ODg4Oj06OT8/Ozo7PD5BPTk/PT5CRUhLR0FDPD46QUNERkRLRkRCQ0RK
-RDw/RFarx9HZ3uLl5+np6utKR0JCRENGRUhARUM/Ozw/PkFCQkVDQT0/RT8+PT45
-PkBCPzs8Ojc5NzQ5Ojo7OjxBREM7Ozg3NjY3Nzg5QD49Ojw7Ojo9PDo2ODs9PTw9
-PTo6Nzk5Nzc5ODY2NzY2ODo9Nzc7OTk0NDc2Njc3Njo9Ojo4ODc3NjQ0Mzc2Njs2
-ODY2OTs6PD8+NzY6PDk2NTYzNDc3NkU2Njk4NjY3OTM1NDc3NDM1NDc2Nzg5NzQy
-MzMzNjc2NTc1MTEzNDQ0MzI0MzQ0NTI5NDU0NDY1NjY1MzM3ODgzMzQ4NTY1NTU2
-NDI3Njc2NjU0NDg0Mjk4Nzc0NDQ0NTU0MzYyMjEzMjc2MzQ2OjYzMzY0NzczNTYz
-MzM3NTU1NTc5ODU2OTs4ODg3NjY1NDU1NTY1Njc4NDU1NjM2NTQ1MzEyNjQzNTk2
-MjU2NTQ2NTI0NDg0NTQzNzU3OTQ0MzEzMzMzNjM0MjIzNTQ1NjI1MzU2NjczNDQ0
-NTM2NTU2NzExMTM5NDY1NjY2NjMzMDAxMjIwNTUzMzMzNDMyMzUxNDQzMjQ3MzQ7
-NDQ1NTUzMDI1NTQ1NTU1MzQ3OTg3ODUxMzM2MDQ2NTU1NDI2ODk2NTc3ODc1MTY2
-NDc3OTUzMzEvNDUzNTY0NDQyNTYzNjY4NDIzMjQzNDMzNDQ2NDU0NTY1MzMzMzQ1
-NTg3MjQzNzUzNTQ2NDQ0Njc2MzM1Mzc2ODQ2NTU0NjU0NTQ3NjY3NTQ1NzQzNDQw
-Mjg3NjczMzQ0MjQ1Mi8vMTMzNDU0ODg0NTg2MzIyNDIzMjU1NDMzMDM0NjYyMzM3
-NzQzNDU0NTUzNTczMzQzNjQzMjMzNTY3NTU2NTg0NjQ1OTU0Nzk3Nzc5Njg0NjMz
-NzU0MzUyMzU1Njk4NDU3ODg2NDg5ODY2NTg2NDQ1Njg2NTMyNTczMjE0NDQ1NjU2
-NDM1NDMyMzg3NDU2NTUzNDY2NjQ0NDg2NjQ5NDQ1OTU0MzIzNDY4NjM1Njc3Nzg2
-NjY2NjY3NDQ1NDM1NDQ3ODU4NDM0Njg2NTc0MzIyMjQ6NjM2MjQ0MDI1NTY3NjM0
-NjY2MzMyMzQzODY0NTczNDY1NDAyMTU1NTcyMTEzMjIzMTMzNzEzMzQ2MzM4NTU3
-OTY2Njg5NzU3NDM0NDI0LzEyMzUzNTY3Njg0NDY4NjIzNDM0NjMyNDAwMzU1NzY3
-NTU1NjYyMjY3NDY3ODU1ODo3Ojk9NTc6ODU3Nzg3NjQ7NTU1NzQyMS8zMjM1Njc1
-Njc2NzQ4NjY5Ojc3ODU5ODU3ODYzNzU2NzU2NTU3NzY6Ojg4ODczNTc7NzQ0MzY2
-OTg4Njc4ODo7ODg5Nzg4Mzc5ODo6OTo4ODc5NTc5Ojo5ODg5ODg1Ozs5OTk5Ojo8
-PUJBPjs+OTQ4OjY5Ozo5Ojs7Pz49OzlDZo97RkJBPD4/Pz4+QEVAPT06QD0+QEFE
-SkxabHZpW0I/P0E9PkI/PTw6PDs5ODpBQzw7OjY4PTw7PDk3Oj0/Ojg3OTw7Ozo9
-QkNFQkVJRD1ERUA+REREQEZJQ0JBPkNGQjxGYK7H0dne4+Xn6err605QS0lGRU5K
-R0FFQkVFPjw6Pz5HRUFAPUFBPkA9OjhDQjs6Ojc4OTs/Ozc0OTs6QEI/ODc2ODc5
-OzUzNDY4OTo9Pjk8Pjw9Ozs8Ozs6OzpAPDw5NTY2NjY3NTY3OTo3PDk7Ozk9NzQ3
-Nzg5Nzo7OTc2Nzk2Nzc8OjU5NzY7OjY0NTIzPDs4Nzo4Njg5ODk3MzU5OTk5Rjo3
-ODY3Nzg1MzQxMzUyNTI1Njc3NjQ3NDIzNjY1NTU4NTIxMTUxMTM1MjMyNDQ0NTU1
-MjM0NzU0Nzg7NzQ2NDc1NjQ1NzY0NDQzNDY3NTQ1NDQ1MjUyNTU0Nzg3NTY0NDIy
-MjUzMT5BNTc0MzQ0NjU0NjU1Nzc1NTk2NDQyMzMyMjQ2ODg4PDo3NjY1NTU5NTQ0
-NDIyNDU1MzM1NTE3NTMzNjUyMTMzNjQ0Njc3ODM0MzU2NTM0NjMyMTI1NDMxMjIz
-NDM0NTQyNzg3NTM6MzM1NTQ3ODo0MzQ4NzQ1Nzg0NDU0NDU2Mzc3NzU2MzUzMjEw
-MjM2NjU1NDU1MjM1ND03MzMwLjI0NTMzNjQ0NTQyNTYzNDIzMzQ0Njc2NDU0MzQ0
-MjY1NTU1NDI0NDM0Njk4OTk6Ozg7NTU2NzY1NTczNDIzNTY0NDM0MjU1NTQ0NTU0
-NDU2NTUyNTU4ODU0NDM2NjE1MTMzMzM0MjM3NjYzNTU0NDM2NzQ1NzUzMjY0NzQz
-NTQ0MzQzNDY3Ozg2NTU0MzI0NDU3NzI1NjY3Nzc1NDQ0NjQyLS81NDI0NTMyMzMy
-NTU1NDMxNTUzMjEzMTMyNDAuMTY1Ojk3NTY4NTQ0NTcyNDQ2NTczNjMxNDQ1MzU1
-NTY0Njg0MzQ3NjU4NDQ1NjY2Nzc1NDk2MjQ1NTQyNjM1MzY4NTg0NDY0NTU1NTU3
-NzY3ODY3Ojg3NTYzNDI0NzMxLjIxMjEzMzEzNDQxMzU1MjQzMjQzNTMxMzM0NTg1
-NjY2NjY1NTQ0NTc2NjM1NzQ5NzY2NDg3NDQ3ODUzMTAvMTIzNzo7ODM1NTY1NjY2
-NzYyNjQ0NTU3NjU0NDU2MzMzNDU1NzUzNjU0MjIyMTQ0NDU1MjM1OTUwMDAzMTIw
-MDIyNTcyMzQ0NDUzMjMyMzYzMjM1NTU0MjE2NTM1NTY3NTk1ODQ2ODU1NjY2ODc3
-Nzc4Nzc4OTc6OjUyMzM0NjQ0NTg5MzQ1NzU2Mzc3Njg4OTg2NzY7NzY4Nzc7NjQ2
-NzU0NTM1Ojs4NjUyNDU1NzY1NjI0ODk5Nzc3NjU3NDU1Njc2OTo4Njg1ODc1NjU3
-NTY1MzU2NzY4NzU2NDc4ODc4OTY1ODg1NTg5Ojk5ODg7Ojc1NDc8PTo7Nzc3Nzs6
-Oj06Ojg3ODk5ODo4Oj86PDg4Ojs7PT09Ojs9Ojk6PDg4NTc7Pjw8Pj49Pj48PENy
-iX1PQ0REQTw6PUI+Pj46Oj5AQUBAP0NKYYKNiH5nRURJQjo5PD49ODk8Pjk2NTpA
-Pjg7OTg7PkE7Pj1AOTk4OTc7Pzw6Ojw/QkBCRUdHPUBKSD4/QUFGRURCSEZHQUBA
-OkaCtMfS29/k5ufo6ezrSE5NTEZERENFP0ZIRUNAPTs6QEFBPUFCQ0I9PDs6Ojw8
-Pjs7ODc7PTs6Nzc4OTc6Ozw8NjY1ODo6Ozk3ODY3Nzk7OTc6ODs6PDc5PD45Ojk5
-Oj04NDM3ODY5ODg5Nzk6Ozg5Ojk2ODg3Nzk9PDc2ODg0NjY2ODg4Nzk3NjU3NDMz
-NTU3Ojk4PTs6Nzc2MzQ0ODg4OTg3RT87NzQ1Njc2MzQ1Njs2ODY3NzY1NDY2NDY1
-NTM2NDc2NDI1MzEzNTIxMjQ1ODQ1MzU2NjYyMzYyODU3NzQ4Mjc0MzU0NzczMDIz
-NTc0NDc1NzU1OTozNDIzMjc3OTQzMzQzMzQ1Nj03NDIyMzM1NTQxMzU3ODc0NjY4
-NTM0NTY1NDU1MzY3NjQxNDMzNTc1NDQ1NjMyMzM2NDQzLjAyNjEzMzIyMzI0NjY2
-ODY0MjU2NTQ2NjQzNDY0NTQ2NDMwMjQ0MzQ3NDQ0MzIvNDQyNDI1Mjc0NzY3NjIz
-MTI1NTUyOTQ2NTc0NTY1MzIzMDE2NzQ0MjI0NjQyMzMzMzMyNTY1MzU2NTUzNjUz
-NTMzNTY2NTQxMDU0NTc2MzExMzM2NDMzNTkzMzQwMzY0NDU2NTc4NDQ2MjQ2NDQ0
-NTQzNDQzNTc2NTY1NDMyMTU1NDIzNDU1NzU0ODY1NjEzNDY0NDI1ODY5NDcyNTU3
-MzY7NTg0MzIzNjc1NDQzMjM0NDM2NDY0MjQ2OTc4NTI5NTw4NjYyNDU0MjMzMzQ0
-NzU2NTMzNDQ0MjQ2OjY3NzYzMToyNTU0NzI1NTUyMTM1MzMxNDQzNDU1NDg4ODY3
-OTQ4NTIzNDY1NDc2NDQyNDU1NTY2MzY1Nzg2NDgzMTQ0NTQyMzQ0NTs8NjU1NDY5
-NDY0MTA0MjEyMTQ3Njc2MjI2Ojg3MzQ2NjY1NDUzNjU0MzU0Njo1NDkzMTA0MjIx
-MTU0MjI0NTYyNDQ1MjQyMjU0NDIzMzM2NDU2NjY3Njg1NjQyNTY3NjU0NTU2NDYz
-NjYzNzU1MzM0NTY4Ojk3MjM2Njg2NTU4NzY0NDMyNTU3Njc3NTU1MjIyNDM0Njg1
-NDU0NjIxMzQ0NDc1NDE0NTY3NDExMTMxMzUzNDU0NDczMjQ1MzMxMjEyNDYzMzE0
-NDk4NjQ2NTc0NTY1Njc2ODU0MjMzMjM1NTU2OT05ODg2Ozc0NTY3Njc4Ojo6Nzg1
-NTU1NjU0Nzc3NTMyNzc3OTk2ODY2NzY5ODc2ODc6Ozg1NjU1Njc1NjY6OTg3NzQ2
-NjQzMTQzNjc3Nzg4OTs3Nzc2Nzk7OTo1Njc6Ozo6NTY4Nzg8Ozc3OTc6Ojs5OTw7
-Nzk6Ozo6ODk8PDc2NTg+OTc4Njg4Ojc2ODw4Nzg4Nzk5OTs7Ozo8Ozs7Ozo5Ojo8
-Ojo6Ojo8Nzk5ODo7PTw+OTs8Pj0+U25paU9FQj88Oz9BPj5APTk6P0FCPj1GanyO
-pYuRfVtCPT9GREJDPDg8QT9AOjw9ODw+QEQ+Pjw/QTw8PD08OzY3Nzo7PT08PEJE
-R0BBRD9CQERBPTxAQj9CR0hFSUVDQjxAUZe2x9HZ3+Ll5+np6+tKTUdIRkJFS0dE
-RUI+RkA7OTk8QkA9Pj1CPzw5Ojs9Oj89PT05OTY9Pzs8QTk9Ozo3Ojw5Pj45NTU3
-Njk6ODc1MjY7OTs6OTs6OTo5PTo6PTo8PToyMjU6NzY4OTs3Nzk8PTk3OjY0OTk5
-Ozk4Njg9QDs7Nzk1NTUzNDY2NDQ1NTg4ODc0NjY1Nzk3OTY2Njc/PjUzMTc1NzY0
-MjQzNDQzNTQ0NDM0MzQ4Nzc0NTQ2NjQ1NzU3NDQzMzQ0NTc1NDQ0NjUyMjI0NDcx
-NjY0MzM0NjU2NTU0MjYyMzU1OTg4ODIzMzU4NTY0NDIzMjU0NDg6NzY2NTMzMjMz
-MzQ1MjIzMzMyNDEyODM0NDY1NTEyNDc3NzQ2NTY0NTQ1NjQ4Njk3NDQ0NjQ7NzQ0
-MjIyMjI1NDYzMTE0NjQyMDIyMzQ3NzY2Nzg5NzY0MzQ2NTQ4NjIyNjUyNTY0NzQz
-NjE0NTI2MDQzNTQyMjU0NDU0MzQ0MzA2NzIyNDU2MzQ0NjQ1NTU3NTMyMjQyLzQ2
-NDI1MzI1NTc3NTEyNDQ0NjY0MTMzMzM0MzIyMy8zODg3MzQzNTQ1NDM0MzY0MjMx
-MTM0NDEyNTIzNDQ2Njk1MzQ1OzM1NDUzNTY3NzYzNzU0NTUzNDY2MjQzMjQ2NTQz
-NDQ2NTU1NDE2Nzg0NDM1NjM1Njc1NDM2NDU2NjY2NTQzNDkzNTQ2NDU0NTAwNzMy
-MzQ2NDUzNjM0NjM0NTY3NTU0MjQ2OTc0NDY1NjYzMzQ1NTg3NjY0NzMzNzYzNTY0
-NTQ2Nzc1MzIzMTMyMzQ1MjQ1NzYzNTY1NDIwMTAzNTY2ODU4NzY1NTM0NTc1NDEy
-NTExMzM2MjMwODU1Nzg2NTc1MjUzNTQwMjU5NzQ0MjUzMzU3NjU2NTQ1Njc0NTU1
-NTU3NTQ2MzMyMzI0NTg2NDI0MzQ3NzUxMzM0ODc1NDI0NTQzMzIxMjMzMzI3NTM4
-NTU1MzQ3NTQ1NTEyNDU1NTY1NzU0MjQ2ODk4OjY2NTU3ODY1NTY1MjIyNTQ2NDQy
-NTc0MzAvMzU4Nzo1NTw7NDIxMjM4Njc3Ojk0MDQxMTM1MzM1NDExMzU1NDU0NTQy
-MjU1NDIzNjQ2NzQ0NDUyNjUyMjMxMzI0NDQ1NTI0NDo9MjIzNTk6NzU1NDc2NzY0
-MDMxMDY6Ozo2NjYzMTY1NjU0OTs4NTI0ODc1NjQ2MjI2ODc2NTc3Nzg4NzY2Nzc3
-NDY1NDQ4NzQ0NjQ4Njg6OTc3NzY4ODUyODg3NjU4ODY4Nzg5Ozg4NTQ1ODc4NTU1
-Njc1Nzg4NDY3Nzg3NjQ3Ojs7Ozk6OTc5NzU4ODY4ODc2OTw4Nzg4Ojo5Nzo5Ozo5
-Nzg3Pjs6OTg4ODw5ODg6OTc6Ojw7Ozk6Ozw8Ojo4Njg6OTg6Oz47Ojw8PUFga3t3
-VTtAP0JGQkRAOzk8ODo7PDxDTHiopq+un4+HZEQ+QzxBPEA6OTk+Ozs7PT43NUBA
-QUI9Pjo7QTw9Ozk+PDU5OjtARTs7Ojw7PUQ9PEI/QT9EQENBR0FBRElEQEdBQUNv
-qrbI0trf4uXn6Orr7EVKS0pGR0JGRUZHQkFCRTw8Qj5AQUBCQz9CQD07Pj8+Pj5B
-QD47OjxBQTo9ODg8NTc6Ojw5Ojo3NzlBQTo4NTU2NDo5Ozw5Nzs7QDg6PDpCOzo6
-Ojg4ODYzODk7NjU6ODs7PDY3NjY4Ojw5ODg6Ozc7ODQ1NzQzNTs2NDU0MjY0NTk6
-NjM0ODg2ODc1Njk4Ojk1NTM4NDY3NTU1NTU1MzY2NTUzNjU2MzI0NjU0NDU2NDY2
-NDM0ND08ODk1NjY0NTQ0NDQ1NDQzMzMzNjY0MzU0MzM0NTM2NDM1MzQ4NjQ2NjQz
-NjQ3NTQyMjA0MjQ1NzQxMzUzNDU3ODc1NTc1NjIyMTExNDQzNDQyNTQ1Njg0NDMz
-NDM1MzMxNDY1OTQ1Nzc0MzM2OTc0NTU1NTIyNTY0NDMzNTQyNDU2NTM2MjM1NjU0
-NDUyMTQzNDU3Nzc5NjM1NTM0Njg4Njg3NjY0OTY2MzIxMzQxMjI0NDQzMjQ5NzU2
-MzEzMS81MzEyNDU1NDQzNDMzMjM1NDUwMTEzMTk6NDI0NTU1NTM0MjM2NTQyMjUz
-MjI0NTI1NjQzMTMzMTEzNTU0MjU0NTQ0MzU2NjU0NTY1NTU5NzU3NDg3NTM1Njc4
-NjM2NjU2NDIyNDQ0OTUyMzM0NjY0NDM1NTI1NTI0NjU1NjU1NTU0NzY6ODY0NTM0
-NTQ1NjU0MjM0NTUyNDY3MzIzMzUyMjMyMzU2NDQ1NDQ1NDQ0NjUzNTQ1NDMyNDY0
-MzI1MzU1NDM0MjM0NDM0MDAxNDUzMjEzNjU2ODQzNTI1NDIxMzU3NTU3Nzc2NjY3
-MzI2MzQ2ODc1ODM0NTY0MzUxMzU3Nzc2NDM0NTU0Nzc0NTU1NTg4NjQ1NTQ0NDI0
-NTc3NjU3NTY0MzY8NDQ2MzQ0MzY4NzY1NjY5NjI2NTIxMzQ1NTc3NjU0NTY3Njcy
-NDExMjI0NjY1Nzc1NTMyNDU1NDY0MzE6NTczNDM0NTgxNTg1NjYzMTU1NTU2NDIz
-NTc4Nzg7Njc3NDMzMzI1NDUyMzQ1NTYzLzAxNDgyNDM1NTg3NTU3NDQzNzY3MzM0
-NTU4NzI1MTMyMzM3NzIzMzU1Nzc1NTIzMTQ0NDc2NTQ5ODU1NDY1Nz04MjI1NjU2
-NjY0NTQ0NTUzNDY2MjU3NDIxNDY3NTY1MTMyMzY5NDMyNjc0NDQ3NjU1NDU1NTU3
-NjY3NzU0Mzk5Njc3NzY1NDU1NTc4NTQyNDU3OTk5Ozk2ODk3Njc5ODU1Nzc4Ozo2
-NTQwNDk5NzY4Njk4NjY2Njc2NjY5NjU1NTk6ODc1ODc3OjY1MzU2PTo4Nzk2NTY5
-Ojc2Nzc5NzY7Pzw7ODg3ODk6Ojo4OTg5Ozs6OTo2OTc8Oj05PDw7Oz0+Ojs6ODk1
-Ojs5OTo7Ozg5ODg6OTc6OTpBSmRqeH9PQkBOZGZJREY+Oz09PT88QE+JopmdlaOh
-mHxZQkRBPT47Nzk5Oj1BOTg7Ojo3Oj9EQTg6ODk9Oj0+Pzc3OTs7PT9FQDk4Ojo5
-PEBBQ0JCQ0JCPD49REFERkZAR0g9R1yOscjS2t/i5ujp6evsSEZKSUlHS0ZKQkZA
-RUQ+QD1GSUlCP0FDP0VBQD5DP0NCQENGPUE7PDk6Ozg6Njg4NTU8PD1DOzk3PD46
-Ojk4Njs2NTc5PDs6NDg6Oz88PTo+Pz88QD89PDY0ODo+Ozs4Ozg5NjU5OTY5Ojg6
-OTw2NTY3NjU6NzQ3OTk1NTU1MzU2NTY1NDU1Nzc2MzQ4Njo4NzU1NDQ2ODY3Nzk4
-OTg+OTQ0NTU1NDQ1NjQzNDM1NDc3ODc2NTU1MzU1OTM0NT43NDMyNDM4NzQzNDI2
-NzcyMzU0NDU2ODQ0NTU1NTY3NzY1NzY1NTQ0NDMzNzU3OjUzMjQyMjIxMzY1MjQ2
-NTUzOTYzMjMyMjM0MzQ1NTY8OTg3MjI0MjM0MzMyMjU0MzU0NTY2MzQ0NTU2NjQ1
-NjIyMzA0ODc0NDM1NTU1OTM0NTU0NjU1MjQyMzc3NjQ0NDM1NTQxNDMzOTc1NTc0
-NTQ0NDIzMzI0NTU0MzI1NDEyNDM0NTExMjIxMTIyMzQzNDU0MTM1NjI2MzMzMzQ2
-MTQzNTU1MzQyMzU2NzYzNDU3NjY2MTMyNDM0NDY1NzMvLzExMzY1Njc0NTU1NDI0
-NTY2NzU1NjMyMTE1NDY4NjU2MjM0MzQ1NTkzNzMyNjQ1NTY2NTM0ODQyMTMzNTg4
-NzEyMzQ2NTU2NTU2NDc4NTQ2NTY0MzM0NTU2NTMyMzU2MjIyNDU1NDIyNzUzNTQz
-MzQzNDQzMzk9NTY0MjU1NjU0NTk3NjQzNDI1NDMzNDQzNjE3NDMzNDI1NDQ1MzU4
-MjIxNjU1NTk6NTUzNDIxMzQ0NDU6NjMzNTc1NDc3NTU1Njc0MjIyMjEzNjM1NjM0
-ODcyMzM1NjU4NTU6NjQ1NDU1NDM0MzIzOTg2NDY1NDUzMjMwMzc1NTQ2OTY1NTY2
-NDU1NTExNDU2NjQ0Njg3Nzs4OTc5OTIzNzc3NDU0NTU1NTAxMTQ2MzM2NjY0NDcz
-MTE1MzQ1NDU3ODg1NTY6NTY3OjY4NDQ4ODY2Njk1MzQ2NTIzNTg3NjU1Njc0NDMz
-MTMzMzMyMzU2Nzg2NTY0NTY1NTQzMzU0ODU2OTg1NjQzNjU2ODg4NTI0NTUyNDQ0
-NDMzNDQ5NjI0NjY0NDU3NzY1NDM3Nzc4NDQ2NTE0Nzk1NTY2Njc1MjI1NjU1NzU1
-NTQ0NDMyNTY0NTIyODc1NTY1MjI1ODc3ODc3Njo4NTc4Ojc3NTY1NDY3NTU4NjQ1
-NTg2Nzg6OTg4OTU4Ojo2NDQ0NTU0Nzg5Nzc2NDY4NTY5Ojg4ODc0NjQzNjk8NjM0
-Nzs3NTU3NTc3Njg4Njk4OTc6OTo7ODk5Ojg6NTM3Nzg6ODg4NTo5Ozw6ODY3ODo2
-Oz09Ozs8PDs5Njk5OTc4Ojs8Ojs6PDs4Ojk7Ozs4Ozs6QD0+QD0/Q0FUVl99d1dI
-SGR5d1RGRERCRkVBPkBQjbG1nG5Sh5h9YkQ9Pzs7PDw7PDxCPjw+PDs8QD8+PUM+
-PTo7PT1AODo6ODg7OkE8PEBBPDs8PDo8PD0+QUU/Qj5CRkRFQ0RFRUVNSEhIRGiy
-ydLa3+Ll6Onq6+tDREdMR0VKSElGRUVCQ0VIR0VAQUM/REZFQD88QUVAPEdBOjs/
-PTk7Pzg5Njk5PTk+Pz8+Pzs5PTc0Nzc2PDg6O0Q+Ozw8OTk9Njg7PD08Oz8/PDs8
-OTo9Pjo1Njg5NTU8Ozs4ODo3OTo3OTo6OzY4NTY3ODY3NjM3Njk3ODU0NjY2Njc3
-NjQ4ODY3NDI0Nzc2NzMzMzQyNDQ1NDM2NjU3NDEyNDQzMjY0NTU1MzU2NTc2Njc2
-Nzc5NTQzMzI5STc2MzU1ODk4NzU1NTQ0NTc1MzMzMjE1NjUzNDM4ODc5ODY1MjY1
-NjM0NDk6NjU1MzQyNzg3NDQ0NzU0NjQzNTMxNDE2Ly4wLzIzMTE0NTI3NTMyNDMy
-MzQ0NDM4NzY1Njg5NzQzMjQ1MjI1MzAzNDMyMjIzNDIzNzQ3OzU4NTI1NTM1NDg4
-NzY2OTY1NTM0NjQ0MDExNDg1NTMzNjc4ODU1NzMxMzM0NjU2Njc3OTQyNTMzNTU2
-NTQzNTU2MzU0MzUyMzQxNzU2NzU6NTQ3NjUyMzMzMzM0NDY1NDQ1NDc3NDMxMjQz
-MzQ1Mzg0MjMyMjU1NTUzNDYzMzM0MzQ1NDU3NTU2NjQ2NjUwMzYyMzc1NTg1MjM1
-Nzk2NjI1ODY3NjM1NTU3OTUyMDE0OzY1MDEzNjU0MzUzMjIxNTY1NzIwMTM0NTEw
-NTQ0NTU1NjQ1ODYzMzM2MzQzMjQzNTM0MjM0NDEzOjQ0NDAxMTQzMjQ4Nzg2NjU0
-Njc0MzI1NDMxMzU0NjM3NDEzNTM0MTAyMzQzNDY0Ozc2NDMyMTM0NTMzMjQ0MjM1
-NjQ1NTIxMzg4NDY0MzU3NjQ1NjQ1MzU1NTcyMzM3NjY3ODY2ODg0NDMzMTQ0NTY1
-OTg2MzQzNDExMzo3Nzc0NTQzNDMxMzMxNTUyNTQ0NjMyNDQ0NjY1Nzg3NDM2NDQ4
-OTw4NjY1ODc1NTYzMTU1OTg3Njg6ODY0MC4yNDU1MjQ0NjM3Njc3NDg4NzY3ODQ2
-NzU1NDQxNDU0NTY8PDk3NDM1NDU2NTQ0NjUzMTE2MzM2NTY4NDU2NDQ1NTc1Nzc2
-NTk3MzU2NjMzMjQ1NTU0MzUzMjU0NTMvMDE0NDQzNjY0NDc2NDY3MzU5NDQyMjE1
-MjIyOjQ1Nzg4NjY1NDYzNjYyNTc1NTc1Njc4NzY3NTY1NTU2NDQ2NTk0NDQ2NDMv
-NDQyMjU1OD44ODg1MzQyNzc3NTM0NDI1NjU0Nzg5Ojk4NTM1NTc4ODY0NjY1NTg2
-NTQ0NjI0ODg1NDUzMjI1Njs2ODk3ODQ1OTc2ODk4MzQ5Ozw3NTY0NjY3Ojg5Nzg7
-NjM3PDg4Ojg4ODk3OTw+OTc4ODY4ODk4OTg4OTY4Ojc4NjY4OTs7Oz4+Nzg8Ojg4
-ODo7PDk5Ozo7Ozw8Pj0+PWJTUYJ3WElveYJ5ZkpFSEBARDxGWYKrrI9gSGKOiWNH
-Rj8/Qj49OT5AP0FBQD8+QDw+QT9FPz8+OT0+QTw4Oj1APTw3OT1BOkBCP0M9PDw6
-Pj9DRkNESEZARUVAPz1CREdIUEJDZrTG0dnd4+Xo6Onq60VDRkdGRUNHRkhFQD9E
-Q0VFP0FEPkFAQ0JEQUBCQUBBQkQ7O0BAPjw9Nzw3OTk9Ojs5Nz5COTY2Njc6OTw8
-RDs2Ojg3OTk8Nzo2ODo6ODc7PT06OTk5PDs5NzQ2NjU6NzU1Nzk5NTc5ODY2ODo3
-Ozg4NzYzNzU1NjY6Ojk6Ojg6Ojw5Ojk6OTY2NzQ1NTU2NTUzNDQ0MzM1NjY3OTY0
-MzI1NDU1ODc3Nzk2NDQ2OjgzNTU2NjQ3NzY3ODU1Njc2MzY0NjUzNDM1Ojg1NDI3
-NzQyNDE0NjY1NTYzNjg2NTM3Nzg1NzQ0NDc2Nzg4Njg1MzY4NjM3ODY5Ozg2MjEw
-NTMuNDUyMzEyMzMzMjUzNzMzODY6NTI2MTI2NjI2NjQ1MzIzNDU0NTI1MzI0NDc2
-MDIxMzMzNDMzNDc5Ojg2MzU1MzQ0NDIyMjQ1NTc4NTMwMTI1NTIzNjY2MzUyNDU1
-NjQzNzYzNDYzMzI0NDM4OjYzNjU2Njk4NDc3NTQzNDY2PDQzODY1MzUyNzIyNDQ1
-Mjc0NDU3Mzc3NDUzMjQ2NTM0NDQzNDY0NjU2NTQ0MzM1NDI2MjUvMzU0NjY1NzU3
-NjUzNDY0NjY0MjIxMDMyNDc3NDQ1NTU2NzU3NjU0NzM0NTU2MzQ3MjI0NTc3NTM0
-OTUxMjY3ODU3MzYzMzQ1MjIyMjU0OjIwMjQ2NDg4NDU3NjcyLzAxMjEzMjQ1MTMw
-MTAxMzE1NDUzNDU2MTExNTM2Nzc0NzY0NDY2MjIxNDU2ODE1NDQ8NzEyMzU1MjU1
-MzQzNDU0NDQzMzExNTY0NzMzMjI0NzQzNDQ1MzMyMjY0NTUzNDU3NTY4NTI0NTU1
-NjY0ODU1NTc3NTM1NDIzMzM1MzU0NzY1NTczMzQ0NTQ0MzQzNTYzNTUyMTM1ODY0
-MzMyMTIwMzA0ODU0Njg0NDI0MzI2OTU3NTQ1NTIwMjI0MzUzNDg0NTY1NTQ2NTQ0
-NDM0NDU1NzQ4NzU2Ojg2Nzo5NDQ4Nzk5NjU1NzUxMjU3NzY1NzQ1NjU2NTMxMjQ1
-NDQ1MjQ1NDQ1NzQ0NzU0MTEyMzY6Nzc1Mzg1NjUzNDIyMzQxMjIzNDE0MzExMTI0
-NDAyMzMyMzM0MjIzMjAxMTIzMzQyNDI0NDUzNTY4NjQ0ODQ2NTY4NTUyNDc2NTQz
-NDQ2ODc3NTQ4ODg1NDQ2OjU0MjIyMzY2NDQ2NjU3Nzk2NDM2NTU3ODYzMzQ0OTc1
-NjU2NjczOTM0NDM0Njg5Ojk4NjY2ODUzNDc1ODc3Ojo/NTM2NzQ3Nzc1NjY2NTc4
-ODg4NzY0MzQ0NzU1MzQ5Nzg5Ojc2NTY1NTY4Nzk6OTk5Nzk8Oz07Ozo5ODc4NDc3
-OTc6ODc3Ozs6Nzk2Ozw7Ojo9Ojc4Ozk3Ojk7Pz48Ojw8PT06OzxEWkZGbHFea5ef
-o3htU0VDPkA8QV6Qn6R0UkpVen5kTEJDQ0NBPkI8Ozw9PUBFQ0ZFQj08PT9APjo7
-PEA/OjlAQEJCOzk4Oz48PkNFREFEQTs5O0FARUI+QkI/SEhFQkRER0xKQkF0tMfQ
-2d3i5efp6errRURHRUhGQUNHQj9BQUBEPkFARkNBQUNAQEE8QUE8QD1ARERBPzw7
-PT49PDY2OD47Njk9Oz46ODY3NzhDQDw8Nzc4NzY4Ozs4OTs5OjY5NzU4Nzg4OTg5
-OzU3OTg0Njo2Njg2OTc4OjY3OTk3NjY4Nzg2NTc1Ojg4Nzc2ODc5Ojo6OTo4Ojk5
-ODUzODg3NzY2NjM0NDIzMzE2MjU5Njg4Nzo5NjY2ODc3OT00NjQ2Nzg0NTY4NTUz
-NDU3MzY1NTU4NDQ3NDQ3PDk2ODQyMTAxNDIxLzIyNDI2NTY3OjUzNzQyMTQ5NTMy
-MjUzNTY3NDQ4NTU4NzU3MTI2NTQ0NDM9NDM1NjY0NjY0NTQzNDU3NzQ0NjYzMzc1
-MzQ0MzgzMTI0MjI2NTM0Njc7NTMzNjQ1NDU4NTc3NDk2MjQ3MzQ3NzQyMTIyMjIy
-NTM0NzgzOjAyNzMzODYyMzM0NjQzNTQ0MjQ0NjYzMjEyMzMyNDQ0NjY2NTQ2ODg4
-NzQzNDU1MjM0ODg3NjIyMjQzMzM0MTcwMzY0NjMzMTU0NDc1NTY1NDU1NTUzMDAy
-NDQ0NDY3NTExNDE1NTUzNDI0MjY2NDM0MzEzNDc3MzU0NjMxMzY5MzE1NjUzNTQ3
-NjM2ODgzNTUyMjI1Mzc1NDUzNjU6MzQvMDY4NTU3OTY0OTQ0MTc2NDUzMjQ1MjI1
-NjU2Njg3NTg2NTI4NDQ3NDY2NDEzMTYzMzc2NTQ0MzU2NzMyMzUzNjczNTQ3NDMz
-Njc1NDUzNTU3NTg2NzkzNTQzNDMzNTQ0NDc1NDYyNDU2NDQ1NjMzNjQyMzUzMzY1
-MjM1NTI2NjUzNjY0NzM1MjY0MjIzNzYzNTU0NjY3NjU1MzY2NTUyMzU0MzQ2NTUz
-NDY1MzQ0NTU0MjIzMjQzNDY2NzQ2NjQ1NTMzNTY2NjM0MzQ2NDQ0NDQyNDY1NjU0
-NTUzMzM0OTk3Njc2NzUxMzQ3NTY1Nzk2MzE1NDU1NDM0Njk6OTc1ODU1NzY3NzY4
-NzY3NjY3NDQ3OTc0NTY1MzM0NjQ1NDI3NDIyNTc0MjMyNDY1MzM0MzQ4NjY0NzY2
-NjgzNDM2MzIyNTQyMjQyNDYzMjIzMzM3NjMzMjYwMjIxMjQ1MzMzMjIzMDQ6NDM0
-NDQ1NDQzNDQzMTY3NTU2NjYyNDc0NTM0NDI2NjMzMzMzNDY4NzQ0NTU1NDg5ODg2
-Ojk5ODc1NzY0NDU4ODc4NTg3Nzc3NjU1NjU0NjU1ODc2NDM3ODY3Nzo4NzUzMzE1
-MzU4Ojg2NjY3NjU2Okc3NjY0NDg5NjU2NjU2Njc1NDk5NzY5Njg5OTY2Njk4Njo+
-OTk4Nzc3Ozo5Ojk4OTg7Ozs7ODg5Ozk4ODk3NDU4Ojo4OT47Ojg7PDw9PDw6PD48
-Ozo5PDw4Ojw8Ozg5PVNdQ0ZviZCXiIqWk3hzUUNDREJYfo+Ka0hFV3V9c1NDQz8+
-Pj09Pj0/Pzs7PTs9P0NBPj8/PD5BPT8+P0E9PkBCPUA+ODhBOzw3Oz9APUNAPkA9
-Qj88QEM/QzxARERCQEVOUE1GSn+1xtDY3uHl5ujp6+tER0dGTEtATEtCRERDQ0c/
-QT9CPzxAQUA/QEA7Pjs7QEFFRD4/Pjk2PD46Nz0+REVBOzg4OTc3Pz03Nz07ODo+
-PTg5ODY3Njc5Ozo3Nzg4Ozc4OTU3OEE7OTs4Ozo3OTY2Njg6OTw2NjY4OTY1ODg3
-OTk2NDQ3OTk4ODg0Nzg5NDc1NTc3NTQ4Nzg1NjUyNDQzMzU5NzU0NDQ1NTU3ODc5
-NzYzNjY2NjYzNjU4OTg6NzczNTg2NTQ0NTU5NTc2NDU2NTY1NDU3NTc3Njc3MzMy
-NTIzMzQzMzM1Njc1MjIzMjQ0NDMzNDEzNzQ0MzE0NjQ4NjY1MjMwMTQ0MzU2NDQz
-NDI2Njc1NTQ2NzQ2NjM1NTU2NzUzNjMyMjMzNDEyNDUzODY2MzQ0NDU1NDEyNDU1
-NTU3NTU2NTU1ODIyNTY2MzU1MTQwMzQ0NDQ2NjU1MzQyNDQ1Njs1NTc2NjM1NzY8
-NzQxMzQ2NDE0NTUzNTQ5NzU2ODQ1Njg2NDIzMTQ0NTMyMTU3NDU4NjEvNDQzMzUz
-MzI0NDI1MDUxMzc3NDQ3NTU0MzY4NDYyNDY3RzY5NTM1NDQ1OzUzNDM0NTQ0NTQ1
-NDUyNTg2NDQzNDIzNTc2MzQ2ODY1NTk0NDQzMTAyMzU1NTUzNDM0MzY2NDU0NTY5
-NDg1NjU4OTg5OTY4NTU3NjY1MzM3NzM3NDY2NDQ1ODg2MzMxMzU1NTUyNjU2NDU5
-OTY1NzY0MjI0MjY1NTczNDU0NjUzMjQzNTc4NzMzNTQ3Nzc3Njc0MzQ0NDU0NjI5
-NzU0MzU3NTU0NDc3Nzg3NDUyNTY2Nzc4NjY2NTY4NjEzNTUzMzQwMTU5NTQ1NDY1
-NjQxMzU2NjQ3NTU3OTc3MzQ4NDUzNzU0MzMzNTIzMzIvMjQ0NDQ2NDM1NTU2Nzc3
-ODc0Mzc0NTU1NjY1NjQyNTc0NjYzMzQ1MjM0NjUzMzU1MzM0NjczNTQ1NTY7ODg3
-NzQ2ODs6ODY4NzY3Nzc4ODY1MzM0NDQ0NTY3NzY1NjY4ODQyMjMyMzQyMzQzMjI0
-NTYzNTc5Njc1Njc0NzUzNjU3NTMyMzM0NjY0NDIxNTQzOjI3NDQ3ODc0NTY0MzQ2
-MjMzMjAzMzIyNDI0NTUzNTM0MzM1MzI1MDQ0MjI0Nzg6ODU3NDU4NjU1MzQzNzY5
-Ozk1NTU3NTMyNDQzNTMvMjc1NTU1NDM3ODY0NDQyNDg7NzQyNzc1NTU2ODc2NDY3
-MjM1OTg5NzU4NTQ1NDQyNDg4NjY5MzM0NjYzNDg1NjY4ODU2Nzg2NjQ0Njs4NjQ1
-Njg2MzQ0Nzo5OTs6ODY1NjY2ODs7QkA6OTs8ODc5Oz05ODk3ODc1NTw5ODs7OTg5
-OTo5ODY5ODg4Ojk3ODk2Nzw5ODw3NjQ3ODk4Ojs6Oj07PTg6X1JDQl6ClJRsVmmN
-iIdZRUZGX42DcVBISmZ9cXdXR0hEQT5DQEI/PDw7QDo2OT0/RkNDQD07REE+PkM/
-OjlBQkRBPD1BQTo5OTk5ODtBQEI+PD09Pz8+PUNEQT4/RURGQkhFRkdGb7bI0dfd
-4uXn6urq60tIRkk/Q0hRTkNFS0FEQUFDRD0+QUJBQD1BPDs5Ozo9PUM8Ojs9PDs+
-QD89PDxCOz48NzU2Njk4Nzc9PT5EPDo9Ojs5Ozc9OjY3ODo7PDo+ODo0NjY5ODk7
-PDk4OTs5Njg4OTc8QTo2ODg2Ojc4NDU7Ojo5NjY1NDY2Nzg4Ojg4ODg4ODY4OTc0
-ODk2NDYzMTU4ODIyMzUzNTc3OTk3NTU1NDQ1ODY4NTI2Njc1Nzc2ODc4MzU4NTMx
-NTg4OTQ0MzMxODY3NTA2NjUzNDIxMjIzNDM0NDI0NTQ0NjEyMTI1NDUzNTMxMzMz
-MDE3NTAzMzM9NjY0NDM3ODY1NzU0NDY1MzM0MjIyMTE0MzM2NTc0NjNMRzQyMjUy
-MTQ2MzM0NDgzNDc0NTc2NzM0MjMxMzg0MzM1MzI0NDU0NDQyNDY1NTc0Njc5NTEz
-NDUzMzQ1NTY0NTY3NzU2MjQ1NTQ2Nzk0MjM0NTM2NjIzMzU0NjQ1MzQ1MjIyMjg3
-NTI0MzY1NjU0NDMyMzEzNTs2NDMyMjYxMzQzMDA0NzQ0NjQ0NDY0MjMyMzYzMTIx
-MTQ2NTAxNTQ3NTQ0MzM2OjcyMTY1MjU0MTU1MzU2NDQ1NTc0NjMyMjY4NjU3NTU0
-MzMxMjE1MzU0NzY1NTc4NTYyMzQ3NjM2NDQ1MzMzMzI3NTY5NjU0NjUzMjMzNTQ1
-NzY2NTczNTU3MzMyMjUzNTM1NzU0MTAzMzU3Nzc3MjQ0NDM1NTI2NTY2NjI1NjYz
-NTc5ODMzNTQ0NTU1NTY4NzQ2NjQzLzIzMzUyNTIzNjM0Nzc4NTI1MzIyNDc1MjM1
-NTY1NDQzNjU5NTUyMzc2NTc2NTM0MjIyMjU2NDQzNDY1MzQ3NzE1Nzc0NTQ2NjQy
-MzU2NDIwLzEvMzc2NTUzMTI0NjU0NDc3NDE4OTo2OTc2Nzc0NDY2Nzk1NzY0MTEv
-NTgyMzI0NTQ1MjQ3ODUwMzQ0NDYyNDQ1MzU1NDc3NjY1NTU1MzY1NzU1NDI0Mzc3
-NzY2NTY1NDQzNjY1NDU2NTY4MTQ0MjE1MzQ5NTQ0NzU1NDMzNDc3NDIzNjUyMjM0
-NTY1NDY2NjU2NjU3NTIzMzEzMjYzMjM3NTU4NzMyMzUzMzM1NzM2MzU3NjM1MzQy
-MDIzNjY1ODc3NjUxMzY4MzMzNzk5Nzg3NTQ3NzY2NDUzNDU2NDY2Njg1NTMxNjg0
-Njk3ODY5Nzc5ODg4NzU3NTc1Nzc0NTU0MzMyNDEyNTU2NTY2NTU0MTI3NDM1Njg1
-Nzk4NDY1NjU1Njc3NjY1NDQ1MzM1NzQzNTU1NTc2ODk8OjU2Nzg1NTc1NTg2Nzk5
-OTk5PDo7OTg5Njk5OTo4ODw5Njg4ODs8Pjs1NDg4OTw5Njc5Ozc2OjY6PDo4ODs7
-Ojc9PTw3Nz08QEJuVkRDXnGVg2VIRWmCdW1USFptcm1MRU97lXdiUUVFRkU/Pzg9
-Oj0+PD8/PD06Oz06PT5CQUA8Ojo7Oz09P0JAPkA8QEA+Pj87PTg9Pj48Ozw+PEBB
-QkNFRUU8Qj9ARUZER0JCPztmtcjR2N7i5ebo6urrTktMT0hGSklIQ0dGQEVCPT09
-OT1AQTw+Oz1AQD47O0BBPkM9PTw8PkJBPz08OTw6PDo7Ojc0NDY4Nzs5ODo7PTw6
-Nzg6PDs8OzY2OTY5NTk5NjY2NjY1Njo6ODY4ODg3Nzs4Ojo9Pz08NTg2Nzo4Nzk4
-ODg2NTMyNDU2ODs6Oz05Njc2NDg4Njc5OjUzMzIzMzUzNTY0NDc1NTc7PDU1NTY1
-ODQzNDMzMzQ1MzM0NTUzNjY2Njg1MzM0NjY2NjUyNDI0NjY2NTcyNTQ1NDU0NDc0
-NDI2ODU1NjY0NTU3Nzc0NTYzMzEzNi4yMjAzNDEzNTc0NzczNTQzMjQyNzY3Njk4
-MTI0MzY1NTQ0MDM2NDc0Njk4ODU0MzU1MTIyNDEzNTU0NDQzNjU1NDY1MzQ6NjEy
-NzI0MzM1NDU0NDYzNzszNjQ2NjU2NTc0MDIwNDU2NjUzMzY4NjY0MTM1NTIzNDQ2
-NTYwNDQ1NDAyMDM1ODk3NTY0MS8zNTc3NTE0NjU1NjU1NDM3NzQ1OTc4Njc0My8v
-MzMzNTYxMzQ0MzQyMzg0MTMwMDI0NDQ1Mzc1NTM0NDQ2MjU0NTUyMjU2MjMzNDI0
-MjMyNDU2NzY2MDMxMTw1NTU2NTI1NDEzMjU1MjUzNDg1NjY5NTQ1MjEyMzMyNjc4
-NDQzMzU2NTQ1Njc3Nzc0MzMyNDI1OTU5NjUzMzU5Ojk4NDUzMTMzMzU0MjI1MTk1
-MzQyNDM3NTg1MjExNTEzNTY4Njc1NjU0MjU3MjIxMDI0NzM0MzQ1NjY0NDQzNDIy
-OTM1MzIxNDQyNTc4NjQ0MjM4NjQyNDE0MzYyNTQ1NTQyNTc3NTQ3NjYzMTM3NTY1
-NTQ0NDQ1Njc1NTM0MzY3NDI0NzU0NTg2NzU1NDQ0MjQ0MzMzMjQ0MTQ0NTQ0NTY1
-NjY0NzU0OjY2NDY4OTUzMzUyMDIwMzY0Mzc2NDY3NTU2MzMxMTEzMzQ0NTY0NTIz
-MTUyNDY3NjU0MzY1NjY7NjY2NDQyNDg0MTQ2MjE0MjMzNTQzNjU2NzQzMDM3NzQ2
-MzM0NTUzNTg1NDEwMjQ0MzM1Nzc6OjM0NDQzNTEzMzU3NjUyMjUzMjMyMzg3Nzc2
-NjQ2NjQ0NDExNDEyNjI4NDQ2NjMyNzYyMjU1NTU2NTQ0MzY0NjI0Njc1NTc5Njc2
-NzY6OTc3Njc2NTU7ODU4NzY3NDY3Nzk1NTY2Nzc2Njc2Ojk1NTc3Nzk2NjU0MzM0
-MzI1NTU1NTY2MzY0NDU1MjQ6OTk1MzU5NjUzNTQ2NTY2MzQ2NjM0Njc2NTEzNDMz
-NTU2NzY2Njc3ODc5NzY1OTU0Mzg6ODk6NzY5Ojs4Ojg5ODk4ODo4Ojg3Nzo5ODc3
-Njo7Ojw4Ojk6Ozw6OTo3NTk1Oj07Pjw5Ojo6OTo5Oj9DVXdOPkNqhpSBTENAT259
-nXtuanpkTVJegJ6JeUw/RElFQUM/QERCQD1CQT46PDo5PDw7PTo7Ojg7ODo9Qj8/
-Pj08PUA9QUU/PD88PEJCPTo6QkFJQT9DQkFGRUZCQkJBR0ZLQkhDRGe3yNLZ3uLk
-5+np6+tHRkJGSEZFSEM9QURESkJAPj47ODg+QkBAPD5DQD09PD0+PUE/Pjo8QUE9
-PEJAOz06Ozg5PDw2NTw4Nzc0ODg8Ojo3OTo6OTs5ODtDODc4Ozc0OTs6Pjg4ODY4
-OjwzNzg2ODs5Njc3Ozo3Nzg3NzY1OTg0Nzg3Ozg4NTY4OT07ODk4NTY0NDg2ODc6
-NjYyNDg3NjY4ODU4Njg1MzU2Ojc3NjY0MzE0NDQ1NTQ0MzU0NTU1NDMzNTc2MzIy
-NDc3PjU1NjQ0NDU0NjM1NTQ0NDQ1NjY4NjMzMzY1MzI1Njc2Njc0Njg1ND01OTM1
-MzQ0NDczNTM1NDM0MjQzNDI1ODU0MzEzMTE1NzY1MzM1Ozs3NDY0NDM3ODU2NjM0
-MzIyMzM0MzIyMzMyNDIzMjM3NzU5NjU2NTQ2MzE1NjY2Nzg2NTYyNTI0MzY2My8x
-NDc1Njc4NzU1MzEzODQyMTI1NzMzMzU2NTQyMzU2MjEyMzM1ODc2MzM0NDczNTc3
-OTg3NzU1NjQ1MzY2NTIyNTM0NDQvMDAyMTIxMzM0NjQ0NDIyMzcyMzU1NTQ1NDU2
-NTM4NTQxMTMyNjI0NTQzMzUyMzIyNzUxNTU1Njc1OTc1MTIzOjk1NDY4NjQ1MzEy
-NDU2MzAzMjM1NDMzMC42NDY1NDYzMjQzMzU1NTc0MTIzNDM1NTY0MjE1NDc5OTo5
-NzM0NzY2NDQ1NTY0NDEyNDM1NTM1MjAyMzc2NzMzNDU1MzM1NTI0MjQ1NTc3NDMz
-MTEzMzQyMjIzMzMyNzM0NjUyMzU0NDMxMjI0NDMyMzI2NDQ1MjI2NDM0NDQ1MjI1
-MzIyMzU2Nzc2NjY2NTYyMjI0NDMzNTM0NTU4NDM1NzY1MzQ1NDQ1NDI0NTM0Nzcz
-NTM1NTc3ODU2NTg2NzU3NTU2NDQ0NDc2MjM0NTQ0NTU1Ozk2ODc2Mzc2NDYyMzMz
-Mjg2MzU3NjMxMTIzMjI0MzM1NDU0NzQ0MjA0NTU2Njc3NDY2NjU4NTUzNDQ1NDg4
-ODYyMzIzMzU0NDQ2NTc1MzI1NDg5NjQzNjUxMzQzNDc1NTQzNTM2NjU0NTQ2NzQ2
-MzQxMjU2NTQyMzUzNTM2Nzg3NTU2OTk7ODQ0NDMxMjExNTc0NTYzMjEzMzUzNTY0
-NDYzNTY2NjQ3Njc3NzMzNjg5ODc0MTE0OzU3ODg3NTk4Njk2Nzk3ODc3OTo5Ozc3
-ODs4NTY3ODg2NDc1Njg3NTM1MTAxNDM2ODQzNDIxMzQ0NjU2NzUxNDc0Njo3NDM1
-NDQzNDQ2MzU1MTQ0Mzc4NTQ0ODU4Nzk1OzY3NjU2NjQ2NDU1ODk2NzY3Nzk4ODk6
-OTo6Nzo3OTk4Nzk4Nzk3ODg2NTc2ODk7Nzk4Njc1Njk3OTk6OTpANTc0Njk7PD07
-OjY5OTY7PUJfZEM/QWuAiXxGQUJKWnqinZJ/eFlVdqCShmZTSkVFSEREQD09QD8+
-Qj9APkA7Qj05Ozw7Ozc6PTk4PT49Pj48PD0/P0E7Oj9BP0M9PT48Oj1BQ0VCRkJA
-QEFDQj9CQz9EQURHS0Q9brXJ09nd4uXn5+jq60BESkxJREVGRkpFRURDP0VCPTo3
-ODtBRUBDPjw+PTw+Pz9CQEE7Nj09PkFEQ0U7PT09PTg7Nz48NDg8PT84OTw8Ozk9
-OTc6Oj04OTs9PTg1NTY1Njg4Ozk5OTs7Ozg1OTk2OTk6Njc6Ozg5NTY5NjY1Ojs4
-PDQ3OTg0NDY3NzY7OTc3NzU1ODc1NDY2NTk2MzU0NDA1ODU1NTM0NTU5PDc1NTUy
-NjMzNTc4Njc0MzY0MTM0MzY3ODo3MzA1MzY8NjQ3NzQ0MzM1NjY2NDIzMjM0ODc3
-NjY2NTUxMzY0NTo4NTQ3NTEzNDU4NjY2NzQ1MzUzNDU2ODc1NDM5NTQ1NTYzNDQ0
-NjY2NDMxMzU1MzMzNDY3NTU2NTM3Ozc2NDo2NjUzMzQxNDQyMTIxNTc0NTU0NDU2
-NDQxMzQxMTQ1NTQ2MzIzNTYzNzM2NTMzNDM2NzU4NzUzMjc3ODQ1NDIzNzYxMzAz
-MTMzMjI2NzU1NDQ0NzY3NjY0NTQ0MjU3NzY1ODU2ODYyMjQ2NzU1NDU0Njo1NDUx
-MTExMjQyNDMyNDY2NDg2MzM0Nzg3NzY0MzU7Nzc2NDQzMTEzNTY1Nj01ODQzMjUz
-NDY3Njg5OTk0Nzk5NzU2NDY1NjU4NjUzOzQ0MTMxLzAxMjk0MzMyNDU1NDIzMzIz
-NTI0LzI0NTIzNTQ2NDYzLzEyNDQxMTM0MTQyMTQ3NDMxMzQyNDEyNjU2Mzc0MjQ3
-NDIxNDY3NjU4NzMzNjQ1NTM4NjQ2MzMyNDgzMzM0NDU1NDIwMzMzNTM1NS8xMjI0
-Nzg1MjQzMzQxNDY3Nzc1Njk2NTEzNDI2MzI1MjAxNjg2Njc1MTg2MjMyNTY0NTo4
-NzU2Njc2NjU2Njg0MzQ2Mzc1NDM1NjUyMjM0NTY3Njc1NTQ0ODc1NTQ1NTQ1MzM2
-NTUzNDU0MzM2NTY6NzY3NTMzMzY1MzE0MzQzMzUzMS8xNjU0NDIzNTQzMzQ0MjEy
-NTQ0NzU4NTY1Nzo4NjU0MzQzNjIyNTMwNDczMzMxMzU2NTU2NTM0Ojw1Mzc4Ojg0
-MjMyMTE0NTY0MzYxNTQ0MzM0MzQ0NDE2NDM0NDU2Mi4xMTQ2NzQ0NDQ2MTIzMjQ0
-MjE2NDQzNDc1NTU0MzIxNDM1NjMzNjIvMTQzNjQ1NDk4NTc6NjYyODIyNTU2NzQ1
-OTU2Njg7ODg5Nzc2NjU5ODU2NjMzNjY1ODo2ODc4ODQwMzM2NDU0NzU2MzM1NzI3
-Njg1Nzc0MzU0NTY2NzY1NTk4ODY4NjIwMjI0NTQ2NDQ2NTYzNzY0NjY2NTU3OTg3
-ODk6ODY3NzQ1NDY1OjY3NTo5ODg4Nzk3ODY1Nzg3Njc2Nzg4NTc3ODQ2ODk4Nzg5
-Nzc5NzY4NzY3Njo6PDk4ODg2ODs7ODk5PTs7Ozg8RWVOPz1BZ3V5dUtFQkZWmrq7
-rp+Sg4GMl39sTkhEQkRDRkE9Pj0/QkFFQkE+Pzk5QD87Q0I9QTs8Pz1ERkE+Oz48
-OTs8PD46QEFBQj4+REBAQT5APkJAPz9DSUlFQz9ERkRCQENCPUJjscjS2d3h5efp
-6enrS0FHS0dFSUdFSUNAQkJGR0lFQDs5OT1BPz0+Pj8/Pzo6Oj0+PDg7Oz1BQkND
-REA9QTs6QDo6Qj86OTw7PEI+Ojk2Ojk9PTs8PDw6Ozo7Ozc5PDo3Njg3ODc4NTU3
-Ojk6Nzw3NTg5Ozo4Ozg7Nzk8ODY3PDY2ODY0NTs7ODc1NjY2NTc4OTg2Nzs6OTY5
-ODg7Nzg4OjM0NTUzODgzNDc2NTY2NjY4NTU1NDQ0Mzg4NzQzMzE0NDMzNjY2NzUz
-NjQ3NTQ0MjAwMjQ1NzQ4NTU0NjY4OTY2NDQ0ODc3NzY2NTMyNDQxNDU2ODc6ODY0
-MzMzMTIzMzM5Ojg1NTQ4NjQ0NjM0NDUzNDMyNDY7NDAxMzY2NTc1Njg1NTQ1Pzgz
-MjEyNDQzMjI0NjMxMTI0MzIzNTQ2NTQxNTM2MzIzMjAyMzI0MTAxNDY0MzEzNjk2
-NjY0NjY4NDQ1MzQ1NjI0NTU1NTU2NDU1NTQ0NDQ2ODk1NTQzNTQ0NjEyMzMzMjQ1
-NjI0NTk4ODQyNDQ0NDc4MzQ1ODc1NDE1MTY4NzgzMzI3NDU1MjM0NTI1NDU1MzEz
-NjI1NTMyMjQ1NTQ4Ozg3OTU3NzY1NTQ2NTQ1Njk1NzY1NDU2Njg1NDQ3NDU1NDUx
-MTU7NzMvLzAzMjc1ODQ0NDQ2MzMxNTM3NTQ1NDQ2NTIzOTY2ODg8NTU1MzI2NTQ1
-MDIxMTQ0MTQ3NjQ0NTY1NDc2ODk5NTg5NjMyNDU2OTY1MDIyMjM0MjQyMzQ1MzUz
-MjAyMjEzMjE1NTQ1MzU1MjMzNjk4OTYzOTk4NTQ3ODYzMzU6Njc1Mzc1MzIyNTY0
-MjI1NTcyMzEyNjk2NTc2NjIzNDU1NTU1NTU1NTMzMzY1NjM4NzYyNDQzNDQ3NDIy
-ODg4Njg1NDMwNDQzMzU2NzU1NjgzMzU2Njc0NDc4NDM1Mzc3NjIyMjAzNDU4NjU2
-NjY2MzYzMjEzMzM0NTU1NzQ0MzM1NzU2NjY2MjUyMzM1NTU4NzU1NDQ1NDQzNDQ3
-Njk2MjM0MTE1NjMzNjc3NTQ1NTY2NTY1MzY5OzUzNTM2MzYyNDQ0MTQxMjMzNjM0
-NDMuMTAxNDIyNjc0MjU1NTU1NTI0Ojg5Njc3MzQ0NDIzNTMzNTozNDU1NTI4Njg0
-Njc2NDQ1NzY1NDU0MzU2MjU2NjU4Ozg2NjY1ODY2NzU2ODY4OTg2NjM0Njg6PDk3
-ODg4ODg2MzU4NjM2NzU1OTk6Ojg3OjQ4ODY4NzU4NzY5OTY1ODY3NDU2NTc2NDQ1
-Mjc4Nzc3ODg3ODc1NDM1NDUxNDQ0Nzo6NDU1ODU2NjU1MzI1NTc3Ojc1NDI1NTU6
-ODU3NjY4NjY4OTc3Nzc2ODY2NzU4Njc4OTk5Ojc3Nzc5Nzc1NjU3OTk0OTxAQDs9
-PTw9PTxVaUY8Oz5XZn5oSUVERmSsu7unrZeVmIB1WEdCR0JDR0ZCPj88Pz5DPD4/
-QUE9OTo8Q0NCOTs/Ozk7PkA7PUBAQT1BPT8+QD89Qz87PkE8PTs+RENCQD4/REBI
-SkhEP0VFRURBPkFEQmy0ydLZ3uHk5unp6utHTkdJSUhGSURBREVIRkVDRENCPTw8
-OUE/Pj5BQUFAQTw9ODc7Ojw8PUFDPDlBPjk6ODo8OTk+Pzw7PDc7OTo5Ojk8PDo5
-OTs+Ozk5Ojs5Nzs2OTo7ODg7Ojg3NTc3NzU1NTU0NDc5ODg2NjY3ODk4Njg8PTc3
-MzYyNzg1NDM1NzU1OTc4NDQ4ODc1Pjs6OTk6Nzc2NDU2NjczNDM1NTU1OTc3ODY4
-Njk1NzU2Njc2PjY0ODM6NjQzNzg1NTU2Njc0NTQzNDU1NjM0NTg3ODY4NjU1NTU2
-NDU0NzU0NzY2NzU0MjE1NDQ3NTQ2MzMxMTM0MjI0NTg4NjU2OTY6NTc2MzY1Njo3
-NDg4Njc0NDExMzQzMjU1NDM1NTM2NTYyMTEzMzM0NjI1NDQ0My4yMDQ1NTM0NDI2
-NTY0Mi8zNjAvMjEzNDE0MDA2NTM0Nj9AMDQ0NjY2ODk1MjQ0NTU2MzIzMDIzMzU0
-NDM0MjU1NDUzNzQyNTU1NDMyMDAzNDQzNTQ0NTUxMjIxMzY3NDQ1MzMzNjc3NjQy
-NDQ0NTM3NTMzMTI1NDUyNDQ3NjU1MjAxMTEyNDMzNDQ3NDM2NDk2NDU0NDUyMzU1
-ODc3Nzc1NTc2Mzg3NzY1MzY2NzQ0MTM0MzM0MzIyMzQ2NTQ2NjUzNDUzNDQzMzM1
-NTg4NjM0NDM1NDUzMzU2NDY1NTQ2NDI1NTU0NTU1ODU0NDYzMjQ0MzM0NTU0NDU2
-Mzc2Njc0NDY0NjM0NDI1MzU0ODU1NTQ2NDQ1NjYzMzk0NzU0Njc3MzUzNDQ1NTU0
-NTM2NjM0NTMzMjY0NTIzMzY1NTM0NDQ0Mjo1NDI0NDQ0Nzc3NjU2NTM0NDU1NjU2
-NDMzMTIyNTM0NzQ0MzM0NDU0NDIzMjU4Njc3NDQ0MzIzMjQ0NDM3NDQzOEQ1Njc3
-NDMzNTY4NzIzNTg3MzY4MzM1NjM0MzMzMjQ2NTc3NjQ2NDY1NTQzNDMzNDU1Nzc1
-ODY0NTU1MDEyMzU2ODUyMjQ1NDMxNDY3NDQzNzI1NDIyMjczNTY4NjU0MTY0NjM0
-NjQ1NDMxLzEyMjIwNDU0MjIzODc1NTQyMzMvMTEwNTc4NjMyMjQzNDQwMzQ2NzU1
-Njg5NzU1NTMzODQxMDU4NDMwNTU2ODIzMzY3NTUzMjg4OTc1NjY2MTQ3ODY5OjY2
-Njg2NjY2Njc2NDU1OD45OjU3OTY1NDMxNzM0NTc1NDY3Nzg2ODg5OTk5ODY1Njg0
-Njk5OTc2Nzg2Nzk2ODc3Njc3NDU1NDM2ODc1NDU2NjY2NjM0MzU4MzY2NjY0NDc1
-NDQ3Njg3NzU2MzQzNzo4OjU3OTY1NTY2Nzc3OTg5Ozk2ODs5Ojk6OTQ2Njk5NTU5
-ODY2NDc5NTQ3ODc5Ojw9PDw5Oz1APTw9PDg7QXFYQTw7O1R+g2RIR0VOkaytpq+u
-pJR9VkdFRUdDQEFCQT88Oj0+P0A+PEBAQjw6Ojg/QEM/PUE9Oz0/QD8+Qz4/Pz4+
-QTs9PzlAPj09OTs7Pjw7Oz5AREBEPkBFT0pEO0FESEdHREFBb7TI0tne4eTm6Ons
-60hESE5KSkhIRUFCREVCPkBBPUBKRUI8QTs6OT9BR0NCPD1APDw6Oj9APzo+Pjg4
-OTo4PD4/PTw7Ojk3ODlBQENAPDk9OzU6ODs8Nzw6PD05Ozs3Nzg3Ozs6Ojg1NTU2
-NTU1NTU1Mzo6NzM0MjQ3OTc5Ojo8PD03NDo7ODY2NzM1NDc3ODk3Nzc5OTo3Ozc5
-ODg3NTQ2NDQ1Njk3MzI0MjY4ODg7ODo5NTY7ODc2NTU0Njc3OjU3Njc2NTQ1MjU3
-NTQ1NTY2NjcwMzk2NTY6OTY1NzY2NjY2ODU1NjMyMTMwMzMyMS8xNTY4NjU1NjUy
-MTM1NDM0NzUzNTQwNDY2Njc1ODg1NjUyNjk3NDY2MTMzMTIyMzUzOjk4OjQzNDY2
-NjQ1NjQ2MzQ1MzYxNDE0NDU5ODQ0NjMyNTY0LTU0MzMyNTY1NDIwMjUyNTU1NTY0
-MzMyNDIzMzM1NzY0NDIxMTI3NTM0NTY2Nzc5NTY2NjY0ODYzNTk0NzU2NjozNDQ0
-Ojc2NjQxNjY0NjYzMTI0NDI1NjY0MzEwMTIxMDQzNDM3NTM1MzM1NjYyNTM0NTQz
-Nzc1NjozNDY4NTc2NTU2NTAzMzQzMTY0ODk3NDU2NDk2MDQ0NTMzMjI2NjIyMzM0
-NjQzMzc4NjU3NzY5Nzc2MzY0NTIyMzM1MzM0NTcyNzI5MzM0NTUzMzU1NTU0NzU2
-MzQ4MjM1NDY1Njc6OTUzMjQ1ODc3NjQ0NDEzMjU6NTY1NTQyMzM2NjM2NTMzMzcy
-NTU1NTU0Njc3OTY2NTU0Nzc4NTQ1NDU2MzM0ODQ1NTY1NTg2NTYyNTQ2OjU0MzQz
-MjY1NTQ3ODMzMTExNjQzNDM0NTQ1Nzg4NTM2NDI1NjQ2NDU0NTIyMjQ1MzM3NTk1
-NTU2NDU1NjY2NjMzMjU2ODc3ODY1NjQ2NzY1Nzw3MzQzMzM2NTQ1NjUzNDM0MTQ1
-MzU2NDQ0NDIzNDQ0NDIzNDk3MzQ2Ozo2MjY2NjY2NjU2Nzg3NTQzMjI1NTUyNzY1
-NjY2NDM1MzQ0MzAyMzc5NzM1MzQxNjY3NzczMjQ1MjMzNTc1MzM0ODIxNTQzNzYz
-MjMzNDQxNTM0Njc2MzQzNDMzMzUzNzY1NDY1ODg2MjEzNTk1MTQ2NDQ1Mzc2MjM1
-NDc3NDE0MzQ5ODQ0NDg6NzY1ODc0ODk7Ozc4NTIyMjM2ODY2Nzc1MzM0NTU3PDo2
-NzU1Njg3ODg4Nzo2NDk7ODc3NTY5ODYzNDU4Ojc1MzQ2Njg2NjU4NTc1Njk3Nzk1
-NzczNDY2NzU3MzUzNTU1NTMxMzg1NTc0NjU2NzY3OTk2Nzo5ODY4OTg4Nzc5ODg7
-ODg7OTk5NzY3Ojg7PDk3Ojs5NTc1Mzc2Njg2ODUwNzc3Njc6PDs6Ojs7Ojo8PT09
-OjtQakVAQUE+a5OGW0JBSHSiknx/h415Y1FDR0ZGR0NDRD5EQTs7QD1APT06Ozw9
-PkA+PTw/RkNAQUNBPT0/QEI8PTs7QkNBPTk6Oz08SEdIQkE8P0M6OD09PkBGRUA/
-Pjw9PUNBRUpMQUJwtMfR2d7i5ebp6errRktGRkxISUpIQ0JCRkJDQ0A+REdBRkY6
-PkVCQkJEQkVCOz08Qj09Pjw6OEM/Oj87QUZEPkA8PTg4Ojc6PTk+PD1DPTw9OTo7
-NTg8ODw7Oj09PDo4OTg2Ozo5Oj05NzU4ODk3NDc6NDQ5OTc4OTc2OTg6Ojk5ODo4
-OD47Nzg6NzYzMzQ1Njc3ODk4Nzo0ODo7Ozs1Nzc3NzQ0MzQ4OTU1NzU4Nzg7Nzo5
-[base64-encoded binary blob of the deleted file elided; no human-readable content]
-Nzg5Nzo0NjU0NjMyNDI1NTQ1NTQ0NTg1NjY0NjY1NjQ0NDQzNjY4NzQ1NDU2NDQ2
-MTM1NTM0OTY4NDMyMzY0NzU0NDQ4OTk3Njc6Njc2NjQ2ODc2NzU3NjY5Ojg4ODg4
-ODQ4O0E5Nzg4Ozk1Njg3Ojw7Oz1ifpqVlZmOVj87OjpAQD0+VXCBXERFQDw8Oz5M
-c21SQUVARXV9U0RBREE/PT1BQUI9Q0VBP0FFRkVDPkZBPz4+PT88Qjw+P0M+PUBD
-Q0I/QEBCQT0+PEA+PEFBQUJFRkNBQ0RDQUJAP0BBQ0REPUhFRkQ9QEREP0REPkFF
-RExHSElHRVBKTklHSkpLUkxHSmG4ydTc4OTm5+nq6+tBSEpLSklHQz9DR0VKSUlH
-RUM9QT5CRUNCQT46PUBCP0A+PT07PkJFQUA+P0BGRkJAQUA+Oj07QUFCQTk4Nzs5
-PT07QkRCQDs9Oz06OD46Oz8+PDk5P0E5OTk7PTo2ODo8OTc0OTg0Njk6Pj89PT85
-Ojg6Nzo6Nzw4PDo1NTI1Nz49Ozw/Oz05ODw7OTg5Ojk3ODg5PTc5Nzg7Ojk6NzY2
-Njg5ODo3Nzs6OTg3ODo3NzY6Nzk3OTY2Nzw3Njg5Nzc3NjY7ODk4PDk1NDc3NzY1
-OTg0NjY4NjM3NjU0MjU1NTc1NzU4NzYzNzY3NDQ2ODI1NzQ0Mzc9OjY1MzUzNTQ0
-Njc3ODg1Njg4ODc0PDM2ODc2NjY1NTc1NjQyNTE0OD83ODs3ODg5NzY2Njc8ODY4
-Njg1NDIyMzE0MjY5NDY1NTYxMjc0MTQ1NjQwMzI0OzkyMjY5NjY1NTQyMjM1ODc0
-NDM0MzQ0NDIzMjY1NTc5NDQ1NzIzMzM4NjM0Nzg0NjQxNjQ3Nzg3Nzg3Njc6Nzc4
-NjM0NjEvMjM4MjIzMDI0NTM0NDc2Nzg3ODU1NDIyMzM1NTU2NTMwMDI0MjQyNDcz
-MzIyMjc7MTUzMjc2MzMzNTM5ODQ2MjU4NjY1NDIzMTIzMzYzMjUzNDIyNDU1NjQ0
-MzQ0MjIzMzM0MjM1NDg3NjU3NDMyMzQyNDUzNzQyNTM1NDU0NjUzMjQ4MzMxMjIz
-MzE0NDY4MzQ0NTAxNzMyNDIxMjE4PTM2MzI0MjAyNDM0NTM2ODM1NTEvMDY4NjY1
-NDM0NTM0OzYzNjU1Njg5NjM2NDY1MzQ4NjU1NzY0NTIyNTQ2NDYyMjM1NTU2NTEx
-MjI1MzE0MzYyMTQzNTQyMjIyNDU3NjMxMzQzNjY2PTc0NDUyMzE0NDY5OTk4NjQ3
-NTQ3Nzc3NjY1NjY2NDQ0Mzc4NDQ0NDY1MjE0MDI1NjM5NzY2NDU1MzQyMzM1Njc2
-NDQyMTM1MjM5NjQ0OTUyODQ0NzM0NDQ1OTU5NjU1NDY4ODY1Njk1NTU0MzE0NjQy
-MjY2Njc4OTQzMjU1Nzk4NTQzNTQ5PDcwMDA0NjY3ODU0ODc2NzQ1NDQ2NTQ1NjU3
-MjA0MTEvMTExMTE0Mzc4MzU2NDQ3ODk4Njc0NDM1NjIyNTU1NjM0NDc1NTI2NTY2
-NTQ2OTk7OTc2Nzc2Njg1MzY1ODY2NDg5NjU2Rz01NTMzMjI1Njk3Ozc4NjUyMzU4
-NDU1MjQ1NTU2MzM1NjYzNDMyMzU0NDQ0MTM1NjM0NTM0ODw2NTk2NTQ3MzQ3ODY5
-NzUzMzQ2ODg7OTY3NTY1NzY3NDYzODo5Ojc4ODs4Njs6OTo5PDs5Pjo/UoFse3qA
-f09EPDs5OT0+OkJpe2hFQkNBQj49PEdteWFIRkFEfJFrRENBRD5CPDo9PkFBRUpE
-QkZFP0Q/Oz1CPTw+Ozo+QD88PTw9Oz4+Ozk7Pjw9QUNGRkI+QUNDRkJKR0Q/QUNF
-Q0BFQ0RDQTw+QD8+Pz49Q0NBQkZIRkVDQ0NDQUhCSEdFTE9KUlFPUEVIYLTL1Nvf
-4+bo6urq7ExIRkRERUNCQkJDRkVGQ0ZDQj1DP0VAPTw8OkA+Pj9AQkE+QDxDQEND
-QUJAQT9EP0BBRkE8P0E+Pj08Oz9BPD08OTg6PT1APTw/PD09Ojs6ODo3PDk6QEA8
-Ojs5PDo6Ojg8PT4/PDYzNTo3Ozo7PDs9Nzk2NkA8OTc4OzsxODQ1Njc5Ozg7NzY3
-Ojg5NDg4NjY5OjU5OTg6PDw4ODo2NjU1Njc4Ojw7ODlAPjg6Nzg4Nzg3Njc5OTo3
-PDc4NTU5OTc2Nzc4Ozk3NjU1MjQzMjM2NDMxNTU2NjU4ODg5NTY3NjQ2NjU0Ojg2
-Njc2Njg3OTQzQjk2QTg4NzszMzU0NjAyMTU2OTs7Ozc4OTc5OTg5NzU2Njc0NjU0
-MzU2NjU2NzY3NjU0NzQ1NzU8NTk6Ojc6NjU1MjY0NzQzNTc2NjUzMzI5OTg3NzY2
-MjQwMzE0MzAzNDE1NTUzMzQ1ODQzNDQzMzU2Njc0MzM3NjM0NDQ1MzY1MzM1NDU2
-NjEzMzMzNjc2NDY3OzY0NTg0MzY1NDMyNDI0Mjc0MjAzNDIyMTQyMjI0NjY1Nzk2
-NjQ0NDQzMzM0NTgxMjg3Nzk2NTI2MzQzMDIzMjE3NjMyMzQyMzY1NjM0NDI0MjM0
-MDM0MjM1MzQ1MzIwNDU0NTI1MjQ0NDIzMTI4NTQyNTQ0MTU1NTI0NjY3NjQxMzQ1
-MzI1NjU1NzU1NDI3OTc1Nzc1NTMwMzMzNDQ0NDQ0NTQ2NTIxMjQ1NTMzMzk1NTU2
-NTQ1NDY0MzQ0NTQ1MzMzNDMzNTQ0MzUzMTQxMjIzMDMzMjUzNjY2NjY1MzU2NjQ1
-NTY1NTc1NDc5OTQzMjM0NDY1NjQzMjM0NDIzNDQzNjY/NDM0MjUzNTY3ODU0OTo3
-ODc3OTw4ODc0NDM0NzY0NDM2NDUzNTI1NjQxNDM4Nzc2ODg2Njc2NjMyMTQ6NjIz
-NDE1NTYzNjQ0NTc1NTIzMjY2NjMwMzM2NDM1MzU3NjY2NTQ2NTQzMzQzNzYzMzQz
-NTY3ODUyMzQ0Ojg3NjQ0MjIxNjU1Njc0MzU2MjQ0NzYyMzQ4NDQ1Njg0NDE0NTM2
-NjQ0NTU0NTg0NDU0NTQ1NTU1MzM1NTIyMzY5NjU1MjQ4NTU4Njk5Nzc2NzY2NDQ0
-NDY1Nzg4OTY0MjMwNDY3ODY4NjY5NTI2OTo5Ozo2NTQ1NDY4NjY0Nzk5Nzc2NzY4
-NzU3NDI0Nzk3OTk5PDg0NzM0NjU3NTY0MzUzMTs0MjIyNDQzMjM0MzQ0NDM0MjU2
-NDQzNTcyMjY2NjU2Njg3NTQ4NTU0NTY1NjY2NDY5ODkzNDY1NjY5Ojg1Njg2OTk5
-Njg4Nzc2NzQ3OT85Pjg8PUeCkV5MU0tFPjs+Ozs8Oz1EWGVWRUA+Oz8/QDo+Ok1p
-aFNBQEeEm29CQUVGREZGPUBAQkFARUVBPUJBQUFCPUBBPT5BQkJAQDw8PT5APkBD
-QT46P0FGQT1DQz5CQUNGRUdIR0RBPkJAP0JAQDw8P0M9Qj4+PUVERENFQURFRUFH
-RUtISEhISEpKT09STVBRSkZhtMrV2+Dj5ufq6urqSUpGRERBREhHQUZGSktGQ0RA
-P0NAQTs7OTo9QDw8P0FERjw9PkNCQkJCQT8/QkRCQEE/Pjw7PT05PT9CQT0+PDk9
-Pjw6Oz86PDs4REBAQT05Nzo4Nz09Ozs7Ojo9QkM+PDs8Pz07OjU3OjczODY2OTk3
-NzY3OTo7PUU+PjY2ODs8OTg6OTo7ODg5Ojk5NzY7Ojo6OTY5Ozc6Ozk6OTk2NTQ2
-OTs7OTo7PD0/Ojg5OTc3ODY2OTk4ODY4ODk3NjY1Njg4NzY3MzIzMDQ1NTY3ODc3
-MTI0MzQyNjU2OTk8NjY3Nzg2NDg5NjM1Nzc3MzI1Nz8+Nzc5NTk4NjU0NTc1OjQ1
-NTg4OTg3Nzc2NTU2Nzk5Nzc7Ojc6OjYyNUA1MzU4Nzk2ODMyNjY0OTc3ODQzNTc3
-NzQzMzI1MTM1NTYzNDYyNEI4Njg4NTQ1MjMzNDMyMzIzLzMyMzQxMjI1MzU0NTY0
-NTU0MjM3NTg0MDAyMzQ1MTU1MzQ2NjMzMzE0MTQzMzMyNjU1ODYzNDIzMzIxMjM0
-ODc3OTYyMjEzMDExMjE0NDMzNjQ3ODY2NTM3NDY1NjUyMzc3Ozo1NzQ2MzEyNDc5
-NTQ0NTY1NzU1NDU3NDM1NDMzMS8yNTM3MS8wMTY1NzUzNDM0MzQzNDQzMzQ1MzEu
-LjEyMTI1NjQ1MzI0MzI0MzQzNDIwMTEvMjExMjM0NjU3NjM2NjMyMjMzMzQyNzc0
-NTUxMjIyMjIyNDY1NC8xMzIxNTM0MzU3NDY1MzQ3NzY2NDU1NzQ2NjYzNDM0NDE2
-NDU4NTY5ODc1MTAzMzE0NTAyNDQzMzMyNDYyNjU0NTo6NjQvMjM1Mzg1NDU2NjYz
-MjY0NTY1OTQ2NjYzOTg2NTY1NDg6OzY1NjQ2Nzc4ODQ1NjM2NDYzNDU3MzM0MzQ0
-NDQzMzMyMzU3NjU1Nzg0OTc0NDQzMzIzNDM2ODc1NjU1NTQ0NTU3NTU1NjQxMTE0
-ODQ0NTg2Njc2NDU4ODMzMjIxMTMzNjY0MzI0Njc5NDM4NTU1NTQ2NzU0NjUzNTIw
-MTI0NTQ0MzU1NDg5MzQ0NDMzNDQyODQzMzI0NDIyMjY3NjY2NTMzNjk3MDEyMzM4
-NTU5NjQ2NDU2NDQzNDQ3NjY1MzU2NDMyMzU1NzY1NDg5NjQ1NjY1MzQzNDc5NjY5
-Njk1Njk3NjY0Nzc5Njc2Nzo2ODY2NjU1NDQ0NTM1NTk2Nzo4NDs1NDM2Nzg2MzI0
-NDI2MzQ0MzIzMzI2NDc2NzUzMzIwNTQ0MzU4OTI2NjM1NDU2MjU4OTk2Nzk4NTU0
-NTM4NTc4NDM2NDY2OTk4OTY1ODc0NTY1Njc4OTg4Nzc6PT46PTtKaJGARDs+OT9A
-Pjw/Pjw8QFp3V0RBQj46Ojs7Qjw/RmRoZF1FTYOHTkQ+PEA9QENCPDxAPj07Pj0+
-Ozw/PT4+QEBAPkFAQUJBQEA+Q0BCQUA+OztER0I6PkFBQkBGRD1BQ0NHQj9DQD9C
-QUBBPUFCQ0hCQENERUFDRkU+PUJARkhFRklJSUFGSE1MSUpGRkZHRma0ytTd4OTm
-5+np6+pGRUdGRUlJT1tIQ0NMSElHRENDQDk9Ozo7PUA+Q0VIREREPj5ERz5EQkA+
-P0E/PT0+Pj0+PkI9OkI6PkA6PDs7Ozk+PTg2O0A8QD5APD09Oz06QD4+PDk6Pjs8
-Ojs+QT08PDw7Ojk9NzY5Nzg5Ozo5Ojw5NTYzNjU7QUM+PTc1PDw7OTo6PTY1NTk8
-Ozk6OTo3Ojk7ODc4Ojc6ODc4OTk3OjY3ODo5ODg4ODY3NzY5OTk4OTgyNTY4ODhB
-OTg6NzY3Nzc1Nzk2NDI0Mzc3Nzo4ODo5NDY0NDQyNTg2Ozw4MzM0NTY3NDY4NjU1
-MzI0MjQ2NzU4NTU2NDY1NDU2OTo5Njs8Nzc5ODU2NTU0NjY4OjU5OjcxNjU5NzY1
-PjQ4Nzc0NzY0NDE1ODY5ODY0NzY1NDg3ODY0NjYzMTEwNTMyNDQ2OTQzMzMyMTQ1
-ODk3NDAzMjM0MjI0NzY1NjQzMzIzNTU2NTQ4ODQyNDEzNDQzNDMzMzU1NDQ0NDQz
-NDMzNDIyMjI1NjU0MzIyNDM0NDU1MjI0NTIzNjMyMjAxMTM0NDk3NTY0OTU3NTU1
-NDQ1NTUxMDc0Njc2NTU2MTQ1MTc2NDYzNDQ0Ojg3NzU0NjY0NjY0MzQzNjYxMjAy
-MjMyMjQyMTE3NTU2MTMyMjcyMTI0NjQyMTQ1MTExMTAxMjQzNDMzMjMzMzM1MjUz
-Mzc0MjU2Njg3NTMyNDY1NjUyNDY2MTQzNTQzMzQzMTIyMTI3NzYyMzQxMTIwMDc0
-MzY1MzQzMzMxMTYzMDM0NTQ0MzI0MzM0NzY2MzM2ODQxMTE0NTQyMzU3MjM0MzQ0
-MzI0NDM1NDc4NTY1MjQzNTQ0NzQ0NjUyMzYzNTQzMzM1NzY0NjQ0NDc2NTQyNzo4
-OTU3NDc2NjMzMzI2ODg3NzQ0NzU0NTU4NjQxMjI1NjQ2NTU1NzczMDQ2MzQzNTM2
-NjUzMzM1NzMzNTEzNjU2NTQ1NjMyMDQ1NjY0MTU0NDQ0NTc0NTYzMjMzNzQ1NDQ1
-NzY3Ojk2Njc1NTk4NzY2NjIyMjI0MzUzNjY1NDI0MzQyMTE2NDIyNTc2NzQ2NjU0
-MzM0NTcyNTM2NTU5ODY1NjQ7NDQ3OTU1NjU2NjU0MzQ1NTM1OTY4NTI2MzUzNDQ0
-NTQ0NzU1Nz05NjQ1NzhAOzg2NTY1MzIxNTY3Njc2NDQ0NjM2Nzc4NDc1NzY2NTY2
-NzUyNTc4NjY0NDY1NjUzMTY5OjMzNDIxNTQ2MzU3OTY0Nzc2NjQ4NTQ0ODYyMzIy
-MzUzMjQ1MjU2MzQyMzU0NjU4Njc2NTc4ODc2Nzk2NzU3NTY4ODc0NDk5NjY5ODY2
-Ojs8Ojc5Nzo4OEBFV4Caklo/PTw6PD5BQj87PD5FhHlHPEA5Oj09Oz4/Pz08VHZz
-e2FjcmRLRT4+PT8+PDo9QkNCQT07QUA6PD5APj48Ojo+Q0FAREBAP0JAQD5BQD08
-Ozo9PUNAQ0NBPz1AQ0JDQUVIRkRDQDw+PjxDQjs+QkREPj9BRkpEREJHREdGRUNE
-QEZDRUlPSEdHR0hLQj9CXbPJ09zh5OXn6err60pQTUZDS0ZISkhGRkpFR0VAPj4+
-QEA/Qj9AQ0ZIQ0JBRD89QD1CQ0RHQUI9QEE/PTw+Oz5BPjk6PkBAPj49Ojs+PEA6
-ODw/Ojw3Ojk5Ojs+Ozg5PTo7PTg5Ojk9Pj5APjw6Ojs+OTY4OTo8PTs3PTk1NUA7
-OTk3N0BAQT04ODczNTg3Nzk5ODc2ODg7ODk8PD04OjY6Ozo6ODs4Ojg8Pjw8OTg6
-OTc4ODc3ODk5ODY3NzU3ODo2Njc7Nzo8PDo2NTY6NzU3NTk2Nzg4NzU1ODs2Njk0
-NTU0MzQzNTY1ODczODg3NDU2NDU1NjAxNTg6NjMyODQ3NzY1NjM0NTQ2NzY3NzY3
-NjY3NjU2NDU4NjY1NTU3Ojo4NDMxNzU3NTQ2NDU0MzI4Mzc4NTU3ODY3NzUzMjQ4
-NTc1OTY0NDExMTIyNDc+MzI1QDczNTU1NzEyMzUzMzQ0NDU1NDY1MjEyMzQ2MzY0
-MzQ2NzYzNDQyMjUyMzM1NTQ0MjAxNDUyMDM1NjM0NTc1NDIzMzQ2NjUzOTk3MzQ2
-ODg3NjUyNDQ3NzM5Njg0NDo2MzEyMjI0NTc1NDMzMTMzNTU2NjQ0MzMyMTM0NTYz
-MzY0NTQ1NjM0MzU1NDY0MzMzLjEzNTQzNTc3MzMzNjc3MzM1NDY1NDIzNDMzNDUz
-MzUzNDUzMzEyMjY2NTYzMDQ0MzIyNjY1ODc4NDU3NzU1NzY2MzMxMTEyMzE1NDUz
-NDIyMjMyMzEyMjEzMzQyMjMxMTQzNDQ1NTQ1NDI0MS8xMjM1MzIwNDQ0NjI0NjQy
-NTYzNDY1Njc1NDE0MTI5NzY1NjQ0MjQ0NDM2ODYzNTQ2ODQzMjU2MjIxMjU2NjQ2
-ODM4NTM1Njc0NjI4NDMzNjU3OTQxMjM2NDQ3MzQzNDQzNTg2NTUzNjU0NjY2NDU0
-NzM2NjYyMDM1NzUyMjAyNDIzNTQzMjQ0NTQ0NTY2NzY2NjEzNjc0MzIvMzQ1NjMz
-NDU2NTM1NDg1NDQ1Njk1NjMzNDQ1MzQzMzU2MzU0MjQ3Ojk0MjM2ODc1MzI0MjU0
-MjIyNTQ2NDEyMjAyMjEzODY4OTM0NTM2NTY2NzU1NTIyMzMzNjYzLzE2Njc0NzY0
-MzU2MjM0NDg1MzQ0NDM0NzMyNDg3Ojo2NzU0OjY2OTs5Njc5Nzk1NDY2NjY1ODg2
-Mzc3NjQ1NTg0Njc0NDU0MTM2ODg3NTY1Njo5NTU5OTYzNDQ0MzQ1NzUxMjIzNDMz
-MzQzNDQ1Njc4NzQ1NTU0NDU0NTM0NDY1MTQzMjQ0MjMyMzQ2MjQ2Njg3Nzc3Nzc2
-NTg2Njc4OTY0Nzc1Mzk4NDQ1NjU2Ojc5PTg2Ozg4PEFPeZWlrp2AR0BCPzw4OTo7
-Oj87PVx1YUI/PkA+Pjw8Pzw8PUBIdHRhZmpjUkU9QkI/PEE9OjpBQjtAQ0I9QDw+
-Pjw/Oj0+Pz5APT8/Pjs8Qj4+QD88QEFCP0FBPz4/QT47QD9EQ0JDQ0JAQT0/PkI8
-PT9AP0E+O0JESENDREVDQEVJRkVESkpERElJTUhFRkVHSUtEPEBhscrU3ODj5ejp
-6uvrSUdISUVKS09IRkhHRkJAQkNBPD1FR0RBQERDRUQ9PkQ+QDtBPj4/QT5BQz8+
-PEJAQTw8Ojo7Oj0/QUI8PTw+Pjk8PDw+PTs5OD86OTk6Ozw/Pjs2ODg3Nzk5PDw4
-ODw6PD04OTg5OTY7OTs4OzY5OD03Ozc4Ojk6OEE5PEA3OTo4MzM3Ozo7Ojk5Ozs7
-ODg3OTs2Ojc3NTg5OTo3PT47PUM5NjY3ODk2Njc5ODo3Njo3ODg2Ojg3Nzg3ODs1
-ODo5Njg1NDU4ODU4O0E8NjQ1OTc1Nzk3ODk0NDM0NzM0NTNBNjY0NjUzNDM1Nzc1
-Njo3NTc1Nzc3NTY0Njg4ODY2NTIzMTY2MzQ3Nzg2Njg2OjY1MjI1NjU0MzUzNUE1
-NTQ0MjQ1MjU2NTY2NjY1NTc4OTQ1MjIxMjgzNDMvMS8yMDUzMzwxNDM1Nzc6OTo0
-MDQzMDIzNTUwMzQ2MjMzMzM3NDQ1NTg4NzMzNzQ1LzM3Njc0MzM0NjM0NzIxMTM3
-NzU0NTo1NTY3ODc3OTQ1NTQ0NzY1NTY3ODQ2NDE1ODU0NTQ0NTU0NzgzNDU2MzQ0
-NzczMzQzMzMxMzIyMjI0MzM0MjQ1NTc1MzMyMjQ1NDQ1NDIzMzEyMTMzMjM0NTc1
-NTQzNTg1MTY2MzQ2NjQyMjMyNTczNDYyMzU4MzY0NDEyMzExMzAyNDY1NjY1MzY4
-NTU0MzM2ODc1NDQ0NTY0MzM2MzMzNTY0NDIwMzMzMzIyMzEzNTUzNDU0NjU1NDU6
-NTQ3NTMzNC8wMzY2NDMzMTExMjUyMzQzMzU3MjY6ODYzMjQxMjY4NjY2MzM0NDQ1
-NjU1MzQ0NTMyNTU0MzEvMjQ1NjM0NzY0NTQzMzU5Nzk4MjI1Nzc1NDUyMTEzMjU2
-MjE2NjU3Nzg5OTg6NjUzMzMzNjU1NTg2NTM0MTIzNjMzMzQzMzEzMjU1MTEyNzQx
-NDQ3NjY2NDQzNTQ3NDM1MTQ0NDU2NTIzNDQ2ODc6OTY1MzU1Nzc1NDEzNTY4NTQ1
-NDQ2NDczNDQ2OTU1MjEyMzQyMzE0MTM3NjExNjMyNDIzMzAxMjIyMjEzMzU2NzU2
-NzM2MjMzMjIzNTU0MTEzNDQ3ODc0MzUzMzU2NTM1NzY4NTc5NDI0MjIyMzU3Njc3
-Nzg1OTo4Ojg4Nzg2NDY4ODo2NDc6Nzg6NjU2OjU1Ojg0NDQzNTU1MjU1ODc0NTU1
-ODg2NjU2NTUzNDY0NTQ0MzM0MzU2NjMzNDYyNDQ0NDQzMjAyMzQ3NDQzNDg3NDM0
-NDY2NjIzMzQ3NTY2NDU1NjU3NTc1NTk6NzY2Nzg4Nzg2NzUzNjg8NTM1Njg3NTg4
-Ozs5Nzo/Znyfs7mwmF5BPUA8Oj07Ozo7Oz5Oa2lGQ0JAQkM+QDs8QDw9Q0JJbWpw
-aldFQkRAPD08PTw/QDk8PD5APT89PDs6QUE/PkNBPTs9PkU/PT89PTxCQ0Q5OUFC
-PD88PD08Pj49QkI8P0BCQUA8PD09PT5DPz5BQEFBQkVCQUBDQj4+SkRDRklKRERA
-TEtGSklUTU1NSExIR2y1ytPb3+Tn6Onp6utJT01PR0dJSEpGQ0dFQkJGRkNCPUBD
-QT8/PT5DQT09QT4+PDxBR0JDQUBCRUJCPT5DQEFBQDpBPT8+PD09PEE9PUA+PTo5
-OzpAQjxCPzs9ODw8PTxBODc1NDg3Njc7OzpBODk7Ozw7Pjo6Nzo4OTo5NDY7NzM2
-ODc9OTc4ODk2NTk1OTkzODs4OTw7ODg2MjU1NTs4ODY3Nz46ODY6NDg9Wzs2NTY5
-Nzg2Nzg3NDUzNTg3NTY2ODw5ODk6NjY3Njc6OjY3Nzc6PTo3ODk3NTMzNjg2NTQ0
-NTM0MzU5ODU1OEMxMzIzNTYyMjMxMzY2MzQ5NjczMjI1NTM0MjczMzUyMjMzMzM0
-NDY4ODg1NjY3NjQ0NTY1NjU4NDY0NTY1NDY1ODY2MzQ4NTU4NjQ1MjQ5NDczMzU0
-MzMzNjUyMzU2NzYyMzQzNTIzNTQ1MzQzMjAuLjAzMjMwMzQ0NTUzMjM2NDcyNDMz
-NTc1MS8zMjI0NzQyNDExMTI1MjAxNDY0NTQzNjUzNTU2ODk8MzQ0MzUyNjk0MjU1
-NjY6OjY0MjYzNjQ2NjUzNzYzMzY0ODYyNjQ1NTU2NTYzMjQ0MjM1ODMzMDM0NjYz
-MjEzMjI2NTQ1Njc4NDAzMjUzMzU0NjQwNDY3NDc3Njg2NTQ0MzAzMzM0MzU1MjI1
-NjY3NjY5NzQyNTMvMjQyNjIzMjU1NTU0NDY1NDU6MzMzNzU0Njg4NDQ1NDQ0NTU4
-ODU2Njc0NjQ1NTEwMzAxMzY1NTU0MjI0MzQ2NzY1NzgzMjQ0MzEwMjQ3NDYyMjMy
-NDQ1NTY1NzYyMzU0NDQ1NzU0MzQzNDY0NDIxMjM0MTIyNDQ0MjIyNjg4ODY7PDg3
-NTQzNjQ0ODg4NDI0MTIuMDMyMS8yNDM0NzMxNDY1ODQ1NzU2NTUzNzc1NTQ0NDU2
-MzYzMzAzODk1NTQ0NTM1NDQ1NDMzMzg2Njc1NTc0NTUyNTU4NjM0MzIzNDY1NjMx
-NDU2MzU4NDM1Njg1NDM1Nzc0NDY3NDU1NzU3NTI2NTIyMzQ0OTUzNDM1NzY2NjU4
-OTM4MzI3NjY2NzU2NDUzMjM2NzY3NjUzNTQ2NTQyMzMzNDg3NjY2NTU0MzY2NDU1
-NDU0NjYzNDQ2NjQ0NjY4NzU2Njc5PDg1NzY3OjgzNDU1NjY1NjU1Njo6NzU3NTU2
-NTg3Njk3NjU2NjY0OTY2ODUzNDY1NDIyNDo6NzczNjc6OjY1NTY2NTMzMzY2NDQ1
-NzczNjUzMzI0NTM0NTEzNDQzMDEzMjM3NDM0NTY0NjYzNDUzNTc1NTU2NTI1NzYy
-NDUzMzY0Mzc1Njc1Njc0Nzg4Nzc1Njc6NTc7PVWDn7KytquMSj47Ozo6Pj47PTw9
-RmxnT0JBPjs4OTw+QDw5QEBAP0BLXHViTkRBQT1CQUBBP0E9PkNCQD87QEE9Pjs6
-QUVIPkE/PkFDQkFAPkBAQEBAQUA8PDs+Pj0+PkNEPj1BPz88Oz5BRENCP0E8QkNA
-QDw9PkBGRUFER0pDQkVGSUdFSEJJR0lJRkNGSktPTkpLRkNCZbPJ1Nzf5Ofo6enr
-609LR0NMSkhIRklFQEQ+P0RDQkJFR0RDQ0U9Qzs7OjtCQkRAPT5FPj5CQT9EQTxA
-PD9HREE/Pjw8Pj5BQUA+Pjo7Pz49Pzs5Oz1BPkI7Nzg6Oz5DQjs8Nzk5ODg5PDs7
-OTY5Njk6PD09PkE9Ozg5OTY6PUA5ODs4NTY2OTk6Ozk0Njw5OTo2Njk2OjY5ODY1
-NjYzNzk3OTw6Ojc1NTY5Ojk8OzQ0Njg5Nzo5OjUzODk1OTk6ODY4ODc1NjU4NjY1
-NTg4ODY2ODY5Ojc0Nzk3NDU2NDY1NDQxNDU2Ojg2Nzc4NjYzNTY0MjU0MjI3NDEw
-NDQzNDU4MzczNjc2NTc3NzMyNTQwMzMzNDQ3NjU2ODY0NTY5NTc1NjUzMjU3Njc3
-Njg3Nzg/OTU1NTQzNTI0NDQ3NjM4NjQzMzg4ODk5Nzk3NDMzNDIxLi8xNDU1NTYy
-MDA1MjMwLzM0NDQ0NDY0NDQ2Nzc3NTU0MDMxNDI0NTI1NDE0NDI0MzI0NTQ0NDQ0
-NzQ0MzU2NjU1OzU6NjQ0NjY5Njg2MzU3Njc1MzU3Nzc0NDUzMzMyNjI0NjU1NTQy
-NDU0NTQ1NTMwMTM0NTUzMzY3MzU2MzQxMzMzMzM0NTU0NDAzNjQzNDI2OzcwLzMz
-MzY0NDQyMDI0NDYzNTIxNDMxMjU0NDMzNTU1Njg1NTI2NjU1MDEuMzM3NTM0MjMz
-MjQ1NTU2NTU2NzY2NTY0NjQ0MzU1NDU3NzQ0NjczNjEyMjIwMDM0MzYxMzQ0NDIz
-MzI0MTI0MjIyMjMzNDM0NDUzMTIyMTY0NDU0MzU1NDMyNDU1MDMzMjY4MzM2MzE0
-NjQ0NTQ1MzUzNDI0NTU0NTY2Njg5Njc4ODc4NjUzMjM2NzIzMTQyMzQ0MTIvMjU1
-NjMyNDQ0MzUzNTY4NDY0NTQ0NTU2NDMzMzc3ODg2NjQ3Nzc4NjQ1ODY1MzIzMzQx
-MjY3NDA0NDU0OTU3OTY1MTMxMjM2NDU3OTU3NDM0NTg0NTUyNDQ5NTMyNDc2NjQz
-MjU2NzY3NjY0NTg6MzEwMzQ3NjY1Nzc3NDI1ODM0NjY5NDQyNTQvMjMzNDQ0Njk3
-ODg1MjU1Njc3MzIzNDQ0NTY2Njg5NDU1NTY3NjMzNjc1NDM1Nzc6ODs4NjI1NzU2
-NjcyNDU2NjQ0Njs5NjU0NTY3NDg5NzQ5NTI2NDY6ODY2NTg1Njk3NTI2NzQ2NjI3
-NT05NTQ1Njc1NjY2NzczMjM4OTQ3NTQyMzY1MzQ1MzE3NzQ0MzEyNTU0NDM2MzM0
-MjM2OjU2NDM1Nzc2NTM0NTM0NDY2NDY4NjU2NjQ6NjM0NTM2ODg3Ozk3ODY5Nzc7
-OTxSl5KQpK6jmGRAPj45OTw7Ozk6QkJjYldCQDw7Pjo4PD0+QEE9PT09PUFESEVE
-Pj4+Qj48QD9ERUFCQj1CQEA7QT08PDw+QkA/PkFGP0FCQUNDR0dEP0NCQUM/Q0VA
-RUE9PkA+Q0RBPkI5PERGRUJDRkc+REFEP0JBRkRBQkFEQ0VIREtKQ0BDRkxMTEtH
-SEhISU5LUUtHTkRptMrV3OHk5ujq6urrTExHR0hNSUVKRUtKQkVCQkNFRUVBQUVF
-QEVCQUNBPkFCQT5AQTw/QkVEPT1AQT07PjpAPUBCPUBBPTw6P0BBPT08Pz8+Pj09
-OTo+Pjs7Ojs7Ojo/Pjk2Ojs5QD88OTk6Nj47Ozk5ODg5Qzw8Ojk7ODo4PDg6PDc4
-NTg3NTo6ODY7OTk4ODw2NDk5Nzc6ODs3Nzg1NjY5Ojg7OTo5OTw6NjQ4NTc5NjY6
-Ozg0NjY3NzczOTs2ODY2Nzg0MzQ5ODc1MjI2ODU1OjQ0NTU2Njo6NDU2ODY5Njk2
-NTM0MjU3Nzo4Pjo3NTU3NzE0NTY2NjU2NTU1Njc0MjY2NzY4NjY4ODo2NTg0NTQz
-MzExNDI1ODg2Njg5NjU0ODU2NTc3NjU2NTQ3ODQzMzM2NDQ2MzE1Njc1MzM0NTU8
-OTg3NTo1MzMyMzExNDM0MjMvMTM2MzIzNTI0MzA0NDIyNjU2NTQyMjM0MzM1Mzc1
-MzQ2Ny8xMjQ3MzI0MTQzNTY1NjYyLzY2NTQ1MzIzNDQ2NDY0MzM0MDMzMzc5ODk6
-NTg4MjczNzY3NDM1NTMzODQzNzY0MzEwMTIyNjU1ODc3MzMyNTM2NDQ0MjQ1NjYw
-LjQ1MjExMi80NjM2NTM0MzI0MzMwNTM0MTYzMjU0MTI0MzI1ODUzMS8wNUg6NjQw
-MjIyNTY1ODcvMzQ1PDYyMzUzNTMzMTIzMzQzNDQ5Nzc1NDY1MjIxMzI0MjQ2NTg1
-NTQ0MjQxNDUyNDQ0MzM0NTY2NDEyMDIzNDM0NDIyMjY1MjY2MzI0MzM0NDIyMjIz
-NTUxMzI0NTU1NDQ0NTIzNDM0NzMzNDU3NjgzNTYzNDMzMzU1MjcxMTM2NDM0NDY2
-NjY1NjUzNDMwMzU0OTU1MzI5NjU2NDU0NTc2ODY5ODU2Njk2NTIyNjM2Nzc3Njc2
-NTIyNjU4ODU0NDU2NTQ2ODQ1MzEyMTI0NDMyNDU0NDM1MzI0NDc5NjQ0NjY5OjY4
-NjU3NTI0MzQ2MzQ0NjQ1MjQzNTY0ODg3OTc4NTMyMzYyNDU0NDMzNTc2OTUyMzM0
-Mzc2NTc2NzY1MjU2MzgxMTIyNDU0NTg3MjM1NDU1Njc1MjIzMjIxNTU0NDQ1NTM1
-Njc2NjY1NDMzMzQ0NzU3NTM2NTYzMzU1Nzc2NDY1NTc4Nzg4NjY3Nzo7ODs4NjY2
-NTY4NTM0MTA0MjM1MjMyNDYzNTY3ODc4ODY3ODc0MzMzNTU1MzM0OTY0MTM4Nzk3
-MzQzMjU2OTY3NzU4NTQ0NTY3NzQ2NTM0MzY3ODU4OTo6OTc2NjczNTU1NTQzOTc1
-ODU4OTo4NDQ1NjM1NjczNTc3OTc4NjU3PmKAhHd+jpV0Tz47Oz08PDk4ODg/UoJk
-RTw4PD0/PDw6PTo4PDk6PT5BQEA7Oz5BPT5APkA+PkA/QDpAPz0+Pj89PD8+QUFD
-P0M9QENBQkRGQEBFR0JBQT8/QD89O0FCQ0ZDPTw+PUFAQkA8P0BHRkpCQkRDQkQ/
-QkFCRkdJR0ZBO0JCRURDRENHSEtMTElNT0xMS1FJSlFMSmyyytXc3+Tm5+np6+pH
-SEhJSUlLTURCRElHR0dDPkRBQkFBREFAPENEPz49QTo/QEFCOT5CPDs6Oz0/PkA/
-PDk+QD5DRERBQT48Oj4/Pjo6Oj44Ozs6Oj1CQkA7PDs5PD08PTw4OTo5OTk4OTk4
-Ozw6NTo9Njc5OTo7OTg5OTs4OTc6Nzc2Nzg3Njc6ODo2OTk7NzU2Nzg3OTg9P0A5
-OTw5ODg4Njc6Qjo9Nzk6Nzk4ODg3Ozg4NTY5Ojc5Njc1NzU4MjUzNTQ1Nzc4NTY2
-NzU2NTU4PDY5NzU3Nzo4Nzc2NjY3Qk03NTQ3NDY0Nj47Ojg1NDU3ODY1Nzg4OTY2
-NTYzNTg3Nzg3ODY4NDU2NTg2NzczNDY0MTQ0NDI3NjUzNDY4NjQ2NTQzMzUyNDY0
-Nzg1NTI0NjYyMTM0MzEyMjQ1NzU2NDQ4Njc0NjcxNDIyMTAzMjQ0NTM0MjM2OzM1
-MTM1MjQ1NjQ2NjIxMjQyNTEzNjg1Nzg2NTUzMzIyNTg4MjQ1NzU3NzYzNDM2NTQw
-NTMxMjI1NjQzNTIxMzcyMzU1NTg1NjUzNjY4NzUyMjIyNTc3ODYxNTMyNTc2MzIy
-MzM0NDUzMjQyMjMzNTMzMzY1NDo2NDQxMTEyMTQzMjEvNDM2OTk1NDMxLzU2MjMz
-NDY1NTY0NTE1MzI0NzI1MTA0QzUzNDIxMjAwMjAzNzY0NjY3MjIzNDM1NTUzMjAy
-NDM2Mzc0MjIzNDMzMC8wLzI0Mjg0NzQzNTUxNjQ0NzM0NDg2NTc3NTQ2Njg0NDQz
-Njc3MzIyMzI0MzQvMTM1MzM0MzIxMjMzMzIyNDU0NTU1NDc2OTY2MTM1MzQ0MjMy
-Mzc0MjQ1NTMyMzczNzczNDU1NzQ1ODU1NTI1NTg3NTQ3Nzo1MzQ1NjY6NDEzNDQ5
-NjU1NTc1ODU1NzY2NTY2Nzk3Njc3NTM3ODg4NzU3NjIzNDY0NjQ1NjU3NDUzMzMy
-MTc1ODY0NTc3MTU0ODY4NzA0Mjc3Njc2NTUzMzY7NTY0NDEzMjQ1NDM2NTMzMzU1
-MjU0NTIzMzMyLzI1NTMyMjMzLjExMzQ1MTg5ODQ1NDU3NDM0NDQ1NDMyNzYyMzM1
-Njk3NjMxNDM1NzU1NTczNjY1MzQ1Njc0NTI0NTIzMjIyNTU2NjY2NjY3MzQ0MzU0
-NDY3NTg2NTg3Nzg5OTg2MzY2MzI0NjQ4NzQzNjQzNTg2ODg0Mzk4ODU0MjM5Nzc0
-Njk4NzU1MzIzMzY5Nzc5ODU1NTUzMzI2NDY1NDUzNDY1NTY1MzU3Nzg3Ojc0NzQ0
-NTQ1NDU2Nzo4NTY1NDQzMjIzNjg2OTQ0NDQ2NjcyMzI2NjQ2ODc0Nzc4ODg1NztO
-c2taUVZ7g19FPTs5Pjw6OTo9PUVhako/Ojs/PTxBQkA/Pj89Ozw+Pj9APz4+Pj9D
-PTo3PT4/Oz46PD89OD1APjg+PT5AQEJDPkE/QD5EQURDPj8+PTs8Pj4+Pj47Pz1D
-R0FEQ0E+P0JBPD5ERkRFRT0/QUE+RUVCRUJBREhGRkRHOkFDPkVHREtISEtMVFBK
-Sk5KSktOS0RHa7DJ1d3g5Obo6err60lLRUhHRkVGR0JAR0lCRERDQ0lISUBAQ0JE
-QUFEQTo7Pjs7PTw5Pj06Nj1AQD09PDs7Pzo5PTo+OD1BOzo+Ojs4Ozw7PDs6OTo6
-PTw9OTg8Ozk9PDk9Pz88Ojk/Ozg4Njo8O0E4ODY7Ojc3Njc6ODc1Nzk3OTg0Nzg6
-ODg3ODk6ODk7PDc1Nzg1Njc7Pjo5Oz89Ojc1NTg8ODc6Nzo4ODk9PT47Ozg0ODo5
-OTs4Nzk4Nzg3NjY3NzU1NzY2Nzc5ODo2NjY2NzQ5ODc5PDg4NTc5ODY2NDI3Oz04
-ODQ1Nzk4NDY1Pjg2NTU1NTY1NDo5NTg1NDY0MzU1NDc3ODU2NjMzNzc2NDc2MzY1
-MzU3NjUzNTM2NzY1Nzc1MzY3NTI0NTY0NDc3OjY3NzQzNDk4Njg3Ozk1NTU5Ojg1
-NTU1OTIyOjIxMzEzNjU1NjMyNTU2LjE1MzI5NjM2NjY1NTY2ODY0MzQ0NTU2NjYy
-LzAxMzUzLzQ4NTY0LzI1NzE1MzIxMzM2NTQ3NjQ2NjIyNDQ0NDQyMzc2MzU0LzE0
-ODk2NjI0NDQ0NzY0NDY1NjQzNDQ3NDQ3NDA2NDQzNDIyNzU1NjUzMTIyNTMxMTQw
-LjEyNDYxMzY4ODM1OTUzNjEzNTY0MjYwODUxMzQxMzMyMzQ0NzM1MjMzNDA1NDQz
-MjQzMzI2Njc1ODQ1MzM1NjY1NDQ1NDMzMjQxMjIzMTIxMzEvNDIwMTEzNTMzNDY1
-MjQzNDMzNTU3Nzc2MjQyNDU1NzQ0MzY0NjY2Nzc0NDMzMjU2MjQ3MzIyNDIzNTQ1
-NzMzNjY4OTU5ODU2MzQzMjM2NDU3NjgzNjk2NDM2MjQ3PT07NjMzMjI1NjYzODY2
-Njc2ODw3NDs1NDQyNTI3OTo7NzI0NzY6ODUzMTM1Njc2ODY3NTU2NjY2NTIzMTMz
-NDMzNzU1ODc1Nzc2NTc4NjE2Nzc3NTM0Njc3ODU1NDc4ODc1NzY1NTY0MjM3NDQ1
-NDQ2NDc1NTUyMjU0NTc2MjIzNzc3NDQzMjQ0MjI0NjQyNDQ2NDIzODU2NTQ0NzMz
-NTY0NTIyMjE1NTI0NTI0NDY0Njk3NjQ1NjY1NjM1NDQ3NjY4MzQ1NDQ1NzU0NTMy
-NDY0NDUzNTY0NTU0MTI1Nzc3Njk5Njs1NDczMjM1NTQ1Nzk6ODY0NTQzMzU2ODk1
-MzY0NDU4NTI1OTY2NDQ1Nzk3NjY2Njc2NjY4NjY0MzQzNjc3Nzg1MjQ2NTQ2NzY1
-NzY2NDg1NDc3NTY3NTc3Nzk1NDo3NDc1NTc2NzY3ODc4NDMxMzUyNDU0NjYzNjQy
-MjQ2Njk1NjY5RTk7OTk3ODk5Njs8R15bT0I/Q3N4UTw9Oj49OTo9PT1EamJOPjw/
-O0A+PT04Ojw9PUFAPkBDQD5CQkE9PTw7PUE+QUE+QT49QT1CPj08P0RCQkFAPTtB
-PURBPz89QkU/OUM+Pj0+QD4+PT1AQ0BGRT8/QkA/Pjs4QENARURGSENEQ0hDQ0FE
-RURASEVFR0RHQUBHSEZKSEpNSklPTU9MTE9KTlJPSUpYrcnU3ODj5ujo6+vrR0dG
-RURESEhDQj5EQ0VBRUlCSElDQEFKQkNDRkI+Qz4+ODg8OT49PDo6Ojs7ODw9PTw8
-QDY1PDc2Ojs7Ozs6Oz08Ozk5Ojs4Oj07Ozc3Njk3NzY5Pzs5PD09Pj4/Pj04Ozs5
-Oz85Nzk4PTk1OTU3Ojk3Nzg5PTpAPzo4PDk4OTg3OTg7NTc5PDc1Nzc4OTg3Ojs8
-Nzk5OTw8OztBOzc2ODo5Ozc2ODw5Nzg2Nzs5OTk4NjU3Ojk5NzMzNzU1ODg6Ojk4
-ODc3OT47PDk2NjUyNzY4OjY2NjY2MzU3OjY1NTc2Njc4Ojg3NzM5NzU6Nj07Njc2
-NTU5PD83Nzg4NjY2MTU1NzYzMTU4ODk5Njc4ODUyNTM1Njg5NzU3NTQ2Njc1NjU2
-NDQ0NDc5OTc1NjY3MjU2NTI3NTE1ODY2NjY1NjY4MzM1NDU3ODY4NjAxNDI1MzYy
-ODg5Ojc2MzQ2NDMyMDI3NjY4Nzg1MzEzMzQ1NDU1MzU0MzM0NTIzNTQ2NTU0NDM3
-NzU3NzU2NDQ1NTQ0NjQyMTY2MTIyMzMzNTEzNTg1MTU0NDMyNDY1NDI1NDM0NDU2
-MjM1MjM3NTUzNDI0NDM1MzIvMzY0MjIuLjMyMjE0NTQ2MzQ1NTUyMzQzMzQxMjIy
-MzQ2NDY0MzM0MjU1Nzg2MzM0OjIzNTY1MjQ2MjAxMTU2NTQ2NDc1NDMzMzQ2NjQ1
-OzQ0NDQ1NDMwMTQwMzIyMTY0NjQyNDU2NjQzNDc2NDQ0NjU0NDQ3MzI1NzMzMTMy
-MzM0NTQ2NDUzNTU1NDMyNjIyMjUzMjQ1Njg2NTUzNDQyNDMyMjo7OTQzNDAwMTUz
-NDY0NTQ1ODY4NjM2MjExMjQ2NjY0MzI1NjM0ODs6ODg1NTo2NjQ1NzcyMjUzNzk4
-Nzg1NzY1NjQ3MzU1NDU3NTU2NjQ2MTEzMzYzMjc1MzYzMjc3NTUzODU0MzQ1Nzc0
-MjY2NTg5NDM0NjUxNDI0NDQ2NzY1NjQ2NzQ1MTIyNDU2MzU0MjE0MzI0NjY0NTU3
-NDU4NzQzNjc0NDQ2ODYzNDUyMjIyNDIzNjczNTQ0PDgzLzU0NjIxMzQ3NzQ0NTMy
-MzU2Nzo6NTc5OTY5NzQzNDU0NTIzMzQ0NDI0MzM3Nzc2NTU1NjY2NjM3NzQ2Njc1
-MzY1NjU2NTQ1NTY2Nzg2NDQ4NjQ0NTQ2NjY1NTMyNDc3NjY1NTU4NTM3NzIxNTg3
-NzQ0ODs5NDQzNTg4ODQ3NTU3NTMzMzQ0MzQ0MTU2NjQ3ODk3Nzc1Nzo4ODc2Nzg4
-NTY4NDQzNDY1NTg2NTUxMzIzODQ0NDY1ODczNDY3Ojk5Njg4ODg2ODg2OD5Obl9H
-SERVcmRBQTw9ODw+Oz07PVBwUUA5PTs7PEBAPEBEPjlAOz1APj9AQD1BQD9APT5C
-Qj0/QkY+Pj9BQz88Pj9DPz49QkA/O0JBQD06QkRAPkE4PUBBQEVCQkFFRERFS0RF
-RkNFQ0JCPz8/QkVERERBREJFRUA9RkRGRkFESEFDRklFRUpLSkdIQkJGSEtMTlBM
-SkpOVExJTFmqytXb4eTm5+np7OtMSEZERD5FRkZHR0Q/Q0hHRURGRkZFREREQ0dH
-SUVGQz47PDtAREA9OTo8PDk6PTk+PT9BPjc6OjxANzk7Ojw/Pj0/PDk6Ojs4Ojs9
-Qz06ODc9Ozw8Oz4+PDs7QD07ODs/OTk9Pjw6ODY7Ojg3MzM7Ozo2OTc7OTg6Ozk7
-PTs5ODw9Ojw6NzY0OTk5ODw5Njg7ODpBPT08Pzw9Ozk3Njk3OD06ODc4ODw7PDs1
-NzY3Nzg3Nzc4Ojo2Njg3OTc3Nzk8OTs5ODg4ODczODg3Njk3NjY5Nzk5OzMyMjU3
-Njk4NTQ0NjY7ODg7ODY5OTlBPTk4NzU2NDUzNjY4NTY2Njc2NTQwNTc2NDY2Nzc0
-NTY4NzU1NTY3NDk6ODk0NjQ3NzUzMzI0Nzk3Njg1NTQ1NjY4NjY0NDU4NzU0NDMz
-ODg6OzY4ODIzNTczMzQyMTI1Ojg3ODVGOTIyNTUzMDIyMDIyNTc2NTQ3OTMxMDEy
-MTc3NDc0MzEyMzUxNDUvMjU3NDQzODM0Nzc0MjM1NjUzMDM1NDQ0MzMxMjQyNTE0
-ODY3NzIzMTIxMzY0NDM1NDc3NDc4MjM0MTQzNTQ0MzAxMTAxMjYzMjIzNjYzMzQ0
-MTMzMjQ0NzA0NDY2NTU0NDYwMjQ0MzIzMTM9NTgzNTU0MzU3NjU2MjIxMzExMjMy
-MzY0MjE3MzE0Mzc1MTQ2ODQzMDM0NTYzMTEzMS8yMzQxMjMyMTI2NDU0NDU2NTQ3
-NzU1Njc0NTc1NTU1NjYzNTY2NzQyNTIyNjY0MzM0NTMzMzQ0NDYzMjM1MzI1MzIz
-MjQ2Ojg3ODgzODY1NjY2ODgzMzIyNTY1NDMzNjc3NTIzNDI1ODczNDQ2NjQzNDU0
-NzY2OTc2Nzk4NTQ0Njc0NDM1NjM4ODg2NDg5Nzc0NDQ0NTM3NjU1NDc0NzQ1NDYz
-MzU0NDQ1MjI2NDQ0Mzc4MzY2ODU2MjI1MzEzNDU0NTU1NTQ0NDMvMjUyMzEyNTE0
-MjM2NjQ0Nzc5NTU0MjI0NjQzNTYzNTU4Ozc4Njg4ODU1NDQ2NTUyMTU1ODc2NTAz
-MjUyNTI0NjA1NDU2NzU1OzY1Nzc1MjMyMjQ1NTY1Njg3Njc0MzM1NDM1MzIyNDQz
-NTI0MzE4OTk2NjU2ODE0OTg3ODY1NjU6Nzg1NDU1NDM0Njc5NjYzMTI0Mzc0NTY1
-MzU1NTQ3NjY1NTY5OTQ3NjIzNDQ6Njc1NDM3ODc4NzM0Ozk3NDg1NDU3ODg1NDQ2
-NTQ1NTU1NjY1NDY0MzU1ODo5Njg3NjQzNTQ1NjU1ODU3NjU0MzM1NTU3NTY3Njcz
-Nzc1Njg2NTY3NTY4Nzo5OTpAQl1zbUdCTGV4YEE/PT5CQD07Oj1FW1hHPTo6PT87
-PD0/PDw8Ojw7Oj49OD4+Pz1BQUFAPjs8Pj4+PkBAQD49Oz49PD08QD1CQz06Ojw9
-O0FCQT89QEZCQkM/PT9BREhFREVDQEdGR0RERkM/PUJEREBESEJBQUVJRkZFQkVD
-Q0ZDQUZIRUpHQ0VGR01FQEZGTUhKUE1KS1RVSEpPWrDK1Nzg5Obo6err60pGSkdI
-SUJJR0NLR0pDRklKRkRIRUZEQkRFQUJGQEE+QkQ9Oz5BPj87Ojs/Oj86PUBBRENC
-Qj07PT09PD09OkE9PDs7Oj8+QT86PT08OTo3QEQ/Pz07Nzg9Ozo7Ojc6ODs2OTo7
-Ozw5ODc8Ojk8OTg5PTs4Ozw5Nzk6Oz9AQkE/PTg5ODk5Oj07Ojo/Ojk7ODs/NDg6
-OTo5OTs8Ojo7Pjw7Nzg4Ojk7PTw+OzU4Njc4Nzg3OjY4OTc6NTQ3NzY3Njc5Ojw9
-Pjo2PTg0NTQ4Nzc0NTQ2NjU0MzY5NTc3Njc2NzQ1OTg1NTc5ODc5Ojg3Nzw5ODU1
-Nzc5NzMzMjY4MzUzMjU1NDUzMTQ2NzY1NDY9OTM1NjQ0NDU4NTg3Nzc0MTQyMzU0
-MjQ2NTU4ODY1NjIyNDQ5NzY0MTUzNDI0Nzc1NTk3MjIzMjExLjI0MjAzMzE2Mzc4
-NDQ0MzUzNDQ0NDI3Njc3NTIxMzUvMzQxMzM0MzIzNzc3NzM1NDU2MDI0NTMzMjE1
-NTUzMjEzMi8xMDIxMDQ0NDoyNDY2MzEzNDQ1NjMzMjQ0NDM1NTU2NDQ1Nzg2NzY0
-MjY2MjI1NzMzMzU3NTU1MzMyMjQ3NzU1NDQ1NTU1OTQzMzYzMy80MzY1NDAwMjY1
-NDMzMzMzNTc4ODc6OzY1MzE0NTQ1OTc4NDMxMzQzNTM0MjU2MjI0NzIzMjAxMjU3
-NjAzNjYxMTM2NzQ1NDU1NDU0NDM2NjU2NjozMjY0MzQ0NDY3NzU1MjQ3ODc0NDU0
-NTQ1NTMxMTAxMjI2MzI1ODY2NjU1NjQzNjc2NzY1MzQ2NDU2NTU1NTU1NjYzMzMy
-NTY0Njc2MjM0OTk5NTU2NzYxMTU5MTA2NTU1NTg3NjY3ODU4NTMyNDQ3Nzc0Njg2
-NTc1ODY0NzU2NTc2Njc4NDM1NzY1NjMxMjI1Njg2NDk2NDc0MzI2Mjk4ODc4ODMx
-MzQyNTY0Ojc1NjU0NjUyMTM1NjI1MzQ1NTU1NTIyNDU3NDUxMjYyNTQxMjI1MjM3
-MzQ4NDc4Ozc1MzU3NTUyNDY2NTYzNDQ0MjE1NjIvMjg2NjY2NDQzMjA0NDYzNTM3
-NDUyNC81MzUzMjIvLjM0NDY2NTU1Njg1NjY0NTU4OTc3ODc2Ojg4NTc6ODY1MjM2
-NjY4NTM1NjU2NjY1NTc2LzIzMjU3NzQ1MzQ1OTk5OTc3NTU3OTc1NTc4NTY4NzU2
-OTk6ODk2NTY3NjY3NTg0MzQ4MjM0NDIzNTQzNDQ0NDU2NTU3NjM2Nzg4OTYzNDQ0
-MDQ2Ozc7Skc0NTU0NTIzNDU6ODg1NDQ0NDU2Njg2Nzk3Nzg3MzY3ODpMZ4OMYGOG
-hGVDP0FBOjw8PDs7RGVeSj8+Pjs+QDw8PT88PD8/PUFAPD1DPTw/QkRAQD89Ojo6
-Ojs/PkA9PT0/Pj4+Ozw+Q0FDPj4/QEA9QEFBREFCRUI+QkZJQT1DQkNCQkBBP0BC
-QUFDRUlFRERDP0FBSERAQ0JEQ0E+QkVDQkNCRkhISUdCQEM+SUpKR0hMSkhOTk1N
-Tk9RT0desMfT3OHk5ujp6uzrUk9ITlFRRkpISkRGSElHQT9DQEJFRkRAQ0VEQUFD
-RD89PT8/P0I+Qjs7PT88Pjs+OkA/QT09Oz0+PDw+QT5AQT08QEQ7PDs/Pjw6OD9C
-PD8/PDs9QEE6Oj8+Ojo1NjgyNzo6OTY6Ozo5PT46Ojs4Ojg5PDs7PDw6NjY5QD4/
-PTw6OTo8Ojw+PT0+Oz49NjY3MjY5Ojg4PDo6Nzs8Ojc4Njk3OTs8Ojk4Ojk8Ojc4
-Njg3OTw5Nzc0NjU5OjY3ODY7ODc3OjU2NTg1Ojo+QDc5PDUxMjYyNTc6Ojo2NTQ1
-NjQ1NDYzMzc3NTQ3OjU7Nzk4Nzc2NzU3Ozg4NjY0NjQ2PTc1NjU2Nzk4MzM2NDg3
-ODw7NDI0MzIzMzY5NjcyMjQ2MzIyNTc1NTUyNDU1Nzk4NTczNjEwMzU1NDM0NjU0
-MzQyMzUzNC8wNjg0Nzg1NDM1MzQ0NTk1NTI1NTIyMzc1OTY4NDU1MTQ0MzM0ODk2
-NDEwMTQ0NDY2NjU2NjUzNzMzMzEwNDAzNDU1NzMyMjc4NTUzMTQxMDIzMzc0NDE0
-MzQ2MzM0MzQ2NDMzMzExMDE0NTU2NzQyLzQ4OjY4NzU1NjY0Nkg3MDQzNTYzNjc4
-OjQ1MzYzMTIwMDMwLzA1NTU0NjA0MjI2NjU1NTY0MjAyNDY0Mjc5Njs2NTU0NjUz
-MjM0NDAzNTM0MzQyNDQ3NzIzMzMyMjEyNDY2NjIzNTU2MzU2NTQ1NjU0NDEzNTIz
-NTY2NDIzMTY2ODQ0NzIzMzE0NDEwMjI2NjYzMjE0NzIzNTQ0NjQzNDI2NjMzNDc4
-Ojk2NS8zNjQ0NTQzNDc1NDY3ODg3NDQxNDMzMjI2NTg2OTU4Nzo2NTM1NTQ3NTU1
-NDY1ODYzMjI0NTU1NTU6NzY4NTY4OTk0NDQ0NzY0MzU4NzY4NzY1NTM0Nzg1NDM0
-MzE2Njg3NTM0MzIyNDM0NTk6Njg3ODU2NDU0MjAzNjI0NTY3NzgyMTQ1MjQ0MzM0
-MTI1NjMzNDIzNDQ3NzIxMzUzMjEyNDU3Njc3NDQ0NzI1ODI4NjQ3NTU0MzQ2NTM1
-NjU0NDg5NjQ1NDIzNDIyMTQ0NTY2NDUzMzU0OjY1NjYyMTU1NDY2NTM1MjY3NTQz
-MjM0NjQ0NDc4NjQ1ODQ2Ojc5Nzc2NjU3Nzc0OTc0NDY2NTY4OTg2MzU1Mzg3NDQ0
-NDk2NjszMzY3Njo4NjU1Njc2NDU3NzY3Nzg4Qjk2NDQyMzY1MzI0NTIzOTc3NTY1
-OTY1MTM2Njc4Nzc5ODY2ODg2OTg2NTM1NTI3Q0VIPzc3NTU0MzM2NTU0Njg3NjQ0
-MjM1NDM0NTg3OTg5Nzk7QWiJnoWUoaWYUT87PDs6PD4/OjxPY1ZIQj49OTs7Ozk9
-QUE/PTw+PEBAPz8+QkE/QEA/Pjw+QD0+QUA9PD47QEA9QkFFR0RCQzo9REA7O0FB
-Q0I9OEJHREFFQUJCRkU/QT49Pj9GQD49Q0hISkZCQkFAQD5CREdGR0hESUVAREdJ
-RkVEQklIQEJFQ0lARUtGTUhMTUlFUFVUTUxJSWm0ydPb4OXn5+rq7OtPSkhDTEtL
-SUZOSkdHQkVETUM/QENIR0NBPT49Q0E/Oz9BPENDQT4+Pj0+QD8+Qz1APD5AOzw+
-Qj08QTw+PT49Pz09Pz1APDk7Pj9DST88QT09PT8+PD47PTs7OTo7PDs2Ozo4NTg8
-ODk4PEE6ODc3Ojo6Nzo5Njk7Ozg6Nzc7Ojc5OTs4ODs3OTs5OTs3NzY2NTo5Ojw4
-ODo4ODo5NzY5Nzg5ODo9OTo3Njo9Nzk4NDk3NzY1Njk2ODc3Nzc3NTU3NzY1Nzk6
-NjY7PD05Ozk3NjI3NTo2Njc1NzYyMzU1Ojc3NTc0ODY0Nzc2OTY4Ojg2ODMzNDc3
-NjY3NDM2Njg5OjQ3Njg0NDY2NDI3NjY7OTg1NzQ2NzY3NTc0MzM2NjcyNTY3MzQz
-NDU4QjU1Njc3MzU0Njc1NTU3OTQyMTMzMzY2NTMzNDIzNDU1MzQ5ODk4OTk2NjU0
-Njc0MS82NDU1NjQ2MzQ4MzMzNjU3NTU2MTMyNDUyNDY3NTk2MzI0NjY1NTg3NDEz
-Njc1NzQ1NjU5NzQ0MzI2MDAxNDM1NTEzMjMyNjMwMDEyLzIyNjQvNDM4NDU0NDU1
-MjI1NjY3NDMxNjY2XTk1MzQ0MjEzNDY1NTUwMDUyNDQyMTAyMzM0MzE2ODAwNDIy
-NDQxMTMyOTMzNTc3ODg3ODc2NDQyMzYzNDYyNDQwMjAzNTQyMTEzMjI0MjQ0NDM0
-NDQzMzY7ODY3MzQ1NDMxNDU0MjMzMTExMjM4LzQ2MzE1NjMzMzExNjQ0MTMzMjQz
-MTE0NjMyMzQyNjM0NTI2NzY2NTMzNjY2NDQzNzk4NTM1MjEyMjM0Mzg3NTc1NDMx
-MzM1Njg3NjY0NTk6ODY1Njc1NTY4NTY3NjU0NDMyNTU1NDc2Njk5NDU1Nzc3NDc5
-NDc5OTc2NTUzNzIvMzUzMjU2NDk0NjQ1NzQ2NzQ0NTY0NDU1NDU2NjY1NzM0MzQz
-NjY1NTM1NzU1NjY4Njo4NTU2NTQyNDQzMTQ0NDc0MjEyMzIyMzEyMzQ0NDIzNDY3
-NzY1MzQzNjY1OTc2NjQzMzQzMjQxNTY5NTM0MTU4NDM0NTU0NTU2NDE0MzU4NjI1
-Njg4NDc0NTg4OTczMzg2NTQ2NDQ2NjQzNDY3MjAyNzQzNDMyNTg4NTY2ODY0Nzk0
-Mzg2MzQxMjI3NDQ2Nzw3NTY2MjIzMTIzNjc2NTQ5OTY4NzY2NDQ1NDQ3Nzg5ODU2
-Njg4NTU1NDQyMjQ0NDQ0Nzc3ODc5NjY4Njc5NjQxMTMzNTs4NDY4ODc2MzIxMzU4
-Nj5FSUg3ODg2ODo1NDYyNDQ0NTU2MjQzNjU2NzgzODY2ODg1NjxTf5STjKafil8/
-PD9DQkI+PD0/VGJeS0M7QD9APT5CPz4/Pjw5QD49Ozs7PTo6PkFDPj8+PkBBREE9
-PDw8PTs8PDxDRUBAPTw+QD08PD1DQUNGQz1BRUI+Rj4/P0NIR0E+Pz5CQUJEQjtF
-Q0VDQ0JBPj5APkJERkREQ0VHR0hCQEZIRk1JRUFHSUZHQ0RFRkZKT01MS1pOS1NW
-T0dGYrXJ1d3h5OXn6urs7E1DS0dLR0dNSUtJSkdLSUpFSUJDQUJDPj0/Q0U+Ozo5
-QEE/QD9EPjw/Pj1ARkI8Pz09Pj08Ozo8PEI+Oj08PTxAPTk5Qj09O0A8Q0M8PzxD
-QkA8Ozs/PT07PTk6Ozk5PDs6PDk7PDg3NDs4ODw6NjU3Ojg3ODg5ODY4NTM1Nzg6
-NTc8OTc8Ojo7Ozk9OTg3Nzc6OTk3Njg5Nzk5OTs6OTs4ODw7Ozs6PD07ODc4Njg0
-MjY3Njg2NjM5ODU1NzgzNTQ1NjU0NjY2Njc4Ozk6NzY6NjMyNzU1NTc5OTk1MTM2
-Nzg1NzczNjY1OjQ2NTs1NTc3PTlAQ0A+NTY4NjY1NTI1MzQ1NDUyNTUzMjQzMjUy
-NDk4NDQ3OTw2NzcyNTk1ODg2NTY1NTU2NDU1Mjg0NDQ2MzYyMjMzNDg4NTo0NTQy
-MDU1ODM2ODQ0LzExMzc0Njc2NTk2NDI0NDEzNTY0NDQ4OTYyNTM2NjQ1NTU3MjQ0
-NTQzMzMyNDIyMzUzMjI1MjU4Nzc2NjIyNTY3NzY0MzU1NTM0MjE0OTQ0ODQ2MTMy
-MTAyMzMyMzM1NzY0MjQzNzMzNjUyMTM0MjM0NDM0MTI1NjY0NDMzMzMyMjQ2NDU1
-MjUzMzExNDMwMzExNDIuMDIzMjAzNjUxNTEzMTQ1MzAyNTY4NTU1NTg1NTMzMjUz
-NTU1NDQxNDQ0MTAyMzIzMjIxNTQ1NTQyMTI1MzI5NTMzNTQzNTI1MzY0NDIzNTMz
-MzU1MzM2NjY3NTg3NTUzMzU2MzIzMDQzMzQyNDU0MDIzNTQ0NjQzMzQ2NzYzMTQ0
-NjY4NTQ2NDc2NzUzMjg8NzY4NTIzNDIwMTM0MzM2NTM1Nzc5NjY5ODU0Nzg2NDY1
-NDMyMjQ1NjM1Nzc6OzY1Njg4OjY5NjQzMjQ1NjY4NzU2ODU3OTg4NTc3Nzk0NTI1
-MjEyNDQzNjc0Nzc2MDE2NjY0NjU5NjQ0Mzo3MzM1Njc3NjIzNjY0Nzk1NDUyNjQ0
-MzMzODk2NDM0MTMzNjMxMjQ1Nzc3NTUzNDQyMDQ3Nzc2NzI1OTg4NTgzMzIyNTQ3
-OTY7NjQ0NTc3NTY1NzUyMzIxMzQ1NzYzNDY2MzU5OTg3NjY1NjU0MzM1NDU3NDQz
-MzY4NjI0NjY5Ojk1NTc0NTY2Njc3Nzg2NTYzOTYyNjc5NTg5NDI0NTc1NDU0NTcz
-NzY2NjExNzs5NzQ1NDY2ODc4Nzg5ODk5Nzg6NjI0NDY1NTk2NjYyNDY0MTMyNjc2
-ODY0NTU2NDU1Njk3Nzo4NjM1MzM2Njg7Q05GNDQ1NjY1Nzg3NDM1NDYzMzU1MjU2
-NTU3OTg6OTg3OTg4QmiCiaCYnYtbQT4+PEI+Pjw9Pk5vb09BQUdBRT07PT87Oz49
-PkM9Ozk+Pjo7QUA+QUFCQz49OT5APD5DQT09QD48PD5CRkQ8PT4+Pj4+QkJDQkFC
-REFAQD9DQkNDRUZGREU+QERIQkA+QENHRkNFRT4/QEhEQ0VHRUZDRUFBREZGR0RE
-S0tLSEFDR0hNSkhJS0tOTU1PTEhOTUtNRUhmtcvU3eHl5+jp6uvrRUhLSklGTElI
-SUlLUVBOS0ZIQj5BRkNFQkA/QkNANzc9QDxCQEFBQjtBPTw9OD8+RT4+PT49QD9B
-QD4+Pzo/QEA+PTs6Ojg4Pzo5Oz0+PDs6QT5APT08OTU9PTo7PTg3Nzg3OD0+PTg9
-QTo4OTUzMzg4NzY2Njs6Ojg2Njg3NzU1ODY4OztAPTs9ODY3Nzc5PT46OTg9Nzg6
-PDk6Nzs8OTg6Ozo6OTY2Nzc5ODc3Nz03ODU3Nzg2NzQ2ODc3NTg2ODc0NzQ3NTY4
-Nzc5NTc2NTw4NDMzNzg6NjU1ODMzMTQzNDY4NzMzNjY2Nzc4Ozg4NDQ3PEA2NDU5
-Njs4NTc2ODcyNz80NDQxNDY0NDUxNDQ2NjQ9NzUzSjYyNTU4NjU2OTY4NDc5Njg4
-Nzg0MDIzMzU7PjQzNDY1NDQ1NDY3NzY2NzY5OTtQNTEwMjYzNDk0MjQzMzY1MTIy
-NDY3NTIyMzc1NDU0NzY4Ojo1NzY3NTY0NDMwMTIyNDUzLzIzMzMzNDQzNDg5NTI1
-NTQ0NTkyMTE1NjU2MjEyOTYzNDU3MjM2NTM6NjU2ODY3OTg4OTM2NTQzNTQzMzM1
-NDU2MzExMTEyNDIvMTI0MjQyNTQ0MjIzMTI1NTQwNDMzNjY0NTs4MjIxMDI0MzIx
-NTI3NTU0NzQ0NTUwNDE0NjY1NDQ0MjMxNTc2NzQ1NjQ8MzM2NDEyMTMwNTYyMjUz
-NDMzNTQ2NTYyMTM0NzU1NTIzODYyLzIxNTQ1MzIyMjU1ODg2NzY1MjEyMDE0MjMw
-MTQ0NDM0NTIyMTM1NTQ1NTM0NzQyMjMyMjQ1NDU1NzQ3ODMzNjc6OzQ1MjM0NDg0
-NjU0RDQ0Nzc2NDUzLjEyNjU0NDY4NzY0MzU7NDI1OTYzMTQ5PDs0NTU0Ojg5Njk3
-NDQ1Nzc6NTY1ODc2MzM0NDU3NDQ0Nzc1NzU1MzUyMzU1ODc2MzIzNTQ0NDQ1NDQ1
-NDczMzY5NzQ0MzE1MjExNDU1Njg0NDQ0NDY1MzM1NjM0NTU5PTQ1MjE1NDQ2NTQ1
-MzU3NjU0NzQ3Njk1NTY1Mzg0MjU1OTU4Njg3Nzc1NzY2NzczMjM0MjE2NTU3NjUz
-MzQ1NjQ1NjM1OTQ2MzMxMDM2NTQ0NTY2NTY4NzQ2NTU2NjQxMzU2OTU0NjU3ODU0
-Njc3NTY3Njg4ODc0NTU0MTQ4Oz04NzU1Njg5NzU0NTY9ODY2NjY4NzU0MzU3NzY2
-NDUzNDU3Nzc1NjU0MTQ2MjY3NDIzNDM2NTMzMzU2ODk1MjY2MjM0NDQ0NjM1NU5J
-Rj86NTU1NTMyMzU2Njc0MzI0Mzc2Nzg2NDc6OTw+PDk2ODthdHRbdntvUEI7Ozo9
-PT09PDpAXXBdQUBAP0NCRT49Ozw6Pj1AQT88P0E+Pj4+Q0dAQ0NCPzxAQDs4Ozs6
-Pz8/Pjw8Pz4+PkI8OTs+Pj9AQj07O0A9QEJFQUJFQ0JFQ0NER0NCQ0NEQkFHRENB
-QEVEPjlIRUVCQkJCR0U/QkRFRUxHSEhCSEtOSkdGR0dKSE5RSUxSUFBSVlBKSk1M
-SWy4ytXd4ebn6Orq6+tIQUVDRU1OTUhKRkRKSUlKSEVGQj5CQEVDQEQ/Pz5AQT9A
-PTw+PUJBPz0+PT47Ojk4Ojo+Oz8+PTs6PDk6Ojk/QT88Ozs6PTs7ODs8Oz1FPjo6
-Ozs8OTs5PEE9OTk4NzQ5OT08OT9APj07PDw3Nj03NjY4Nzg4PD8/OTk6Ojg3OTg4
-ODc2Mzw4ODg4OTk6Ozg5OTk5PTk5OTg7Ozg5Pjw/PDo8Pjk5Ozk5Nzs4ODk3Ojs6
-NDY4OTc4NTI1ODk5NDQ2Nzk6OjU4Ojc4PDY8ODU0P1g6NDU2NTYxNTAyNjU1NjU4
-Njk2MzQzNDU3NTY1NjM1MjU0MjQ2NDQzMjM0NTk2NTU4PDQ3ND8zNjU2MTIwNTg5
-ND06Ojc5OTY0NzQxNzgzNDA3MjQ2NTg1MTAyMDE0OTg3ODQ4MzUzNTY5OTk1NTEz
-NDY4NjY3MzQzNTEzMjQ2NjM1MjUzMjQ1MzQ1NzQ0MjY2NDQwMjU2NDQ1NDc6NzIy
-NTAxMjY0NDExMjIxMTM0Njo2ODg1NzMyNzQ0NTU0ODU2NzQyLy8yMTQ0ODQzNjM0
-NTU1NzI1NjY3NDYzMDI1NjIzNDU2NzY4ODY5NzIuMjQ0Njg0NTU1MzUzNDAyMjQ0
-MzM0MzIwMzE1NjUyMjQ2MzY0MzEyMjY2NjUzMzU1NTY0NDk1MzMyNDIxMzUzMjM0
-NDY4MzI2Njw0MjM0NDM1MDM1MC4wMjU1MTM1NDY1NjU2NjEzNTIzMzQ1MzQ0MzM0
-NDQ1ODg3NjQ1MzY0MDI2NzUxNDUyNTgzMzMzMzIyMS8xNTU0MzI2ODY5ODU1NTUy
-MjE1MzM1NTY4OjY3NzU2MzU0MzY1NDc3NjY5NDI2Njc0MTA5MjEyNDQ2Njg0NTU3
-Njg6NjQzNjQ0Njg6Ojg3Nzg3Njg3NjUzNDY2NTMzNTk5NzY4OjU0NTc7Nzg5ODU2
-OTc3NDU0NzQ0Nzg5NjU0NzQ1NDUzMzIyNzUzNTU1NzQ0NTQ2MTM2MjIzNDU0NDUy
-MTU4NTQzNDYzNzY2MTEyMDY1NjczNDMzMjQzMzY2NjM1NDQ1NTU5NTQ3NTc0NDc1
-NDU4OTY1ODc0NzQ0NDU2MzM0MTY1NTQxNjY0NjQ1NTQxMTMzMTM0MzMzMzQ3NTg2
-NzIwMzQzMzY0MzUzNzY0MzQ1NjM0NTk3Nzc2ODg2NTU2OTc1MTM0NTY5Nzg2MzU5
-NjY3OjU0NDU1NTc3NzY1NjMyNDQ4OTY0NjY5NjYzNEM+ODU5NTU0NTM0NjY1OTg1
-NjQ0NDg4ODU0NzM1MjQ2NzY0MzRLa2FUOTc6MzMzMzI0NDU4Nzc1NTQ1NjQ2Nz5X
-UklIRDs5OTs+SYGEbkVGR0FAQD46PT47PD1AQV90aUY+Pj4+Ozo/QkJFPjw+Pzw6
-O0FFPjtCPD07PT0+PT4/QD49OTw7OzxAPD4/PDw8PUNCQDw/QTo9QT0+QkE7PTxC
-QUREQkJCQkNFPz5DQUJDQENDRURDP0BBRDw8PkFBQD89QURFRUZDQEZJSEdGQEBD
-RkdJTElLRkdKTEpKS09QTVBPV1JRUUhGg7PL1N3h5Ofo6urr605QSU1LS0tOSEpI
-SkpMT01LSkpIR0pGRkdCQENBRERCQEI+PD48QEJCPDw9Pzs8OTo8PTw/Pjs4Ozo3
-OD07PD47QUNCPTs8OT0/PTs8Oz42Njk2ODY5Pzo+QkE7NzU4Pjw9OTw/PD46Nzw7
-NTY7PDk5Nzc3Ozg6PDc5OTk8OTk3OTk4NjY5Mzc6OT08Ozk9Pjs3Njg4PT03Pzo/
-OTo3OTs+Pjs6PDs6Nzc4OTU6OTk7Ojk5Ojo3OjgzNDQ6OTk4Nzc4PTo8Ozk4ODQ3
-OTY3ODg6OzU2OTc3ODg0ODQ0NTg2NDMzNjY3OjU0Njg1NjM2NDA2MzMyNDY4NTEz
-NTUyNTQ2NTk6MjI0OzA2ODk3PDs5NzU1NzY3NDc4OTc0MzYzNjY0NDM5NTY3NjQy
-MTE3NzQ4NDM2NTE0NTs4MjQ0NjU3NjU0NTYzMzU0MzMyNjU2MTAzMjMxNTMzMzU2
-NDk0Ni8yNjIxNjU0MzUzNjE0MzMzMzMwMjQwMDMxMTQ2NjY0MTQ3MDQ0NzY2Njc2
-NzgzNDczMTEzNzUzLy80MzM0NzU1NzU1NTU1ODczNTY2MTQ2MjIzNTQzNzc2NjY1
-NTIzNzg0MzMzMzY0MjQ1NjQzMDYzNjQzNTEyNzQwMTI1NDc2NDY0Nzc2NDUyMjc0
-NDQyMzI1MzM4NTY3NDIzMTQxMjE0MjQ1NTc1NDE1MjMwLzI2NDM0LzIxNzY0MjI5
-NDUzMjQ0NDMzNjU4OTY0MTE2NzU2NDc4Njc7OTg3NTI1NDY0MjI1NDU0MjI2NTMx
-MzMzMDIzNDQzMTQ0NDY0NTUzNjU1NTM0NDQxMjE0NjQ1OTc1NTU1NDc4NjQ1NDU1
-ODc1OTc2NDU0MzY4NTM2Nzg4Nzk4NjU2NDU1Njc0NDQ2ODc2NzY8ODUzNTY2NTQ0
-NDQ0MjQ0MzU0MzU3NTUzNDU2NjM0NTM4OTg2MzQ1OTY0NjY2NDQ1NTY5NjMyNTMx
-NDY1NDc2NDEzNjU2NDUxMzMyNDIyNDMxMzQ2NjY1Njc1NDMxLy8vNDU1MzQ0NzMy
-NDQ0MjY1NzQ2MzQ2Nzg3ODY3NTY2MzQ0NDU1NzQyMzMzNzg3Mzc4OTM1NTQ0NDY2
-Njc4NjQ0NjQzNjU0Njc5NTM2NjQ1NzYzNTY0NDU2ODw3Nzc2Njg6NTM0ODk0MzY4
-ODU2NjQ0NDQ2NzY3NjM2MjIzMzM4Nzk6ODc4ODc3NTU0NTM4Njk2NDY2Nzk3Ojc1
-NjU3NDMyNjYzNTY0NDg2NDU1NTk4Nzc4NTY3ODc2Nzg0NjU1NTQ0NDQ4RG5wY0w6
-NzU0MTUzNzQ1Njo2Njc2NTg2MjNCaIR0YWVTST06QEFxhmxEPD47PUFAPj87Njo+
-Pz5OZmlHP0E/Pz47Ojw/PD88PTs9PT4/PDw/Ozo6Ojs9Pj47PT09Ojg4Ojg6PT0+
-RUFAQD4+REE9O0JBPkBCQkM+QENCRURDREVBPUBFREdBPUFAQEVBQkNBQUBCRENB
-QENAQD89Pj5AQT5DRkRBP0NEPkRDQ0FGSkpGR0xQR0dISkxNTE1LS05LUlBRS1GP
-r8nV3eHk5ujp6uvqTk9ITEhKS0dJT0tNSUlQSUhJSEZHRENFSkpCQUdBQ0BFRkdA
-PD9APUJAQERDQkA+PDs5PUJBQj07PD07OTk8O0M8O0A8Pj06PD86Nzk+QUA6Nzc6
-PDw4P0JAOzw6OTg5PDg6PTo4QD8+Nzk6NTY7PDs6PDpBODY2ODs+Pjg2ODc3Nzg4
-OUI9Pzs6Nzc6Ojs9PD45ODo8Ozw7NjU3Nzk4OTo6Ojc3Ojo5ODY4ODc+Pjs4Njo3
-ODk1OTg0NjU2Njc4Njk4Nzo8NjUzNTU3OTc1NDM1Nzc3Nzc8OTo6MjAzNTY0NzY1
-NTc4NjU0NjYxNTQ3MzUzMTIyNDQzOjc1MjIyNDY3OTc2MzM1NjU1NTY4ODU0RDM6
-NzQ3NjU5NzMxMjI0NDU2Mzc0NjQ1MzQzNTY3Njg4NTY2NTQ0NjYzMzU0NDU1NTYz
-Nzg0NTg0NTY3NjY2NTIwLzExMjQzNDYyNDU0ODYyNTY2NTMzODQ1NjUzMzczMzY2
-NzgwMzQ2NzY0NDU5NzY5Mzc1Nj01NDc3Nzg8ODg4NjY2ODM2MzQzMzUyNTQyMzQy
-MjU5NjYyNDI1NDUyMTM1NTM0NzY1MjI0MjU3OTcyNDMzNDIzNTQ2Nzc1NzYyNDY2
-NDMxMTAzMjM1MzM1NTU3NTIxNDQzMDIzNDQzNDY0NTU1MzU4MzM4NDIyNzUyMTIz
-MzU2NjM0MzQzNzExMDExMjAyNjU1NDQyNTMxMzUzNDIyNjU2Njc5NzY0NjU0MzI0
-MTI1NjY2MzQwMjM3Njc3NzY1NDAzNjMzNTMyMzI0MjQ0NTY2NDYyNDU0NTc4MzIz
-MDAzNTQ1NjM1NzU3NDQyNTY3NjMyMzI2Njc4ODgyMTc3NDY3NTM3MzQ1MzIyMzU0
-NjU4NTQ1Nzc2NjY1NzY1Njw5Nzk5ODQ4NDQ1NTY1NDY1NDM1NTQ0MzY2MzU2NTM0
-Njc5NjU2MjE1NjM2NzYzMzU0NjU0NjQzMzQxMjE0MTMxMTU3NS8xNTY0NTMyNDQ0
-MzY1NDMzMzMyNDQ1OjQxNDU4MzQ0MzUyMTU2MzY4OjU2NjYzMjU3NjU1NjI3MzQ0
-NjM1NjU1NDU0MzM4ODc4NTQ0NzY2NjY3OTg2NDY2MzMxMjY1Nzg1MzM0MzQ0NTU1
-NjQ1Nzg4OzgzNjg0NTc3NzY3NjQzMzQ0NTY4Ojg3ODY1NjY2NzY2ODU5OzMzNTY2
-NjU2ODw3NjU0NDQ1PDo2NTU3NTQ4NjYyMzIyMjIyNzQ8MzU7OTg4ODQ4NzY0Nzc5
-NDc3NTI2NzY1NTQ0MjQ1Oj9dgIxiPTY2NDQzNTc3NTQzNDU1Nzc1Njc0Nl57h3Zw
-g3xUSD0/Xo55T0I6Ozs6OT09PTk6OTw7RldoVUA8PD08PkFAOj0/Pjo6Ozo8PD08
-Pjs3Ozw9Pj07Oz48Ojo8PDc4Pj4/Pj5AQj9CP0BAQTs3PT5FQzw+Pj1AP0JCPkBB
-QkBCQ0E/PT9APz49PD1AQUE+PEFDQUVEQDs/QEFEPz8+QkRDQUZFQ0hGRzxBREdE
-RkdIRklNSUVLS0xOUUxQUlNRTFVPTn+yydXe4uTn6enp6+tMSUdDSUlLSUlIR0VJ
-RklFSkNGRUpLSUdFR0I/Rkg+PUZGQUA+Qj89QURBPkI+Pz09OTU6P0BCSEVCPj1D
-PD06PUA/OTs8O0I9Ozw3PDo6Ojo6OTk7PEA8PT08Ozs8Pjo7NzY8PTo5OTk7Nzg4
-ODw7Pjk9OTw5Nzo1OzczNjg3Njg4OT07OTc4OTo6ODs9Pzw7OT06PT07OEA6OTc2
-OTk4OEE5OTg5PDo4Ojk2NzU6ODM1Nzc3ODg6Ojs2Njc5OTQ1MzY3Njg4ODY1NTU3
-NjczODY4OTgzNjc3NjUzMjI6ODY0NTU1MzQ1NTU4Nzc2ODg3Ojc5NjQ0NEJUQjw0
-NTQzMzU2ODo0NDM2NDI1ODIxLzE4ODU1ODYyNDM2NjcyMjAyMjU5NjM1NTU3NTc3
-Njc4NTc4ODg4NDQ0MjQ0NTY0Njg3NjM0NTMzMzU1ODY3OTs0MjM0MzE0NDQ1NjY3
-MzY0Ly4wMjU1NTk0NDIzNDQxNTQ0Njc6NjIzNjk1NTU2OTY3NDEyNDEyNDM0NDQz
-ODo5OTk4NTQzNTYxMTMyMjUxMjQxLzIyMjUzNTgzNTA0ODY0MzY4Ojo4NjUxMTQ1
-Nzk4NzY1NDU1Njc1NTg1NTU1NTY2Nzc1MDEyNDM3NTQzMDEwMS8yNTIxNDU0NDEy
-MjMxMzQ0NDQ0MzU1MzQ2NDU1NTQyNDU2ODMyMTEyMTI0MzIzNjgxNDczMTA3MjQ2
-OTUwNDI0NDUxMTI3Njc1MzI0MzQzNDMyMDQ0NTY4NDI0MzQ0NjY2ODYzNDUzNDQz
-MzMzNDQ2MzM1NzQ2NTU0MjEwMTIyNjUzMTAyNDUyNjU1MjI0Njg0NTEyMjU0NTQ3
-Nzo4NTc4NDQ1NDQ4NDQ2NDUxNDU2NzY0NjUyMzc3NTY5NDY2NjY1Nzc4NTI0Njk3
-NDM1OTg2NTk1NTQyMC0wMDU1MTI1ODc2NTg2MzU4OTY1MzAyMzIzMzMzNTU6NjQ2
-MTU0NDQwNDU4NDUyMzI0NTk1NDU3NjQ1NTMzMzY4NDE0NjMyOTg3MzQ0MzMyNDI0
-NTU2NjY1NjYzMjUzMzI1NjQyNDY4OTc3ODQ1NDIyMjU2ODY1NjQ0NDE0NTM3NjU1
-Nzc3NzQ1NjQ0MzQ2LzU0NDU3OTU4NzY1NTQ3Ojg1Njc1NDM3NDc1NDQ0NDU1NDY3
-NTY3Nzc3NzY3NjY0NjU4ODc3OTY5Nzc3NjQ2NjY4NzU3ODg7Ozk2MzQ6NjQzNDU1
-NDU2MzM3Njg2Ojk2NDc0NDM1NDc6NjQ2NTY5ODY1NDU2NjQ2MzQ6XHR8iWhDOzc2
-NTU1MzU2NzU0MzM0NjU0NzdKdH+AdnKMjW1XQ1WAd0k9Pjw8QT49PDs7Ozo7OUha
-YUs9QEM8Oz0+PkFBPz46Ozk8OjlAPTk4Ozo6PD9CPkBAOzs+Pj4/PD8+PT4/PTw8
-PkA8PkE+QUM9PTo+OztDP0M9PjxCQUE/PkREQz9BQ0NEREJAPz9AQEBGQ0JEQERC
-QD0/PkZBPjw8Q0ZER0RBQ0dEQEJCRUhMTkxNTlBOTk1OTldRUFBMU1NWVU5HZrXL
-1d3h5Obo6ers61JOR0dFSklIQ01LRUdJSUhERkJHR0ZJQ0NDQz9APD89P0RFQkVB
-Pj5EQTk+PT89PT86QkNAPTs/Pz48Ozk9OTo+Oj9APT08PDY9Oj8+OjlAOzY1NTw8
-PTg5Oj1DPT5CPTs8Ojo8ODs5OTo+QD4+PTw8Njc5ODg4OTs6OTc1OTc1Nzk4OTg8
-NTU5Ojs8Ozo8Pj84Oz9BOzo6Pjw6Oj04OTs8PDw5PDk4OTs8OjU1NzY3ODUzODQ2
-Nzc+OjU3NTU3OTg4NjU3NjY5OTk4Nzg5OTc3ODk2NTY0NDU0Njs5NTY2OTQ1ODU1
-NTk1NTo5NTYzNDY2NzY1ODUzO0A3PTk1ODY2NDg2Nzc0NzQ2NDM2NzM2OTY5Mzg3
-Nzg5ODU2PDczNTIxMjU3NTc1NDk1NzU0Njg1NTk5ODs1MzI0NTYzNjg2MjM0NDM0
-NzU1MzI0NDQ0NDIyNDc3NjQyMDE2NzQ5NzU4NjIvMzI3ODUzNDQ1NjQ0MjYyMzU0
-NDM1NTU3ODg6NzQ1MTIzMzI0NzczMzMyNzY1NDQzMjQzNDUwNjIzNTUyMDE2NDMy
-MzEvMjIxMjQ3NzYyMTU2MTQ0NTY0NDEzNTU0NjY1NDY0NjU3NTI0NTY0NTU1NDI0
-MjE1NDMyNTY0MzI0NTQyMTIyMjM1ODg0NDQzNDQ1NTY2NDIzNDU1MjQ1MjU0MzQ1
-NDAwMTEyNTI0MTUzODU3MzIyMS4wNDQ1NTQwMzc2NDUyMzQ0NDM0NDQ1MjM2ODM0
-MjM1NzY5NjMzNjQ1MzI1NzUyMTI0NjMzNTg1NjQzMjIyNDYzMjYzMzMzMjU1NDQ0
-NDIwNDQ0NTU0MjMzNDEzMzY5NTEzMTA1NzcyNDY2ODg1NjY3MzY3NjkzNTU1MzM0
-NjU0MzU0MjM1NjU1Nzg3NzQ3OTg3NjQ2NjI1MzY2NDMzMjIzMjQ0Mjc5ODczMjQy
-NjM1NTM0NTU1NzQwMTMzNDU1NTYzMzYyMjIyNDY1MjI0NTM1NjQzPzUzNDU3NjMx
-NTIyNTQ0NTMzODQzMjQ3NDI0LzAzNTIxMjYzMzQ1NDM1NTM1NDQyMzY1NTg2NjQy
-MzQ2NDY2Njc1NDc3NTY2NzEyMzI1NDU0NDczNTg5OTU1MzQ3NzQzNzU2NDEzMjQ0
-OTQ1NjU2NTY1NDQ0NTM1Njg5MzI0NTc2MjM1ODY2MzU1NTM2ODc4ODU3NjMzNTI1
-Ozo4Njk3NDg1NTU3Njk3NjY0Njc2NzU2NjY4NzY3ODk3NjY0NzEyNTU0NjU2Njc2
-Njg1ODY2Nzk1NTQ1OE94e4GIV0A6NzUzMDQyMzY5NzEyNDQzNTQ3PWByZVxgcoub
-mH1mjZdwPTw/QT4/Pjo6PDs+PTk+WWtURURCQ0JBPTw7QEBBPTw+Pj8+QD47PT09
-QDw9Pjw+Ozs9PT46PD88PT48PDo7PD9BQkJAQD09QDw9Q0A9PUFBPT1AQUE/QUFB
-PUFCQD1CQkFCQz9CQj5ARUdDQDw/PD46PUJGQjw9PztCQ0NHS0hIRj1CRkVGRkdJ
-S0pRUU5RVlNOTE9WWFFSUFZUU05ntczW3ePl5+fo6uvrTE5LVk9NUUlISEdFR0NG
-SUdJREVFREQ9Qz5DRUJGQUBBREJCQUNAPDs9QEM7PD87PDw+Qjo7Pz49PTg6QT48
-Ozw5Oj07PD47PTU2Nzk7Ozk9Ojg5Ozg6Ozg5PTo8Pz06ODU8OTc6Ozc2Njo7Pj09
-PDk4PDw3NDU1Nzc3PDk8Ojk6Ozc3ODg5Nzk2OTs7Nzg6PT48Ozc3ODs9Pzk9OTk4
-OTk4OTw6ODk6Ozs6PTk4NjY4PDo6PTYzNzc6NzU2NzI2Njc2NzY3Njg3ODc3OD05
-Nzg4NzY0Nzc0NDg3OTs7OTo3NjY0NjY4NTQ1NDM0Ozc1NTQ2Njo5OTs4ODg5NzY4
-ODU2NzM3NzY3MjQ3OTc4NTY2NzU1NjY1ODg6NzY1NTI3Njc1OTo3Njg1MzEzMzc2
-ODY2NTU3ODo2NDY3Nj06NzY2NjQ0NDEyODUyNDQ1NDIzNTY1NjU1NzMxLzM1MjIy
-NDY3MzMwMjI0MTE1NTUxMjI0NDM0NTQ0NDI1MjM0Njg2MzQ4NjU1NDIyNjU1NjU3
-ODMzNjQ2NzY1NjY2MzEwMTM2NzExMjY3MzIzMzIyNjY1ODQvNDI1NTM0MTQzNzQx
-MTM0MzIyMDE2NDQ0MTU3NjU1MjEzMzE1NzMyMTQzNDM1MzY1OTQvMjU5MjI0NTI1
-NTMzNDQyMjU0NTc2MzQ2Njo2NDQyMzIzMzQ0MTI0MjI2MjM5NTszNTIxMjAwNjQ5
-NDQ3NzM0MzM0MzU2NDcyMTU2NjU2MjU0Mzg2MzQ3ODk4OTU1NTUzNjMwNjIyMjIw
-MjMzNDQ2NDE0MDMzNDYzMjQxMzU1NTYzMzMzNDQzMjIxMzQzNTQ0NDI0NTU3Mzc2
-NzY0NjU2MzI3NjgyMjc0NTU1NTU2Mjg1NjY1MjQ1NDQzNzQzNjc1NjM0Njc2Mzc3
-NzY2NTQyNTIzNzY1NDQ0NTc2NzQzMzQ0NzY1MzM0NDM2NTU1NDQ2NjY4NTUzNTU2
-ODg2NDE1OTYyMjUzNDU3MzM0NDM1MzQ5NTU0NTc1Njg4ODkzMjAzMzQ4NTcyMTQ0
-MDAyNTMxMzM0NDY2NDM0MzU3NjQ0MzUyMjQ2NTg3NjY1NTU1NDY1MzU1NDIzNTM2
-NTU3OTc2ODc2Njg2NDc2NzU1NTUyMTI1Ojg3NDY2NTg7Ojg0NTY2ODo3ODk0NTY6
-OjY3ODY2MDUzNDg/NTY3ODYzMzUzMzQ4Nzg2NTc4NTU1NTY3ODo4OTU0NjQ1NDY2
-Njc2NzY0NDU1Njc3PDw2NjY4Njc4OTc1NDU2Njg2NTc4NTZOdWtNa35NODg2NTY3
-NTY2NjY2ODY2NDQzODhYcFg+Oz4+bZy1raqjhFBFPD85Ojs6PDs6Ojw8QF5oaEhD
-QD09PTw9QkFBQ0BAPjs6OzlDQkI8QEA/Q0A8Oz0/PUU9OD1BQT9BQDw7PTw8Oz1C
-PDk8Oz07PT9AP0BCQUA+QUBBP0FAP0BCQD07QUFBOz5BQUA8PUFBQT88Qjs+Pkk+
-Pj1BREFDQUFCRUREQ0RFRUZLRURHSUxLT05NS0lMTE9LUlJSS0xTTU1GSmaxy9Xe
-4+Xn6Onq6+pLS0ZMT01LSEdJSkVGRUVLSERDQ0hGREA+P0lBRkdDRkA/REVCPjw8
-Oz1FPz05Ojs6OT49QUBBPzw9PDw/PUE+PD5APz08PD08NzY1ODo+PDo9Oz89PDY1
-ODc4O0A8PTs5Njc4ODk+Pj08OT08Ozs8OTg3Ojk0Nzc3NzU3ODk8PD49Ojo8Ojo7
-OkA5Nzc4Ojc5OTo4NTs5Ozs9Qz09NzU4Njc8Njk4ODY6ODo5OTg2OTlCSkU8OD87
-OTU0Nj03OTk4OTg2Njg5NzM5Ozo5PDw6OTs0NzY1Njc2OjU3OTo7OTg1OD5AOzs4
-NzM1MzY9NDk7NjcxNDg6Ojc6QDk2NDIzNTs3NTU1ODg6NzY1NTg4NzYzMjMzMzc3
-NjQzNTE0NTU3OD1GOTU2Nzg8NjU1Mzg0Nzc2NTc5NjY3OzY1OTYzNjIzNTc1Nzcy
-NTo2NzQ2NTMzMzM1ODg0NTQzMjEyMzExNDU2MS8zLy80NTQzMjM0NDI0NS8xMzMy
-NTMzMjYzNjc2NTY1MTExMjQzMzU1MzU0NDQyNDM3NDo3NDU0NzMyNDQ1NzY2NDY3
-NTc0NTozMjU1MjMzNDQ1NDM1NDQ0MTI1NDMzMjM0MjEwLy8zNTY2NjYzMzEzNTUz
-MzQ1ODUzNjM2NTQ3NTMwMjUzMTM1NDQ0MTAzNDczNDU2NDMzMDI2OTc4ODMzMTMw
-NTI0NDQ0MjM1Njc5NDMyNTU0MzM0NTczNDIyNDUyNDQyMzMzNDMyMzgzMTU3NDY1
-Njc1Mzg1NDUyMzEzMjI2OTg0MDQzMDIxMTMzMzc0NjQ1MjQ0ODs1NTQzLzQ0NDQy
-MjEvMjQ0ODc1MjU0MjE2NTY1OTc0Njc2NzQyNDQ3OTM1MjMzODcyNDU3MzQ1Njc4
-ODMxMjY0NTg0MzQ1NDM0NDM0NTc2NTY1NTY1MzU3NjU3NTQ0NDM0NTc3ODUxNDU0
-NTU2NTQ0MzQ0NjU0NzUzMzM0NzU1ODY1NTU0NzU0NDM3NTY1NDMyNjQzMDIzNTM3
-NzY2Mi8xNTUzOTc3ODU1NTY4MzQ1MzQ1MzQ1MzM1NjU0MzQ1NTg1NDY0NTY4NzQz
-NTU0NjQ3MzE1NTY3MjU0MTQzNTU4NjMwMTM0NDQ3ODs6ODc3MzIyMzQ0NTQ2NzU1
-ODQ1NjY5NDY0Njk4ODU3Njg4Nzk4NjU1Nzc0NDI1MzMzMzg1NTc3MzQzNDU2Ojg2
-Ojk4Nzg0NjU2NzY7Nzg2NzQzMzU1MzMzMzUzNTc2NDU0Mzc2NzU2NTU2NTY2NzY2
-Nzg7NTY0NDM5RneBVDxfZEI0Nzc3Ozo2Nzc1MDM1MjY5NzQ2SnZ9Ujs8PTpImbGy
-oHxRSEA9Pz85Ojo6ODU5Oz5NZHFDPTw7Qj45Ojs4Pjk9PT0/QUA7Pz9BPz5BREJB
-PTw9QEBFRzo6PTs/Qjs9Pj47QUA8PD09Oz4+PEE/Pj4+SEE8O0BAQD0+REFBP0RA
-PTk6Oz1BPT88PkBEQz49RkE9O0BFPz9DQ0VBQENDQkQ/QEJCQkRIR0VCRUZKSk5K
-UU9LS0tMTlBUVlhSUVRWT1NMabPL1t3i5ebo6unr6kxNTkxMT0pKTEhMRkNGSklH
-RkRCRUNDQ0lJQz9BRUtCQkE/RD06OjxDQEE7QTs7Pzk4Oz5EQj5CPjtAPDk+Ojw9
-Pz9DQTo4ODc4OTo3Oj07ODw7Pjo6PTg0Njg3Nj46OT85Okc7OT4/QEE7Ojw7NDQ6
-Ozo4ODs1Nzk8Ojc4OjlAQDs6PD07Ozo5PEA3Njg5PD46ODs7OTk6OTs2PD89PTg8
-ODo7ODg5ODo5Ojc1Njg5Ozs4Njc4PDk4Nzc3NTY5Oj07NTU2NTc1NjY0NjY5Ojs4
-ODc2NTQ1NzY5NDc2OTs3Mzc3ODQ1Nzk3Ozc4PTo3NTU0NTc2Nzc3ODc2ODU1NDY1
-NDQzNDMzNTY2NjYzNzg3NjM3NzI1MzQyMzY1Njc2Njs3OT49NzQ1NzY2NzMyLzM2
-Nzk2NDY0MjUxMjU5NjQ4NTM2ODM4Nzk0OTc0MTIzMDM2NjU1OTk1NDMuMjI0NTMy
-NDI0NjM0NTQxNDgxMjM0MzU3MDA5NjQ4MzM2NzI1NDU8NTMzNDU0NDQ0MjM2NDU3
-NjczNTo2OTk2MTI0MjYzMzE0Nzc2NDUzNDQ0NzU4MjQ1NjMyNjg0MzIxMzM4NjY1
-MjIxNDI1NjMxMDE0MjU1MzEzMzE3NjY2NTQyMzI3NDIzMTAzNjc1MjM0NDM0MzMy
-MTIzNDM2MzQxLzAzMTExNDU1NDM1MzQ0NTQ0NDU2NDg0NjMwMjIyNDc2NDQ4NTM0
-NDUzMjc2OTQzMjQyMzMzMzM1MzE1NzkzMzM3MzM1MjIxMTI1NTU2NTU1MzYzNzY1
-NTU1NDU1NjQ1NjUzMzU2MjIyMjI0MjIyNTAzMTY2NjU0NTQ0NjYzMjAzMzQ1MzM1
-MjU2NTc5NDE0NTYwMzU0NTQ1NTIyMzQ2NTU0NTU1ODk3ODU2Nzc3MjM1Nzo2NTQz
-MjIzNjk0NTQ1NjY4NTM0MzM2NzU1Njg6Nzc2NTY0MzQzNTYzMzIxMDI1NTQ1MTMy
-MzY3ODMyMDI0NDU1NDg2NjQ0MjEyNDQzNDU0NTI1OTg1NjQzMzUzNTQ1NDQ0NjMy
-MTM2NTM1NDQxNDQ1NTg4NDM0NTY4NDQ0MzU2NTQ0NTM0ODY5OTY1NTc1NjU0NzUx
-MzI1ODc4OTo3ND02ODc1Njg4NDQ0MzI2Njg3NDQzMTU6ODk1MzI1OTg1NDMzNjY1
-NTM1NjQ5PDg5ODg3NjU4ODo3Nzc4NzU2NTU9NjY0MzM0NTU2NjU1NTMzMzM0NDU0
-NTY3NDQwMzUzNzQ2NTY2NDQ2NjU1MzY0ODYzMTU2Nz55clhDPW1hOzk1NDg1NzU2
-NjYzMzM1NTc5PEJyiWxBOjc7OEqYqaF7Rz06PUI9QDo9Ojg6PUBBRWFrXEM/QERB
-Oz4+PT1BPjs/PT9BREJEQEBBQUBCQT0+PTo6Pz47OTw+Pz9APjtBPTw7Pz8/PDg6
-PTw/PTs/OTw6PEA9Oj48PTw9P0RFQkM/Ojk7PkBEQ0BBQ0REP0FIREBAO0BAQUJC
-Qz1BQ0I/QkVBQEVHSURJSEVISEpHR0hHSVFLRkVPUVFXV1ZXUlBJTU1jssrW3uLk
-5+jq6urrSUpLSUhIS0xIR0lHRERDSUZLR0dEQUY/TExNSEVEQkNGRT0+Pj1BQkBA
-QT49Pjk4Ozg5OUE/PDo7OkBEQjw/PTk5OTs+QDs5PkE7OTo7PD07PUNDPD49OzU7
-OTk9Ozo/Pz03OTc4ODk3Oj07Ojo7Ojw9RDo5Ojw4NjY1Ojw3Ojs6ODk6Ojk9OzY1
-MjY5Njg5ODk3ODc4ODk5Ojo4OTo6Ojs8Oz06PTo9ODY2Nzc6O0A8Ojg3OjY4ODc5
-ODg1Njg8OTo3Njc4ODY6NDU2Ojk5OTUzNjw5NjU3Njc1NDc4ODczNTY2OTw7Ozw4
-Nzc1NTs3Nzo5NzY4ODU2NTc2Nzg4NjI2NDU1NTMzNzg1Nzg1NTY2NzY7NjU3NzY1
-NTMzNjY4MzU3NjU1NTU2OTUyMTE4ODc1NDQzMjc3NTc6NzU5NDE0NzU1Njg1ODk2
-NDY1NzM1MzQ0NTU3NDQzNzYxMDEwMTQ0Nzg6NjE1NDMzNTcyNDY2NDU4OTU0NDM2
-NzgzNDE0Ojg0NDY2ODg0MjQ0OEE0NjQ0NDYzNjY4Njg2LzA1NDYzNzg1NTQ0NjUy
-MTM3NTIzMzI0NTQyMzMzMjQ1NDIxMjUzMTM0MzYzMzMyNDQ0MzMzMzMxMTM2ODUy
-MjY2NjM0NjY0MzI0Njc2NTMwMTQzMDAxMTEyMzM0NTIzMzAyMTE0MjQyMzM2NzMz
-NDQ0MS8wMDI1NzY1NDU2MzU2MzAzNDYzNjc4NzUyMzIzMTM1NDQ1NTQ1NDQ0MzMz
-NTU1NTExMTAzMjMzMzQyNjQ1NDY0NDY0NTc2NTM0NTQ1Nzo5ODY2NDIxMjY6OTY3
-NDE0MzA1NDk2NDg4NDY0NDQ0MjU5NTM0NDc1OTc2NDQ4NTU3Ozo4MjY3NjU1Njc4
-NjQ1Nzc3NzQ4OjM1NjQ1Njg4NzYzMTMzODYzNjY0NjY5NTk3ODU0MjMwMTMwMTMz
-NTQxMjQ1NTc3NjU1NDM1NDY3NTc1NTM0MzU4NTQ1MTU0NDIyNTM0NDMxMjEyNDIx
-NTg3NjU1ODQ0NDU2NDQ3ODk3ODU0NjU1NTM1NjQ6ODo5ODU3Njk3NjIzMzU4NDQ0
-ODY2NDc0NTc3NzQ1Nzc1MjY2Mzg2NjU0NTQ0NTU4ODY5SDgzMzQ3NzU7MTU4OTQ2
-ODY2NDE1NDY2NTk5Ojg2NTY1NDU3NDY2NDQ2OTg3OTk0NDQ0NDg2OTo2MzY1NTY1
-NTUzNzU1MjM0NTY4ODYzMjE1NjczNjQ1NDc2Njc3NjIzNzQ2NDY4NzY4OTg1NTQ0
-MDIyNzo+VHFiUz1Gcls3Njg3MzQ2NTU1MzQzMjQzMzY8XoJ0STs6Ozk6TnhyelY9
-PTw6O0A+QkE+Pj07OkRRbUs/Pj8+Pz48PDpAOz08Ozk9PD9DQkI8OjxBQkJCQj5B
-QD89QUE/PD08PT4+QUA9PTw9QkBAOTc5OTs6OTo5Oz0+Pz5DQEA6OT5FQ0RIRj88
-OTg+P0NBQD9DRENDQ0JESEZBQj1CQEFEQUVAQTxDQERFRkRGR0ZFR0ZLSElKSEtM
-SkhNUFBNT01RS09QTFFUUWKxytTc4eXm6Orp6+tJTE5NSkdKSkhLSUlKSUhHRk1J
-S0lFRUZDQ0REREU/PkE9Pzs7QD08Pj9BPjlDPz08PklBPTxCPTw7Qj9BQD87NThC
-Pjw9Q0BDPTo9OTg6OTg9Oz06Pj89QT85O0A9PTw9Ozk5OTc1NTo2QD86ODg8Ozw+
-NzY2PDk5OTk2OTc1OzY2ODs6Ozg2OjU1OTk6ODk6Njk4OTc4O0NCOzg6PD5AOzs9
-Ozk7Pzo6NzM1Mjs7PDc0NTc8OzY3NTg5ODY0NDY1MzQ0NjY3NDU4Ojc5Nzg4NDU2
-OTo4NTQ2NzQ3Ojc0Njc5Nzg3PDc8Ozs4NjU2Nzs4NjY4NTI0MzU2NDg0NTM0PDc0
-NzQ2ODg4NjQ0NDU1NjU0MzU4OjozNDc6OTYyMjY0MjU3Njk3Nzc5NTc3NTQ0NzU1
-MzQ3NjQzNTQ1Mzs2Nzo3Nzk4NTg1NjU2NzY0NDM1NzczNTg1NDU0NDQzNTc1NTMz
-NDY3NTYzNjU1MzM0NzY1MzY3ODMyMjY1MzY0Njk6OTc3NzQ1Njc4ODY2PDEzNDI0
-NTY0NDY0MTM0Mzc1MzM1NTgyMTY2ODQzNDY1NDU1NTU5NjM0MjM3NDUyNjYzNDQz
-MzMzNDU1ODMzMzEzMzY5NDQyMTQ0MzY0MzU0MjI3NTMyMjQyMzU3OTk1MzEwNTMy
-MjIzNzM1NDYyMDExMzMzMjY1NjM1NTQ1MjE2MzI2MjI1Njg1NjcyMTU0NjQxMzUx
-MzUzMzY1ODU5ODU2NDQ2NDQ1NDY0NzQ4NjY0NTIxMjExMTMzMzY0NDQ1NDMzMjI0
-NTUzMjg0NzY3NjU0MjgzMjM2NTU5NTU2NjQ2ODc0NTM1NDc3ODc2ODU0MzU1NTY3
-ODk3Ozo1NTY4NDM1Mzc4NTk4NjIyMzQ3MzU4ODU0OTk2NDY1NTo2NTUyMzM0MzY1
-ODc3NzU3NDU3NTQ0MzU3NDQ0MTAyMjMxMjI0MjQ5Ojg4ODY0NTIzMjI3NzU0NjY1
-MTQ1Ojk2NDIzMzYzNDQzMTI1NDY0NzM1MzU1NDQ0My8uMDE2NzgzNTc3NTUzODc1
-NjIyNTQ2NDY0MjU2NzY4NjQ1NjU4NzgyMjI0NTg5NzY2NDM0MzU2NzU2MzI2NzY3
-NTU0NTU1NjY0NTU1NjQ2NzY4NjU0MzM2ODc3NDIyNTM0ODg4OTY1NTQxMzAzNDUz
-OTg5Njg4NjQwMjY3OTk7OjczMzU3Njc0MzI3OTY3OTQ0ODo5NDU1NTU2Nzc3ODQ3
-Nzc2Njk7OjQ1NTY2NDQ2NjU4NjY3NTg5Nzk2OjxZcGxTO1JsUz45ODc5OTg2MjY2
-NzU0NjU2OmOShlk7ODk6NjpBS0dEQEFIPUA/Qz8+Oz9CPUFBUGJWPTs8PDw6PT9B
-Q0A/Pj06Oz8/Q0RJQj4/QkFAPjw/QztBPT5DQD06Ozw9PDs6Oj0+QEA+Pz88Pzw6
-OjpBQT9FQD9APj4+PkI8Oj8+Q0NCQTo/QT09Pj47QD9AP0FCQ0Q/PkNEQkVCPkFB
-QEVBQkNCSUNES0tKSkhRT0hGTEpNSk1MTE9WU01PTVVKSFhVS1NKYLLJ1N3g5Obo
-6evs7EdJSkpNTkpIS0ZLS0RGRkVJSUxJSEZAPT5CQUM8P0BCPUJCP0BAQj5DPD09
-QT1AP0A+Pjw6Pjs6OjtAQUA9OzxBOj49Oz1DQzw6QTw7OTw+O0A8PTo4Njw6Nzc6
-Pj49PTw5PTs/NzU2Njc5NzM2Nzk4PTo8Nzo3ODg2OTo8NTY4NDU1ODg1Nzk2ODo9
-NjU7Ozs4Nzk1OTg5Ojk5OTc5Oz07OTw7Ojk8OTo7Ojc6ODo2Nzk4Nzc3NDc3ODc1
-NTc1MzM3NTg5NTc5OTo3OTQ0Njg3NDg9OTY6NTQ2NTY4OTc0ODI2Nzw4Njc4OTg2
-OTg2Mjc2NDU4Nzc4OTY1Nzc2Nzc3NTYzNTQ3NjU0NTg3NDU1NTU3NTY3NTU0Mzc3
-NDM4NjU0NTQ1MzQ3OTc2MzM0NDU1NTQ3NjU2Nzc0NjQ2ODU5OTc2Nzg7PjY0Mzk2
-MzczNTQyNDU0MzQ3Njc0MTEzMTczNDQzMzU0MzYzMjMyMzU0MjQ2NDY1NzYzMzEx
-NTgyMzY3MTw6NTg2NTc3NTY2NDMyNDQ0NjIzMjAxMjM0MDI3MzM2NTg1LzAzODQ0
-NDU3NTM3N0A1NzM0NjU3NjQ1NDIyMjIyNjQ0NjMyMzQ2NjY4OTU1MzQyMTEyMjIz
-MTQzNjQ2MjExMzE0NjQ2OjY1MTIxNTUzMTQyMDAzNDUyMjAxMzQzNTQzNDM2ODM1
-MjIyMjY0MTQ0NDI0NTU3NjU2ODUxMzQxMzIzNDY1NTY0Njc3NTY5NzMzNDI1MzM0
-NjI0NTU1NDQ2MzQzMjMzMjQyMS82NDQzNDAzMjAxMjUzMjU1MzIyNDIyNDM2NzU1
-MzY0NjMxMzc1NDQ1NjY2NDI0NDM1OTc3OTg3Nzg2NzU8NTQ3MjE1NjY0NkQ3NzU0
-NjU3NjU0NDY5ODo6OTY1NjM3Njc2NDQ2NTc2NzQyMjY1NTM2NjU5OTs0NDIzLzE0
-MzU5Nzc4NTQ2NTY2ODMzMDMwMDMyNzU3NDQ0NzgyMjYzNTU0NTIzNDY1MzM2ODYz
-NDU4ODU0MTEwMTQxMDIzMTMzNDc2NDM0Nzc0MzY3NDM1MjQxMzI0NDQxNDIzNDMx
-MjQ2ODU0NzU2NTMzNjQ0NDg2Nzk4NTQ1NTQ0NTY6NzU0NTMzNDQ1ODg1NTc0NTQ2
-NTYzMTU0Mjc2Nzg2NTMyNDQzNTI1Mzc2ODk6OzU1NjY6ODk7Nzc3NzY3ODc0NTQ1
-Ojk1MzU2NTU3NDE3ODY2NDM2Nzc1NDU6OTg3NjU1MzU2NjQ0NDIxMTQ2Nzc1MzU3
-OTU3QFBxdlNDVGlRNjU1NTc3NDc5NDQ3NzU1OjxliYqGSj05Nzc7PTo4Oz4/QUJD
-PEBCPj08OTo/QEtSTUM+Oz47Ojs9Pj9CQkM9Ojk8QkFDQENDPj48PUBAQDo/Qz48
-PEA/QkJBPUBBPTo8PEBBQT46Oz09PT08PD8+OzxBQ0BBQj5AQkNBPD49Oz9BPUJB
-QT89PD9FQTw9QT5ARkVAPURIREZLQkpHQkJEQURHSEZFTExJSklISUtKS01OSkxR
-V1RSUE5LS1NRUVdSVExutcrU3OHk5ujp6uvrSExLSktKQ0hBRkpER0pIRkZJRURF
-Q0RERUQ9QT0/Ozc8Q0RLREJFQkNDPT5DQDxBPkE8OT5HOzo7PDw+Pj8+Ozk7QD4/
-QjxAOTo8Oj0/PDxCQTk9Ojc7Pjw/PDk/PEA9OT86Oj04MjU2NzY3Nzg6OTY2Nzc7
-ODg3ODk3ODg4Ozo1NTM2Ojk6PDw5Pzg3NDc5OTc7PDo9PDs4Njc5Nzk6ODk5Nzc6
-Ozc2NjxBPzo4OTY2Njo5NzY2Nzc5NzY2ODY0ODo5Nzc5OTc4Ojo4NzY6ODY4Nj43
-Njw7ODo2NjY3MzQzMTI2OzY4Njg7Nzg4OTY0MzY2Nzo6Nzc1NDY2NDg4Nzc2NTY0
-NTc1NTUzMjc4Nzc2NTU2OTM0MzUzNDYzNDc0NTg3MjQ0NTU1NDYzMzQ0MTAyNTM0
-MzMyMjQ1OTc4MzEzOEE5NzUzODg1NTU0NTk2NDQzNDU0NDMzNTQ0NDIzMTI0NDM0
-NDYzMzMzMzU0NDY0MzQ2NDU3NTU0NDQ1NjQ2MjQ2ODU0NTQ2NTgzMTEyNDIxNDIz
-NDIxNDc0NDM2NzY0MzU2NTc6Ni80NjM4NzU2NTYzNTM1MzQxMjMzNDQ0NjM3MzQ3
-NDQyNDY0NzU2ODY1MzY1NTUxMTMxMTAxMzU3MzQ0NTQyMzI2NTU1NDM4Njc3NDM0
-MTMxMjA0NDQzMzQzNDMwMDQ1NTUzNDQ0NTU0NzQ0NTEvMTQ2NzYzMTA0My8wMTAy
-MjQ1NzM0MzM2NzYzMzQ0NzM0MzY3NjU0NjY2NjY1NjQ1NjQ0NTI0Mjg1NjU0MzIz
-MzUzMTMyMzI2NDU0MjY2NTMzNjk4NjY1NTExNDU2MDQyMjQ1NzQ1ODY0NTU2NjY3
-NjQ+NzU4Ozc1Njc1MzM1NTc7ODg6ODU2NzU4OjY0NTg5ODk5Nzc1ODY3NDc1Ojk4
-NTM2OTQzMzY4Nzo6NDI0Njg5NjU4MzU5ODY0Njg1ODU2MjQ1NjQyNjc2NTI1ODc3
-NTUzNDM1NTc2NTY2NzQzMjQzNDY4OTY2NjczMzM4NzMwMzUyMzI1NDU0ODU2NzUy
-OjUzMzU0MjEyMzU0NDM0NjU1NDUyMjU0NDU3NTY0NjU2MzMxNjY0MzMzNjU0MzQ1
-MjMwMzQ1NjU2OTQ1ODQ3Mzc1NDQ4NjI0NjU2NjMzNTU0NDQ0MzM2NTU2NTU4Njg6
-Ojg4Nzc2Njg0OTc3NzQ1NTg4NTM0ODw2NjU1NDc5ODc0NTQ1OjY1NTc2Njc4Nzc6
-OTk2Njc3Ojs3NjU1MzU0NzYzNjQ3OTo3ODlHUGBqVENVcUg+OTU1NTY3MzY0NjU3
-Ojk9W4+ggmFBOjo6Ojo6QD07QD09Ozk7Oj1AP0A+QD5JVVNHP0M+Qjo4O0BCQUA+
-QkE6Ojs9REBBP0NBQUJEQT9APjo/PD9APDs9QUE+PTw8Pj8+PkA+PTw8PDw6OTs9
-Pjw2PEQ+NztCP0BBQ0NGQD0+PUJEQUdAQTw5Oj5APT5BRENDQ0FCQj5EPUNITE5C
-RkRBQkVGR0ZGSElLR0lESkhMTVFST1VVT0xOT1BWVVZWVU9RUXK3y9Tb4OPm6Orq
-7O1KSEtPUExGREJDR0hJR0RIRkdFREI9REQ/RUZFQkZKQ0M+REVETEVDRkQ8P0FE
-SEVAPz9BQDk8OTtCQT48Oz5AOD0+Pjg8PDo7Ozw6Ozk8OkA+PDo4Nzk7OTw/QTw6
-PT1BPzxAODU3ODc4OTg6Nzc1Njo6Nzc6Ojo5Ojk6Njg9Pjs6Ojo4Ozs4PDk5Nzs5
-Oj47ODU7ODk4PDo4ODtAOTg7Ojk2OTk8Ozo3Nzc5NjY4NTQ6OT04Ojo5Ojk3Njg6
-OTY4Ojk3Nzc3Nzo2NTU1NzY1ODg6ODk5Nzk7Ojs2NTU2ODc1MzM0Njs5Ojc2NDU2
-Nzk2MzQ3NjU1MTEwMjQ2NTc3NTM1Ozo2NDU1ODU0NTs3NTo2NDQ3OzQ0MTEyMjU2
-NzY1NDMyNDg7Ojg2MzYxNTUzMzM0MjIzMzQyNDY3Nzc3NjY1ODQ3Njg3NjU1NDQ0
-NDg3ODg0NjYyNDM0NDU1NDI1MzMzNTQyODM0Njg0NDQ1NTY3NTg3MzQ3MjM1NDI2
-OzU4NDY2NzY3Nzc3NDQ2Njc6NTU1NTYyNDUzNTU1NT83NzY3NTU1NTk3OTY4OTUy
-NDc0NDY3NTc0MzMzNTMwMDY3OjI2Nzc0NTMyOTc2MjQ1NzMzMzY0MTQ2NjIyMTY0
-MjU0ODY1MjAuMTM0NDUyNTMzMjMxNDU0MDEyMDEzNTY4NjIxMTAxNTU0MzI2NzY2
-Njg4NTYyMzU0NTQ0MzIxNzUyMTAwNDQyNDU3ODQyMjQzNTc1ODQyMzI1MTU2Nzc3
-NDY1MTM3ODQzMDEzMjIzMzM1NDQzNTMzNDQyMS8yODc4NjM1NDM2MzU1ODk4Mzc2
-NDg2NTU1MzQ4Nzc2NTMzNjU3Nzc0MzM3PTk4MzMyNTg3NTU0Mjc2ODc3ODk1NjY4
-Nzo4Nzc3ODMzMzIzNTQ2NjM0NTg4MzQ3NTU0ODk3ODY3MzIxMzQ2NjY6NTU1NjQy
-MzIzNTY2NjU1NzY1NTU0Nzs2OTY1NTQxMDE1MzI1MjM0NTY1NTc2MjMzNTMxNTc1
-MzE1NTY5ODUzMzQ6NTk2NTU1Njc0NTY3NDc5NTY0MTM0Njg0NDIyMzM2MDU4NzY3
-NzY2NDI0NTY0NDI0NzIyNDQzMzQ1NTY2NjU0NzY5ODo5ODs1MTE1NTY3OzU1Nzk4
-NTU2ODcxMzIzNDU0NDEzMzI2ODc5Ojc1NTo8Ozg2NTU2NzQ3ODM0MzY3NjI3NzY3
-ODk4ODg4NzU1MzI3NTU0NTY1NDY0NDUzNDEzMzc5NDQzNjg3NjQ0MzU2NjM2ODo9
-RE9SU15VRl1fPzg3Ozk0MjQ0NDU1Nzk7RnKboaZqQj02Nzk4ODo9Pjs+Pj05Ojs7
-Ojs8OzpCQ1NfTz9DP0FDREJBPj09Pz4+Pj08PT06Oj0+P0E/Pzw/Q0JDQj9DQD0+
-Oj06PUA+PT8/Pz8+QD09Ojw9QD0/PDs+PkNAPDs+QD9BRD49P0JBPD4/PUFBRD89
-Ojo8QENFREFDQD9DP0I/QDs/R0NDSUxCQUVEREdFSURER01KSkVKTUhLS05JTkdL
-UU5TVFNMUVBPU0xKb7jL1Nzg4+bn6Onr60xRUFBVTUtMSEZJRkhDSENERk1KRkJG
-RUhIREdFQkNBQUFBPj1CRENFQ0NDRkZCREJFQj1FQkA+PUFCPT06PT9FQD07Pj49
-PUA+P0NAOTc2Nzc6PDs+OTs5OkE3NTk6PUE9PkA5NDc5PDk6Ojc2Nzg4ODxBOjg5
-OTk1Ojk8ODs6Nzc4Njs/PT8/Pjo7Ozg/QT88Ojs8PDg4Pzc3Ozs6Ozg3ODo7OTk9
-Nzg6Ozk6OTY6NjY7ODk4Ojs3Nzc5Nzc4OTQ0OTc3OTYzNDM2NjY3NTUyNzc2PEA6
-Njs5NzU3Nzk5ODIyMDM2NkM7ODg3NzY3NTY1MjM0NDI0NTQ2NjY2NTY0MjA0NTY2
-NTU2NjU2MzI1Nzg2OTY2ODQ0NDI2NDY3NTAwNTg2ODg4NzU1NTg2NDU1NDQ2NjMz
-NDg0MzY2Njg3NjU1NTc4Njg2NDQ1NTMxMjU1NTIyMjU3Ojc1NDQ1MzYyNDM0NTk2
-MzI2MzI0NDQ1NjU1MzYzMzI1MzY3NjY2NTU1MTEyNjY1ODo6NTM1NDg3NTUzMjI5
-MjM0MzMyNjc1MzQzNTQ2NTU3NDQ3Mzc1NjUzNTM2NTU2NDUzNTg2MzQ2NjIxNzU2
-NTUzMzM0MjIwLzA1NTYyMjUyNDY1MjIyMjM2NTY4NTQ2MzUzMjM0NDQ3MzMyMTQ0
-NDAxNTc3NDk0MzMyNDMyMzU0NDU0MzU1NDI3MzQ1MzUyNDY0MjY3MjQyLjQ5NTMz
-NTUzNDU0MjI1MTMyNTMzMjI3NTU2NjQ1NTMzMzc0MjEzNTMyMzMyMjM1NjY0MzMy
-MzQzMzEyNTMzNTY0MjIzODY0NDc1NjEyNTU1NzExMjIzNzc3NC8yNDY3NjY3NTU0
-NTc2NzM2NTY0MzQ1Nzg4NzQ2NjM2MzM5Nzg0MjQ3NjU1Njk3Ojg0NjY2Nzg4NjQ2
-MzQ1Nzc4NzU4NTc1NTc5NDQ1NDc2ODg2NTY3NzU3Njc1NTI0NzY4NzU0NTIzMzYy
-MjQ2NTM0MjI0Nzc4ODs2NzY0MjY1Nzs1NTc1NTM1MzU3MzQ3Nzg3NDc2MzM1NjY3
-NzU4NjQzMzQzNTU1NzM3Nzc3Njc1ODs3NDEzMjEzNjUxMjU1NTA0NDM2NTU1NDU1
-MzQ1NTQ2Njc5NjM1ODQ1OTg1Njc2ODc3ODc6NTUzMzU0MzQ4Ojc2Njc2Njo4ODc2
-Njg5NTc2NjM2NDY1NTc3NjQ5NTU1Nzg6ODgzNzU2NzYzNjY2MjQ0NDY0NDMyODk4
-NjU0NzY0NTg4NjQzMzc2Njc6ODU1OTZIWVlEWFlMZVw5OTs2NTg5NzU2Ozk4PD5u
-kqSnl19AOTk5ODo8OTk3ODo7Ozw5PT07Ojw8O0BGWmVFREQ+Pj48Ozw8OTs8Pj07
-PUI+Oj0/PT8+PT0+PD0+QkdEQEI/Pz1APUA8PD1AQD4+Pz48Pz85PD9BQjw7PDlE
-REFAOz9DPj88QD4+PT5CPz09O0VBPD45PDxFQ0JDRkNFQj8/Pz49QERDQUBBR0dG
-Q0VHQ0VARUNFRUVISUdGTEtGSk5ST1JQUVBOTE1TU1ZQVE9ltsvU3OHk5ujp6urr
-SE1NSkxNTElGRkxMSUhJSkNERkVGRkNBRkVFRUFBREVJQTk8QERBQERGQT8+QT0/
-PEFEPjo7PUA9OT05Ozw9O0JARj48PD9BPTw5QD5AOzU3OTo9Pz48Oj07Ojs6PTo7
-PDw9Pzo5PDs4Ojg5OTY3Oz07QDw5PDk6Ojs2ODs8Ojk2ODg6OTxAPjs8QUE5OjxA
-PkA/PD8+PUI+PkI8ODc6PTg5PDw7Ojg7Ojk4PDw5Njk6ODo5ODY2MzU4NTU2NzY2
-OTY1NjY5PDY1NjQ4ODk2NjY5ODg4ODU5PDo5ODM1OTo5PDo3MzM1Njg1NTY4NjMz
-NTo8NTM3Ozc5Nzg2Njg0NjYzNjc0NjQ0NTU6Ozk1NDI4ODg5NzgyMDU2NDU4Ozc0
-NDMyMzU3Njk5Nzg6OT47Ojk7NjY2NzQzMTMzNS8zNzg4NjM6OTUzODY1NzUzNTYz
-MjMzMjA0NDQ2NDczNTQ5QzI1MzM1NTMwMzEwMTM0MzAzNTUyMjMzNzU3NTs2Njg4
-MjE2NDQxNTY5ODk0MzIyNDUyOTU0NzI1MzQ0MjYzNz40MzExNTIzNTY0MzEzNDU3
-Njc1NDg2NzQ1NjU2NTY4NDc3NjY0NDQzMjIwNDQzNTE0NTU1Nzw3NDMzMzY1NTY0
-MzMyMjQ2NTIzNDEvMzEvMDEyMzMzNDEzNTo0MTM2NDM1NzQ2NTQxMzQ3NjU3NTcx
-MTY4NDc4NTIzNjM2NzI1MjM3NTY1MzQyNjg0NDI0MzQ3MDIyNDU2NjYzMzQwMDQz
-MjI1NDEyMjIzMjI1NTMyMjY0NjY5NjQyMzMyMzMyNDE0NzI0NDE0MjMzMzIwMzc0
-NTQ0MjQ0NDU3Njg3NTQ1NDM2NDY2NDk3NjQ3Ojg6NzQ0NTQ1OTY3Njc4NjQ1NTY1
-NDQyMTM0MzQzMzU0NTU0ODg0NTY3NjY5NzQ2NzY2Nzc3OTkzMjI1Nzc3NTczNTI3
-NTQ1ODc2MjIzNDg2OjU4NDIzNDU1NTIyNTMwNDQzMjU2NzQ4NDMxMjQ1NTY3NjY1
-NDU1NTQ1NDQ2NDQ4NzU3NTM1NDU0Njo7NjIzNTY3NzY2NzU0MTU0NjU2NTU0NDQz
-NDM1NDMxMzY4NTg4NjMyMzU2NTI0MTIxMDI0ODg3OTo6Nzc7NjUzODY2Nzk2NDU1
-Nzg2ODQzNTc0NDU4Ojs8Ojc3NjU3NzY4NTk2NjM3Nzc5Njg8Nzg3OTo5NzY4ODc1
-MzY4OTc2NDU5NTQzMzM3MzIwMzU2NDYzMzM1NjQ1MzU0NTQ0Nzg5NDM3NzY4OkRX
-VT1QXE1WRTc3NjU3NzY1Nzg6ODpBd4ufp6yiaD08PD4+Pjw4NzU7Pjo7Pjw7Pjk7
-Oj09SVFWRkFCPz46Ozw9Pjs8Ojs9OTg7PD09OTpAQj8+Pjs8PUVDQEBDQEE+Pz07
-PT08PD06PT1AQD8+Pz5EQD1AQD5AREVDQUA/P0JDQD9AQUA/PT9BQz1AQ0NFPD1B
-PUFBREdDRkVJQENBPkNBQEFCRUhLSUpJREZEQ0hJRUhCQ0pCRElNTlBTUVFbXFlR
-UlNTT05TU1xQVWy3y9bc4eTn6Ojp6+xLSEhLUFNOTUlIS1BLS0ZIRkZITUZERkBG
-TEtFRkJFQUJBPTs+RURAQz8/QDo5Pj1BPT9CPzk8PkBBPjw9PUJCQz48QT87Ozk6
-OTs7OTg8PTk5PDg5Oj47ODw4PTs7OTs4Pzo7PD04PDo5Ozs+Ojk3Ojo5PD49PD47
-Pjg3PDo6OTU4ODs7Ozk3Oj48Ozo4Ojw7OT86PDs8Pz07Njk8Ozg1NTc2OTo3ODg5
-Njk6Ojw4O0E6Ozk4ODg6OTQ1NTQ1Ojo5ODY3NTc4NTI2NTQ0MzQ3NTU3OTs2Njg5
-ODo5Ojk0ODc2ODc4ODU3NDg5Ojg1PjU1Njs2MjY3NTU2Mzk7ODM3Nzc5ODU0NDQ0
-NTY1Oj04Njc0NTg2MzYzMzg5NzU3NjM1NjY0NzU2NjY2NzY0Njc4OTk0NDY2NTU2
-MjEwMjAzMjY1OTUyMzQzNDM1NjgzNTY1NTQzMjQyNDY3MjU2NjY4NzQ4MzU0NDU6
-NzQyMzU1NzQzMzEyMTUzNTg3PTcyNzg3NjM2NDg2ODM2Njc4Nzc1NDk5Nzo1OTU6
-NjY0NTc0Njs1NTY1NzU0NDMxMzU0MzU4ODY2MjY2NjczMTIzMzMyMzMyNDU1MzEw
-MzQ0MzMvMzU1NDc2ODUzNDMzNDQ2NjUzMjE0NTE1MzAuLjA0MzU0NDIxMzIyNTQ3
-OjY2NDQyMTIzNTU2NjU0NTY0MzIyMjM1NDY1NjQ0NTc2NzU2MzM0NjMxMTQ0NjMx
-MTAwMzQzMjM1MTIxMzY5NjcyMTAxMDI2NTMzNTM1MjY5NjQ0NTQ0MjE1MzY1NTI2
-NDI0MjQ0OTU2NDEyLzQ0NzU0NDM1NTUxMjIzMTU0NTY5Ozo3NDEzNDM3NDY2NTY1
-NDo5Nzk1NTY3NTU3OTg4ODc2MzY4MzAzMTMxMzY4NDU0NjMzNjY4Ojc3ODg1NTQ3
-NzQ1NjY3NjM4OjU0MzQ2Njc3NjU2ODczMTEzMjY0NDU0NDU0NTI2OTQzMzY0MzQ0
-Njc1NTE0NDU0NDMzNTQ0MzY1MjMzMzU1NjU2NTY7ODg2NDg7NjM0NDMzNDQ2NzUz
-MzIzNTY3ODc3NjY1NDI2ODU0NTY0NDM3NTY0MzI1NjM2Nzg3Nzg2NDQxMjI0MjEx
-MTY3ODk4Njc5NzY6NzY2Njc2MzQ1ODg4ODY2NDQyNDY1MjY6ODo6NzY2NzM2NTU3
-ODQ5Nzk8ODg7ODg4NTg4ODg3NDc5NzU2NjU2NjUzNjU0NjU0NzU0MzU4NTU0NjU4
-ODU2NTU2MzU1ODg2Njk5Ozw5ODk8Rl5cPVNcY1c4NjY3ODo2Nzc3Ozo6PFiKiJiY
-qKF5UD09PTs+Ojo5Pjs3ODc5OTo4PDs8OkROTkE+QD89OjtAOTo9OTo6Oz07OD4+
-QDw9Pj5ARUBAPT0/UUJAQD1EQj5BPTw9PUFAQj46PzxEPTpAPkJFPz4+QT5DQDw8
-P0FFPjw7O0E+P0FDRUlFR0ZFSERCPUFCREM/PkU+Oj5FPkBHQ0NBQUFGQUNFR0tI
-SEZHSkZHQUlDQ0tMU0xMUU1NTFBVT0tIUVJPSk5OU1BNcLbL1d3h5Ofn6erq60pH
-S0pNTVBVSUlLSUpKSkZIREFCREFGREFIRkZCQEJER0E9Pz1BP0BBRUdBPEBBOj07
-OjpBQUBDPUA+QEFFQUE+Pz1AQjs6Pjw/OTY1Oj05Pzo3PkA9OkI6OTs/PDc5OTk7
-PUBCQD81OD08ODo7OTg7Nzc4OTg3PDo6QDk3Ojc6Ojg6OTU5OTg5OTs+PTw3NjU7
-ODk5NjU7Ojg6Ozg3ODU7PDs7OTc1Nzk5OTg4PDo3Pzc3ODk4ODc2NjY4NDU0NDk4
-OTk6OjY4Nzc0NTQ1Nzs4NTM3NTM1NjQ1NDc4ODg2OTc1PjY3NDU4Ozo5OTg5OTc1
-ODk4NTg8ODk5Rjg2NjY2OTc1NjYzNTY0NDo2Njg1Njg3ODcyNDY+Ozc3NjIzOjg2
-MzU2OTYyMjAxMTY3NDMzNzMxNDU0NTY3NTQyNTQ0MjQ2NzIxMTEyODk0NTYzMjIz
-NzY1Njg0ODU1NjUzNDMzMzc1ODY1OTg0NTY1NTMzMzM2MzA1MzM0NTQ3NzQ1NTQ2
-NzY3NjM1Nzc2OjY2ODUzMjU2NDQ2ODk0Ojg0NDg0NzY3NTc0NTU3NTU3NDUzMjE1
-MzU1NDM5NTI1NTQ0MDIxMjM1MjM1NTMxMTAxMzIyMjY2Nzg2NDIyMTM5NjQ0MzM2
-NDMxNTQzNjExMTIzNTM1NTYyNDQzMzc3NTY2MzQyMzU1MzUyMDI0MzM0NzUzNTc1
-MzMyMjIzNzY0NTczMTI1NTQ0NDEzMzQ1MzI2NjYxNDMzLzA2MTQ1MzMzNTAwMTM3
-NTUzNTYyMzc4OTc2NTQ0MTUzMjY3NzM0NjQyMzQxNDk4ODQ5NTM0NTYxNTIzNDQ0
-MDM2Njg6OjU1NTc3MzMyNjQ1ODc3NDQ2NTc2NzM0NDQ4NTMzMzU0NDU3NTYxMjU1
-NTU0MzQ4Njg2NjY1MjI1Nzc4ODYzNjg6NTQ4NjY4Nz06Ozg3NTY0NTU1NjQzMzQ0
-NTY0MzUzODMyMzQ1NTc2NTQzNDc6Nzg4NDY2NTU2MzEzNDM3OTU0MzY1NDMyMzQ1
-OTg4OTQ1NDM0NDQ1NTU1MzQ1NjU3Njk3Ozc3OTc4Nzg8OjY1NTYyMTM0NzU1MzU1
-NDM2NjIyNDU2Njk2OTU1NDQxMTQ1Nzg2ODk4ODY0NjU2Nzg3NzU1NDQ0ODc3ODc2
-ODc5NzU2NDI0MzU3NjY4NzY4Njc1NDU2Ojs8PDg3ODg5NjQ3Nzc2OTg2NDc2NDc3
-Njc1NjU5MzU1ODc0NjI0NzQ0NDg3Njc1NTg6ODY2Ojw4OTk6OTo8Nzk4NzpPYV4+
-WHNrYj84Ojk4NzY1Nzk7OTlCbm5gZmt0gXFZPDk6Pz04OTg7PTw7ODo7Ozo6Nzk+
-Tl5NPT8/QD48Ojo8Qjs7PDw5Ozw9PDk6PD9APTw9P0JGRkJHUEVAOz8+QUJCQT49
-QEQ+PkE9PEJHQUFCPkFFQUQ/QUI+Pj47P0I+Oj9BQEE9PUNDSUZDQD9BPkA7RUlF
-REZIRD4/QkdCREhHR0RER0dJRT5BRUdKR0xGSUdGREVMSElQUFJUSU5NUlFNTExS
-U1RZV1RNRkp0tsrV3eHj5+jq6ezsVUtMSE9XVlFUSElISVBNR0lFQ0RHRUNDREVI
-RURGRUdIQ0I+Oz5CQT4/Q0A8PT88Pjw5Nz9CPD9APjtAOz5BP0A/Pz4/Ozw9PDg6
-PD46OD06OTk+QDs6Pjw8PTs7ODk7OzY8QkI7Pj04OD1BOjc3Ojo7NzY3Ojg2Njo3
-Nzc2NjU3Ozc4Njo4OTg5Ojw4PDo6OTo3ODc2Njg7Ojk4OTs9PDk7OTo7PDs3Ojo5
-Nzk8OTg7Ozg1NDc1Nzw2OTc5NjQ0NTc4OTc4NjU3NjQ2Njk3Nzg3NTQyNTQ2MzQz
-MzU2NjQ3NTk6OjY3Nzg8ODY4OTk9PDs3ODs6NTY4Ojs9ODg2ODo2NzczNTQ2NDIz
-NTY1Njo3OTg2MjM1NjczMjU0NDYyMzU1NjIyMzQzNTc8NTg2NDU0Mzg1NDY1NDQ1
-Njc0NDQzNjc5NjU2NzU2NDw5Njc0NDMzNDQ1Njc2MjE4NjQyMTQxMTY2NjY3NTk4
-NzQyMDM1NjIzNzQ1NDExNDUzMzAyMjc5OTk4NTc4OjU2Njc0NTI0MzQ1NDQ1MzM0
-NjM1MzU2OTQ0NDY1NTY1NTUzMzUzMDE2MzQzODUyNjY0NTM2MzY1MzM1MjQ1NDUx
-NjI1Njc2Mzc6Ojk2NTIzNTY1NjIyMTE1NjQ0NTU0MzIzNDI2NzMyMC8wODg1NTQ1
-NTM0ODYzNjIyNDQzMzIyMzMxMjI3NTYzNDM0NjQ2NjQzNjU1MjE0NjQxMTAyNjc4
-ODQ1MzIyNTU1NjU2MzMzNTMzMTMzMzM2NDU1Njc2NDU3NTU1MzM1NTQ2Ozc1NTIy
-MTY2NDUxNDQ2NzY2MjI0NjIxNjU2My8xMjMyMjQ3Qjg2Ojg3NzQxMDU1NTUzNDUz
-NDQ1MzYyNzg5OjY3NjY1NTQ0MjI1NDc3NzQ0MzIyNDQ2NjM0MzQ1NTU1NDU3NzY2
-NjY2NTY3NTU4NTQ4NjY4NjM0MjMyNTI1Njc1MzU2NjgzMjc5MzM0MzU1MjY6Nzc0
-NDU2NTIyMzMvNjY0NjQ0NTM0MzM0NzY0NDM1Nzk0NjY0MTI1NTUzNDM1NTQ0Njg5
-Ojo3NzY1NDQ1NDQ4NDc7Nzc1NTU6NDI2ODo3NTM0MzU2ODk2NjU2MzM1MDIzNzw5
-OTg0NjU2NzU1NTg1NTUyMzg4NzY4ODs2Ojs5NzQyMjU2NDU2ODk2NjU2NDIzNDc3
-Nzg4NDc3NzY2NTY0NDU3OTg1PDk9NjU1NTU2MjMyMjY3Njg7OTg4Nzk3NDY5Njcx
-NDI1ODc2Ojo5NjU2OT08Nzc4PV5wYD1Qf3BpRTk2NjU3Nzc5ODw7O1F4YlFBRVhX
-XEU3PDk6Oj09Pjw6Ozs4OkE5Ojo7N0VcUz88OTU9Pzo7PT04OkBBPj89Qz1AP0E+
-Ojs5Pj1EQUJDQ0BGSkVFPz4/QUVBQDc6Ozc9QkM/QkVKQ0FAPT5CQDtAQ0VCQTxA
-PTs/QEFBQkE9P0E+Qj9FQz5DPz5AREJAQjs4PD5DQ0BBQ0VLRkdESUdAQUlLTE1G
-SU1OR0pDR0tKUEpNT0dTU1lRVFVVUldTUVZRUUVPSHG3zNXe4eXm6Onr6utKSVBP
-UVNVTlFQUVBLSE1ISEdFRk1ISEBHSUJGRUhJSkFBS0ZBOkBATE49Pj1BOzc5Ojs5
-PDs3Nz5AOz49PUBAQDo4Oz86PD45Njo8PTo8Pj4/QTo8Oj0/SUE4OjxAPj86Ozo+
-PT88OTQ3ODg4ODc0Njo8PDs3OTc2OTo3OzY1NTY4Njg5NTk6Ojg7ODk8PTs6OTk6
-ODg5OTo+OTc6OTw4ODw9Nzc4Ojc5ODc2NjU4OTQ4NTc3MzU4OjY5Nzc4ODM2NTc2
-OTU1Njc4NTo5Njg5ODQ3NjY3Ojg3NjMyNzg4Nzg3OTk1OTc3ODU2ODk6Nzc7Pzk5
-NzU1Ojg4O0A8OFc6NTMzOTc2Ojk2NTg1NTU0Nzo4OTU4NTU6Qjk0MTQ0NDE0ODU4
-NjMzNDY2ODc2ODg1NDc2NDY1Nzc1MTM2NTQyMzU0MzY0NjM3Nj02LzQ0ODUxMjEy
-NTU1NzQyMjM1NDU0Nzc1ODQzNzI0NTQ2OTIzNjMyMzQ1MzMzMjM2NzY0MjE2NDQ3
-ODc3NTM4NjM2NjU6NzU0MjMxNDIzMzM0MjM3MzUxMzUxNTY2NzYxMTU1NzU2NjY3
-NTQ2NzY2NjU1MzY1NTQ1NjY1NDM1NDY2MzY1Njg1Nzc3NTY1MzExMjMxMzQ0MzMy
-NTQyMzIvMjU0MzY6NDQwMzI2MzY1MzU0Njc0NTY4NzU0NDU0NjY0MjM0NTQ0NDIy
-MzY2NzM0MjE3NTMxMjEyNjg2NDM0NTU5NTc2MjIyNjc6MzQyMzQyNDQ0MTI0Ojc4
-ODc0NTU2Nzc0MzExMjY5NzY0Mzc2NDc8NzM1NzY2NTQ2Njk1NDM0NTU1MzQwMjM1
-NDM0MUBVPjU2NjQ1NzY1NTc3NjU3Njc3NTI1Njk6OTw4NzY2NDMyNTY4OTk2Njg4
-NjY1Njk5NzgyNTU2NjY2NTQ5OTM0NTY1NjY2NDM1NTUyNTQ2NjU1Nzc1OTk4Ojg2
-NjI2NTM1ODYzMzg2NDM2OjM0NDM1NDMzNDc2NDM0NDQyNDI0NDQ1NjQzNTk2NzY1
-NjU4NjY2NDEzNDc2NDU0NDY3MzU0NTg4NzUyNTI0NjUzMzM0NTY4NTk4NzY3OTU2
-NzQ0NjUzNDU3Njc2OTMyNTU1MzY2OTg0NjQ1NjY4NzY1MTI2NjgzNTQ3ODUzNTYz
-NDY3Nzk1MTY3NjU4NTc2MzQ0ODo6Ojg2OzYzNTY1NTY3Njs1MzQ4OTU2OjgyMzI1
-NTQ0NTY2NTU2NDM1NjM2MzY2NTYyNDk1NjY2Nzo3Nzg2ODY4Pjg4PDo9aGpPPFVv
-bVQ6OTo4OTU6Ojg5ODtAdXdpSkFFYV1WOjo4Ojk5ODw7Ojc4Ozk6PTk3Oz0+T2JB
-PDw8PT07Ozs+PD89Pz5BQT08QD07QUE8OTw+PUBBPTxBQj5DQz8/QD4+QD07ODc+
-Pz89PkE/PEZMPD1APj88Pj47Pzo6ODlARUJAQT1AQj8+QTw9QUJCP0RCQUE8QUVD
-Qj86Q0ZBQURCQ0NFSk1EQkRGRkdFSUtOTUdLS0lKR0pQT0pKTFRaVk9PSkxPSFRU
-V1ZTUExKb7jM1t3h5ebo6urq60xNUU1TWFRMSUxHRkhKSUVERkFCR05JQj5CQUJH
-SENEREJHST85PUNRTUE+PTxEREU+QD83Oz07QT48PTs9QEM9QkE9PTk7Ozo8PDo4
-ODY6Ozw7Ozk5PT9BPTk7PT9AODs8Pzw9PEA5Njk3Ojo5PDc3Ozs7Njg0NjU2Njg6
-ODU5Nzg2Ojg+Ozo3NjY6PDw5Ozs6OTw4ODg6Nzg6Ozo4Ojo5NzY3NTlBOjg5OTk3
-OTU1NTQ3OTc2OTg9OTY4Nzg3NTQ1Njg2OTk5OTY2ODY5ODczOjw7OTc5NzU6Nzc5
-Njw7ODc3NjQ4NzMzOjY2Nzc5OTk5PTgzMjg5ODs6PDY5SD06NTQ1NzM1PDY0NjU1
-OTc3ODk3Ojg2NTs9ODY3NTg4Njc3NTc2NTo5OTc0ODg4NTY2OTc5NTU1NTU1NTU5
-NzQ0MzEwMzQ4Njw1MzQ0NDc3NjY4NTQ2NTc6ODY0NTM0NjU1NDQzMzQ0ODY3Njc1
-MDIuMzIxMjQzMzM5ODYzMzM2NTU2NDc1MjU1NjU2ODk1Nzg1NTMzMDU2MTMzNDc0
-MTc0NDIyMzMyNTU2NTQ4NzM0NTc4NjczMjI2NTY1NTU4Ozk3NDQzMzc1NDU2NDQ3
-Mjg0MTI3MzM2NTY1NDExNDQ1NDQzMDQzNDMwMzUxMDAzMzIzNjc1MzEyNDQ2Nzk7
-OTg3ODY4ODMzNTY1NDMxMzU6NTYzMzMzMjM0NDQyNTU3NTM0NTY2NTY2MzQzNTI0
-MjI1NzU1NjM5NDQyNTU1NjM2NTU2MzQ2MzU2NzU1MzIyMTIzMTMzNDQ1ODIzNTQ0
-NDA0NTQ3ODY4NDUzNTIyMzUzMzMzMTc1NjY2PUM3NDQ1Njc3ODc2NDU1NTUyMzY2
-Nzg5Njg1NjY0NDMzNjo6NjU1Njc4Njg4NDg3NjU2OTs0NTY2ODg2OTo6Nzg3NzY1
-NjQ2Nzg1MzQ1Njc0NDU2ODM0NjMyNTQyNDo8Ojk7Ojk3Njk4NTQ1NTg1NjMyNzYz
-NTY0MzIzODc2NTU1NzQwMzM2NjUzNjY5OTc0NzczNzU0MzMzNDY2OzU5OTs4Nzc3
-Njc0NTc0NDQzMTM2NjY3NTY3NjU2MzQ0NTY1NzY1NDQ1MS8xMzU1MzI1Njc2MjY1
-NTM2NTc1NjU2Njc4Nzg3NjU2Njc4NTc1NzY0NTg5NTQ4OjM2ODc2NDg4NzY5ODY1
-NzY3NzQ3Ojg5NzU1Njg3ODg4NDY1NTM0MzU0MzIzNTk1NTEzNjQzMTM4ODc2Nzc2
-NTg4NTg4Nzc4ODg5Nzk5Nz12bltAXYlmQTc2OTw4Nzg6PDc4O2KCiXRCPmV2WUM4
-OTg6OTw4OTo4Ozg6PDo5Ozo5OEZVUj08PT5CPDs9Ozk7Ozs8PkFAODg4OTs7QTw7
-PT47PD5APD5CREE9QUJAQT48PztAPTg6O0NCPjo8PkdEPj4+Ojw9PDs6Oj0+Pj5A
-Ojs+Pjg5PD0/QTw7P0JAQkRDQ0REQ0RFRUJERD9DQ0NEQkNCQ0VISkpJSEZJSERG
-QUZMTkpHSktITE5NSk5XV1pSTlBOTU5QVFFUVU1xuMzW3eHk5ufq6uvrSk1NSlJL
-UVJTTUdJSUxIS0dERT5GTkxMTUhEQj1DQURCQkdFQEFCQD5CREJEQENCQT1CPj9D
-RkJAPzw6PD08Oz09RDw8Njs6Oz07ODY3NTM5PDo3Oj5APTw9OTs5Ojc3Nz08PD5A
-PUA+Pjw9P0A8Ozo4ODs5Ozg1OTc4OT09QT5AOzk7Ojg2OzY8PDk6ODhBOz44Ojs7
-NzU3Njk5Oj05OTc1OTg2PDw5PDk5ODU2Nzc2Njg6ODk3Nzg7Nzc6ODo3OTg3ODc3
-Njg2ODY3Ojw3ODs7OTg8Ojk5NTc2Njc4Nzs4Njc3ODY1NTExMjQ1NDg6NTc3NDU1
-Nzs6OjpAQD44ODk8NzYzNjo6Ojc0NTQ1PDU5Ozk4MzM5NTYyNTk4OTg2NDc0NjM0
-NTU1NTo6OjY2MzM1NDQ1NzgxMzc1ODY9NDY2NTQ1NTk6NzY1MjczMDY2NDU3NTY3
-NDQ5NzIzMjM3NDU3MzEyMzU0NzQyMjI1Njs2MzQzNDM1NjY1MzU1NTIxMjYyNTY4
-MjQ0MzQzNDc1Mzc3NjU0Nzc4NTM5NzgzMjIzNTQ0MzIzODU0ODY2NjU4NTQ0MzEy
-MjU3NTg4NjQ2NTQzNTE0NTU0NzQyNTQ2NzUzMjIzMTIzNDM0NTAzMzIxLC4uMTI3
-MzMzMTExNDQ0MTAwMDE2NTc6Nzk1NzY2NTU1NDQ0NjY1NTQ1NDU2Nzc3MzMyMTAy
-MzIxMDQ0NDY1NTQ1MjMzMjM1NDQ1NTc0NDg5NzYzNDY1NjQyNDQ2NDQ2MzIzNDQ1
-Njc5NjQ2NDM0MzM2NDM4Njc0NDU0MTQxNjQ3Njc2NzY0NjM2NDQ0Mzg1NzY3NTU2
-Njg2NTQ1ODY0NTYzMzY3NDU1NDc3NjY3NTQ1MjMzNDU1OTg1NzU4Nzg5OjczNDg5
-ODUzNDY2ODw9Ojc3Nzg2NTM2NzY0NTIyNjQzMzAzNzg3NDU1MzI2NDU0NTQxNTc5
-NjU4NTQzMzM1Nzg2NTY1Njg2Njc0MjIzMzYzNDQ3NjQzMjQ0NTQyMTMxNDY1Njg2
-NTM1NDU3NzY0MjI1NjQzNTU4OTcyNjc1MzEzMjI0MjU1NTMzNTY2NjU2NDM0NjQz
-MzQ0MzM1NzM3NzU3ODo1NTU4NDM2NTY0NDc5NTQ4ODg3OjQ2ODg5NDg0NDc2ODg3
-NjQ2OTs6PDo3NDQ1MzU0NjU2Nzc1NTc3NDQ0OTg2NTg2NTU1Njk3NzY0Njs8QkRF
-PzY4ODg0MjQ5OzY5ODc2Njk2MzUyNDY1NDQ2Nzk5NzY3NTU2OTg4PXWDZDxhfWpD
-Ozo5NjY2Njg6OTdJiZuOe09Wf4JPOzg4Nzc4Ozk5OTk3Ojc3Ozk7PDw/T1VBPj09
-PD0/Pj09Nzw8OTc9PTs7QTs5QUE+QD48QDw8QUE/PjlAQUA/Pj88PDxBQUNBOzw+
-Ozs7Ojg5Ojs5OTs7PUM/Pj49Oj9AQD0+OTk6Pz0/Q0E/PT1BQT9BRUNEQkFCRUZF
-Q0BDRkZCREJBREZHTUtLT1BKQ0ZISExOS0VHSUlIS0xISE1TTlZQVk9TUFBSTk1S
-UE1QUHi6zNXd4eTm6Onp6+tJSk1NSklRUFFSUklJSFJQSkhDQURHSkpJR0U/PUE9
-PEJEQkdGQT1AQD1AQ0VHQkZIRD8/P0NAQkQ+PEI9P0BCQD48PDk5OTk6PUNDPDo3
-Nzo8OzlBPTxCQT89Ojg2Njg6OTU7PUA+Ojs9Ojg+OTg5OTs1Nzs9PTo4Ojk2Nzc7
-Ojg4PT89Ojg4Oz47Ozg6ODk5ODY2Njk5ODw5ODo9PT08NTw7PDk5PT1AOTo3Ojg7
-NjY4OTo2NDIyNDU6OzY0Nj03NjU1NTo5OTc9OTc2ODo6OTo2MzU7PDk4NTk1Nzo6
-Nzg3NDo6ODg5OTQyNTY4NTo1Njg4NTY0NTY2Nzs8RD06ODg3NzYzNTM5NTYzNTU1
-NDg2NTg0NzI1NDc0NjU2NjMzMzQ2NzU4ODc9Njc2NjQyMDEvMTYzNDQyNTc3NzY1
-MzU1NTU2Ojg4ODc0NDc2NDQyNDc4ODU3Ojo6OTQxNDMzNjY2MzUyNC83MzUyMTY1
-NjI0ODc2MjQ3NjQ1NDMyNDIyNDU1NDM3NDQ2Nzc7MTY2NzQ1NTc1NTc2NTY0NjY1
-MTQ4OTY1MDM4Mjg3Njk5Njg3NDYzMzk3NjQ4OTMyNTU6NjY2MTM0NTUxMTY3OTU1
-NjUzMjMxMTMyMTMzMzIyMzMzMjIuMTQ1NDMxMjYzNTUzNTc4ODY2ODo1NTc0NDU2
-NzQ0NTUwMTQ0NDI0NjM2NzM0NDMwLzAxMTEvMjY2NjU1OTYzNDQ0NjU1Nzc3Nzg2
-OTc2NDY3NjY2NTM1ODc2NzY2NzU2NTg3NTQyMjIzMjM0NjY2MzM0NTw4NjQ0NDU1
-NDQ1NjQ1NjU0NDQ5NjEyMzU0ODc1MjQ0MzM1NTY2NDY2NzY3NTM3ODY2NzU2NjU2
-NjY4NzU4PDk4OTc1NjM1NTIyMzI0NjU1Nzg3NzQ3Ojg1NDU0NDg3PDo2NzkzNDM0
-MTIxNjg7NzY0NTY0NzQzNjg3NjgzNTQ5Njc4NjM2NDY2NTUzNDY2NjQzMDU2LjMz
-NTU0MjE1NTkzMzM4NTQ1ODc3NzU1MzczMjU2NjU4Nzg2NTUyMzQyNTY1NDc0MzM3
-NDM1NzU0NTU0NDMyNTY1NjM1NzY2MzMyMjMxNDQ0NjQ2NzQzNjY0NDU4NDk5NDU2
-NDQ2MjU0NTg2NTU5Nzo6NDQ0ODg4ODY2ODc1NDY6OTc4NTQ4OTg5Ozc3OTo4NTY2
-Nzg1NTk2NDc1NDQ5PEI2ODtAQT03Njo5ODY2Nzg1NTg5NzI0NzY3NDY1Njo4NDc0
-NDc3ODg2NTQzNTg5ODs/a29jQFx4aEc6PD04Nzs6OTY3Q3uOpaikhYCFhEU8PDk3
-NTo8Ozs7Ojk3Njg8PT48PEZSUEA8PDw7QD0+OTo6Oj07Pj8+PEJBPDs8QkY/Pj5A
-Q0FDQT09QDk3PTw7PT48O0FEQDw9QD8+P0A9PUA/PDo7PUFDRUU9PUA/Pj9HRTw8
-PTs/QUBCQkA7PkBBQ0JCQkFDRUZFREE8PEFHREBEQkZDSUVHS0xPTkdHSUtDSUpN
-SEZIS01JTEtMTExSU1FWX1lPUVJMUE9QUltUernL1tvg5ebn6ers61ROS0xMTlBP
-T1VSSElIS0lLSEBAQUNIREZIRUZDR0FEREVGRkhDPUNDOz1FQUVEQkFBQD1DQEJA
-QTk7Ozo8QT87OTg4PTo7Oz48PkJAPjg8PD47PT45Oz0/Ozs8PTw5OTg4Njg2Ojo+
-Qjs7Ojg9QDY1ODk4Ojs4Ozo4PTg6Nzg5OTs4NzY4OTU1OTg5Nzk6OTk6Njg3OTk6
-Nzs7PDxBPTs6Ojo5Ozo7OTg3ODs9P0I4OTY4Nzo2NDc3OTk4Nzc3ODg1NjY0Nzo7
-OTg6NTQ0NTc1NjY5ODc4NTU2NTU0MzM0NDc6Ojg8Njc1NDczNzc5Nzk5Ojk1NzY2
-Nzc1NTo3Ojg4OTo5NjQ1NzU2NjQzNzU3NDU2Njg2ODY1NjM1NjI0NjQ1NjI1MzU2
-ODMzNDU3ODY3NDk5OzM3NDM3NTM1NjU2NDQzMzc3Njc1Njc5ODo4OTc3ODM3Nzc0
-NDI4ODU0MjY0NDQyNDczMzI2NDM1MjY2NTY1NTg2NzUzNTU1MDAyMTQ0MTI1NjU1
-NjY2NTo9NjU0NjQ5Ozc0MjQ2NzYzNjc5OzY6NjU6NTg2NDc4OTg5NjY2NDc3NDY2
-NzY2NzUyMTM0NDY0MDI1NDM0NTg2MzM2NzczMjU3NjMxMTMzMzE0MjMyMjEyNjQ1
-MDQ4NjU1MzQ5Ojk3OjQ0NTU3Njg2NDc3NzYzNDMvMDE1NzY0NDcxMjEzMzMvMzYz
-MzMvMjI1NDY1NDQ1NDY1NTQ1ODU3ODU1NTc1MjU1NjQ2NTY2NDQ1NTg2MjQ0NDY1
-NDMvMjE0OTk4Njk2NTQ3Nzc5NjQ2NjczMzE1NTg0NTc0NzQ3OTUyNTQ0NjY2Njc2
-NzQzMDIzNDczNjQ2NjQ1ODg3NzY0NDg5ODUzMjY0NTg4NTI1NzY0NjcyOTc1NDU2
-OTg3Ozs7OTg4NDQ2NTg4ODc1Njc0NTI1NDc1ODc2ODU2NTQ4Njc1NjU2Nzk3Nzc3
-Njk1NzY1NTU3NjQ2MTEzMjQ0NDI3MjI0Mzc1NDQ1MzUzMjM0NTU2NTg5OjY3ODU0
-NzY3Nzc2NTk3NjU2NDQ1MzM1NjQ2Mzc1NzMzNjc4NjQyMzMzMzUzOjY2NzU0NDc2
-NDM0Mzc0NjM0NTU5NDU0Njc5MzU2Nzc2NjU4NjUzNTY3ODg4NjI0Nzc0NDc5OTg4
-NjU4Ojg2Nzs3Nzc0ODg6NzY1ODg3MzU0OzY3NzM1NTU2NTg4OkFDQz83NTYyNjc2
-Nzc5OTc2Ojw6PzczNTk4ODk1PD47Ojk1Nzg4Ojg2OTU2Nzc6ODxicXRMT2tdRUE7
-Nzs6Ojs7Ozxpk5imnZ6rlHNQPj47Pjs4Oj08PDs6OTk7OjY3Oz5BSk49PjxCQz45
-Oz48PTg4Ojw6Ojw9PT0/Pz1BQ0RBRUVHRkJBQUNEQ0BAQEE+QkFCQkFAPjxAPkNA
-Q0REQD49QD1DQUJEQz5DQEE+PTw9PEE9QEJBPz5CQUFAQEBAQERCQURHTUVHR0dK
-R0NCQERBS0dGR0lDRkpJRUVIRkhKR0tDR0pLSVFOS0tSVFNLUFJQV1ZVUFBOUlpV
-TU11ucvU2+Hk5+fp6urrSlBRTFFJR0xJSk9HRUVGRUtGRUNDSUZEREdGRkRDQkdG
-P0RHRUFBQUNLPkFAQ0Y/QUI/PTs7Pzw7PkA+Pj0+Ozw5Pj9CPjxCPz5AQ0I9Ozs8
-Qz9BPjs5Nzw1PDw+QDw5ODY4Nzw8Pz09OTk5OD06Ozk6OTM4Oj07PT47ODc9OTg6
-PTg5NzU1Ojo6PDo5ODo7Pzs/PDk3OTo3OTo7OTo8Ozs8Ozk6OTo6Njg3ODs6PDg4
-Nzg6OTk6Oz47ODY4NzY3Nzk6PTg6Ojo5Ojg4NTQ2NTg4NzQ4OTo6Nzs6NTk1Njg3
-Ojs9OTcyNTc7Ozc2Njk3NDQyNDY1NTc4NjQ1NTg3OTc3Nzg3ODc4Nzc3OjQ3ODc1
-NjY1NTQzMjY1OTg4Ozo+Ozk1NzQ7OTczMTQ1NDc5OjcyNzg1NzYzMzIzMTIzMzMy
-NDg3MjAvMTE1NjU1Oj06ODg0NzY2NzY3NDU3NTU6NDMyNDg2MzE0MzIwNjU2NjQ3
-Njc2ODo2MjIxNTU0NTQ2NDEyNDQ4NDQ0NTE0Njc0NDY2MzQ0NzQzNTY2NTQzMzEy
-NzY1NjY4NTU0ODU2ODk2NjU6PDo4NDYyNjc2NjEzMTQzNDUyNDU0NDQzNTM5OTQz
-MjU3NjU3NDMzNTAwMjQ0NTE0MzQ1NTM2NDE3NzMzNDQyMjM2NTQ1NTY2NDQ0MTQ0
-NjMxNTIyMzM1NzQxMjMwMDIyMTQxMzQ2Nzc0My4xNTQ0MzIyMjU2OTY0ODU4ODYz
-Njg2NzQ0NTY0MDQzNjgzLy8vLzM0NzIzMjExMzQzNzUzNjY1NDU1NjUyMjY0Mzcz
-MjExMTMzNTU1Nzc1NTUzMzMyNDQ1NDU1NjQxMTg0NTUzNDQ1NTc2OTU4Njc2NDY0
-MzY1MjU0NDM0MzM1Njg2NDg3NzU2Njc3ODY2NDM1OTk0NTQ0ODo3Nzg2Nzc2NTQz
-MzM3NTU0Njc2NTQ1NTc3Nzc2NDY2ODY2NjQ0NDUyMjM2ODU2NDM0NDczNDk3ODU3
-ODk3NTQzNDQ0MzM0NTI1Njc2NDc0Njg0ODY0NTY3NTM0OTg2NDUzMjQ3ODM1Nzc3
-ODU3NzY0NDUyMjMxNDM4ODUxMTQyNDU1NjU0NDUzMTM2Njg2NzY2NDM1NTU3NjU2
-ODc0Njw1MzQ2Nzk1NjU4ODY2OTo3Nzs5Nzk1NDc3Nzs5Nzk4NzY4NTc5Nzk6ODYz
-OTk8ODY1NjUzNTY0Nzo6Njg3Njc4OTo3Njc5OTk2ODc6PTM1Nzg3Nzg5OTg3NTY2
-ODg2NDQ1ODw5ODg3OlFxY0VMfVlCODc2Njc5ODo9WX+Ah6Gbl5yIWUNCQkA9PTk5
-OTk3OTo8Nzs8PDs3PEpLQj5BQ0BBPkE7OkE+PT0+QDw9PkA/Qz1APUNGPj9EQj9C
-Q0FDRUVBQ0A/PEZEPT48PT9CQz5BRD5AP0FDQUA8QD9CQD88Pz89Ozs9PDw7PDs+
-QD49ODlAQz1BR0RBREREREg/QEM9QT9AQUBBQT9ERURIRERMTUZLSEZCSklNUVNL
-RUpIS0xNSE1KSEtNUlJSUVRVVVdSXVhPSne7y9Xc4eTl5+nq6upJTVJOUEZGSU1K
-TE9VTk1KSUdGSEdGR0ZBPz1FSUNEQEZIR0hAREM8OkJERD4+QDw7Pzo9SEA8O0BA
-Q0A9OT06Nzg+QEJBPUBAQEE9PD07PDs/PDc6Oz87Ozs4Ozk7Nzg5Oz48Ojs5OT89
-Pzo7PDo7PDo8ODo3OTs+Ozc4Ojo8ODs4Njg2Mzc3ODk6Oj46OTk7PkQ8OD4+OTc2
-OTo4NjlAPjs4ODk3Ozo5ODg5PDs6NTg2OTg4NzQ4Njc1NzY2Nzk3Nzc4PDw6ODs7
-PDs5Njc3Ojo7PDc5PDo4Nzk0Oz05PDg5NzY4ODk3NTY4Ozk3NTMzNjk7NDU0MjM0
-NTQ1NTQ3ODo3NDk4OTw4Nzg1Nzg4NTY1Njc1NTMyNDg3OTY1Njk4NjY0NjI2NTI0
-NTQ0MzM2Nzg4NTc3NDQ3Njg2Nzg1OTU1NzY5OTYzNjg2ODk5Ojo2NzQ2Njc5NzYu
-Lzg4NzY3NzIwMzUyNDQ2MzQ0MzI2NDM1NDg3NTY2NjQyMzQ2NTg2NTY3NjY4NzM0
-NTM2NzQzNTM1ODQ0NDU0NTY3NjY2NjQ0NTMwMDM0MTU1MzAzNTY1NzY3Nzo3NjM0
-NjU0NTU0MDU2NjM0NTg1NTY1MjM3NjMxMDIzNjU1NTMzNTo1MjIzNDQ4NjQ3ODo6
-NTUzNDEzMzIwMjMxNDU1ODY1NjQzNDQzNDU2Ojc1NTIzMS8xMjMzNTQ2ODo2MjQ0
-MzM1MzIuNjQwMzQzNjY2NjQzNTM0NDY2NjcxMjQ0NDM1MTQ1NjY0NDQyNjMzMzMz
-MjQ0MTQ1NDMxMDM1NDU0NDU2NDM2NTU2MTQ0MjYzNDQ0NTc4NTMzMzU1Njc0NTcz
-NjQ0NTQyMzQyNjU2NDM1OjY1NTk2NTU1NjMyNjo2NTI2NzU2NjY1NDc0Nzc5NzUy
-NTc3NDYzMjY5Nzc2NDg6ODc4NzY5NzY1NzQ1NDY3Njc3NjU2NDc0MzQ2NTQ0NDY0
-MzQyMjI0NDI0ODo3NTU1NzY2OTg1NDIzNjQxMzQzNTg0NjY1OjY3NjQ0NjQ3Nzc2
-NDU1NTQ0NjQzNTU4NTMyMDEzNjQ2ODc1NjY6NzY2NjQ2NTY4Nzc7NjMzMi4zNTU2
-MzM2Ojk2MzI1NTU2Nzs3MDEyNDY3ODU3Mzc3OTM1NTc3ODo4NzU0NjU2Njo4ODk4
-NzIzNjc3Njg6Mzc3NzQ1Njc4ODo3Njc6Nzo4Njg3Nzg3NDQzMzYyNDc5Njk5NjY3
-ODY2MTY3ODg4ODU0NzU3Njg5Nzk6ODk1NTo5ODk3ODk3NTc7S3JuTFNnWj02OTk4
-PDw+RFmKiFVOfpWZhVtCQkNBP0JBQjw9ODo4Njc5Ozs3NjdBT1dBPUFCQDw7PkBA
-QD1BPTs/Qj85PUJARD09PTw9RUdFQDo9QkJBPkA7Pj4/QUM/QUE7O0FCQ0FBPzo/
-QEA+QTw9PkBAPjw8Pj08Oj07QUJEQj08Qz43OEJIQEI9RERDQT5BQEJCQkNBQ0NF
-RUZIRUVHQ0VGS0lNS0dFQ0ZISE1IT1RQSE9OT05NTlFUUUxNU1FTT1NVVVNTX1dO
-b7vL1dzg5Obo6err6k5JRUdIS0ZLUUxIS1BOTUtHSUhJSkhHQj9MSERCREM8Q0pL
-Qj9APT07PEFBRkI6PT08Oz4+QD0/RURBPTs/Ozg7QT1AQEA9PD49PTw7Qjs9OTs8
-Pjw6Oj08Pjw8PDhAQTw9Nzk/Ojs8PTxBOzo8OTs6Pzk6OkA+Pzs8Oj08PD1AOjk5
-NzU4NTQzOzs+Oj88Pzw9PD48Ojk4Nzo7QDc5Ojo7Nzk6Ojc4Ojg5NjU6ODg3ODk8
-Ojs7PDk4OTY4OTk8ODs3Njs3Ozc1ODk8Ojo8Ozg3Ojk3OTg7PDo4OD47Ojs8Ojk2
-ODg6ODU3NTg4NzU2NDc2OT04MzM1NDM6NjQ0ODY2ODczNTU3Pjw5ODg3NjY1Njc6
-Nzg2NzUwMzc6NTIyNTM0NjM0NTU3NzY1MjIyNTM1Nzg4NjUzNDY3MTc4NjczODg1
-NDY1NTQzMzQ3OT85NzUzNjc3NzU3OkA7Njo9NjIzNjUyMjU2NTUzNDc2NTg5NDM0
-NTQ0NDY0NDc2MzU1NTY2NzY2MjU1NTQ0NTU1ODM0NDA0NDU2OTQwMzU0MzY5ODU2
-OTc1NTg5NTEyMTQ0NDY5NjYzNDQ0Njg5NzY1NTc0NDg4NDU1Nzg0NDg0MjI1NjY2
-OTQyNDc3MzM1NDI0MTE1NDY0MTMyNDc3MzI0NDc0NTMxMzI1NTY3NDQ1NDU0NTc3
-ODQ1ODQyMzQzNDQ1NDUwNDY0NTU0MzU0MzIzNTY2MTA0NjY2ODk4NTU1NzMyMzg5
-NjIvMDEyNDIyMzM0Nzc0NjM3NzQxNDU2NTIxMzQyMDI0MjQ0NTU1ODg2NzY3NDc0
-NDIwMTM0NTU1NTQyNDU2OTY3Njk3MzQ3NzkzMzQyMTU3NDQ3NjQ3NjUzMjQzMzUx
-NDYzNTI0NTg2NzU2NDQ2Ojc2NzU1Njc2MzQ2NTY4Nzc4ODg4NDQ1NTM2NzY1NDEz
-MzEzNTU0ODg1Njc0NTY0NzY1NDc2NTY1MTU0NDQzNTQzODY3NjY3NTM1NjY0NDYz
-NDQxMzIzNDY0NDU3NjUzNzY2NjQ1MjIyNTYzMS80NzU0ODc1NDQzMzY4ODQ0NjY1
-NjY0NDI3Nzk5NzM0NjYyMzMyMzI1OTc6ODU2NzU2MTMzNTM1NDc5Ojc2NTU2Njc6
-NTQzNTY2ODc5OTY3NTc0NTY2ODYzNTg7ODU4NTY3NzQ1MzIzNDU2Mzc5NjU1ODY3
-Njc3Njc6NTU2NzY2OT01NTY2Nzg3ODg3NzQ1MTY2Njg5NjMzNjc2NjY4Nz42OTQ1
-Njk7Ozo3NzY3OThDfXheXoZsPzs+OjxPcX+Clp92RkBJdnFVPDg8OTo8PDw+OTg5
-PDk3OTo9PDk5OklTRj8+Pj49ODk6Ozs4Nzo7PDg5Ozk7QENDPT4+PDw9QDs+QT49
-Pj8+PT0+QkI9Oz8+P0JAPkNBQ0hDQ0A9QD5BPjs9QkFCQDw/QEM+PD48PEBAQUFC
-Qz8+P0FGQ0JBP0BAQURBQEBFRkRHRUdDREFIRUJDSUlLT0lISEhNS05KSUhNUE9K
-TUpOT0tMSlFSTEpUTFNQUldWWVVVVE9xuszW3ODl5+jq6evrTlNPR0tLSEpISkpN
-SUpOS0lPUE1PTUJASEtJSkhDQDxCQkZJRD9EPz9BPkFDSj86Pzw8PkA8O0FNPUFA
-PT06OkA9Ozk5ODs6Njo9ODw8Ojg4Ojw9PD86Pzk2N0Q6PTk5OTs9ODg6Ojo6Ojs5
-OTc3NzY4Ozw3PTs8Nzg4OTs+PDk9Ozw6OzU1Njw5OjY6Oj8/Ozg3Oz9APT0+PEI7
-PTo3PDw6ODk6ODs7ODY2NTo5ODc1Nzw8Ozo5Ozk7OTc6Pzw4OTo4Njg0NzY1NzU4
-PDk4Pzw2NzU3NjU4NjczNDY4OTY5Ozc2Ojc4NTc0ODk4NTc4OT86Pjk4Ojo+ODk4
-NTk4NzU2Mjo2NThHOTU3ODU2OTc4OzY6Ozc5NTc4NjY1Nzg4NjYzMzM3NzYyODo1
-NDM0NjkzNTQzNDI3ODg8NTg2QDs0NTE2NDEwMjQ0MjU3NTU2NjY5OTc2MzQ4NTc2
-NzY5ODg1MzQ0NTcyMzY2ODUzNjc4NzUzMzU0MjU2NTQ0ODU2MjEzNjU1NDQ1NDQ5
-NjIzMzEyNzEyODc2NTc3MzcyNDY1NTg6NjU1NjYzNDU2MzM1Nzk2NjY1OTY2ODQ2
-NDY3NDc0MjE0NDY2NjY5NzQ2MzQ3Nzk2NjMzMzg5NjQwMTE0NDU0MjQ2MTQzMTI1
-NDUzMDMyMTU0MjIyMzM1NTU1NTIyNDM0NTU3NDQzNTc4Ojg1NDM0NDQ2NTUzMzIz
-Nzc4MzMxNjU2NjY0MzQzNzY0ODg2NDY0MzczNjM1NTg1NDQzNjU4OjQ0Mjc4NzU1
-MzIxMDI3NzY3NjQ1NDQ3NTQyMzU6OzU1NTQxNTMzNTQ1NjU1Mzc4NzY3PDUzMjE1
-NTcyNDc3MjU0MzI2NTQ2NjIyNTU2MjIzNDQ1NzU2Nzc1NzU3Njg5NTQ1NDQ0NTg2
-NjY1MjM0NzU2NTQ0NTQwMTU1NTM3OTU2OTY1NDU1NDU2NjU3OTc0Njc3NjYyMTYz
-NDI0MDQ2Nzw2Njg5ODU2OTU3Mzc3NzU4NjMzNDU1MzEzNTg2Nzk3NzU2ODY3NzUy
-NTIyNTM0Mjg4OTk0NjU3NjI1NzY2NDM3NTY2OTc1NjQ2Nzc1Nzc2ODYyNTQ2ODYz
-NTY0MTY4NDc7NzUyNjg5NzUxNDY2NjQzNzY2OTg3NTU2NTQ0NTk3Nzk5NjY3NjQ6
-Ojk4ODU2ODY7NjU4PTc0ODc1NjQ1Njg4ODY4Nzc2NTc3Njk6Ojs2NzY1NTU1ODg4
-NTQyNDM5ODo5OTc2NzQ0ODc2NTc1ODg3NTM1ODY4Ojo5Nj96dnZahl89OUBFapKu
-saSbj149Pj5AQD4/PTw8OTY6ODk5Nzk3ODc2OTo8Oj9JS0c+QUE+Ozo8PD08OTk6
-O0E8Ojo+Pz8/QT1DQT09Oj08PTg9QkJAPzw8QD9DQD5BPjo7Pj0+Pj1DPD5EPj1B
-Q0A+PT5APz49Q0M+PUE9OzxBPT07OkBCREQ/PUBCQ0RAP0NCR0VEQEBCS01FTEhI
-TEhFR0ZDTk9LS0dLS05QTEtJSktLUU9NUVFVU05MVFNRT09QUlBTUVtcXlVUUHa5
-y9Xc4OPl6Onp6utUUU1PRklJSkpMS0xOTUhJTEZGREhJR0pLTFBKRUZLRUBBPz5D
-Q0RCQEE+RUFDR0E8Pz06Ozs8REA/RENBQT88QT5APDs7PT88OD4/PDk2Njk+PT9A
-Oz49PDs9PTs6OkFAPD49ODgzNzc3OTg0MjM2ODc4Nzg6Njk3Njo/Ojc5OTk8OTk8
-PDw6Ozk5ODc5OTo2PD08PT1DQjs7OTs4Ojs4OTs8QT48PT06Ojo2NDY3Njs3Njg4
-Ojk6PDg5TEY/Ojk2ODY5Oz05ODY4ODg3PDc7Nzc7Ojc0NzY6PkA4Nzk0NzY4OTg2
-Nzg8Ojg1ODo4Qzg5ODo7OTc2Njw2Mzc2NTg3ODg6Ojg4NDc5NTU4OTc2ODg1NTk+
-Ojk4OD04MjU2NTQ3NzY4NTc0NTU3NTg1MzI0NjIzNDQzOzo4Njo2NzUzNTM2NjMz
-MjQ0LzI2Njc1MzU2Nj02NDQ1MjU2NDI2NzM2NjU3NDU1Nzg2Mzc2MzM0NTk5NjU2
-NDU4NTYyNTQ0MjIzMjM0MzM7NjU0NDk3ODU4ODU0NDg3MzU2NDYzNDQzMzg3OTU6
-OjU1Ojk3NjI1NjQyNTU0Njg6NDc1NzU3NTU1NDM2NjY0NjY2NDM0Nzg3NzU2NjY5
-NDIyNjU0NDMyMDIwMDE0MzI0NTU1MDM0MTMyMjIwMDQ1MjA0NjQ1MzQ1NjMxMzI2
-NjY1Njc3NjY4NzMzNTUzNjgzNDY1MzMxMzAwNTQ0NjQzNTY2NTY3NTQwNDU1NDQz
-NjU2NDU1Nzc0NTc1NzU1NDMyNDU3ODY0NTU2NTY3NzU2MzQvMjMzMTM1NDU4OzU0
-NjYzMzM0NDU0NTMzNDQzNDUyMjY4Nzc1NTQ1Nz04NTYzNzI1NDM2NDU1MjUzNDQz
-NTQyNTc2OTk2NzY1NzMzNTU1NDU3OTg7NTY0Njc3OjU0NjY3ODo5NTI1MzE1NzU4
-OTs4NDk1NjY0MTI0NDY3Nzc4NzsyMzk1ODQ4NTQ0NjY3ODg2ODMxMzY6ODc9Nzk4
-NTY2NjYzNDc4NTY4ODY4Njc3NTk4ODkzNTczNDk6Njg3Njk7OTg0NTU2ODc2MzQ1
-NDg4ODs6Njc2NDU4OTU1NTgyMTU1Njs3Njg2NDc3ODc4Ojc4NzUzMjM1MzY3NDQw
-MjM0NTQ0Nzc2ODU2Oj04ODg5Ojk5ODk6Ozg6ODo4OTc5OTk6ODc0NDQ1NTU2ODc1
-MzQ2OTc4NjY7Ojk5OTg2NjQ0NjQ4NjY6NTM1NTQ0Nzo4ODU1NDU2Njo4ODg2NjU3
-NTc3NzU3Nzc3OnmMiWNeTjw6O3ScsLavqZNiQTw8Ozw9Ojk6OTg3NTM3ODk4Ojg3
-Ozs7Ojk9P0dMSUE6ODg7Oz9AOzk9Ojo+PTs8PDw9Pj5APT8+Pz08Pjw8Ozk8QEA+
-PkFCPjw7QEE+Pjo4PD49Pz4/PD4/QEc+QEA8PUE9PTw6PEBDREJAOkA8QEFAPkNH
-Q0JAREREQz5BRENESUZDQUZMSUNCR0dJQkFDQ0JHTklHSEtHSExKRkNGSlVTUFFN
-TFNYUUtNUlFLTlVUVFVXXFlVVlBOc7jK1d3g4+bn6Onr61FNTUhISU9MTEtMR0hJ
-S01KTEdHRkxKRU1HSElHRkRBQ0VCREE+QkI/Q0A+PUNCSUQ5Ojw8RDw9QUJBQT1C
-PTo8OjtAQDw8PUA9Ozw+Ojg5Ozw5Ozo+PT08PTw8Ozc6Ozo9QEU7Nzg0Nzo4Nzg4
-NjU4NTU3ODY3ODg5Ojw5ODo7Ozo6Njc/OTo4ODk5Njc3NDc4OTY3Nzw4PDg2Ozo7
-OTU3Nzk6ODY4Ojo+QDo3Nzo5ODk3OTg1ODg7Ozk2Ozw9PDg3ODo7Ozo8Oz46NzM1
-Njc3Nzo5NjY3Njk4Ojc1Njc3ODg4Njg7ODo5Ojc4NztEODs0NTg3NDU4OjU3NjM2
-NDg6OTk6NDY4Njc5NDk6Nzg3Njo3Ozg2ODg4Nzc6NTM1NDc1Nzc2NTg3OjU2NjQz
-NTMxNzI3NTM0NTM2MzIyNDI3ODY4NTM2MjUyNDc0Nzk3NDQ3NDI0NDQ1Nzo3NTQ3
-NjYyNDk2NDc2NTk1MTI2NDA2NzUzOjM2NTU2NTI1ODU2ODg1NTk1MjkzNDY0NTc4
-Ojg4MzU0MzQ3NDc2NDU3NjQzNzg3OTo6OTY0NjYyMzA1NjY3MzU2NTU2NTc0NTQ1
-NjY0MjUzMzY2NDc4NDMzNDU1NzU3NzY3NjUzNzg0NTQ0MzU0NjM2NDUzNzg4MjI0
-MjQwNTMyLzE3NzQ2NDM0MzQzNTI0NDI0NDQyMDMzNDI0OTc3Njk2NzczMzE0NDI1
-Njg1NTI0MzU1NTY3NDIyMTIxNDY1Njk2NjUxNTU1MzM1NjUyMzExMzUxMzAzNDk1
-NDY1Njg3ODQ0NTg0MjIzNDUyNTczNzo8ODU0MzYzNjg3NDM1NDQyMjM1Nzc1NjQ3
-OTk5Njo3Njc2NTQ0NDU0ODU3Nzc1NDc3NTM3OTo8NzM2MzM0ODc3NTUyNjU1NDg2
-Njs4OTczMjczNjQ3ODY2Njg7OTY4Njc3NjY0NDU0NDk1NC8yMzU4OTY1NDg2Nzc3
-OTUzNDM0NDQ2Njc2NTM0MTI2Nzk2NDM1Njc3Njk6Nzc5ODg4Nzk3NTU5NjU1Nzk7
-NzQ0NjY2OTg0NTg2NTU4NjU2NTQ0MzU0Njo4Nzc2NTc5Nzc1ODk4NzU0MTY1NTc4
-Oz9BOTg3ODU1NTY3OTg1NTQ0NDU2NDM2NjM1NDY3Njo6Ojs3NzY2NjU3ODg1NTU7
-Ozc3NTc6NzY5Ozw6ODY1Njk4Njk7ODg7OTc1NDQzNTk3Nzo4NjU3ODM0NDM1Njc7
-PDc5Ojg0NDk2NTQ4OTY2NjY2NzU0ODg5OTQ2Njc5PTw6W4WAgmhNPDxVm665t62P
-Xz07O0w9ODg6PDs5OTg5Pjk4ODg4OTg4PDw8OjpES04/QEA9Nzs8PT48PDk4Nzg4
-QD1APz0+QD89QD9AREdIQ0A+QEM9PDk6QkVAQDo/RT9APz08PURERENCOz1CQT9B
-Pz5AQj04PDg8RENCRT42PEA8PT07PDo9Q0E9Pz5APj5GSENCRUBIS0lBQURARklG
-RUBHQz9GSEVFRkhAQUVGTExKSk5TUElKTU5ISUpSVFBPT1FSWVtcXVlVTVBvtMrV
-3OHk5ufp6urqT09JUFBRTExISkZGSUJJQ0dJTE1CQ0pGRkdIRkVGR0VBREVDPDw8
-QDs8OUFBQj5AREI9REI/Qj5BQEE+PT4+Qj5CPkBBQEA9PTw9QT09QT8+QT89QD49
-OTk5OTk5OTY3NDc7Ojw6ODg4PDk5OTw4NzpBPDs6NjU4OTk1OD06OTk6Nzg8Ozo8
-Ozk8ODc5Oj45OTc5Ojk4Nzg5PEE8PTg6ODk2NTc5NzY2OTpBPTc2Oj88QDs3Nzc5
-OTs7Ojk4ODk9OTQ6Nzg4ODg3NTY3MTg3ODY1Njk6OjQ2Nzg7ODY4OTg6NTg3ODc3
-Pjk5ODc2OTg2NDQ1ODc5NzYzMjY1NjU3Nzc0NTo2NzM0NDQ3Ojo4NDU3ODU3OTg2
-Njo5OTs5NDU1ODc2NTQ5NzY8Pzs0OTc1MzQ2ODY1NDQ1NzI1NTU1OTo8NjIxMDQ5
-Nzo0Njc6NzM1NTQyMjI0NTY+Njc0MTU2NTY0OTc4NTU5Ozc1Mzc4MjM3NzU0OTc0
-NDU1NDY3NzM2Nzs3NDY4MTIxNTUzMjc4OTYzMzIzNDQ0NjI1NDU4NTMzNTU2NzY2
-ODQ2NDY0MzE0NDI0NjYyMjMzNTQ0NTU2MzY4NTUyMjQ2NzU2NzUyMjM0NDQ1NTQ2
-NDM1MzExMzQyMzMyMS8zMzc4NzU1NDYyMjUxNTYyMDQ3NjYyNDc3NTM1NTU1ODk1
-NDc3NjQxMDIxMjIzNTQzMzMyMzI2NDI1MzM1NjM0Njg2NTY0NTU0MTQyMTQ0MzY0
-NDU0NTI3NTUyMTA2MjAzMjMyMTE1NTY0NTM3NTU3NDYzNTU0NTQ0MzQ4NjY1NjU2
-NzYzMzQ2NjY5NjUzMzY2NjY2NTY2NTY3ODk3NDU4NzQ0NDY4OTk1NTo4OTg2N0ZE
-QkE9Nzg0MzM2OTg7OjY0NzY2MjIzMzQzNDM4NDQ3NjU5Ojc3OTg1Ozg5Njc7OTQ1
-NjUzNDQ6NTQ1NTY0NjY2NzU0NDU3NjUyNTQ0Njc4NDQ2NjY4NjIyMzIyNDY4OTk4
-NTIzNT85Ojs2Nzc4Njc0NzY0OTo6ODc2ODY2NTk1NjQ4NTM2OTc1ODY0Nzk3ODc2
-Njg4NjgzMzY1NDQ3NzU3ODY1Nzg4OTw/PDs7Njo2Njs4NDMzNjc1NDY2Nzk6Ozcz
-NDM2NDM2Nzg2ODk4ODc0NTQ2NTU4Nzk6OTs7Nzg6ODU1Njc3OTo4OTg4OTg3Nzc3
-NjY0NjQyMjQ0NjQ1NDU1OjczMjU1NTU3NTY2MzY0NDc4Ojk0NTo3NjY4NTY1NjY2
-OTs5ODc4ODpkkJOJcU85OVifsrCoomQ9Ojg6Pzk6ODs/Pjw4Nzo5Ojk6PTw5Ojs4
-OT47PUdLQz46PD5CPj48Ozs6Ojc3NTk7PEA8PTw+Qz45Ozo/Q0ZHRD09PDxAPkA9
-Pz9CPDs8Pj49PkFDRUNBQENEQT0/QD1AREM/PD08PkJEQ0NARUQ+PUFAPUI8PD5B
-PkFARD48Qj9BPT9DRUVMSkhCQUZFRUxKRENHRkFERkVFTE1KSUpPUVBPTUtJUFVM
-S0hKSUxRUE9SVlNUVlpXVFJIUXe3zNXd4eTl6Onq6+pSU1VRRkdJR0ZMR0lIS0tG
-S0xISUlESEdGSERFQUVIR0ZDQkA7ODtAPTxBREA/QD1DRUI9PkBFQz48QD09Pj0+
-OT04Pjk+PT1APTs8P0A4Pj43OTxBOjg7PDw7PT47PTY5NzY5PTw5Ozg3Ojo9Ojs6
-Oz48ODc1OTg6Njg2ODk3Njc2Nzo8PUE7PD06NjY4Ojs6Ojs5OTo6Ojo8Oj85Nzs8
-Ojo5Ojo6ODc8PT08PTo7PTw6PTg2Ozg2Nzk5Ojo4OTk6PDk4Njc5ODg6OTU4Nzo6
-ODU1NDdANzc3Ojk3OTo6ODU6Nzo6OTg5ODg5OzU3NTU0NTc1NTI0NTo3NjY4Nzg3
-NTk4OTk3NDU2Ojc7PDY3ODY3Ozw4Nzg2Nzs3Nzc2NTY2NDg1NjU2Nzg3MjQ1Njc1
-NzY2NTY5NTM0MzE0NTc6NDY3MTM2MzU5OTo3NjQyMzQ2Nzc6NzU0NTY2NjMzNjc1
-NTk5ODQ0NTUyNDY2Nzc9Njc3ODUzNTQ0NTc2NDU4NjY3MzY1NTM0NDIxMjc0MzI3
-NDQzMjQ0NDU0Pzc8Nzk2Njg2Nzc2Njg2NjU2OTc1NTY2ODQyMTE0Njg1Nzc0NjU1
-NDY1NjQzMzY2NzY3NjU0NTQ6NjMzNTYzNDUyNjMyMzI0MjM0MzU2NjQ3MzY0MjQ2
-NDU4NjQ1NTYzMDM1NTY0MTM0NjMzMjQ1NTY1MjEwMDQ2NTM3NTY2MjMyMjE1MTQ2
-MzM3NzQyMjU3NzY1NTU0NDMzNDEyMzU0NjY0NDU1NTQ2MzI1NTU0NjU4MjQ2MzM2
-NTU1MzQyMzU3NzY5NDQ2NDI2NjU1NjU1NTYyNDQyNTY4NDc5MzU3NjMyNTY3NzU1
-Njg5NzQ2NDU3NzY1NjY5MzU1Nzc0NDI2NDQ2NDc5NTY1NjQ2NTs6Ojc4NDQzMzU1
-NDU0NDM2Njc3NTY6PDw4Nzg4Nzk2MjI0MjEyMjM0Njg5MzMzNDY2NDQyMzQ1NzUz
-ODc2Njc3Nzg4NDQ3Njg2NDQ1MjY3Ozo5OTw3Mjc3Nzg5NDQ4OTg1Nj08ODU5NDY5
-NjY5NzQ0NTc8Ojg2NTU1NzY3ODg3PDg5ODc0Mzg4NDMzNTY3OTg3ODc4NzU1ODY0
-ODg3Njc4NjY6Ozg5Nzc3OTo5Ojs4Ozg3NzU1NDc1Njc0Njc2Nzc5OTc4NTU2Nzk3
-Nzo5Ozs5Nzc0NDc3OTo4ODc3Ojs2Njw7ODQ3NTc4NDQ3Njg3Nzk5OTY1NDQzMTU3
-ODQ0NTk1Njc6NjQ0NTY2ODY3NDU5Njc5NzY5ODg6OlSIiX5+Szs7S3qcq6ZsQjw7
-PDg3NjY3ODg5OTs8Ojs8OTk5Ojc7OTU9PD8+SEY+PD09PD5CPDk9ODY4Ozo7OTo7
-Oj9DPjs7OTtAPTk+QEA/Pj8/Pj9EQD9BPTw4Oz9CQkI8Pj49Oz9BPkNAQD8+Pz09
-QURDQD9BQj5APkFCR0FCPUNHQkBEQ0RDQkJEQUZGRUVHR0E/Sk1LS0lKQ0RJPTlF
-R0dJTEhKTUlST0hHT09NUE9KRklITEtLTE1MVFJPUlVYWFNTUFZVTUdNeLrM1N3i
-5Obo6enq6llUY2NVSU9SRkpJRklRTEtMSExER0ZHRkhKSENGPUNDQkJBPz4+PDk8
-QD47QD1FPUFAPz8/QUJDPjs7PDw+Pjk6Pjk7PDw+PTw6OD08PDo8PTo8PD4/Ozg9
-PDw8Pj5APTg+Njc5PTs7QDk3Ojo4Ojs8OTo+PDo3ODY5ODs+Ozo7NzU3OTk5OTtA
-PDo3ODgyNzw7ODo6Ozg2Ojw8Pj86OTw8PT06Oz06Ozw7PDs7Ojc3ODk5OTY3OTlA
-PDc5OzU1Njk5PD08NjQ1OTo8OTc4OTk6Ozk4Pjk5OT06Ojk4NzY1NTU1NjU4PDo3
-OTQ0Ojg4NjM0MjAyNTM1ODg4NzY1ODg4ODc4PTU2NjQ2Nzg2Njc2Nzw6ODU3ODY0
-NjU2ODg1MzI0NTY4NjU2Njg2NzI5PTg6NjU1NzY4NDIyNTU5OTk5Njc2NDc5Nzg5
-NzI2NzY2NTM0Nzc1NzY1NDIzNDc3NjQzMzM0MzY0MjUzNzc0NDQ1NzU1NjQ0NTU0
-NTY0NDQ2ODc6ODMyNDQzMzQ1NTY1NDY0NzYzNDY3OTg4NjU5Njc2Nzg3NjY3Njk2
-NDM1ODk3NTM4NDMyMTAyMzQ3NzYzMjIzMzM1NTU1NTQ2OTc2NTQ0MzU3ODs2NTY4
-ODU0NzYyLy8zNDQ4ODQ1NDQ0NjQ1NDU1LzEyNDw2MzY0MzUzMzIzMDIzNTM3NjQ0
-NTIyMzU0MjQ2NTY0NjIyMTIzMjAyMzE0MjMzODczMzQ1OTs1NDMyMDEzMjQvMzY0
-NTc4NTQ2ODg4NDQ2MjQ1ODc5Nzc2NTU1ODQ2MzQ0Njg6MjQzNzU1NDM1NTMwMzY3
-My8zNjQ1MzU5NDU1NDQ2Njc0NTU4ODQ2NjQwNDY4ODk0NDQ2NTU1NTg5OjcyMDE2
-Ojg1NTU1ODUzNDQ3ODk3ODM0ODc4NTQ1NDU1NTQ2NDQ1Njc4NjY6ODg3NDU2ODIy
-NDU0NjQ3ODQ3NTY0NTU2MzMyNDMyMzMzMzM2PDg0MzU1NTQ0NDc3Njg2NTY2Nzcy
-NzQ3ODo2NTc2Nzc1NTczODs5OTk6OTk1NDQ2NjU2OTc2NjMzNjU1Nzg3NTY2Njc2
-NTQ3NzQ0NDQxNjY1Mzg3ODQ2Mzc3Njk3NDIxNTQ4ODk3NzU3ODc5Ojc0Ozk6NzY4
-Ozg4NDc2ODc4Njc4ODg3ODc9ODk3ODk2Njc4Njc4Nzc0NTw8Nzk2NjU4Ojc1Ojs5
-Ojg3MzY4OTg5OTg1NjY1NjM0MS8yMzIyNTU0NDI2NjM3ODg4Nzc4OTc2NDc3Njg2
-NztAPTk8Tn2MgnNDOjo6TIeji048Ozo6Pjs5OTg5Oj06Ojk6ODc8Ojk7Ojc0Nzo6
-OkBFQD0+PT88PENCQTs2Ojg5PT09PDw6Ojs6Pj48P0JAPT09PT9BPTo+Q0A/Pz08
-Oz5BQD9DQDs9PD5EQkM/QEBDPkJBPzs+Rz5BQEFBQ0NDPkNCRUA/Oz1AQ0E+O0BD
-RURBRUREREJGRkhGSUlISkZESUVMSEJGSlJOTkpHSUxPTUlITVRNTUtMTk5MT09S
-VFJMTUtLUFNTUFJZVldPTFB8u8zU3eDl5ejp6uvrUUxLVVpQS05JRkpNTU1JSkZJ
-TFFFSEtIREVLTUxNRUtFPkJCPUI7Oz9BPUE/QT5AQUNBRD4+RD09Njo8QUE8RkBA
-PTw8QUZBQDs7PDs+Pjs9RD1CPz86OTw6Ozw6Ojo7PDs4OTpCQD46OD08PTw6ODo2
-ODc4NjkyMzY7NTg8PDo6PTs5ODw9Ojo6Ozg4ODs5ODs2ODw9PDg1ODlAOz08Pjw9
-QTw6Njs9Ojw6Pz07Ozw5PDg0Nzs7Ozk3NTY1NTk4NjY3Nzo5Nzk5ODg5OzY4NzY1
-Ojc2Ojw6ODo6Nzg4Nzg8OTU3NzY4ODo9ODo7OTY1NTY5NTg3OTg6Ojk2OzU3Nzk3
-NTc5PTg3NDUzNDU5Njo3NTs5NDg4ODY3NzYzMzMyODc0NDY2ODk0NTMxMTQ4Pzo3
-NjQ0Nzc2NDs5ODc2NDQzNTg9ODg2OTc3ODc1NDY3NTc1NTg6NTo0NDEwMjM2NTU2
-Mzc2NTMzODU6Nzc5OjIyNTYzNDQyMjc1NTM0NTM1Nzk3NTUyNzU2NDQzNDM0NzQ2
-ODo4NzYzMzM0NjU2NTU3NzM1NjM3ODk6MzQ3OTc0NDUzMzM1NDc3NjQ3NDM0ODg5
-OjY2MjMyNjY2ODc5NDU0MzI0NDMwMzI2NzY4NTAzMjMzNTU3NTQzMzc2ODk0NjQy
-MjEzNzs5Nzk3NTIzMTIyNTIyNDU2ODU3ODI3NTQ1NjU2NjI0NTc2Mzc1MzAyNDQ0
-NDc3Njw4NzY1MzIxMDExMTIxNzg1NjU1NTU1Njg2NTY2NTc1NzQzNDc3MzU0MzM0
-MjAyNjY0NTQ3MzExMzMzNDQ1NTU1NzU1NTM2MzIwMzIyMTQzMTU2ODU2NzU3NjM0
-NjM2NDQ2NjY3Njc2MzU4NzU1MzQ4NzM2Njg3NzY1NzY1MjM2Nzc4NzQ3NzU1NjU1
-NjU4MzI1NDg0NzQ5NzY5NzY2NTU0MTM5Nzk6OTQ2NjQ0MzIzNTY4NDc3NjU1NjUy
-NTU2NzY1MzMyNTY1NTIyMjM0Njc4Ojs2NTc2NzQ1ODg3NjY3Njk8OTg3ODc7Ozg1
-MzQ1OTk4OjY5OjY0NDU3NzU3NzU1NjY0ODc5NzU1NzY1NTU4NjU1NzQ0NDg3NzY3
-NDQ1NjQ2NDU2ODc2NTY3Nzg0NDQ5NjQ1Ojg1NzU0NjU6Nzc2NTY3OTo6Ojs5Ozw8
-Pjo0NjQ1NDc4Ozs7Ojg4Njg3Ozs0OTlAOjc3NDc5NzQ0NTY2ODg3OTc1NDo7ODM1
-NzQ0MzU1NTc4Ozs3Njg2NjU1NjY1NDQ2Ozo8OTtBfYqFc0c7Ozg/d49yQ0M9Oj06
-ODc4OTo+Ozo7Ozg4ODg5NDk5Nzk3ODo+REQ9PD1BPzw7OT1APTo6Nzg5Ojo6Pj89
-Pz1AQEFEQ0U+Pz07PEBCQ0E8PUU/QUFCREFERENAQURCP0FGR0Q9P0JFR0M/RERI
-R0E9QEM9QEA8PURERDw7QEA9Oz1CQD8/RUJDPURHRkBFSUZKSEhHRUhIRkZHS0hN
-UU5LSUVJSEtLTUxGR0dIREdHTE1LS0lMS09RWVFQTlJTWFFXVVNLR3S5zNXd4eTm
-6Onq6+tLTVJTVFRQTEZNTEpJR0pFSEhLTURBQklFREZKRkZFQ0hJSURGRUdFQEFB
-R0c/QT9APzs9RDs9Pjs+PDs+Pz0/QEBAPTw6Ozk6O0I9Ozs8QT07OTs5PD42NDo3
-Ojw+PDw4OTY2OT4/Pzo7Oz0+PDo5Ojk6NzU2OTs5PTc6Ojo5PDs5QT4+Ojk3ODg5
-QjxAOTk7Ojk4ODY5OTs3O0E8QUE9Ozs6PDo6Nzw6ODc6OTw8PDw8ODg8Nzk6ODc3
-ODg4PDs+Qj89NzY4Nzc3ODk5ODo3MzU1ODk6ODdBPDk4ODk4OTg8OTQ2NTQ5OTo5
-Ojc5ODc2NTs5Ojk5ODw5Ojk5OTo1MzU4NzU4OTQ3ODc2OTc7PTw5OTk8Ojc4ODg6
-NjY1Nzo5NjM1NDEzNDg4NzU1NzU0OjY6ODk4ODg7Njk5NjU2NjM3Ojk3Njo2Ojg4
-NDg1NjU0NDIwMzQ3NDQ0OTQ3ODU4NDUyMjM1Njk4ODk2NDU2MzU0MjIzODc3NTAx
-NTY2NTQ2NTU4NTY1NDM3NDQ1NTI1NDU3ODg1MzQ0ODY2NTQzNTU0MzIzNDY2Nzc3
-NjQ1Nzc0MzIzNTY1NDQzMzY2MzQ1NjY5OjcyMjU0NTUzNTU3NTEyMjM1NDU0NDY2
-NTc5NDQwMjY1NDQxMzY3NDQ0NDc2NTIzMzU0Nzo2MjQzODc2NjU0NTY4ODUxNzQ1
-NzMxMzQ1NTY0NDU1NTY4NTM5NjU0NDUzNTM0ODk4OTY2NjIyNDcyMzY2NTU0NzUy
-MzM0NzY2ODY3NjU1NDY1NTY2NTUzNDAvMzIzNDU2Ojc2OTg2NjU0NjY4ODU1NjMz
-MDAxMjQ2NTY1MzQ1NDQ2NjQzMzM1NzQ4ODY1Ojg2NjU1ODMyNjYzNjY3NTM5ODU1
-NTU0NjY4NjY0NDY0NjU3Nzc2Nzg3NjQ0MzIzNjM0NzYyNDQ1NzY8NzY3NzY0MjY1
-ODY5NzY2Njg3NTQ2Nzc3NzQ1Njc0NTQ0NTY2NjQyNDI1NTc1NjY3PDk9Ojo6ODM1
-NjM1ODg2NDYzNzU5Ozg5OTk7Ojs9NjQ3OTY2ODg5ODo8NzY0Njc5NTg2NzY2NTU1
-ODY5OjQ2ODo1MjQ3OjQ2NTU4Njc3Ojo3NzYzNDY2NDQ4OjU2NzY4ODUzNTk2NDM2
-NjIzNTY0NDY4NTQ0Njg4NDU4Ojc2NjU4OTw3NTU1Nzg5Ozg4NzY3Nzg5Oj06Ojo5
-ODg8ODQ0NTY3Nzo3ODM0NTg8Ozo3MzM2NDY1NTU6ODg5ODc2NDU6ODg3NzU0ODs6
-Ojk3OT96jpJ8Szw8Q1qUl2M9Ozk5OT06ODc7Ojg3Ojo6Ojk5Pjk6OTs5Ojk8OkdH
-Rjk7PD08QD48Oz8+PkBBPz0+O0JDQEE+ODo7Pjs7OEBCQjw6PkA+PT9HQj49PT5B
-QUBCRkZFQEVIQkFGQ0JCPzxAPUVDTU1FPzw/PkE9P0M9PT47Oz5DQUJEQD4+PUJA
-RUM7Q0RER0VFR0VHR0VESktKSUhHSEdJS0xNSEZMSkxPSUxMSktOUlNSUFFRTUZL
-SktOUVJRVFRbWV5YV1RQe7nM1d3h5Obo6err60tUTlFWUlxcUUtJT09ST0xLSk1M
-SUJARElGRkpFRUlIRUhCREZGREdFRklFSENKQTw+PEFBQ0JCPDo7PkA/QTc5Qzw9
-Pz05Pz5APkA4QD5CQT49N0E6Nzk9OTo7QEA6ODk7QDk6Ozs7PTo9PDg6PDs4OTg4
-Ojs6Ozs5PDg4Ozw8ODs5ODo8PDo4Pjw8Ozw7ODk6OTg5ODo8PTs4OTs7PDo5Ozw9
-PDo5ODk4Njk6OTo2OTY4PTs5ODc5Njg6Ozk7Ozw+PTo5Oz08OTg7PDg2Ojc4NTY4
-OTg9PD84Nzg3NTY4ODY4ODg3OTs5Ojg8ODc1Nzg1NjY5OTY6Ojk4NDU0NjY5OTcy
-Nzk2Njc3Nzs5Nzo7ODc1Nzk3ODY2NjU2Njo3OTs7ODo4NzQ0NDQ3OTY4NjU1NTU4
-Ozk4Nzg5ODc1NDk3NjQ3ODY3NjY3ODg2NjY4ODY0NDU0MDU1NDg1NDUzNTc1NTE0
-MzQ2Ojo3NTg1Ojo2NDU4NDo7NTY2Njc8NzI3NTY1NzU3OzU1ODY1NDU1Nzc2NTU2
-ODk5Nzg4NTIzMjc4ODU0MjU2ODM2OTc6ODc0NTc3NTc2NTYzMTc2NjIzNTYzNTM4
-Njk2NjIzNTs1Mzc4ODY2OTY3NDQ3NDY4OjY4Ojk1MzIzNDU3NzU0NDMzNDc4OTQ0
-NzUxNDcyMjIyNTMxMTIxNTg2NTU2MzY3NjQxNDY2NjMyMjY5NTU2NDQ1MzY1MzQ1
-NjY1NjQ3NTQ0MzMzNDU0MzUxMzM2NzUyNDc3ODU5ODY1NzM1ODY0NTU1NDE0Mzg1
-NDQ1NTU0MzM2NzQzNTU/NTIzNTQ0Ozs5Njc0NTU1NTMzNjU2NTQzNDUxMzQ1NjMw
-MDg5OTg3NjU5OTg0NzYzNjQ2NzYzNTQ0NTIzNTYzNjc2Nzg7OTY3MzM3Nzg5Njk4
-Njg6ODc3NDQzNDk4ODU6OjY4NDc0NDQ3ODk6NDY4NzY2NjY2NDU2NTI0NzU1NDMy
-MTAzNzQzNDM1OTs5Nzc3Ozg4NDY2NjY3OTo7Ozs4ODc3NjY1Nzo5OTk6Ojk3NjU4
-OTU1OTg8OTY3NDY7Njg4NTY3NTM0NTY5Nzc3NTY0Mzg3NDQ0NTY3NjY0MzU0NDQ3
-NDI3NjU0Nzg1OTU3Njc2NDYzMzQ3OTk3NDUzNDQ1NTQ1Ojs6OTo7ODc3OTg3Nzg5
-NzY2Nzg4NjY2PTs7ODg6NDc4ODg5ODk8PTo0NTUyODc1NzY3ODs4Njo3Nzc5MjUx
-Mzg5OTk4Nzc7NjY3NTc9Nzc4Ojg8PD06OTo3PmKWmJhqREBCf41xRzs+Pjs6Ozk6
-ODg2NTg3ODc6OTk6ODY6Ozw8Ojg2SkhHQTs5PDk7PT89Ojo5QD5CQTw7Qj5BPTxA
-PDtBPUA7PTw9QEJBQT9APzw5Ozo+PjxBQT9CQ0A8QERBO0FCQkFDQTo/QUBKUEJA
-QkREQUBAQ0U9P0dFQD1BQ0RBPENBQEFDQkZAQEFFSEFGREhFRUJGSEVLS0hER0lH
-R01LSEhISUlJSElISEhLUU9KSVFQT09LSEhJT1NUVVleWk5PT056ucvV3eHk5ujq
-6uzqRUVKTlNTU1tcVFFLVE9NS01ORkVKTUpGRERGSExDTkJDREBGRklHRElIQTxD
-QURMQD9CQ0dDQkRAPj9AQUA/RENBQz1BQkFCQ0FEPzxBQD46PT05Ojc9PUM9QUA7
-Ojs4Ozo8PDg5OTo9OjtCPTw+Pzs2ODg1Ozs6Ojg5OjY0Nzk6Ozo7Ojk4OTw/PD08
-PDw7ODg7ODo4OTs+PDo7PDw5Nzs6Ozs6Ozk0Nkc5NzU0NTY2OEQ5NDc3Ojo5OTU3
-Njs+Oz07Ozo9Ozc4ODs7ODxDPDY1MzM5ODo6OjQ2OTY1ODg6Ozk5OTw5OTo5Njc5
-ODg3Njk2NTU3NzU2Njc5OD05OTY5NTU3ODo9Ozc6PDo5Njc4Njk3NTM2NzU1Ojw4
-NDg6ODc4Nzk3NzY0NDg6Nzc2NTQ0NjU2ODYzNDQ4NTIzNjg6PTk3Njc2NTY1MjU2
-NzU3ODYvMjg4NDc2Njg3ODUzNDU2NzY4ODk2NTY2NTU2NTY1NjEyMTUyNjY0Nzg2
-NzQ4Njg5NjIzNDQ2NzY3Nzg4OTU3NjYzMzU3NjY0NDM0MDU2NDU2NTU1Njc2ODg1
-NTc2NzU2NjU1NTQ1OzUxMzY2NjU4NjY2NzU1NjU0NzU1NTM1ODc2NjU0MzYyNDU2
-ODc5OTU0NjU1OTgzMzU1NTI0MzM1NTU2NjEzMjU1NTQxMzQ1NjQ0MjE1NTIxMzQ2
-NDQzNTMyMzU3NDU3NTQyMzk3NDQ2ODc5NTUzMzMyMDI0NDg2Njg2NzQ0NjQ1MzI2
-NTU2Nzg2NjMzNTI0OTU2NDc3NjQ0Nzg2NDI1NTUyMTU2NjU1NTUyNzQzNDY3NzI1
-NTQzNzQ0NDU0MzMxMjQ2Nzc0NTY2Nzo2NzU4NTU2NzQyMzQ0NTo2NTI1NDQ0NDIx
-NjUzNDU1NzUzNzc1NTY2NjQ1OTg8Ojo6Nzc5NTY0NDEzMi8zODk3ODg3Nzk3NzQ2
-ODY2NTY2NzU0ODY3NDQzMzY3Njg1Nzg3OTU3NDM5NjY5NjU4Njs4Njs6PEA6NTo3
-Nzg3OD1CPDw7ODg3Ojg6Nzk7Nzo5OjU2Nzg3Njg3Ojo5Ozo1NTU5ODQzNjU7OjM2
-NTc6PDk9Pzk2Njg4NDg2NjU0NDg3NDY2MzQ2MjU2Nzg3ODU0NzQ4Ojs0Nzg2NTY0
-NDc2ODQ0Njc2ODo7Ojk2Njk5NTU4OTk0Nzg4Ojc8OTc5Ojo3Nzc4Nzg5Ozs7Ozc2
-ODc1NTc0MjQzNjU2Nzg4NzY4ODc4NDM0ODk9Ojg7OjU4Nzc5OTo2NzY2Nzk4ODc4
-PT1CXpqpsJNpTGSRd0tAPjw4NDc4OT04Njg5Ojs7ODs8Pjw6Ozk4Ojs5OD9IS0JB
-Pzk7PDw9Ojg9PD08QD0+Qj46PT4+PTw6Oz4+Pj47Oj1BQ0I8O0A/PTs6PT06OkA8
-PkNBQD9BQ0NEQEA+QUJAPkFCP0NEPj5FR0NCPj1AQDo6Ojw7PERFQT8/Q0VCREVK
-SUdIRkdESUVDQ0hJR0hITEVJSlBHRkdJSEpER0xER0pKSU9LTE9US0lLU1FQTU5Q
-T0pOUE9WUVBQTEhHR3a3y9Xc4OPm6Onq6+tJS09OUFJWVFlXVUlKTUxHRkRNSUhL
-T0xJUkxJSUxOQkNDR0NBRUBDQ0VAQEJAPEBAQENBPz0+PUREQEE/Ozo8PD8+PD88
-Pjw6Pjw6PT45Pz4+Ozw5Pjg7PzxBOzs+PTk4ODk3OTc2Njo7PkBFPz89PTo4Oj86
-Ozs7OTY1NDQ3Njk0NjY5OTo/ODw7Ojw/Qzo3Ozk/OjY5Pzo3Ojs7Ozg6Oj89PEA8
-Nzg7Ojk7PDo7OTk3OT03NzpAPjo7PDxAQzw6Ojo6OTs7PDk4Nzk+Pjw9OTg/Njo2
-ODk3NzU2OTg3NzY2Njs6OTk3PDk3NzU4Ojs4NTc5NTg4PDU0ODo5OklFOjU4OTY1
-OzY3ODg3OTk3NDc5ODU3ODU3NTc4Ojg0ODo3NTQ5Nzc5Njg6ODg7NjU1Nj49NzY3
-Nzs1MjMyNTY4ODY4NjU1NTM1NDM5NjU0ODU2NDQ1MTEzNDc2OUQ2NjU0ODk4NTc5
-ODg4NTU2Njc5Njc1MzI1NjY1NTY1NjY2OTU1NzQ0MzU0MTM1MzQ0Nzg2NTY1Njg2
-MzQ0MzU1MTI3NDc0ODY5Ojc1ODY0NDQzNjU1NTU2ODc2NTY2NTY2MjU1NTY2NTUz
-NDU2MjQ1NTYzMjQ0NjYyMDM1MjY1OTg1NDMzNTU1NjQ0ODI1NDQ4NjM2NjU0NjQ4
-MzEwMzQxNDQ1NDI2ODQxMjMyMjMzMjU3MzU1MjI0NTQ2NTc1MzU0NDIyMjM2NjU3
-NjQ0MzIzMzY3Nzg1NDQxMDMzMzEzNDY2NzUzNDQzNDY2MzQ2Nzc2NTc3NTc1NzU2
-NjMyMzQzNDYzMTY2MzQxMzQ0MjM2ODUzNzg2NTQzNTg3NDQ0NDM2NzgzNjg3NTY1
-NzY0NjMzNjc4NTQ3Nzc0MzQ1NTUyMzY1NDQzNDc0NTY2NTM1Nzg5NzY5OTk4Nzg5
-OTU0NDM1NTY0NDY4ODs8PDU3NDc5NzM4NjY1NjY1NTU1NTk4NTIzNTQ1NzU2NTg6
-ODU3Njo6Ozc0NDU6ODc4OjxCOzo+OTg1ODg2NjY1MTM8ODg5ODY3OTo3Ozo4ODo3
-OTk2Njk3NTMyNDU1NTU3NTQ1NzU1Ojk3NzY4NzY5Ozo5Nzo3Njc5NDM1OTY0MjQz
-NDEzMzY4Nzc5ODc1NTc3OTg3NDU2OTc0MzU6NzQ2Ojc3Nzc4NjY2Nzk5NTY3NjY3
-ODc4ODk5OTk4OjczNTU3PDY3ODo5NDU2ODg4NzY4NTU3NzY2NzY2NDMzNDg1NjY6
-Ojc6ODk5OTc3Ozs2NTc6ODg5OTk8Ozk6PTxSnLLBr6ShnJhcOzo6NjY3ODQ1Nzg7
-Ojk7Ojo3OTw3OTw6ODo5OTs9PUlKP0A/Pzw8QT48Ozw+QT48PDw8Ojs4OjxCPTk8
-QD0/PT0+QD1BQD49QkE/QD88QENBPDpAQDo8PUE+QkI7PUFAQ0VDOTxCP0VHSUdG
-PDo9QT1ARURBQ0FCQEZERUI+RUNERUVIS0lKSkVESktFRkhKSEtGRklKRUZERktJ
-R05HR01KTU5QR0VKS1BXVVJNT05RW1NPVldPUFBRUlNQTExFcbbM1dzh5Obn6Orr
-7EhNTEpQUlNUUVdUTVZMSkpLTkZJTUtKTUxJSlBKQUhDRUNBP0BEREVIRUFEPDw/
-QkNDPz49PkA9Q0VHRkVGQT4+QTs+Pj48Nzs8Ozo9Qj07Pjo9Oj07QEA8QDw5Pzs5
-Pjw6OT08NzU4Nzw+Q0E+Ozs3OTU6OTo+OTg6Ojs0Nzk6PDY4Ojc3Ojo2NTg5Pjo9
-Ozw5Njk8Ozo4Ozo7OTg4ODg6Pzs7Oj06ODo+PDs8Pjw9PTo3NDc8Ojo5OTg6OTw/
-Ozo5Ozo3OTk7OTo6Ojw8PTs7Ojk1NTg4OjxAOzg5OTY2NDY2OTs1ODk7Ojc3OTw7
-ODQ0ODo0NTg6NzU4Ozo6PTw3ODo7Ojo3NjUzNTU2ODYzNTU2OTQ5OjY2ODU2Mzg1
-OTU2OTY0ODg4Njg4NTY3NjMxNjY1Njc1NjU1Mjk1OTg8OTc5Ozc4NTI2Nzk1MjM1
-MzQ0NTg1NTQyNTU3Nzc1ODk2OTQzNzY2NTQ3MzQ3NTc3Nzc1Nzg1NTY3NTUxNjQ1
-Njc2NzU0MzQ1MjY1MzM3NTQ0NjU2Njg4NjQzNTY2NTU0MjQ1NTc2NDQ0MTIyMTIz
-MzM0NjU0Njo1MzIzNDk5NzQ5ODIzNzg2OTc2OTQ4NDE1OTUzMzIzNjU1NjY3NzQz
-NDI0NjU0NDQ2NjQ3NzY0NDY0ODY0NDM2MjM0NDM0NjM1OTY2NTQ0MjQ1NDU2NjU0
-NzU3ODc0MzQzMzQ3NjU1NDUxMjEyNDM0NDQ2ODY1Njc1NDM0MzQzMjM1MTMyNTcx
-MTY1MjI2NzU2OTU0MzM1NTc2NTY2NTI1NTgzMjc4NzY3MzU1MzI4OTU4NzU3NDM0
-NjU3ODQ2NDQyNDIzNzQ5Nzg3NTc1NjQ2NzczLzI0NTQ1MzM3NjU1NTMyNjU0ODYy
-NDc3Nzc0NDQ1NjU4Njk6ODc3NzQ6ODU1NzY0NDIxMzM0MzQ2OTU0Nzo3PDU3NTU3
-OTU0NTQyNDc5OTU2NzU0NTY1MzM4ODY2ODs/PTw9QD49Pjo5PTo6NTc4Ozo3NjY3
-NjUyNTg5Ozc5ODk8ODY4ODs5NzU4Ozs2NDI0NDY3Nzc2Nzg1NDY0NjY4OTg4Nzc6
-ODc2OTc3Nzg1Njc3ODg4NzU2Njg1ODQzNDQ3ODc3Ojo3NTg5ODo4ODc3ODY1ODY2
-NTY5Njc5ODg0MzU0NTY2NDg4Nzc3PDw6ODk3NjY3PDg3Ojc4OTk6OTg5NTY3NjQ2
-Nzc4OTg3NDc3MzQ2NDc5Njk3MzU6PDo6NjQ2NjY2Ojs5OTo6ODc7OTc3ODc6QT07
-PXCbrrStqayii0o7ODUzNDg6ODk5OTs/Ozk5Ojo2NzY6ODs7Ojk4OjxASUVAPzo9
-P0BBPDo6Nzo9Pz4+P0E9QEFCPTo9Ojo6Ojw+QEE7ODs8OTo/REJBQUJDREE+QT4/
-Pz5BQT8/PjxAPjpCQzxAQkFCQ0VGQkE9QUNEQj5BR0VCQUFEPj5AOkZJQjxGR0hI
-RUhPTEVJTklGSkxMR0FLSEZLSUlHR0hKSUVETU1MRkxLSUdJTltaWFRSU1RRU1FU
-U1JSUU9SWVJRVVBwtczV3eDk5ujq6+vrVlZRVE9QTk5NU1JRV1JTWFFKSktLTkZH
-TUxFSkhKSEhIRElBQkJDRkhHSUFDQkFDQ0VCQkE/QEE5PkJCQUNBPT87Oj48QD48
-QTw5QDs6PTo4PTk4OTw9QkFAPDo4Ojo8Ozc8Nzc3ODk1OD07Qj48OTs3Ojs9PTw7
-Ojo2Nzg4ODg4OTpAQTk5PD08Nzk3Njg5ODw7PTk2NTo6PTo4NTg7OTc7OTc8PT84
-OjY1NjhAOzs7PD48Nzk6Pzs9ODk5Ozg4Ozs9Ozw4Ojs6Pjg7Ojg5ODk6Nzw4ODg5
-Ojg4PDo4OTg2Nzk5OTc1OTY2OTg3OTk4OTo4Nzw9Nzg6ODg3ODg5NzU4OTU0ODY5
-ODw4NjI0NzY1OTo5Ozg2Njo2NTU2NzY2NTUzNTY4NTY4NDc0MzU1MjQ1NzU2OTQ2
-OTo5ODo3OEA5NzQ2Ojc5NDU0NzU1NDc1NDY5NzY4ODQ1Njc1NDU3Nzg7ODUzNTQz
-ODo7ODY4Njo3NzY5ODc5ODY2Nzs3MzQ1NTU1NDYyNDc4NDU1NTc3NjQ1ODc2NTc1
-MjQ0NTc1MzMzNjI0NDU0NTMwMzUyMjQ1NjQzNTM1NzY3NTI1NjQ2ODc3NTQzNzo3
-Ojw2NzU2NTY0NjM1NTQ4ODY1NzQyNTg3NT01NTMzNTc3ODo1MzY3NTc0NjUzMDMx
-NDM2OTQ0NDU2NTAzNDI0NTY0NTUzMzIxNDM1MTUzNDQ3Njc3MzAzNTIxMzMzNTQ3
-NjY4NjU0NjU0NjU2OTg1NDM3NTM1MzMzNTY2NzU2ODY3NjU2NTY2NTc5NjUyOjo4
-OTg2NTQ0NDQ5Njo5Mz03NjM1MzIzNDY1NTY3NjczMjYyNTQyNDQ0MzUzNzo5OTg2
-MzQ2NjY2NzQ3NTU0NzY2Njg2ODk2NjU1NDU1NTM0MzU0NDU6ODw7Ojo4OjY5OTk7
-OTo2NjYzNjc3ODQ2ODk5ODY4Ojo3Nzo6OTU4ODQzNjM0MzQ3NTQzODM1NDM2Ozo6
-PDxCODk5PTo6Pjs3Nzg7ODYyNDM0Mzk7Ojk4OTs9Mzg3NjY2ODk2OTo5Njc2NDg3
-NzY3OTk4ODs6Nzg3ODk4OjY6ODY1NTc6OTk6Ojs9Ojg4NjY3NjU5OTg7OT08Pzk4
-NTQ2NjU1Nzc3ODg5MzU2ODI1NDUzMzY0NDY4ODc0Nzg4NjczNjo3NTQ2NjY0NDQ4
-ODc1Njk2Nzg7Ozo8PDk4Nzk3Nzg4NTk5Pjg2NDc5OTU2NjY4NjY1Njc5NjU5NjU1
-NTc2NTU1NTQ0NzQ5Ojo2NDU8Ojo6OjpCgouWrKKqqZllQ0A4NTo3NjU3OTs4OTk4
-Ozc3Ozk3Ozk5Ozo7QDw5OkdIQj8+Oz0+PT49PDs8OTw8Ozs+PD8/PDpBPTs5NTY7
-QEBBPz08Pzs7OjlDR0VEPDxCRERCREFEQ0FEQjo8QUE+PTw+P0dGPTxAQDw6PURE
-SDtER0hDQEBERkI/RUVIRUVDQUVHRURCR0pJTUtJSVNNS0hNQ0NIS0dJRklISk9P
-S0pIS05MTEtJUE5NUE5TVlRSSEhOT05OUlVNTFpTU1RRTne4y9Xc4eTm6Onq6+xN
-UFBPT0xWVFFWXFVSVE9QTUxLTExLTkpKUU1NUk9QSkhKREVIS0NLRkZMRUhGQkNF
-QUREOzxDPEI+QkFBPkJAPDc7PDo/QERAPkE6PTs/Qz47ODo4PTk/OTw7ODg7OTpD
-PTtBPjo7Nzg3Ojg5Ozs8PEA/PTs5OD46Ozk4Nzg6PUE+OTc5Pjw8Ojo8Njo6Ozw6
-Nzc3Mzg9PD07OTw5OTY4NTM2PDlBQDs5ODk3OTg7OzU1OTo+Pjw8Pjo2ODo4Ozo6
-Ozs6Oj9APjo5Ozo5Ozo5Pz4+PDg1NDU6ODo0Njs6Ojg3NjU3OTY5Nzo5Ojg3OTk7
-NzU2Nzo1Nzw7ODg4ODY7OjY3ODg4ODo8Ojo5OTk6ODQ1NjY6ODc1NDQzNzc7OTc1
-NjY1Njc0ODg7NDc3NDY4Nzc5NjQ3NzYzNTc3OTc2ODs5Njg1ODk3MjM0NjY4Ojg2
-NzU0NTc4NTQ2NDU0Njc4NDg5NzMzNjk2NzQzNDg4OTo3OTY4NzY2NTU0NzgzNDY3
-NDQ4NTU2NjQ1NDQ0MzY5Ozk5ODU1NjU1NDQ4NTU0Njc0MzQ0NjkzNzY3NTY3NDM1
-MTQ1NzEzMzY2NjUyNDc2NjU6ODQzMzQ1NTU4ODQ3ODczMzM1NDg2Njg1MjM1NDQ1
-NTU0NDQ0MzYzODQ2NjQ1NzU1NDY0NzUwMTEyNjI0NjgzNTU0MzIxNDY0ODMzMjM1
-NDMyMDI2NjQ3Njk3NjQyNDMyMzU0NDUzMTY1NDM0MjEuMTQ2ODo1NTQyMzMxMzUz
-NTk4NzU0NTQ0MzQ0MzI3NjU3MzQ3NzY2NzczNTY0NDk5NzU4OTY2NTU1NTc1MzIy
-NTQ1NDIyNzY1Mzc2NjY3NjQ1NzU3NTM3NjU1OzQzNjg1NDQ0Ozk2Nzo6NjM0NTQ1
-NDIzNTIzODc3MjM3ODc3NjQ1OTc3NzY3ODs6Ozs4NjQ3Nzg1Nzg2Mjc5OTo4Nzo2
-Njg2Mzc1ODY3NTM1NDczNjU0Nzw6Ozg2PDg5Oz8/O0A6NzY5Ojw8OzYzLzQ3Njk9
-Nzg5Ojk5ODc6NzU1NDQ3Njo8PDk1NjQ2OTg4Ojo8ODs6NDQ1Nzs4OjY3NzU1ODU3
-NTc6Nzg6NzM1NTc6Ozo2Njg8Pzw2OTg2ODc1NzY4Njc5ODY5ODg2NTc4NTU0NTY1
-NTc0NDU2NDY2NTMzMzM0NDMzNDQ0Nzc2Ojg5Ojg5PDk8Pjs5Njc5NjU1Nzc5ODg6
-ODQzNzU0NTU1NTY4NjM2Ojc2NzczMzY4Nzc5Ozk5OTg1ODo3ODc3Nzc5OTs7PVeA
-bWaRm5ePb0U3PEM9Nzw5ODg6Ojo6OTk7OTc3Nzs7OTk5Ozs5PD5EUk0+PT46PD87
-Ojk4Nzk7Ojk8Pjs9Pz47PT08OTc5Oj8+QD8+QD87Oz48Ojo8PT5HREBBRD1AQD8/
-PD4/Oz1CP0I/Oz1AQj5BPEFDQD5ESEtKSURARERFQD9EQUlDSUlARkpDRkNHSE9G
-RUhMSkdIS0dISEtPT0NARUpNTEdIUlFOTE1MSE5NSkxNUFFOU1JQUE1JS0xTT1FT
-UlhYUE9OVVBafbnK1Nvg4+bo6enq6lVcXF1YV1VQTkxQTU1MVlJLS0ZIRUtISkdL
-T0hJUk9MTElDSEtKSEdJREdHRkVFRz9DQD46Ozw/PT0+P0E/PTw6Ojg5Oz5AQUBC
-PD48Oz9BQEE9OTc4NTo+PD46ODo/PkFDOTs6OTc7Ozg5Pj47Oj09Pjg4Ojk3OTw6
-PTo3Ojk5QkQ4Nzo8PDs9Pjk5PD1AOz08PDo5OTo5Oj06Ojs8PD06NTg6Ozo9PTc4
-OTo5Ojk7PDk5Njs3Ojw4OTs3Nzs6OTs5Oz08Qj08OTY5Ojs7PDw8QDw5Ozw6PTk1
-Nzk3ODk5NzU4ODg6OTg3ODY3ODo3Ojo7ODo3NTk4Ojc3OT44OTo6ODY0OTs5Nzg4
-Oz49Ojg5OTQ2NTM5ODM3ODg2ODo4NTw3ODY4ODY3OTU2NDQ2NDg2Nzo4NzQzNjY4
-Nzc5NzYzNDc2NTQzNTQ5OTc5OTU2OTk3NjY4NjU3Nzk5ODQ0Mzc5OTY3NTQ3ODo5
-PDk3Nzk4OTg5ODY5NDQ2OTg6OTY2NDY2NjQ2Njk5Njk3MzM1ODc2Nzg3NDY1Nzc0
-Nzc3OT04OjUzNTY2NjU2Nzg2NTY3NDU3MzM1NDM2MTMvMzYyNDQ3NjU4MzQyNTY1
-Nzc2Ojc4ODYzNDE0NDI0NDQ2Nzc2NjY0NDEzMjI0MjEyMTIzMjM1NzIzNDIwMzQ0
-NDM1NzQ3NjQ0NDEzNDUyNDYxNTQzMjU1NjYyMzIyMzE0MzQ0NDQ3NTIzMTI0NjQz
-NDY1MzQ0NDY0NTk4Nzk0MzQ0NTc3ODIzNzU2NjU2NjY1Mzc5OTo2NzU2NTUzNDY3
-OTo5MjQ1NTU0Mzc1NTU2NDI1ODYzNzM2NjM0MzM0NDU2Njk3NjY0MzY3NjY3ODo3
-OTk0NTU1Nzk2NzQ4ODU4Nzg5ODUzNTc5NTQ1Nzg5Ozk2Njs2ODM1NjUyNTY2NTY3
-Njc1ODs5Nzc3NDc1NzU5Nzk3OTk3Njg1NDI1NjU4NTc6NTc5NzM1NTc4NjQ1Nzk8
-Pjw8Pj09PTk5OzYzNjk5OTY/Ozk3PDk1NTU3NTo2ODc6NjIxMzc4ODo5Nzc1NzY2
-OThAOzc3Njc0Njg2MjQ3Nzg1Nzc3Nzg3NDY1MjY3ODk2NDU4NjU5ODY4Nzw1ODw3
-ODQ2Nzc3NTc3Njg5NDY5OTYyNTU3ODk1NDQyNDM0NTY2NDU5OjU4NzY3PDs1NTUz
-Nzk4Ojs7Ojo5NTQ3OTk4NTU4ODs3NzU3OTc1NTQ0NjU6PDg4NjU3ODg3Nzg2ODo7
-Nzo5ODc8Nzc1NDU2NTY3NjY3NzdBcnRMQFRjZ1ZCODc5Ojg7NzQ3NzQ2ODo3Ojs6
-ODY4Ojk6Nzg6Nzg6PEpqVEFAOjs4ODg6Ozc5Oz46Ojw5PTw/PD89ODg6Ozw7PTo/
-Pjs8QD87QT87PTw6PUA8PkRAOT49O0NDP0A9QkFBQEBGQj9ERUFBQkRHRkQ7P0BE
-Q0M8PkFERUZIRUdITkVEQkJHSUhGSUdDREZHSUFGSEpHS0xLSEtMRkpQT0pJSEtO
-Tk1MTktNV01OTlFQS0pOSUpUUVJOVVNUWllRUVNJRk5/uMnU3ODk5ujp6evqYV5X
-WVNTVFBQTklMUU9PU0lKTEpMSEpMSElIS0lITkhKSkdFR0pHR0Q/RUVEQUZCREdC
-QD8+O0A+Pj1DPkFAPjxCPDo6OTk6OD08Oj09PUJEQ0M+OTM5PD06Nzs5OTpCPz5B
-Pz47Ozw8PTo6Ojg7Nzc7PTs6PDs5Ozw6NjY5PDg6OTo6Ojs6PDw7PDk4OD48PDw7
-Ozs6OTo4Ozg6OTo6QT05Ozs+Qjw9QDs7Oj45Ozw7PDo6Ozg2ODo5OT09Ozo5OTg5
-PTw8Qjw5PTs8ODo4PD06Ojw8OTk3ODo6Ojs7Nzw8OjQ3ODg8Ojk0Nzg1Mzc3ODo5
-Nzk7ODg4Ojo5OTY5OTc0MjU1ODUzNjk6OTY2Nzk4Ojo6OTc4Njk7ODk0MzQyNjo6
-Ozk2ODg1Nzc7ODg0Njc0ODg1Njg4Njg6Ozc0NjM2Njg3NTs7NzU3NjY2NzU3NzM2
-Ojc0Nzk3Ojg1MzQzMzk0MTM1ODU1Nzg4NjU0NDUzNDc0Nzg3NTk1NDM2NjQ1NTc2
-MzY1ODc4OTg4NjQ1Njw4NTQyNDU0NDQzNjY0ODk5Ojc4Nzk2Njc4ODc4Nzc1NjY1
-MjMzNTQ0MzMyNTU2Njc1NzY0MjM3NzU3OTc3ODY0NTYzMjQyNTExNTU1NjUzMzQ0
-MTYyNjE0NTU6NzY2NjUyNjc4NzUxMjQ2NDM4OTQzMzQzODc3ND05MDY1NTQ2MzQ1
-MzUzMjIxNTQyNTMxNDU0NDU1NDUyMzM0NTMzMjM2NDg0Mzg4NzY1NzY1NjY1NjQz
-NDQ0NTc2Njc0Mzg5MjQ2Njg4NTMyNzU0NDEzNTM1NTQ4NTY3NjYyMzQ1Njc1NDEy
-NjY0MTU0NTQ3Nzc3NzU1NTUzNTc2NjU3NzY3NTQzNDY4NTY5NjY2NDQ4OTg3NTY4
-NDU4OTg1Njc5Ojk3OTQxMzM0Nzk6ODg1NzU1Njo3NzU6ODY3ODo5NjY0MzU4NzYz
-Njc0MjI1Njg9ODw6MjM0Njk3NTU3OTo5Ojo5QTY2ODg5OTU3Oj44ODg4OT48Ozo4
-PDw3OTs3ODk2OTY4Nzk7Ojo6Nzc4Ojc2OTs4NDM1Njc6NTY3MzU1NTU3Njc6PDc1
-NTg3MzI2NzU0NDU0NTY2Njk3NDY9OTc0NDQ1NzY0Nzc3PDk3Nzk2Nzg5NjY3NzQ0
-NDM1NTk2ODc4Njg5ODY2ODg5NjczNDg6NjQ4Ozg6OTM1Nzo6Pjk4NTc4ODc0ODc3
-Nzg0NzY4NDc6Ojs8ODg6OTk6ODY3NzQ1OTk5NjY5OTc2OTg6Ojc4Nzc4OlODhEM6
-Oj4+PTs8Pj86OTo4PDs4ODs5OTk4NjU3ODk8Ozk2PT06OTtBV2pLQUA8OTs6Ojo7
-OTg4Nzc4OTs6Ozw4Nz07PDs8Ozw8Pjw3Ojw8Ojw6PUBBPkA9OTs8QUE/PDs/QkA/
-Pj48PUA+QkJDQTxBREZHSUlHQ0A+PEBIQkJCREZGR0RBSEZJSkJDR0RCRkBGTE1F
-RUdLUkVFRUhERklRTkxPTk5QUk1LRUlHTUxJR0tMS0pMVE9KSU5LTU1MVFJQSFNX
-UlJPUlBJSoK2ydTa4OTm6Onq6+tWWVNQV1NQUEpLSk1MUFFTUlBMSklNSUxMS0lH
-SkdLSEZGQEVDQUA+QUVBQEM/REdHRERCRUJBPz46PUBEP0RGR0RCPD1AOj0+PT48
-Q0BDQD8/QTs7Nzs7Ojs/PTs/PTs+PTs9QDg2ODY5OTg4ODc4NzY4PDs7Ojw+Oj07
-Nzo0NDQ2ODg2OTw5Ojw5ODg2OTk6PEI9Pjw6OTs+PTlBOjdAPTw9OztBQEA+PD8+
-PDo5Ojo4Ojo7Ozo2PDs6PT04Nzk6PTw7Ozo5QD8/Ojo5Ojo9PTs7OTlAOTQ7OTk4
-PTw7Ojw5Njo2OTk5Ojk6ODY3ODk7Ozg5Ozw8NzY4Ozk0NDc3ODQ7Nzg2NjY3NzY1
-OTU0OTo7OTo5ODk5ODY2NjU1Nzg3ODk6OjY1Njk4Nzc4Njc2NjU4ODU3NjY1NDUy
-MzczNTQ1NTU3NDQ7Nzc4ODg3ODc1NTEzNjY2Ozc0NzY4NjU0NDQ0MjQ2NDU1Njc2
-Ojg2NjUzPDYzNDc4OTU1NzU0NjU1Njc1MzU2NjY3OTM3ODU4NzU0NzY4NzY3Nzc1
-MzMzNTU2NjU2ODc2Njg3NjY3NTg2NTUzMTMyMTEyMzU0Njs6OTo1NjY4NjU2NTM0
-NDc8PDg3OTU5NjM0MTU4NjU1MTEzMzI1NDU2MjU3NzY6ODg4NzU0MzU3NTQzMTE0
-NTU1NTU0MzUzNDUzOzg1MS8yNTQ0NDY2Njg2NTM0NDU0NTU2NTY2NDIyNDE0NTUy
-MTI0Nzg1NTc4NjY3NzU2MzM0Nzc1MjQyODk2NTQ1Njc3NjU3NDM1NTU0NDQ2NzUx
-NDc0NDEzODUzNDY2NTc4NzY3Nzg2MjI2Nzg5NDQyMjQ1MzUzMzQzNTY2NDQ1MzU3
-OjY5ODg2NjY4Nzg3OTg2NDQ1NDQ3Nzo4ODY1NTg6Ojc5Ozg5NTg1NDczMjM0NDU0
-NzU2NzY1NjY5NzU2OTQ2OTg2NDU2OTg5Ozg6OTc7Ojo9Ozo9Ojs7Ojc4NjU2Nz1A
-Nzg4Ojw6ODQ2Nzg6Ozo8Ojk5Ozg2ODc/OTk5Ojo2Njw7Njc1Njw9OTg3NTc3NTU5
-PDc2NTQ1MzM2ODg0MjU1ODc3NDU0MzQ2OTY3NjMzNjU4ODg4NjMzNDU5ODg4NjY2
-ODg3NTcyMzU4NzY2ODYzODs3NzQzNzQ1ODg7OTc1NzgzNTU3Nzg5Nzg5Njg4Njc3
-OTg3NTU4Nzk5Nzk5OTUzNzc1NDk4NTY3NjM2Nzc2Njg3NTM1MzU2Ozk3OTc4NTY3
-OTg3NTc3Ozk4Njc5OTk4NThCbX1SPjo6OTc4PDk7Ojs5Njk6Ozc0Nzk8Nzg5Nzg6
-PTg4NjY5Ojw7PD9SV0I/PT06PD43ODY3ODg4ODY5ODs5Oj9DQUNAQD5CPzc5OT49
-Pjs5Oz4+PT0/PUI/Pj8/PkA9QTw8PURCQjw5PDs9PkE+PD1CR0VLRkRBQDw+PERC
-Q0VDRUJFRUZOS0lMTEhISElHR0hISERGQUJGTUJHSkZER0tOTUpITExNSUhMR0RI
-UExNS0xJTkhQUlJQUExRSU9PUFFZUlNNUU5JTktKhrnJ1Nzh4+bp6urq61RQVVNO
-Vk1ITE5LTlFWVFVUSktKT0tJTktIR0hISUZFTE9HRURBQkhEQ0ZEQ0VGQkE/QkND
-RkVERURFRD48QUNCQT8/ODk7PEE7PT09Ozs4QEI7PT9DOzo7OTk/Pz9CQEI9PDk3
-OTo4PDg6Nzg4PTg5Ojs6PDw8PT09Ozo9ODY6QDw4OTg5Nzk4OTg5Oj4+PDo6Ozw8
-PDo4ODg5PTo5NDk+Pzs5Pjs5PkE8PD1APD08PDc5PT4+Ojw7Ozw8Ozk7Ojg5ODo4
-ODk7Ojc2NTc4ODk6Njc7OT44Ojs6Ozg7OTs8OTo3NDc1NTQ3Ojg3OTY3Ojs8Ozg5
-OTk0NzU7OTo5NjY4NzU3ODc3PDk3OD05NzQ0ODc6PTo2ODc1MzA0NzQ2OTg1Nzk3
-Nzk2Njc4Njc4OTg3Ozc1NzQ4Njo5NjM3NTQ2NDU0NDY6Nz00NTc3NzU1MzQ2NzQ1
-ODQ4Njc4Njg2MjE0MjAzNDc2NzU2Nzs3NTc2NjY2NjczNTQ1Nzk2Njk2NTQ0NDMz
-ODY4ODg1ODg3NzUzNTg2ODY3NDU0MzA0NDMzNTc1Nzk3ODU1NjM1MzQ1NTk5NTM3
-NDUwMTMzMjI0OjU6PDU4NjQ0NDM1NDEzNjc5Ozs7OTc3NjM1Nzc1NjQxMjM0Nzg0
-NTY5ODY6NTg3NTU0Njc3NjI3NTczNDQ0NzU2NjMzMzM0MzU3NDMxMzM0NDU1NzM3
-OTQzNDEyNjQwMzQ2NTYwMjEzNDQwNDI0MzM1NzU1NjY3MjM4NDQ1NjUyNTY2NDc1
-OzU0NDQ2NzQzNDM0MTEyMzQ0MzU2NTM7OTQyNTM0MjU0MzQxMzQ2NDc4NTQ1ODgz
-NzkxNDQ0NDIzNjUzMzEzMzI1NDQ3ODk5OjYzNTQ0Nzg4NDY2NzcyLy84ODc3ODo4
-OTc1Nzg4Nzc3Njk3NjY2OTk1NjUyMzQ0NDk3NzY2NTk4NTUyNjc1NTY4NjE0NTc1
-NDY3Ojo2Mzk7OTk2PTk2Nzc5OkU8OzY3OjpDOzc3NjU4ODk4Ozs+NzQ3NjQ4NjU4
-PDs7OTc4Ojk4NzY5ODY2Nzo7ODo6PDw1NzUzNDQ2NTk5NjUyNTU0NjU0MTQ2NTgy
-NDMyNTY1Nzk3NDY2Nzc3Njk8Ozc2NTQ3NzU2NjU2NTU1NjU2NDk4OTk1NjU2NDk3
-Njg4ODQ0MzEzNDU3Njg2NDY4NjY1Ojs5NDU1NjU4NjQ3NTU2OTk4NjU3NzY1NjY0
-NjU0NDc4NTY2NDU6OTg3Nzk6Nzs6ODQ1ODY2Nzc6OjQ3Ojk5OTs6PF5pTTk6Ozo2
-ODg8ODc6PTk4ODs5Njc3Ozw6Ozg4Ozo4OTY2ODg3Ozc7Pl5TRUVBOzY7ODc5Pjo7
-Nzc3NDc3PD8+Pz1CQUM/Ozw7Pz07Ojs6OTg+QkI/Q0RCRUA/Q0BAQkNAQj8+PTs9
-PD49QkBCRENDRkZFREVHQkRAQj4/QkJGRkVERUdKR0hGQ0ZLS0BIRkFHQkFARkZE
-QkhNSk5JSEVGR0tMRUZHUFBLS0xOTEpJSk5NS05MT09LSU5QTlFWTkxMTk5TTlFc
-TkpJSkmFu8vV2+Hk5ufo6urrVFxUVFVVUk5SUk9RUVBUU1FPTkhGSE1QR0hLSUpK
-RkRDTE5GQkZGRkVDRUZEPTtBQz0/PkJDRkREPURDPT1BREM+PTw7Oz06QT1BQzw5
-Oj47PDs7PD8+PTo7Ojw6OTpBPT09PDs8OTs6Nzs4Nzg4PTs9Rj45OTY2Ojw8Oj04
-ODg6Ozc5Nzg3Nzk5PDs6Ozk7Pj88Oz0/PT49OTk5OUJFQD87PD08PDo4Njc5PTw4
-Nzk6Ojg7Ojs2ODk6Ojc3Ojk7Ojs5OTk5Ozk7ODU4ODg8ODg6Nzc4ODg7OTg7ODg3
-OTc3OD04NzU4OTg4Nzk4NzU3OTk6OTc3ODo0Nzc4PDo5OTw4Nzg5ODk3PDw7NzU5
-NzU1Ozo5Ojo4ODc3NDY2ODc3Nzk5Ozc3Ozo6ODc4ODY3Njg3NzY3OTs4OTs2NTc4
-NzQ1NzQ3OTg4NjU3NDU3Nzg5OjY1MjY1NTI3NDM1ODY0NjYyNDU4ODI0NTs3Njg4
-NjY3ODg6OTo7ODg0Nzs4ODg2ODcxNDc6OTc2MzM1OTc3NzQ3NTk7NzQzMzQ2NjU2
-MzU1NjY2NDU1ODIzNDM2NTc3NDU5NTU2NzU6ODQzNzg7Njc5OTU1ODc2NTQ1NjU2
-NjU4NjY3OTY7NzUzMzM3Njg6NTU1NDQ0Mzc2OTY3MzI1NTg2NzMzNTY4Nzg4MzU1
-NDQyNzU2NzQ0NjQ0NDQ2ODYyNTY2NjQyMjAyMzMzMjE1MzIyMzQ1MjI0NzUyMjM0
-NTY1Nzo3NjY3Njg5NjQ0MTM0Njc3NjY5NzU5MzQ1NDU1MzIzNDQ0MjI6NDMyMzQ0
-Njg3ODg2MzQ1MzQzNjM1NDg3NTQ2NjU2Njg2NjIzNTMzNDU2MjM1NjU2Nzc3OTY4
-Nzc4OTc2ODY4NTM4OTg5NzY6Ojc1OTc5OTg2NTU4OTc0MTU1NzQzNTg1ODk0NTM0
-NTc1NDQ4NjQ3PDk1Nzk5NjU4Ozc3NzY2Nzk4NzU0NDc6ODw6Ojc5Ozw6ODk/Ozs1
-NzQ2QTs3Njk4Ojs8OT05NzY8OTk3Nz4+PTs5NzY1Nzg5NTY3Nzc4Nzg8Ojo5OTg5
-Njo5NTo4ODU0NjU0MzYzNDc2NjY3OTo5ODU2Njk2OTc2NDQzNjU2Njc4Ozc0NTU4
-NzU3NDY0MzM0NDU3Njc2OTo3NzU2ODs6NTM3ODc0Njc2NTY2MzMyNDY6ODk4NDY8
-ODc4Njc3NjU0NTY7OTU2Njg3ODg2NDc1NDI1NTQwMzQ4Ozs5ODg3OTg3ODs6PD06
-NzY3ODg4NTY4OTo4ODlJiHM/Ozo7OTo5PDs6OTc5OTg4Ojs4Njo7ODg6ODw7Nzc4
-PTo5Njs5Ojc8Z1tLQj47Oj46Nzw+PTo4Nzc6Oj4+PUFAPj4+PUA9Nzk9Pzw5NTg8
-Pjs+PUFDPDw7PT1DPUBBPj4/PzxBQkRCPT5ER0RJSk1KRkJGR0NDREBBQkhFSEVB
-R0lISkdKSElHSEVJRkRERURIRUZERkpJSExWTklMS0lISUtHSkhJTlpPTVNNTUxN
-TktNTElIS1BSTlJWTEpTT1VOUUxRU1NSTklJSoG8zNXc3+Pl5+np6upTVFNSXFdZ
-YFdRUUxNTVFQTkpGRkZDSEhQUEhOR0dIREZIS0hCRURDQUNGRkJBQkNCQTxAQUhB
-Pz1DRkNBPz1CSEA/P0E8Ojw8QDw3Pzw/Pj49Pjw9Pjw8Nzg7Ozs4Ojg9PD5BP0A8
-Ojk4ODo6Ozs5Pz89Qj46ODY8Ojo5Qzg9PDs4NTc9OT04Nzc5ODg6Ojo8Nzs9QEI6
-Ojc2NTk7O0NAPDw7Ozo9QD46Ozk4Nzk6ODk7Pzs9Ozk5PTw9PTc3ODk5OTs6Ojs6
-Ojs4OTk7PDo7OTY2OTc2NzY4OTs/PDg3NjY1OkE7OTo1Ozg3NTU5Nzc3Njo3Nzc2
-OTY3OTk7Ojk3NzY3ODU1OjU0Ojg2Nzg4OTY2NTg3MzY5Njg3ODk1NjY2NjU0Nzs6
-ODo3OTY5Ozg3NDk0NzY1NTQ3Nzk2NTY8NzU3NDU6NTc8PTc2MjU0MzY2NjQ5ODc0
-NTM1PTk4Ojc1ODY3NjY1NjQ3NTc2Nzc3Nzc6ODU5OTs5OTY3MzM0Njg5Nzk1NTY0
-Nzk3ODM0ODo6NzY3NTQ2Njc1NjY6NTUzMzIzNDMyMzQ0NDQ2ODY0NzY3OTc1NDY6
-Oz0/MzI0NTU1MjU0MzU1NDU1NTM4ODg3NTY1MzQ0Njg3Njg1NDY4Ojc1NDE2NTY0
-NjYzNjUyMjE2NTQzMzIzMTQ4OTY2NTQ1NTQ1MzQ2NTc1NTMyNTQ1NzY2MzU1NDUz
-NDM2MzMyMzIzMjIxNTU1MTQ1NTY2Njk5MjU0Njg2NjY3ODIzNTUzNjQ1ODs5OTo6
-ODY5NDQ1NjM0Mzc2OTMzMjI0NjQ4ODU1Njg4NTQ0MzY1NjQ1Njg5Nzg1NDM0NjM0
-NTQ3ODQ1NDU1NTQ0MjU1Nzc3Njc2NjY2Nzg3ODg2ODk5NzM2ODo5OjY1ODg1NjY2
-NTY2NTQ3Njk4NDMyNzU1NDYzNjg6PDg1NjU3OjY1NjU4Pz07Ozk3NTU4Nzc0Nzo3
-MzU2NjgzNDM5NzgzNzo3Ojc4Nz83NTc3NzlBPDc5Nzc1ODg3Ozs1NTo3ODg2OTo3
-ODg4NzY2Nzg6Ojs4Ozg2NzY6ODg5NzY1NjU0Njo0NTQ1Nzc0Njs5Nzc3NDI9Ojg1
-OTc4OTg3OTc1NDU0NzQ2OTgzNjk4NTU1Ojc1NDU3NzY5ODk6ODU0OTo6ODs9OTc1
-ODc2MzUzNDY3OTY2MTM1NDU4Njg2Ojk2Njk2NjU1Ojk1NTc3NjU2OjY0ODQ1NTY2
-ODc3NzU2NzY6Ozg3OTk4ODY2Njc8OzY2NTk4PDw9Nzg7OTs9P3KNUjo3Njc5Pjw7
-Ojg3ODw7ODc1NTY4NzczNTc3NTQ3ODc8QDo5Ozo6OUBkaUk/Oz08PDo5Oz08OTg4
-OjxCQTo6PUE/Pjw8QDs4PTk6PTo2Ojw9Pj0+Pj88Pzk9Pj8/OkBGPjk5Ozw7Ojs9
-QkFDQj9CQ0RDRUdGQ0A/TUZCQ0NDRD86QUNDRlBIQkdFSEhKSEVDRkdEQkhMUEtM
-TUxKRUhPUE9OS0pLS01OT0lOUk9XTEhHSlFMT1VTT1BTT1JVTU1TUE5KTUxST09S
-RVFRibrM1drg4uXn6Onq60tLV15YUlFVUFZVU1BUWk1QTE9NSkVERkhLTFBHSUxN
-S0xKSEJDQ0BDQ0FDRENEQ0JAPj5AQkBDQD08Pj4+PT9BQjw/P0A8OUJAQD1APTw8
-PT49OT4/PD89PTs8PDc8Ojs9QD09Pj06Ozk9OTk6OTk9PT88O0BEQTs7Ozw5Oj4+
-Ozs7OTo+Ozo2NDc5Njg5PDs7N0A6Ozo4Ojg8PD08Pzs7Ojs9Pj09QUFBPDk8Ojs+
-Pz87OzY5PD46Oj48PD05OTs9Ozw8OTo6ODg4Ozs7PT05OTo4Nzk2NTQ3OTs5ODM0
-NTk6PDw6OTk5OTk5OTk4Njg9PDY1ODo4Ojk8OjY2NTk4Nzk3NzY1ODg3Nzo5ODw5
-Ojc2NzQ4Njc5Ojk5NTg4ODYzNDs5PDw7Nzc2NjIzMzQ3NzQ0ODk3NjM0Nzc6Nzs9
-NjY1NjM3NjU4Nzw6NzY0Ojo3OTY5Nzs7ODk1NDc7Ojk3Ozo3ODU6ODQ1PDo4NzY5
-Nzg4Ojg2NzY3ODc4ODc1NDMzODU0MzQ3Nzg2NDY5Nzc3NzM2MzQ2OTU0Njc5NjU2
-OTQyMTAzNTg3NTU1NTg0MzU2NTc4Nzk4Njc3NjQxMjUyNDM2Nzc1NTU2NDQ3OTk1
-MzM1Njc4NDQ3Njc4ODg2NjY3NjU5NjY4NjI1MTIzNDM1NjQ1NjM2NzMzNTU2NTUy
-MDY0MjM0MzQ0MTM0NDU2NDUzNDY2NTQ0NjQ1Nz83ODAyMTA0NTo4NjIyNDM2ODU1
-NjM2MzU2NzM0NTQ0NTQ1NTU1NTY1NTU1Njg7NjY1NjQzNjY1Njc5Nzc4ODg1NDM2
-NzQyMzQzNTM2NTM2Njk3NTQ1NDM0Ozk2Nzk1MzM3Nzg1ODY2Ojc1NzQ0MzM0MjM6
-Nzk6Nzc1NzUyMjU4Njg0MzY4ODk3MzI1MzU0NTg5Ozg5OTg1OTY2Nzc3Njg6OTQ0
-Njc4Nzg1OTc3NTc3NDg3NTM1Nzo2NTY3NTE0ODU5NzI1NDc2Nzo3OTg1ODkyNjo4
-ODs+PTc2NTs5Njg5Ojc3NTY1Njs6OjY2Njk4ODg4Ojg1Nzk5Ozw5PTg2Nzk4Njg4
-NjY6NzY3NzU2Ojk3NjY1OTg3MzY2Nzo6ODg5NDYzNjc1NjU0NTc2NjY2ODc2NjU4
-Ojo3NzY1NjU1NzYzMjM0Njk3NDc5NzU2NjQ1NzY0NzY1Nzg3NDc1ODc1Ojk5Nzc3
-ODU2NzU5OTo6Nzg1NTU3OTczMzU3Njw5NzY2NTU1Njg7Ojo4OTc5Nzc3ODg4OTg5
-PD08PT06Ojw7PT9be2Y+PTo5Oz87Ojk7PT06NTU3NjU3Njg4OTk4Ojk2NTc5ODk9
-Ojc5PTk4Smx3WD8+PDk9Pzw6OTk3Ojo4PDo6QUA6PD07Ojg3O0A6Ojs5ODs9Qz09
-PT8/Pjo/QEA+Qj8+O0Q/Qj8/QD08OkFGQkNEQUM/PkNHTEI+OkBIREdFPz9FPz9E
-Q0tFRkRDSUdFREtKP0FFREhHS0dGRktJSUpFRkhKSUpNSUdJSlJSTUpOVk9NS0hJ
-TUxNS0pPUE5OUE5QTUtIU1BNUU5OUEtFTFyHucrV3ODj5ejp6enrVFFZXFhVVlJV
-VV1aWFZUVE5QTkpMSU9PS0tMTEpHSUpGRENGR0RAOj5CP0NDQUJERUA+S0pCQT1D
-QT5CQ0A9OTo9OzpBPUBBPj1CPDk7QkBAPztBQz46NTg6Oj47Ozo3ODw/PkE+OT4+
-Ojk7OTQ5PUFAPDs7Pj0+OTY4ODk9Ojw8Ozo8ODo6Njk4PD1BOjo5ODg1ODo2Oj49
-PztCQDw5Ozg2Njk7Ozo7PD46QD09PT07Ozw8Oj0+ODo7PDk6OTs9PT07Pzk7ODo2
-OTg6OTs5OTk7OTg8Ojk0NDU4ODk6NTg7Ozw7Ojo9ODc6ODc4NTg2Oj07ODc3ODg9
-OTo5ODU3OTk1NDM2NzUyNTg8Pjk2Ojk0OTg1MzQ2OTo8PTg6Ojo1Nzg2NTY7Ojc2
-Nzc2ODU4Njc1ODc1NjMyMzU2ODk4Njk5Nzc3NDU2NTc4NTM0Nzg5Nzg4ODg3ODk4
-Nzo6NzY5Ojs6Ozw3NzU2ODo4Nzg2NzY3ODM1Nzg3OTg4ODQ5NTc2NDc4ODU4OjQ0
-ODU0Ojc2MjQ3PDU6NzY1NDQ1NDQ3ODU1Njo4MTIyNjc3Nzc1NTQ0ODg2OTk3NDU3
-Nzg0NDU4Njk0NTU1MzY3NTUyNzc1NzMzNTQ0NjY0MTU1NjY0NTc1NTg1MzMyNDU2
-NDU2NTQ2MzQzNDQ3NTY3MTI0MzQ1MzMzMzM6ODU0NTQzODY1NTM2NDU0NTQ0Mjc4
-NTU0PDg5NjMyNDQ1NDc3NjU4NDY2MzQ2NTQzMjMyMjczMjQ0MjMyNzUyNDc2NDU1
-NjY2NzY3OTc1MzI1NjQ5OjY1NTQ1NjU2MzU0LjMzNzY0NDY0Njg2NTM2NDYyMzY1
-MTM2NDQ3NTU2NTM1Njk2NzU0MzE1ODY3NDY1Nzc1Njk3Njc5NDY2NTU3ODg5NzUz
-MzQzNjU3Nzk5Ojg4NjY5Nzg0MzQ1Nzc2Nzc8Njo6Ojg5OTg1MTU3NTQ3NTY2ODY0
-NDU2NzUzMjI0Nzg3ODY4OTY0Nzw5Nzc4ODc1NzU5Nzc5NzQ2PEM6Njc6Ozs2NzY1
-NTc2OTY4NzgzOTc4OTo3Nzo5ODg4ODU1NTc1MzU1NTk1MzU5Nzg4Nzk5ODUzNzo+
-Njc5NTM2NDMyMzY4OTY1NDU0NDMyNDU4ODg3Nzk3NTI0MzM2NTc4Njc4NTU2Nzk1
-NTY2NDYyNjg3OTk4NTk3Nzk2NzU2ODg2OTc0NDU1NjY0Oj87ODY7Ojk7PTs3OTc0
-NTU1ODY3NTY4NjU0NDY2Njg6Njg3ODw7PUBCQDs5Nzo8U4ZdQDk4Ojs6Ozo3OTs7
-PTs7ODU4Ojk5PTs7Ojc1Nzk6ODY2ODo4Ojs9QkJKeolwQz07Ojo7Nzk7Ojg5Ojo5
-PTo5OTo5Njk6OTw+Pj07Ojs8RkFAPT09PkE/Pj5AQEBBPj8+PUJAQjo9O0BDQkI8
-Oj5ERERCR0ZDQTpARERHREVDPkRFR0NEQUZMSEFGRkVHRkhGREJDTFJORkRISEZI
-SkZGRk9MTEpLUEdKSU9JTktJS0lLSk9KSkZGUlRQVU5MTEpPUlVRWVVQU1FPSExN
-U4C5y9Xc4OPk5+np6+tVWFlcZFhUV1NTVF1WUVRVUlBPTU1RUE9QTkpNVExMTk5J
-R0NGQz1AQ0A+P0hEQz0/Q0BEQkI9RkRCQkQ+PUM6PDtAPz8+PENBOzk8QEI/QD4+
-PT09PDw+Ojg5QD0+QDk+Ozo7ODg7QDk6ODY4Ozw+Ozs3ODg5PT04Njk8OTs5ODo4
-OTw9Njg5PDw6Ozk5OTg4Ozs7Ozw9QDg6Ojs8OTk+PT9AOzk8PT87Ozs6Pjw+Ozc8
-QEBAPTk6Ozk3Ojo7QD4+Nzo7Ozs5OTo6Pzg4Nzk5PT06PkA8Ojo/ODU8Oz5EPDc3
-OTw6Njk5Nzk2ODY4ODs8Ozo8Ozc7PTk5NTg7Ozc2Nzk4Njc3Mzk1NTc3ODc4PDs+
-ODc2NTc2NTk4ODo4Njc8PDc4OTg6Njc5Nzg5NDU4NDU2NjU2ODc5ODg6NjYxNzg4
-NTYzNDEzNjg4ODY3OTc3ODUzODc7ODc3ODY2Nzs6Ojs8Ojg5Nzc5ODY2NTg5ODY2
-Njg3ODk2NDU3ODg4NjQ3NTU2NTc2NjY6Njk2NzY4MjY2MzI1NTQ1Nzs4ODg6OTc1
-ODc4MzQ3Njc5NzY2OTg1Njc2OTg1MzY3MzMzNTg2NjU2ODM1NDM0NTQ1NTI0NTc3
-NTMwMjM0NTU2NTY1NDMzMzc3ODc1MDM0NjU1NzU1NDQ0NzU1NjY0MzI0NjI0NDc2
-OjQ0NDQ1Njg2NDg1NjU0NjY4NTc3NTE1Nzc1NDYrLDA1NDU2NTQ0NDg0NTU1NzY1
-MjMzNDA0NDY0NDM2NjU1NjY3NDY2NDY3ODU3NzM0NTM1NDMzNDUzNDg3Nzk2NjY3
-Njg1MzQ2Njg2NjQzNjQ2NDQ0MjM0NDQ3NDQ2Njg2ODU1MTQ3OTc1NTQzNjY2NjIz
-NTY2O0I3ODYzNjo3Ojg2NjU0NTU2Nzg0NjI2ODU1OTg2NzU1NTY5NTY2NjU4NTc2
-OzY4ODo5Njk6OTU1NjM2NzYzNjg0NDQ2NjUxNDs4NzU2NTQ0Njk6ODY2ODc3NzU0
-MTE1Nzg5Ojc4Nzg3PTU1NTg7Nzg3Nzk8ODQzODc4ODo4Nzo3NTQ1NzY3Nzc2NDg5
-NTk7OTU3MzU3ODk4Njg4ODY6NDQ6Njk4Njg3ODY2NjQ2Ojk4Nzg3NTY4OTk3NTM3
-NjY0OTU2NTM0Njs6OTQ1NTY1NjU0Njc1NDQzNzo2ODY3OT0+OTk3NTY0NTg6NjY3
-Njo0NDY1NTk4Ozk5Nzk4ODg5ODs4ODU3Nzc4NjY1OTM3ODg4Ojk2NzU2OT06OTg/
-VGFSU0E8P0aFl1Y9ODk6Ojo4ODc7ODM7Pjw7OTs8OTc5PDs6Nzg6PDg5PD43Nzk8
-OTk8QkaArKRgQEA8PDs7PDw9Ozg0PDk5Nzo7PDk5Oz09PkA+Ozw8PT9DQUBBPDo6
-PD0+Ozo5Nz1AQUBAPj4/Oj5BPkNEPz5AQEJIR0U/P0JAP0hFRkVERERBQUZGPkVC
-SEFFSUBERERBQkdFSkhGTU1IRkdKSEZGREtJSk9QTk9RT0tLTExMTkxVVVBRTklM
-S09VUVBSTUpPUlNSUk9PUkdPT0pKSkdKcLnM1d3g4+bo6urr61lUV2NlXldWU1tc
-XFNcVVRRVFJVV1JSWExJS0lMUE1QSk9LSENAQUZDSUNDR0lFR0hCQ0I/Q0NGRURC
-QEI/Pjs+P0BGQTw/P0REO0FCQz0/Pz0+Oj47Ozw9OzlAPTs8PT0/Ojk+Ozo9PTY5
-Nz0/Ojs5OTs8PkE7Ojw6OTk6ODg3ODc6Njk+Ozg6OTo7Njg4ODw+Nzk8Oz88ODo5
-Pj45Pz8/QUI7Ojo8PT45ODg9OjtBPDk4Q0c8ODY4ODg+QD49Pjo4PDs9P0A8Pjw7
-OTc5PkA8PTs6Ojw6OkM/P0A+QDs6Ozk3RT86OTY5NzU0Ojk5PTo4Ojs5Ojk5OTo7
-Ozs8ODg5OT45ODk3NDM2ODg0ODk5ODk6PDkzNzo2NDUzNDY6OTk6ODY3ODc4NTc2
-NDc4NTU2NTg2Njc5ODY3ODY2NTg6OjkzMjk6NjY7Njk3OjY2NTg1Mzo5OTs8OTk4
-OTY2ODg3Ozk5Ojo4NTU1NTUyNTU3ODo7NzY0Njc6Nzo5ODQ2Mjk5NzY3OTo5ODU0
-OTg0NjQ1NTU2NDQ3OTk3ODU3PDc6OTo3Ojk5Ozo3Nzk3Nzk3OjU2NzY1NzM1MjQ0
-MzU0NjMzNDI0MjE1NDQ0NjM2MzU0NTY1NDIyNjQ0NTYzMjMxMzQzNjU1Mzc2MjM0
-MzM1ODY1NzU2NDM0Nzg3MzQ1NTM4OTc1MjE0MzM1Nzc4MzI0NDQ0NTM2Njc1NzY1
-NjU2NCYyNDU1MjI0NDQyNDIyMTQzNTY2NTIxMzRDNzU2ODs3MjU2Ozg3NzY0OTc2
-NjQzMjc4NzM1NTU1NDM3NzU4NDY5NzU4Ojs4MjQ3OTc3NDM1NDU3NTM0Mzk2N0M5
-NjQ0MjI3NTU3MzM0NTc3NzU1ODY0MzUzNDU0OjIyNTM1Ojk4ODg4MzU0NDo6OTg3
-NzQ3ODk2NTo4OTg4NjY3NTU2Nzg0MTM6OTg1Nzo6ODU1Njo4MjI9NDY2NTs4Ojg4
-NDQ3ODY2NTQ0NTQ6Ozo5ODg1ODM3ODU3NjU1ODg7OzU2NjQzNDQ0NDIzNzc4NDc5
-ODo4ODc4NzQ1NDc4NDEzNDU4NzY3NjQ3MzE2NDY4ODc2NTY2ODY2NjlBNjU5NjIz
-MzY3ODY2OTY3ODw3MzU5NzY2Ozk2NjY0NDc2MzQ2Njc7OTk2NTMyNTYzNDQ2ODY1
-NzU0Nzo4Ojo5Ojo7PTg5Njc6ODg6Ojk2Nzg4ODg3NTY4Pjo5NDc5OTY5OTo7ODU3
-OTk5Nzc3ODg2Nzk3ODs5ODc5Ojo7QVRtYmqPb0NHeYqDTjo6Ozg6Ozg4Ozg4NjY3
-OD88Ojs5O0A9ODc4PTs6Ozg/Ozw6Ozo4Ojo8QFWQpmo9PDw6Ozo7PkA+PUA6Pjg5
-Oj0/P0RAPTw9PD5CPUE/QD4+PUI6Nz08Ozs7PTY4Oj48OD4+QEJBPUBCQkBAQ0ZF
-QUJFQD8+PkNHSEJERkE9RENBQkJERklEQT9DPkRKPkRMTk1ISkhNTE9HR0pIR1BT
-S0dJSkVJTkxQS0pMREdHS1JMS05NS01NT01STUpOUlhXWFNNUlVQU1JOS0lPSExz
-usvV2+Dk5efp6uvqWFdYXmlfWFtcXFdWU1NUUFRSVFBSUVJUVE1OSklKVEtOT0hH
-T0tHSUxKR0ZERj9BPDpDSERAQ0RGRT5BPkA9RUI+QDw+QEE8QkJBPzs9PEE8Ozo4
-NTc7OTpCPDg6Oz05Oz48Nzo8PUA/PURBPT87Nzo9Pjw7Ozw+Ozw4ODU1Ozc5PD06
-PDs7OTo7OT06OTY3Nzw8Nzc5Oz06Ojw7P0A+OTk9QD08Ojk9Ojo9PTk7Ojk4Oj09
-Ojk/PTw9PDs8QT46OTw7Oj1EPDw+Ozg8Pzs8PkA8PDc7Pjk8PD04OUE7Njk6ODU5
-ODc5OTk3Ojk4Njk4Ojw8PD48Ozo4OTo3NTc4OTk7Pjw5Ozs7ODQ2NDg1Nj05PDw6
-NDc2Njg7NTM0NTY3Njg3ODY3ODc2NTU4OTo3Njo4ODg4Nzs5OTk2Ozg5OTo4OTk+
-NzQ3Ojg5Nzc2Njc2MjQ2Njg5ODg6Ojc5NzY3Njk6ODY6NzU0Njc1NDc2Nzk5PTs6
-Ojg2Nzg3NTg2Nzc4PDY3NTU0NTQ0MzM5NjU0NTI1NzY3Nzs5NTU1NDk3ODY5ODY3
-Nzg5NDM3Ojo7Nzk2NzY5NTY2NzQzMzY4NDM1NDM3NTM0NDE0NjU0NDQ1NTI1NjY2
-NTc6NzY4NDU0MjExNDU0NjU2ODUxMTIzMzM0NTU2My8vMDEyNDU3NTMxNjg3NDU1
-NDQ1MzUxNDEzODo1MzMzMjIyMjIwMjIzNTUwNDIzMDIyNTQ0MzMxMzA0NTc0MzM2
-ODIyND04Njk6ODw9Nzc3Nzc2NTQ1NTQ3ODQ3NTI0OTU3ODU0NTg6NzU1MjY0NTQ2
-Ojk1NjY3NTQzNDQ2NjU3OTc3OTk3PDMwMjMyMjQ3ODU3NzU1NDg2Njc4ODY7OjY2
-MzMxMzM0NjQyNDQ3NjUzNTg3NDo8ODc1Njc6ODc3NTc1Nzc2NjI3NDM2NTY2NTg2
-OjU0NDMzNDY4ODk5ODU1OTk4ODk3Ozk2Njc2NDQ0NTc3ODg5Nzk4OTk6OzY5ODY4
-Nzg2Nzc2Ojs2Nzc1NTg3NTQ0Nzo9ODk4NzU3Ozk1MzM1MTc2NTc2NjY3OzQ3NzQ2
-NzU3MzIyNTU3ODY3ODQ2OUI8PTk2NTI3OjM0Nzs2OTc3Njk3OTg5ODU1Nzo3NzU1
-NDU0NDU2OTo6NzYzNTM2Njc0NDEzNjc1Nzk2ODg5OTY2Ojo3OTg6Pjo5Nzg6ODY4
-Njg7OTg3OTo6Ozc1NTc5Ojk8NTY5PTg1ODU3NDg4OEA3NDk3Nzg7Ojo5OjtHa2xX
-d6CZcm+QdEs7ODs/Ojk5OTo3Nzg4Nzc2OD0/Pz0+PTs2Nzo7Ojg7ODY2Oj06PT06
-OTo+QldjSEI/Oz8+Ozg5Ozw8PT46PEFBPTw7Ojw4PT07Ozs9OT1BPjs9Pz4/Ozg6
-OTk8OTw4ODw8Ojo9OkNDREFBPkBAQEFDQUNFPztBREVGQ0VEQT5ERkRFR0VEQ0BE
-RUZIREZBRkdGSUdHTExHRUlJTE9LR0lHS0tMSkpMS1NPVUpIS1BJTVFPSkVMT0dL
-UVBTUlFUT1FTUFBUUUxSUVBNTVFPVX+7y9Tc4OLl5+np6updWl1dXFZUUlVQUlZT
-VFFNUE5NT01QTEpKSEpJTk1HSkZHRE1MTUhJSEhGREM9REZEREE9P0JCPjxBQUFE
-QEFBQT4+PD9CQUQ/QT4+Oz1CPkA4Ojk+PDg7O0E/OTk7PD48OjY5Ozo9ODs7ODs0
-Oj49Ozs7Ozo9Ozw5NTY2OTk3Nzg4Ojo6OzxBPDxAPkM6ODQ3Ojo8QDk+Ozk7OztC
-PTo5P0A9PTw+PDs/QEE8ODtAPT09OTk4PT49QEFDPDg5Nzo9PDs8Ojs7Ojo6PDw9
-PTw5Pz0+PD4/Pjo4OTc5PDs7OTo9PDk7PT08Ozo7PTs+OTo5OT05OTs7OkA5OTc4
-ODk6PEFAPzw7PDg2NTY4NTY6OTo3Njc+OzY1NDk1Ojc3Njc6NzU3NjU3NTk2Njk2
-Nzo6OTU0Nzc2NTk9Nzc3ODk5OTU3ODw5ODg2NTc4NDM2NDU3Nzc3Nzc3Nzw5ODc2
-MzQ0NjY4OTY2NTU1MzdBOTo6Ojg4Ozc2NTY4NjY2MzY2NzY1ODY0NDQ3OTU3NjU0
-NzU2NjM2NTU1NzUzNTU4MTM3NTc2Njc3OTY2NTg3NTg3ODc1NTc2NjY3MzM0NjUz
-MjY4PDU1Nzc2NjMxMzQ3NDY1Nzg1Njc5ODU4NjUyMjIxMDAyMzUxMTIzNDQ1NTgz
-NDYzNDU0NDUzNDIyNDQ1NDY2NDMzMzU2NDU3NDY2NTM2Nzg3MzIyODs0MTM0NTQ0
-MzQ3NDw6NTU0NTkxNDAxMTU2NjIyNDY5NzYzMjU3Ojk5OTk3Nzc5NzY1NDI1NTY5
-NTQ2Nzg3ODk1MjI2Nzg5ODg6NjU1Nzg4NzY1NTY3NTU4Njc1Njc2ODg4NTQyMjI0
-NTc1MTQ2NjI3NjU2NTU0NTU5NDU1NDE0NDE3ODU2Njo3ODc4NTMyMzo4Nzg4OTs4
-NjI1Nzg5NDI4ODY3PTw5ODM2OTo6ODU1ODU3NzczMzU2ODs6Njg4ODo4Nzg5NDU2
-ODY0NjY3ODk2OTg0ODw9OTU1Nzg2ODg7ODg3ODY4NjUzMzg1NTMyNDQ3NzU3Njk7
-PDc4ODM2NTMzNDQ4NTUzNjk4NzY0NTUyNjQ1OTc5NjU1ODc5Nzw5NTc7NTg4NTY3
-OTY2ODY2ODU5Nzo3Njc5OTo6NzY4ODU2Njc5Njo+Ozk4NzU4OjU0ODY2NTU1Njo7
-OTc2NzUyNDY3ODg4OTc3NzU1MzY2NDQ1Nzc3OTU2NzU4ODg6ODg5OTk6ODk3Ojk3
-OTc4MzQ2Ojo4Njg5OTY3Ozs7QGKHXm2EnqmnoKF0Qj08PTw7NTc4ODk4ODo3Nzg6
-ODs8Ozs6PDg4Ojk2ODw+Nzk4O0BAQDg5PTs/QD4/Ojs4Oz5DRDw9PT5CQUE/PDo6
-PD8/Oz88Ozo8Qj0+Oz9DQT07QEE7Pjs8Ojc5ODw8OTg3Ozo/QD4+QTs9PT5DQkFF
-QT0+PkA/PEFAQERIRkVFRkhKS0VKREJKUU9PSERHS0pJTlFJR0pNUExJUE1JTE1O
-SkdLT0tLTlFJS0pMVFJKSElJSEtNTEpSU0lKTFFSS05NSkVRUU9VUFJNTU5Jf7jM
-1dvg4+Xo6err6lpZWlFXYmNaT1NQU1hQVVZVVE5SUFFPTlNKTk5PUkJFSUtITVBI
-UEhHS0VAQEJLRkVDQkNJREBBQkFAQ0M9PkNBPUA/QEJAREI6P0NBQz8/PUE+Ojo9
-Ojg6PT0+Ojs2OT8+OTs7PD04Nzo3ODs5PD4/PT08PTg2OTg5OTk5OTc3OTg5Ojk4
-Ozw7PTk6PDk4Nzg7PDo4PDk7QUA7OTs6OTc5PTw8Pjo6OTo8OTc5Nzk7Oz48PD1D
-QD87RD8/PDk8Ozs8PDo2ODc9PTw/PTw7Ojk8PDw6ODw8Ozc2ODk5OTk3Nz4/PDw5
-Ojs6Ojo7Pjw9PDs6Ojw6OTs6Ojg5Ojg6PDc5OTk4Nzw5ODg6NTc7ODg2NjUyMzc5
-OTo9OTk5PTc5OTk3ODg5OTg3NjM3NTY6PDk4NjM0Njg2ODk6OTk5NjY2NTQ2NTc0
-NDMxMTY3Ojc1NDU1Nzg3NjU4Njw5NjY2MjI1NTc5Nzk6Ozk3NTk3OTc3NTY4NTU1
-NzU2Njc1MzU2NDU3NDQ1ODc5OTk5NzY1NDM0MjQzNTc1NDQ1NTMyMjc3NDU2NjU3
-NzU4NDQ1NjU3NDU2NjQ0MTI0MjY0MzY4NDY4NzU3Ojc1NTY1NTk0NTYzNDc4NjU1
-Nzc3NzIzMzM1OjU1NTU0NjY3NzIzNTQ1NDY2NDQ1MDU2NTU1MzU3NjQyNDU0NzQ4
-ODc5NzM2ODQ1NTUzNDc6Ozk0MzQ0NDU1NzY2NTQ0NzY1NDE0NDQ1NTg1NjY6Nzc2
-NTU0NDQ4Njc2NzY1NjU3NTg3NDQ1Mzc8OzU2ODU1NzY3NTY5Nzg4Ozs6NjQ1NDQ1
-ODMzNDIzMjQ4OTc3NTg1NDg2MzQzNjc3NzQ2NDc4ODc3NTU4NTY2Nzc2Nzg3NDc3
-Nzk4ODU6Ojg2ODY0MjM5ODczMzY1NTY1ODQ1OTM2Ozc2NTY5OjY1NTc5Nzc0NjQ3
-Ojg3Nzw7Pjs6ODo5Njk6OTg6Nzg3Njc3ODw6Ozo7PDc1Njc4Nzg2ODc3ODY0NTk4
-NjUzMzQ0MzQ3NTU1NDg4OTo6Ozg3NzU2ODg4OTc1Ojc2Nzg2NDQ1MTg3OjQyNjo3
-Nzc5Nzg1NTU2Nzc2MjQ0Njc2ODU4NzYzNTU1NjY3NzY3Nzg5ODY2Ojk4QTc3NzUz
-MzY2NzY4NTc5OTc2NDY0NTUzNTQ5OTo7OTY3Ojo2Njc3NzU2NTc5ODg2ODY9OTY4
-Nzs3Nzc5NTU2Nzk3OTo6NzY5Oj44Nzc4NTc4NzU3ODw2Ojc4ODo7PjpBgIVSV5Wr
-srGbj04+PT06ODk4Nzo5Ojc3ODs5ODg4NTc6OTo6ODs7PTo7Oz89Ojg7PT8+QDs5
-OD9BQkA8Ojo8P0E9Pj06ODw7Ozo8Pj1AQkA/PDs5PkA9QTw8Ojw8PkBAQT5AOzo6
-Nzg4OD5BQENAPERERUFCREVBRkNHRkNCQkE/P0REQ0RGSEdEQ0RHRUFHSElJSkpI
-R01OTlBGRUdLS0lJSkVHT01HSkdKTEZFSk9OTE1LSktPTFBMTUtGTE1KS0xRVU1Q
-TUxNTlZTS0tLUU9UWVRbVFNMTFF5tsrU3OHj5ufq6uvrWFtcXlxWV11aU1hXU1RQ
-UFBTV1BJTU5JRklGR0lNSEpRTU5ISkdIRkhNSkVES0tFRkNER0dFP0JDSUE6PUJE
-Q0BAQ0BDQj5DQUJCPkI/PT5BQjw7Oz1CQEBBPzg7PTw7PENDPkE+OTs6Oj4/O0A/
-PT07PDw5PT48Ozw4ODpDQjs5Ojk7Ojs6ODs7QTo4Ojw+Ozk4OTk6PT09PDg5ODc5
-ODo9PTs6ODg4Ozs9PDs6PTs6OTo7Oz8/Oz09PTc6PT06OTo8NzY5OT06OD08Nzg4
-Ozw5Ozo9PTs5Ozk6OztAPDk3Ojo4Ojw7Pz07ODY6PTs7Ojw7PTw6Ojk5Ojo7Ozs6
-Njk4ODg4OD83Ojw7ODw7NTUzNDIzNTg6Ojs1NTw8OTk7Ojs5NzY0NTk4OTI3Nzc5
-NjU3OTg5ODY2NDY5OTk4OTk5NzQ3ODY4NzU2Nzc5NjY1MzU1MzU1NzY5PDo3QTUz
-Njc2ODk4Ojc6ODc6NzQ3Nzk2NTUzNzc2NDM4ODc4Njc1NDY4Njc2Njc2ODo7NzQ3
-NTU2MzM3NjMyNDIwNTg4NTQ0NjU4Njk5OTk3MTM2NzU4OTY0NDM2MzY1OTg1NTU1
-MzQ5NzY4OD4+Ojs2Njg1NDc5Ojc3NzY4Nzg1ODc2Nzk5NDQ0Nzc1NjM1NjcyNTU0
-Ojg1NjY3NjM2NjMwNDUzNTYzMjQ1NDY2NDYyMzY3ODk2NTQyNzY2Njc2NTQ1MzQy
-NDIzNzQ3NjE0NTk0NTU1NTczMTIxMTQ1NDUyNjg6Nzc1Njs6Njc1NjY1NjY2NTM2
-NjU2OTY2Njg3NzU0MjM1NDY3NTU1MzMzMzA0NzMyNDY1ODY2NDY0MzMzODg3NjU2
-NzY3NTo5NzMzNTc6OjU1NjQ1NTQzMTM1MzU2Ojg1Njc3NTUzNDUzNzUzNjk6ODY0
-Njo5ODc5NjY4OzpDOTk4Nzg6NjY1NjY5Ojg7OTo4Oj06OjQ1NTg4Ozk6OTk6NzU5
-NTU1NjU6Nzg3OTk2OTo4Nzg4ODI0MzQ1NjU3NTY5NTc4NTI0Nzg6PDw4NDU1ODc3
-Nzc3Nzg6PTo1ODc1MzM0MDk9NTc2ODY1OTc2NTo6Ojk3ODg6NDg3NzQ2Nzg6PDo5
-Ojg3OTU2ODc4OTc5NjQ0NTg5PDY3Nzg5Njg5NTU1Njs5NDY1NDMzMjEzMTQ4PDo5
-OTY2NDY2ODc2ODU1PTU2NzU2ODs3OTM1Nzw7OTg7ODc1Njg3NjQ3OTYzNzg4Ozg4
-NDY0Ozg8OTk3Ojw8PDs4OkpzYkVEhqajoJpbQDo5OTo7PDc5ODY3ODg5OTk8ODc5
-OTo3ODc5Nj1BP0NDPz47PDo8Ojo8PDk6Nzs4Nz05PEI+P0A+PT4/Pjw+PT5APTo8
-QUM8PD0/QkA8Ojo7QkBBP0BBQz86Ojo5OjtAPDpCQz49PUE9PkFART8/P0NGPkFD
-PkA+Q0NFRD9BRUVFRkZJRUVGSkxNS0ZLS0lITU1JSE1LTEpHR05NSlFOUEtNRkxP
-TVFITEZJTVFSTklNVFVNTklNSUxMTElNTEtVU1JMS0xQVldZV1liWUxOT3e4y9Xb
-4ePm6Onq6+tgYlFaYFpYXFhWWlpZWFRTUVBNSkxPUU9JS0tISk9OSFNSSklMSEdD
-R0ZIRkNDSERBRD8+QkVCQEVCQEBCQEI/Q0Q8Q0NGQD8/QkQ+PTs8PUJFRz45Pj5B
-QT09PD47PTw8PUJBQD1CRkA5Ojo7Ojs8Ojs5Ojw8PDw6OTk4Ojo9PDtCOjg2Ojs6
-Ozk4Nzc2Nzc2Nzk3Ojw6Oz47PD47Oz09Ozo5O0A9PTk7Ozw4Ojw7PDw6ODw7PTw5
-OTo7PDo6PTo7Ozg7PDw9Ozw9Ojs5Ojo7Pjk6PDo6Ojo6NTk3ODc2PD44Ojs8Pz4+
-OTg4Ojk3Oz02OT5CQzo5OTg5ODk7OTg1NzM3ODg6OTY4OTo5OTc5NjQ0MzQ6Ozk2
-NjU1NjY1ODU5Ojk9Ojc2NzY2NDQ1NTQ4Ozk5Pjk4Ozk3Njo4OTo4ODg3Nzk4Njo5
-PDY2Ozg1MzU2NzYzNzg1ODk4NTY0NTY3ODY5OTc1OTk5NjU1ODU2Njg5Njc3ODU2
-MzY7OTo4NDM3Nzg2NDY3NTY2ODk5NzU2NDg3ODQ1OTc0NjQ0NjczNTc1NTY2Nzc4
-ODUyMjY3NTU1NTQzNTUzNDc2Njc1MjU3NjY2ODU2OTk1NjU3NTY4Nzg0MzQ1NDU2
-ODg3NTU2ODg6NTMyMzY0NDQ1NTQ1NzY3ODQ1NjY2ODc2ODg0NjQ2Njc5NTQyMjIz
-MzU1Njc3NTUzMjQ0MzY0OTU1MzQyNDU1NjY4OTMzNjY5ODg3OjY2Njc1NDI0MzI0
-NzU4NjI3NzY1NzU1NTU4MzMxODo0NDExMzM0OTc3NTc2NDc4Njg4Nzk2NzQ2Njk1
-NDIyNjczMzY0Nzk2NDY0NDQ1MzY0NTU1Njk4NDQ2MjY2NTU1Nzc3OjY3ODI0MzQ1
-OTo4Nzg2Nzg2NjY2MzU2NzY3Nzk6ODY1Njg6Njg3ODg4OTk4OTg0ODcxMzQ2NTc3
-NTo5OTY4ODg5OTo4Nzg1Nzc3Njc6ODY4NjY2NjY4NzY1NjU3ODU1Njc1NzU0NDI2
-NjYzNzg4NjQ1NDQzNTo1NTY0NDIyMjM1ODc4NTY3NDM0ODQ0NTUzODY2Nzc2Njc4
-ODUzNjg4Ozo4ODg5OjY3NjQzNDc5OjY2Nzs4ODY2Njg2NTUzNDM2NjY2OTc4Nzc4
-OTk8NzY4Njg4OTo1NTs3NTk2NDQ6Nzc3NzQzNzo1Njc4NjY2NjQzNjg4OTY1NzU0
-NTg4ODo5Ojo2NTc8Ojk5Njc2NTc4Ozg7NjU3Njg5Ozw6NjU3NzpBanJMRVuInZeM
-aUM6Ojg7Njc2OTo5OTw7Ozs7ODk5ODw8OjY3Nzg9PDs7Ojo5Oj07PDs6Ojs8PDs8
-Ojo9Pz43OTk8PTw6Ozs+OTw6PTw8Ozw9PDpDPTs8Oz05OT09P0BAPkA+PTw6Ozg7
-Oz09Oz1FQzw6OTo7QT06PD9HQ0JCR0Q/PkFER0dGQ0RGSExMSEhJRU1NT0xMS0dK
-TU1IR0ZER0ZKT1RMTU1PT0xHS05QS09PSUpES09KSktOUlJaU1VTUFlUVk1LTEpM
-TEtOU1FYWFRQUVRUWU1GUVRQfbrM1dzh5Obo6err7FlcYFheX1pcVVZcXVhaWVRQ
-UFFNVE5OTFBPS0hHUU5OSk5MUE1PTElLTklGQ0FEQUA/QkFBQEJDRUVDQUM8PT9B
-QUdEQ0FGQEVEPz9FQTs9PD1CQT8+PEA9OzxCPjxBQD0/P0JAP0BBPj89RUI/PEI8
-PEA8PkNAQEVBPUM6ODs7PTo5ODc5Ojw/OTc4NTU4Njk4Oj08Ozg4QkA8QUVCPz0+
-ODo8PDs9QTs3Ozg7QEA9Ozk9OTk/QD87ODw8PD07OjpAODw8PDw7OT4/P0E8PDY2
-ODk5P0A/PDo5ODs9PDxBPTw4OT87PTs8PUNAOjo8Ojw8PDs8OjY0ODY4Nzo6Ojw6
-ODg4PTo4ODs6PD05Pzs5Ojo1Nzc5Ojo5OD02NjQ1NTc6NzU7PDw6ODc3OTo3MzQ6
-Nzk4ODY3OTs/NTQ1NDc2Njg5Ozg3NjY0Nzc6OTY0OTc3NTc8ODg3Nzg2OTY1NDY6
-OTY1NDc1NTc3ODg7OjY5ODg4Nzg3ODc1NjY0NDU0NDQ2Ojg3NjM3NjQyNTc6NTU3
-NTI2NTY4OTY4Nzo6OTc0ODY1NDY4NjU4NTM0NDQ1NTM2NDIyMjM3ODY3NTk1NDMz
-MTU5Nzc1OTY0NTg1Njc2NDQ2NDI0NzQ0NDM2NTc2OjY3MTMyNDY0MzMzNTM0MzI0
-NDM0NTU3NjYzNTc0NDM2Njg1MzY1MjEyNDMzMzYzMjQ1NTU2ODg1NTMyMjg1ODU1
-Mjc3NDc1NjQ0PTw5OzY2MjU0OjY0NjU2NTM1NTM3OTo3OTU1NTE4Nzc2MzMzNzY3
-ODk0NjY0NjE0Njk2ODc3NDc2NTc4NDY2NTU0NTM0MjU0NTQyNTU2NTM0Njg5NTU1
-Njo1OjY2NDU2NjY2NzY1Nzc4Ojg3Njc5ODQ0NDg4NzY3OjY4NzQ5ODY3ODc0Nzc5
-OTc6NzU3NjE0NjU3NjY2Nzg3Njc5Ojk4Ojg3Njg1Njs6Ozg2NDc3NTU0Ojk5NzY5
-NTQ5NzY2NzQ0Njc4Ozk2NDY5Njg1MzM4NDc6NzU7ODY2Ojk3ODo3NTc1NDM0NTU5
-Ojc0NjU4NzQzOTk1NzY3MTIzNTg3OTo4Ojo3NTY2NjY1ODs4OzQ3Ojo6ODY3Njk7
-Nzg4ODc0NTQ4OTg2NTk2NjY4Njc4NTc2Njg2NjYzNDc6OTc1Njo6ODc2NzQ1NjU1
-Nzg2NjY2Njc2NzY1ODg2Njk3Njg6ODc3OTc4ODc1ODw3NDc3ODY0Nzc1NDU3NDo5
-OTY4Ojg5Nzk7ODc4OUh/eVVxk5Wgo4NBODc4OTs7Ozk3Njc8PTs4Njk4OTo9OjUz
-ODU2Ozw3OTs8PDs6Ojo3OTg6PT8/Rj4/PT08PDw+Ojs7Pzs7PTs8Oz1AOjo/QkI/
-RD47OkFAP0I9PkRDPTtAOzw/PTs3ODk9Ozw7QkJEPz0+PTs/QT9DRkZDQkRDQkU/
-P0NFRUVGPj1BRUVMSUdDR0pNTUtMTVJRTE1NS0tJSUtMUE9KT1NUUU9LREhOTE1M
-SE1JR1BNTE5QUVdUU1NWU1VOTlNQRVFTVFBPU1deWldOUVpWVk9PV1CCusvV3OHk
-5ufq6uvrW2RcWFVVXF5ZVlhZWV1eW1lUUU1JS1RYVFFQTE9MR0tRV01PS0lEQ0ZD
-R0pHR0ZGREVDPkNHRkNAPUJDQUVEQkVFRUJCPj8+Pj9AOz9FPTo9REVEP0FAPUE/
-OTg9Pzw+Pz46PTo+Oz1EQkNEREA+Ojs+PT4+RDs3Pjw8Pz48PDs6ODc3PD0+Pjs9
-PDg3Nzs4PD08QUE8QDg5PDo7Oz1BOzs5Oj46QDw6PTg/QkRAPj07Qj07PTo5Oz1B
-PDtAQDs8QEFBQEFAPDs+Ozs9PD47Ojo3Ozs8PD45OTs8Oz1BOTo9PT45OTw6Ojw7
-OTk5Oz86OT06OTo8OD44OTw8Ozs6Ozk3Nzc4ODg5Nzc1ODc4PT8+Ojk5ODc5Pzo5
-ODc2OTYyMjYzNDY7Ojs5ODo8Ojo6PDg3NTc0Ojo5QTw4NzQ2NTM2ODk4NzY8Ojk6
-OTk3NTg5ODc9QEU5NTU3NTU2MzM4OjQwNTg1NjQ2ODY6NjY1OTY2Nj03NTg4Nzk4
-ODM2ODU3NDQzMjE0NjY2ODk2NzU4Nzg4Njg2NjQ4Njc3Ojg6Ozk1NTY2NTU1NTYy
-MjU1ODY1NTM1NTU2NjkyNTY2NDU1NDIwMzM3ODc2Nzc6Nzg5NzU3NzMyNDc3NjUy
-NTc5Njc4NjE0MjU1NDU1NTY2NzY1NjU0ODY2ODk0NDY3NTQzMzQ1NTg3NTIxNDMy
-MzM1NTM1NjQ1NTU1NTM2NDQ0OTQ1NDY1MzQ0Njk5Nzg3Nzc2NjQ0NDU2ODc2NDEx
-MzQ1NDQ3NzY0NTc1NTIzNDI2MzU3ODQ1MzU3MzI1OTU3NTU5ODc2MzU1NDUyNTcz
-ODw5NjI0NjMzNjMyNDM2NjM1ODg2ODs3ODY3NjMzNTY2Nzo8Ozk4OTU1NTk7NzY3
-OTQyMjU0Njg4ODc3NzM7NTQ3NDY2Nzg3Njk7NzQzMzM3ODU1NjU3Njg3ODg6OTo4
-Nzc4Nzo7Njg2Njg1NTY1NTc1Nzc4Nzg2OTc2Njc5NjU6Ojg6NjQ2NTM2OTU0NTQ4
-MzU1NjY0Nzo6OTg4OTs5ODg5OTo2Nzc4Ojg3Njg6NDU0NDo6OTc2NTU4Ozk3OTo4
-NjU2MzQ1ODs4Ojk4OTc6PDs+NjM4Njg5NjQ1Nzk1NTQ4OTk5ODY1NzY2NzY2MzM0
-Njg4NTU5Nzc2NjU0Nzk6ODUyMzQzMzQ2Ojg3OTUyNTg4NzY4OTg4NjY4OTc6Ojc3
-OTY3ODs2NTc3NTg2Oj08ODU1NDg7OTc3ODs6ODg5Ojg1ODtBZX5sd36enqucYD08
-Pzw8Pz44Ozs5OkA+Ozo4ODQ3Nzo9Ojs8Oz08OTc7Nzg/Pj09OjY1Pjs6Oz05Ojs5
-Ojo9PDw6Pzo+Oj1DQEBCOz07QkFBPzw9OTs7PDxAQkFBQEFAPjo8Pj9BPTw6Nz0+
-PkJDOzs8PEE/QEBERU1ERENBQkFAPEJAP0JFRkVJQz4/REtERE5HRExMTE9QTExL
-Sk5LSkxLSkVLTUxOUVZOUE9LSklKTktNTUhHSU1KSFBSVFhTVlVTT1BSS0xUVFVX
-UFBUT1FZWFNVU1BRT1xLWIa5ydTc4eTm6Ojq6+tcWFhhYFRWVVdYW2FcVlVXWVhS
-SUlNTU9UWFdST1ROUEpMTEdGSUpJTEJGR01LSkhJREdDQURFRkFEQUZFR0NAQkFE
-QkM9Pj5EQj1FQEBEPTk7QUFGQ0M7ODw7PTg4OTw8Ozs7Ojk/Ozk8Oj8/Pj08QT49
-PD48PUI/Pz08PDg5Ojo4Ojc2Ojs6ODc4OTc0Nz48PDg4PDU3Nzg4Pzs4PDw6ODo5
-Ozw8Pz09PTs+Ojk6PUA6PDc6Ojk5OT47QDw3OjlBQTtBREE7Oz07PD46Ozo5PTo2
-OTxAPD9DPT48OzxCPDk3ODg4OTg1MzU3OTw8Pz47OTk8PDs9PDxBPDs2ODw8PDk3
-OTQ3ODc1Nzc5ODg7OTk7PTc1MzY5Pjo4Nzg2Nzo4OjU2NDs6Ozo3NzU4Ojg2Ozg4
-Ojs3ODc1Nzg6Nzg2OTc1Nzg1NzY2Ozc7Nzc0NTQ1OTo4ODc3Nzc7NTQ2ODQ0MzQ2
-OTk3Njk1NjU4Ojs8OTc3NDY5ODc1Njc2NDg3Njw6NTQxMTQ2NTI2NzQ3Njc4PDU1
-Nzc4ODQ0Njk4NzU3NzY0NDYzNTUzMjMzNTo2Ozc2NTMzMTM1NjMzNDg3NzY1ODQ2
-NTQ3NDM0Nzc1Njg5NTYzMzU5ODc3NDU4NzU4ODQ1OTk3ODc0MjE0NDc6NjU3ODs5
-ODg1NDU1NzU2NTI1NTw5OTg5Nzg0OjI2NzU2ODY0MzM0Njc0NTY1MzIzNDM1NzQ0
-MzY2NDUzOTQzNjY1ODk1NDE1NjUyMzUzMzQ1NzY4NjU1MzUxMzIzODQ2NTM1ODQ0
-Nzc1NTc4OzY3NTU2ODk3Nzg1MjU0ODYzOTs4NjY3NzY0NTY3NjM0ODg3NjQ4NzY4
-NTU0Mzc4Nzc3OTs4ODc3NjQzNDg2NDQ3Ojg1NDQ0MzY4OTc3NzU5ODQ0NTczNDM0
-NDY0NTY5MzQ0NjYzMTk2ODY2NDc4NTY3NDg4Nzo4OjY2Njc1NTY3OTc2ODk2Njo5
-OTg6Njc4OTo4Njg5MzY3NzQ5NjY4NTY0NTY3NTU0Nzg4NDQ4UTw4ODU5NzU1NTc3
-OTU2Nzk6PDs4NjY2NDM4OjU4Ojc5ODc5NTQ4NzY6Ojk4OTk4Njk5Ozc4OzgzNTg1
-Njg2OTk1NjY4ODY2Ojc3Njg6Nzc3OjU4Ozc3OTk9OTk3OTg3NTo7Nzg0NzYzNzY2
-NDY1MzQ2Nzc0NTc5OTw7TEQ7Ozk+OTk7Nzg3NTc3Njk5NDc3Nzc5ODc2NTo1ODk5
-PDc2OTs3Nzo6QVB2aGVwboWInX9FPzw+Pzk5Oj08PDw9PTs/PTw6Pjs7Ozk4Ojg8
-PDw5OTY2Ojs8Pz8+Pjw4Ozc5P0JAOztCREI6OTw1Oj0+QDtAPD0/QEY9Oj9EOz07
-Pz89PT48Pjw9Qj9FQz49Ojo7Ozk+QkBBQT08O0I8PkJCP0NIQkU9QERGQ0dJP0NG
-RkNDQkhDQUFFQ0VGSEVGRUxIQEhJSUtNTUdHUUNKU01JTVNQTExLTU1JUlFNTlBP
-T0hFTlJLTk5UUFFSSE5PT1FHTVVQUklJUE9PTVFRUUtOVFFPTEpSgbrL1Nzg5Obp
-6urr61ZYWl5iXmBbWldeWl1NTVRKTVJTVEpMSk9STUxPVF5YUUtOSEZISkdFSkNK
-R0hBS0tEQ0dGREdFQkVFQEBERD49PkQ/Pjw+PUJBPj5CQkBEQUNERUJAO0E/QDo6
-Qjs4Oz08QD47PkE7QDg6PD4+PTpCPDs+Ojk+QT89PkJBPDw8Oz05Nzg6Ozo5Nzc1
-ODU2ODs5ODw5Ozc6Ojs4Ozo6ODg5OzY4OjY6Oz05Ojk6Oz85OTg3PT1AQj47Njs4
-OUU+Ojg/PT5BQT07Ozs6Ojs7PDo9QTo5OTo+PEFAQEA7OT4/PDg5Nzo5Ozs5Ozg3
-Ojo3OT07Ojk4ODo8Ojw6PEA/PDk3NjczNjg3NDg5OTs8OTk6Ozw9Nzc2Njo7ODo6
-OTk4Ojs3Njk4Nzw4NDU5NjY1Njo6ODg5OjY5Nz08PD07OTw5OTk6Ojo6Ojg2OVFO
-ODg2MzM3Ojo3Nzs6OTY1MDc9NzY2Ozk2OTw5OTs8Ojc5OTg6OTQ5OTo5Nzc2NDQ1
-Nzc4Njc3NDQ0Njk5NzU1Nzo4Ojg4OTc2NTY0ODc2Njg1ODU0NTk3ODQ1NzY0Njg2
-NjY1NTgxNDQzMzU0MzY2NTY2NjU2Njc2Nzc2OTg4ODczNjU1NjY3ODg1NzQ3NzY2
-NjU3NzY1NTc2NjU0NTQ3NDY2NDQ1NzY0NTAyNTY1NzIzMzU3NDc1MzAzNDMzNjcz
-NDU0NzM0NjU2ODo7OTk4OTU0MzQ0MzIzNTU1NjU3MzQ1NzQ0MzY1NjU2NzY1NTU0
-Nzc4Njc3MzMzMjI1NTM3NDc0NDQzODM2OTk0Njo2NzU0ODY0MzU1ODY3ODg6Njo7
-Oz03NTM0NTY3Nzc3MzM1NjY1NDU/Ozg3NjQ6Oj02NjU4ODc1MzU1ODgzNzU1NTQ3
-NzY2NTU2MzIzNTc1NDU3NTk6ODY0Njg4Njo5NzU1NDU0NDM2ODc5ODk4OTc5NjU3
-NzY4NzU1Njc2NjU4NzM1Nzc5NTQ1Njc1Njg4NjU2OTo2OTc4NTMxNTQ7Pjg1NTc4
-NTY3MzY5MzU5ODdVNjY2ODc2ODc0MTQzNjU5ODk6OTk1NzY1ODk1NjQ0NjU1NTY2
-Nzc2Nzc4NzU5NzY2MjM3ODs6Nzs3MzQ1NTk4ODY0NzY2NzY4Ojk4NzY4ODk5Nzc7
-PDw4OTs3NjY0Nzk2MjQ1NjY3ODk0NTc4Njg2NjQ1NjY1Ojs3O1mFclI9Pjs4PDg0
-MzY0NzY3NTY2NjQ2NjY3MzQ6NzQ0Nzw7Njc6OTk5OTs/d39rdHhufZCFXEI9ODw7
-PDw7PDk7Pj08Ozo4PT06PDw6OjY6PEA9Ozw8Ojs6Nzg6Oz9AQj49Ozg2ODw9Qj4+
-QD07PEE9PUA+QT89PT47Ozo7PT06Pj4/Q0BAPT9DQkNAPD0+Pjs+Ozg4Ozs/PD47
-Ozw+PEBAQkE8P0A/Pz4+Q0JDQkA/SERBQENHR0RDQz8+RklGSEVGTElLTlRLR0tH
-S0xLS05LS1BRTklTT0lJSEpVUlNSSEVMSEpKTVBRTU9OTEtVVVNOTk1NU1BRU09Q
-TExKSkxPUlJOTk5PR0p9uszV2+Dj5ufp6+rrWFtaV19nX1xZV1hXXVxbVVVTUVNT
-XFpZVFZRVk9TVlVST1BUTkZLR0ZERUhNS0pGRkpGSElHRkdFREJCPj9CQ0BBQ0RA
-QkNCQkdCPUE8P0NCQzxEPzc+Qzw9QEJCRD49Pj8/Pj04Nj06QUJAQT87PTs4PT9B
-Pzw8QDw9PTw7P0I7PTU4Njo5OTw/Qzo2ODhBOTs7OTg3OkE9Ojg9Pjs4Nzo9Ojg5
-Oz05Ojo4Pj47OTo6ODU1Njo6Ojg7PDs6PDw9Oz8+Ozo8Ojs4Oz4+QEBCQDk+PDk4
-OTg7PDs9QDw7OzlBQz1CQD47Ozc6Pj1BPj8/PDo7ODk3ODo5Oj0/Ozw8OTk+PDs6
-PTs8Ojg9PTw8ODg3ODo8Ozg7ODg4NTs7Oz07NzY1OTY4Njc0Nz04NjY2Nzo5OTk4
-Nzc4Nzo7Ozo6Ojs7OTg3OTg3NjdXbV47NzY4NjY3Ojg2Nz01NjU3Oj4+OTo6Njo5
-NTY4Ozg+Pjg3ODg2NjY3OTo2NjM3ODg5Oj08Nzc4NzU0NTg4Njc3Ozk5Ojg4Nzs8
-ODk4OTg3Nzg4NzUzOTY1MzU3NTc4Nzg0OTU1Njg0NTU1NDQ2MzM1NTU4NTM0NjM3
-ODg1NjU3NTc5Nzc5Nzg1NTY1Nzc2NTY4OTo5Ozk6ODY0NDY1NTY1Njg3NzY0NjY1
-MjY3NTQ1NTU1MzU1MTQ1ODc4Ozo5NjI0NjQzNjYxMjM3OzY1Njc2NjUzNjc3NTM0
-MzY3NTU1NDI0NDU3NzY0MzM1Nzg3NDU3Njg3NDM0NDY3NzY2ODs1ODU0MjU1MjI3
-ODs7NjU3OTk3Njk3Nzc1ODg4ODk6Njg7Ojg2NTk3Njg3NjU4ODk4NjE0NTg2Njg3
-Nzk1NTY4ODY1NTMxNDc4OTo0Njc2NTU3NzU2ODY2OTo4NTYzNDk8Nzc1ODc1NTc0
-Mjk4NjQzNTc1Nzo4OTY4ODY0Ojg5OTc3Nzc3MzU4ODY1MzU1LzU2Ozc1ODc0NDc2
-OTg3NDs4NTU2NTQzNjU2OTg3NzY2Njc0OzYzNzY0MzU5Oj43NTQ1NDM0Njc0MjI3
-PDk1MjQzNTY6Ojk7NjU0MzY2Nzc5Ojg0Njo4Ojc0NDM2ODg3OzY2Njg6OTg4NzYz
-NTQ3OTk0Njc3Nzg6PDo7Njg5Ozg2Njc4PDg2Njk0Njc0Njg5NTQ0NjM0ODg7Ojs5
-OTc1NjY1ODY1OTxPlaOeaD04ODg2Nzk0ODk2ODk4Nzc6OTg4ODY0NzY1ODc2Nzg5
-Ojo5ODo6PFd2Xk1UTVx5hnpENzY4Nzw6PTw4Ojg5OTs5Ojs9ODk5OTc4Ozw8OjU4
-OTo6PDg5Oz48PT06PUA9OTk7O0E8PD09PDo/PDo9Oz06Oz1BP0FBP0I7Nzg6Ozk6
-PT8/QD5EPTw6PTw8ODc5Ojw8QUJEPUFBQDw9Pj87Pz5AQT1BQ0FCQEFBQkZFSkZF
-S0ZGRURGSEdFTEpOS0dISkxRUUtMR0VMSUpRTUdLTE5TTUpNSEtOTE1PUlVNS0xK
-T09QUU5LTVBKSlNQT1FNU1VPUlVeW1VQS0tNTVFPT1JTT0pKTXy8zNTc4OPm6Onq
-6+pVUVFYXVxkX1tVVVhYUlNPVFhUTlhgU1hQR05PT1FST1JUUFNWTktIRkdKTE9I
-RUxITEpDQkE+QERDQEFDREZGQT5CQ0JCRkJCPkRDQT5FQEI9PT9BRERHRkNKRD4/
-Q0NCQkdFPT07Pzw8OUA/PkE9PkE5PkJGPDw5Ojs+PTs7PT07PDs7Ojc8OzxDOzk5
-OT07Ozs3NjQ0OTo7OjY2Ozs6Oj5BOjk5OT06Ozo6Pz47PDw5NzU1Ojs7PkE/QD47
-Oj0/Oz47ODw+OTs8Pz4+PDo5OTo7ODc9PT06Pz0+PT07OTo8PD08PTo4Ojk5PDg2
-OTo3Nzk5ODY4OTw5Ojs7PDs6Ojk6PDo4OTw7Ozo6Ojk+Pjk2OTo4OTg5ODU7OTo6
-Ozk4Nzs/NzY3MTY3Nzg3NjUzNjU2ODc3Njg6Ojk5NTc2NTc5OTg4NDM2ODpGRDI2
-Njk7ODk4OTo5Ojs1NTs0NzY2Njk3OTs8NTk7Nzc5Nzc3NDU6Ojc4Njc6OTY4OTo8
-PD06NDg5OTo4Njc9Ozg4NjY2NTU0ODw6OD46NTU3Njc3Nzk4OTk1Nzc3OjY0Mzkz
-NTU3NDQ1MzUzODQyNTc1NTg0NTY2NTU3NjY1NDQ4NTU3Njg4NzY2Njg4NjQ0ODc4
-Ozc2Nzg3NTU2OTk1NTo5NTQ2Nzc4ODg3Njk1NzY2ODg5NDc3NTQ1Njo5ODg4NzU0
-MzQ1NjY0NTUzNjQ1NDY4NjY2Mzc2NDI2NjUyMzY1NDM1MTIyMzY0NDQ1OjY5NzY1
-NDI3OTk4NDU4OTg3OTMzMTI2MzQ1NDM0Njg8ODo6ODc4NTQ2Nzc1OTk2NDU1ODw+
-QTs4ODY0NDQ0ODc3NjE0NTU4NzU3PDg5NzU3NzQ4ODY1ODY3MzU0NzMxMjU4NTU2
-OjU2ODg3NjY4OzU3ODszNTc5OTg+NTc5ODc4Nzs1NDY3OTg1NzQ2PTw6Njc0NTM0
-Njk4ODk4Ojs2MjQ2NTUzNjk4Ojk4Njg1ODo4NDI3NzM1ODk3NzU1NjQ2OTc2NjY3
-NzQyNDg1ODY0NTo3NzY4MjU5PTY2NTU3Ojo7MzQ1NDU2NTY1NjY3NDQ1NDQ1Ojo4
-ODo4Nzc1NTc1Ojw6Ojg0NTc5Ozg4MzU3OTc1Nzg1NDc4ODo2ODg5Ojg0NDc1OTw4
-ODc1NDIyMzY4Nzk3NTUyMjU4Nzc5PDg2NTc6NzU0ODQ6RJq8pYdTOjY3NTc5OTg3
-ODo3NTY2NjQ4NzY3Njk4Ojo6Ozg5Njg5OTk4OTtKeXdNPTs5P0tKQjo3NDg2NTY4
-Njg3Ojs7OTY6Nzo5Ojs5Oj0/PDY4OT07Ojg4Ozs5Pjw+PT5AQTw6P0RBREk+Pjg7
-Ojg4Oj0/Pzw+RD47QkU+Ozs7Ojk6ODk6PTw5PDs9Pj47OTk6OTo4Ojw7Pjs+Pz1B
-PTw/PD9APUJGS0U+Oj9DQkVGRkVDRERGTEZISEhJRUZLTEtNS0xSSklKTEpMS0ZL
-T01LSU1KTU1NTklKUE1QUE1PUE1JSk5TVFJOUlBQVlFPTVRSS1BSVU5NUFJUTk1O
-TlJUVFJVVUpKS0pOfr3M1dzg5Obo6Onr61dXUlJUXl5cW1dZXVxaVlNRTU9QUlNV
-UUlLTk9SUlRSUU5OU1RTT1BNS01IRkJHREhCREpCPz9CR0hHRUBLSUFCQT1ARkdK
-Q0NDREVCQz1BQD49Pj9DQ0NEPz4/RkBCPkNGPjw6OD08PT9FSUNBQTo6PEA9QDxE
-Pj9BPjg5P0A+P0E+Ozw4ODg9PzxAPDg6OTg7ODo6OTY0OTw8Ozk5Oz08ODg9QD06
-Ozw7Ojw9PDk8QT04Njo7PT49PTs/PDw7Ozk9Oz0+Oz1BODs9PTw9Ojo3OTo4Ozw/
-QUE+QDw9QD43Ozk9Pj08OTo6Nzg4Oz47Ojg4NzU0NDc4PDs4Nzc4OT06Njo6PTk4
-OTk2NTc0ODs7Pjw7Ojo5Ozk4ODw6Ozk5Pjw2NjY4ODY7Ojg1NDc1OTg2NTQ4ODY5
-NjQ3Nzc5OTk2Njo7ODc1OUA5MjM1OTo8Njc6PDs4Ojk6Njg0MzUzNzU4OTc4ODc8
-ODY1ODg2NjQ4ODo3NTY0ODU1Ozs7Oj1CPTg4ODg1Njc6ODc2NTQ6OjY2ODY3NzY1
-OTo5ODc3OTg4ODc1NTU3Ozk2OTY4NTMzNDY3NTU3OTc4NjY2MzY5NzY2MzEyMTU0
-NTU0MTM5OTY1Nzs6Ojs2NDQ4Ojg2NjU0NTg3NzQ2Njk0MjI1ODg0NDU3NTg5Njk2
-NDU2Njc1Nzg3ODc1Njc0NTU4ODc1NDQ7NjY0NTM0NzY0MzM1Njg2NzU2NDg4NTQ2
-ODYzNDQ4OTM1MTQzMjI2NzYzNDI0NTc4Ozg3NzM0NjY2ODY3MzE3ODY1Njg4NjQz
-Nzk6OTQ2ODY3OTgzNDY1Ojg1NDQ1ODc4ODk2OTc2NjQ5NTQzMTU0NDQ1Njg3NjY1
-Njc4NzY2NDk2ODY5ODY2NjUyMzc0Nzk5Nzc8PDs1Njs7OTs6OTc3Njk1Nzk6Ojc3
-NzQ2OjY0NjY3ODY5NTU2Njc2NjU2Njk1Njg3ODY2Njc1NDc4NDU3NzU3ODc3Nzc4
-Njc6OjY3Njc4ODY6QDc1NjU4NzU0NDY4NTc2MzU1Nzc2ODo5ODYzMTI0MzM2ODk2
-Njc3NDQ1Mjc4NTY2NDU1ODU0Nzc6OTk2ODU2OTc1Njk2Njc1Nzg2Njk4ODg5OTc2
-Nzg7OTc7OTY4OTc4Ozw8OTg4Ojs5ODw5NTU4ODs6Nzo9Ojs5NTk4OD05ODo1Ojc4
-NTk7NzY1OzlEhKGdg0w8Ojs6Ojs3OTk6Njg5ODc6OTg2NjU3Nzk5Ojk7ODk6OTg6
-PD09QmRwW0A9PD4+OjpAPDk7Ojk3ODg4NDk4Nzc4ODk6PTs4Nzg4Njc2OjtBPDcz
-ODg7OTw8Pj49PEA+PDs7Nzo7Pz9BPT9APTw/QD1ARUA/QEM/P0A/Pz0/PT07P0M+
-PkA/Oz4+PTw4Ojo7Oj05PDw7PT05Ojs+OTk8QUNEQEJBRUZJREE+QkZERkJBQ0VE
-RkNBRkhFQkVJSkpNTUtEREdPT1JKSExMTkxIS0pLS0ZLTVFOUlBQS1NRUVJXWE9W
-U1ZQTlNPUU5KTk5QVE9NU1dYTk5PTE9YWVdRUFNdWk5RSkx8vczU3ODj5ejp6uvr
-U1NVVldeXFhWW1dXWFVZWFFUT1NUTVVST1FPTlNWUU1MTE5MSktQSUlLSkRGQD5D
-SEdEQENAQ0ZKTUxNRUZHRD5EQUJBPkRFSUpGRT9BQj9APT1HSUI7QUVCPUZHPUNC
-Qj8/Q0Q/QjxEREVCQUFAQTs7PTs7PTs+Oz47Ojg6OTw+PD8/QEA7Njg6Ojw6PTg5
-ODg8OzY0NTc8Pjs2Njg3OTk5Ojk8Oz06Ojs+OTs6QD47Pzs5Nz06Oj49OTw4PUA6
-PTxBPkA8Nz0+Ojo6PTk3OTs6ODk5OkQ+Pz08Pz06PT07Ozo8PDk8Ozk5OT1BQT88
-OT06PTk8NTc+PTo7NjU2Nzs7ODc2ODk5Nzg3N0A8ODk4ODg1ODo8Njg3ODs5Ozs9
-Ojo2Ozk0ODk7PTg3OD06OTk0NDQ4Nzg4NTMzODs2ODw4ODc1Njo3OjYyMTE1NzY8
-Ozo3OTU5OTk6ODY2ODc3ODo4ODk5Njk7OTo6Ozk4ODk8NzYzNTU1Nzo5PT07OTo6
-Nzk6ODU2Njg5NjQ3OTs3OTg8Ojg1NTg4ODo2ODw7Ojk4NTQ3ODs3NTc5Ojo4ODU0
-NjU2NDc4Nzc7QTw6ODQ1Njg2NjM2NDM1NTY4OTo3Nzc2Njc8Ojk1NzY2NTU0NDc2
-RDc3NjQzNTI0Pzc2OTs1NTQ2Ojs7OTk3NTY2Njg4ODc0ODUzNTk1NDU0MzM1NDY2
-NTQ0NDYwMzU1MjMyMjIyMjQ0NTg5NDM2MjIzNTYzNDIxMDMyNTQ0MzU1Mzc2NTc1
-NjMzNTUzNTY0NTI1NjQ1NDQ0MzQ0Njk4NTc1OzY0Mzk4Nzo2MzY6OTc5NzY6Ojs5
-ODo8OTU1ODY4ODQ2Nzk5NTc2ODY3NjI1NjY2NDU3NTg1MzQ1NTo7OD06ODcyNDM4
-NzY4Ojo3NjQ2NjY4Ozo5PDg1NTU2Njc3NTc4ODg2NjM4NDU2ODQzNTU1Njc4Njk4
-NTo0NDU6NDk3Ojk2MzQzMjU3ODs3NTg7OjU1Ozw6OTg0NjUzOTUzODY5OTc5Nzk7
-PTs7OTY1MzI0NzY4ODo2Nzk5QDk2Mjc7NzU1Njg3ODo3Nzg3NDY0NDY2MzM1NDU2
-Njg1Nzo8OjU2NTc4ODU5OTY6Pj41ODY7OTc3ODY2ODg3ODg4PTo4Ozk3Nzc2NTc4
-ODc4Nzk3NTg3Ozk5PDc2Mzk4Nzg3ODo6Nzc7OTw8OjpBY3hzRDw8OTMzNjk4Nzc3
-ODg7ODc4NzU2NjY4Nzc4Ojo5PDk6PD0+P0Fid3VIOz47Ojw5OTo4PDo4PDk8Ozc3
-ODg5ODc8ODg8OkBCRjk2NTY+Ojs+PkA/PDg8Qj46PT47Ozw+PDs7Oj0/Pz5DQkFA
-RENFQEJAPjw9P0BBPzw9Qj47PT5AQDk+PTs9PT45Ozw7Oz08PD07P0A+Oz87QUM9
-OkNCQEVCREFFQkdBQ0lER0dERURMRkZCR0hGQkVJSkZKT1BRUEhFRUhMTUtGRkxL
-RkhGUE9FRUdRTk9WVEpLS0xTVVhXTlNUUVFUTEpVUE5ITlRQTVRVVVBOU1FPVVZW
-WF5WWFtWUVBGSny+zNXd4ePm6Orq6+tRWFJYXlVXWVlWV1VXWFZVVVBUUU9MTVBS
-VlVZT01MS0lKUlJOTVNSS0VER0VKREVHRkZGSkRARUJAR0dKRUZIRkhFP0RDSEdD
-RUNDREdCQUA7PD8+REU+RENCQDxGQ0I9QURARUU/QD5APj9CRkdCPjo+RkA5OjtA
-Ojo7Ozg0Oj48PT4+Ozs7NzU6PTo9OTs7Ojk5ODo8PDs5ODk5Pjw3NDY6Ojk6QUI9
-Pjo7Pjs6Nzw8Ozo7Ojk7OzxBO0A9Qj9CQz09QT9AOzw8Oj48PkA5Ojk7Ozo/PD46
-O0JAO0A8OTw8Oz1BOzo/Pj07OTw9PDw5Ojg4Nzg9PDs+Ozk6OjlANzY5Nzk7Ozo4
-ODk4ODs7PTw4OTs6OTs8Ozg5OTk3OTg6Ozs+OTk8PTs5PDg5OTo7ODg9NDc5NDQ2
-ODc4Njk2Ojg3Nzg1Nj05OTk3ODk0NDg7OTk6Njk2NTU2Nzc5Ojk3Ozk6OTc8PTw7
-ODo6OTk7PDk0NjY1OUA4NT06PD07PDY7Ozc6PDo1NTY3ODg4Njg4PTs7NTM4OTY3
-Njg0Nzo5NzY3NTU1NDIzNTY5NTc6Ozk4Ojk2Njw5OTg4Ozk4NzU4Ojc3ODo2Nzc5
-Njc4Nzg1NjQ3Njk2Nzs5ODk1Njc4NzxEODc5ODczNjo5NjY3ODo3NjQ1PDo4PDw7
-OTk7NjU2Nzc3ODY2NjU1OTo4NDU3NzQyNDQ1NjYzNDQ1NDU0MjE0NTMyMzQzNjc4
-NTYyLzEzNDU1NTc2Ojg5NzY3Njc3NzY0NTc0MzQyMjQyMzMyMTQzMjMxNDI2NDQ1
-Njk3NjQ2NTY0NTM1Nzo2NTU2Nzs5ODg1NTY4NTU1NjY6NjY7OTg4Njg3ODo2NzQz
-NjY2MzUzNTY7Ozg4OTk3Njg4Nzk0MzU1ODk4NjY1NDU2NjQ4Ojk7NzUwMC80Njc3
-NDQ3Ojg7OTY7NjY2Nzo4ODk4Njk7OTY4Nzg3ODg6PDk2Nzg3NDg5OTc6OTo2ODc5
-Ozo5OjQ4Njo5Nzw3Nzc3Njc4Ozo2Njs7OzU1OTg1ODY0Njc5OTY1ODg6OzU0NzY2
-PDs2Njg4MzQzNTc5MzQzNjUzMzg3Njk6ODY3Oj86ODc5OTg3Nzs4ODc3ODk6OkM3
-NjY5Oj05OTs1ODs3Nzk3Nzg1NDQ3ODc2Mzc4NDY2NDI1OTs6Pz04ODc2Njg2NjY1
-NTY2ODc6OTg6PTo6Ojk5Nzg6Ojw8Ojc6Ojo8Njg2MzY2Nzk3ODg3Nzs+QTg4Pjw9
-VotwXj86Ojw/PDxCQT08Nzc6PTk6Ojo5Ozg1OTo6Oz1JW1FNPjk4Oj08NzlARD4+
-Ozw4Njg8Ojo7Ojg7Oz07PDk9PUJARUI9PUBAOz4+Pz08PDw/PDo/QEBCQkM+OjtB
-QT8/PkA9PDs6Pjw6Oz89OTtDRERFPUFBPj48P0BDQEJAQUZESUhEQkVERUhLTEtK
-TExHSk1NS0tRTlNSU09PTlJQRkVJUEhOQklNUVBOUk5PTE5PSElHTFRYWFlVVlVQ
-VE1OSU5PT0xQU19dWlZUV1ROT05UTlJYWVhaVlNQTU1VfL7M1t3h5Obo6urr61dW
-VlhQUVhRU1hVU1JSUFlUVVJQVVJUVldRU1ROVFBNS0tGSUlSVVBLTklKSUBFRklH
-TkxGSUhHSURGP0RAP0Q+QkVFPD1JR0dBQ0JCRERDQ0FGRkE/REVERkZIQ0FBRURB
-RENCREY/QkNDPzxAREI8PT8/PDk7Pz06OT5DPDo3Ojs8QDo8Pj45Ojg9PDw7PTs6
-OTk7PT46OTk3PEk/NzU1OD4+OzxAQEFAPTc5PDw7Ozo7PDs7Ozs7PEM9Ojs7Oz09
-Pz48Pj9BPDlAQUFAPz45PDw6Pjw6Ojs+RURBOjw6PDs9OTk4NTU7PTg3ODg9Ojg4
-ODo6OTg8Ozs6Oz48Ojg3Njk5PDY1ODw5NDk2Mzk2Njk4NTg8Ozk9PTU2Nzg6PDk5
-Nzk2PDs5NzU4Ojc5Ojg1Nzc5OTo4ODc7OTg6ODc2Njg3Nzc3Ojc7OTc3OTc6OzU2
-NDM1NTc4OTw2Ozc2Nzc7ODs7Ojc1ODk2Njo5NzY5Nzc3OTg3PTc7OTo4ODo4Ojk3
-Nzg6ODc4Nzc6Ojg4Ojs3Njk7PDg2ODg3ODY1Njc8NjQ2NjI2NTg1NTY0NTc4Ozg1
-NTY2Nzc3Nzc7ODY3Njc1Njg5Njo6NzY5OTk5Ojc1NTQ2Njg1OT46ODc2NDM2OD83
-NjY3Njc3NjQ6ODQ3OTc3NzY2NTY2ODc2NzY3MzU0MzYxNDU2NTU2NTUzMzYyNTQy
-NDQ0NTQ0NDY2OTs6NzU2NjY0NjY1MjU5MzI0NTQzNjU0NTY1ODYzODc3ODU2Nzg3
-NjQ3NTQxMzQzNDM2MzY4Nzg5NjU3MzQ0NjQ1NzY6NjIxNzc4NjQ1Nzk4NTk6ODU3
-NTY0MzI0MjQ1OTc5Njk4NjU2ODc1Nzc2Nzc3NzY2PTo5ODYzNTU3Nzc2NDQ5NTc4
-OTk4ODg2NDUzMjg2NTQ5ODUzNzQ2NjM3Ojw7ODg3OD05Ojg7Ozg6Ojo7PDk1Nzg4
-ODc3OUA2NjY5Ojs5OTg5ODc2ODk3Njg2NTc3NjUzOTs4NzY7PTY5NDMzMzg9QD46
-ODw4ODk6ODw4ODk4NzM1OTo3OTY0MjU3NzU5Ojg4ODk4ODc5NTQ0Njs0NDg4ODg4
-ODo6NTw6Ojk4Ojg6ODc5NjU5Ozo8Qjg3Ojg5ODk2NDY4ODg1Njg6PD03Nzo5Nzc4
-NjY3OTg2Nzg5PDw6Nzs5ODo4Ojk5OTY2Ojw8Ojg2OTs5PTo5ODg6Ozg5ODc1Njc5
-ODo7NjY1ODY3ODo3Njg3PFJXTz5APFWCgF0/Oz0+PTxAQDw5Njg7Ozc6PUA7PDs+
-PT06OzlCWWhzbU48Pzw5Nz49PDg5OzxAPTo4O0A8PTw+OzxBQEA/Pj49Oz07PT06
-PTs4PT08OTo8PTw+P0RFRUVBPj0/PD0/QEFCPDs5PD1APj08O0I/PkFCPTo+Pjs2
-PEBAQD8+PkFHRUNESURCRkpKRklMTlJKSEpIREZLTFFPSE5RTEdHSkpKUVpcT0xE
-Rk1QUlFTSk5OSkhKTExPUU5KTFFQTU5QU1JWT1VQT1ZYWFVXUFBUUk9TXFRQYFxZ
-WVhcV05MTU90u83W3eDk5+jr6ezrW11QVlhaTU5TV1lVVVFRVVhVUVFUVlVRUldT
-U09aUEpLSktISEtJUVlPSEpKSEVJRkRDRkZLSUtFS0hFPkE8QklFSElIRkRFSUZF
-QEA+QkBDQkNERUM8Qj9DSUM6QD1BQTw/P0Q+PkA/RD9DOzk6PDk6OTs5PEA8PD03
-OTo9Ozs6PTs5OTk6OD09Ojw+PTw/Pj48PTo2Ojo4ODo6QUA3PT5APTw7PDs+Ojw7
-PDo+Pjs9PTtFRkFCPzk8P0I8OD0+PDo7PkBCPz49Pj1BPDtAQDw+Pjs/Pzs8PEA/
-Pjw9PUQ/Pz09PDk4NjQ7Oz48Ojo7Ojk7Pzw+Ozs9PTxBQjo6Ojo6ODg4Ojw7OTc9
-Ojc5ODU4OjxCPjs5OTw7Ojc3Ojs6Ozc0Nz06OTc3NzUzMTk5ODg5OjcyODU3ODc0
-NDQ2OTc3Ojk4Ojo4OTc4Nzg7PTo5Ozg2Nzw5OTc1NTc3ODk0Njc5ODk3NjY6Nzk8
-OTo3NjU3OTg7Nzc3Ojg3ODs5OTs5Ojw6OTg7Nzg5PDg2Nzc5Ojk8Nzo7NjU5OTg3
-NjM2Nzo6OUM/Ozg4NzY3Nzg3MzY0NTY2OTY2ODc0ODg4ODY4OTc2NTc1NjU3Njo7
-OTg3Nzs4ODo3NTc1NTc6Nzk4NTY3NTk5Njg4ODg6OjM1NjY3Nzg4NTQ0Nzg3ODk2
-MTU0Njc2Nz04NjY2NTU1NTY1NjY2MjY1ODUyMjQ2Nzc4Njg1MzM4OTU0NDY3OTUz
-NDczMjM4NjQ3NTc4NzU0NTg3Nzg0Mzc0NjMyNDQ1NDY5NTg5NTY1Nj05OTc4NzY2
-Nzg3NTI1Nzg2ODg4ODg6OTY1OD86OTQ0NjU1NjU0MzU3Nzg2NjU0Mzc5Nzs7NTU3
-Njc3Nzc2NTIyMjI0NDQ1NDI1NTY3NjQ2NTk3NzQ3NjU3MzQzODo5NjY0NzQ2Ojk6
-QTk3OTk1NTg0Mjk6NzY1Nzk3Njo4NTU4Ozc3Njc8Ojo7Ozw6ODk7ODs4NDI4Ojg1
-NjU3Ojs3ODY3NjQzNjc6NTk4OjU5ODU6ODk7Ojo4ODk8Ozs4NTY2Nzw2NDQ1Njc3
-Ojo6NzQ1ODg/OjUzMDI1NTo5OTk2NjM2Ozk4Njk7OjUzNDg3NjQzODY5OD07OTk7
-PDk3NzU4NjY5PDk1NDg4Ojo2Nzk5OTo7PDs4NjY4NjU3ODQ2ODk+OTk4NTk4ODo6
-ODQ2OTg5OTk6Ozo3OTw5ODg1Njg4Njg1Njg6PT07PDs6Ozs2OztQeIqAV0lZgYxs
-SDs7Oz07Ojs6Nzo8Ozs+PDg3OTg5PDo8PTk8P09rfl5VREE7Ozs9PD0+Ozo7Ozw5
-QD49PTg8O0A5PDs8PEFAOj1BPj08Oz0+PkVAPD4+PUA8QkRBPkA5QkE+QD89PUFC
-PkJAPj4/PT4/PUE/PUZJPz4/Ozw7Pz89QDo4P0M+QEJHRUVISEVGSEdFTUlER0pM
-TEVGRkxLR0xPTllPRkpHS0xJSEVLTkxLSURHSEhRUlBJTElNSk5RTUdLSkxPVFNQ
-VlZSTkxNU1JRUFBRU1NSUlNSVldVV1pZUVtdWFdVS3u+zNTc4OXl6Onp6+tbXF9b
-WVhSX19ZWVFUVl1bUlVTWVpXVlNPUFZUV1FOTUdITlRTRUZITUdKSkpKRUFBQUNF
-S01GRkVFR0A8QkZGTUg/Q0REP0NCREJDQ0JBRkNDO0JBPz4/PURGRERERDw9QTpB
-QENEQkdDPz5EQDw8PTw+OD0+P0M9OTw5OTk4PDw5Oz48PT1BOTo6Ojk7Pzw5O0FD
-PUA6ODo9Ozk7Ozw6PkI+Oj09Ojw3Oz05Pj49PDw7P0NFRUFEQUE/PT48Pj08ODs6
-ODs/QD1BPj5AQ0A9PjtBQkE/Pjs7Pz48PT48PUFBPj8/PUE6ODxCPz07Oj0+PDs6
-Pj1APT46PD08PD4/QDw7Ojk6PDo5Ojs4OTo4OTg5Pj06OjtAPTw4Ozs6Ozs5Ojg3
-OTw4Mjc5Nzg9ODo4Nzg6Nzs2PTs3OTc4Nzg2Njk4ODg4NTU2PDo1Nzo7ODc2Nzk7
-ODY3ODY3Njg3NTY6OTc8Ojg8Ojc6Nzk2ODs4NDM4PDg6Ozo6Ojg5Oz08PDk6PTk1
-NzY3OTo3Ojs6NzY4ODo6ODc3Nzc7OTg4NTM2NzY5OTk1ODY7NjQ4Njg4ODk1NjY3
-Njc3NDU1Nzg4Njc5ODg3OTg2NzY4OTk4NTU2NTg5NTY0NDg4Ozk4Nzs2ODY1NTc6
-NTk6Ozs5OzY3NjU3NzM5OTg5NzYzMzY2MTY2Njc2Ojg1NTU6ODY1Njg2ODY9OTk2
-NjczMzEwMzY1NzY0NTQ2NjY2NzQ3NjU3NTU2ODc2NTQ2MzU1NzU1NTY3PDY3Nzo2
-NDM1OTQ0Njc6Ojs/OTk6Ozk4NzY3NDY1NDc6ODc2ODk4OjY1Ojw5Nzg4PDw3MzM1
-NTY1NDc2NTU0Mzc8ODM1Njk7ODc5NzQ3OTg4NzY4MzMzNDM0Mzc1MzY3NzQyODY1
-NDQ3Nzc1NTQ4NDg2NjU3Ojk3ODc4Njg4ODg4NzU2MjQ3OTs3OTk7OTk8Ojc4ODg6
-Ojs6OTo6ODo7Ojk7PD06Ojo3ODY4Ojk3Njk4OTc2MzU3NjQ0MjM1Njc4NDY3OTY8
-OTk0Njg7Ozc4NTw6Njc6PDk4OTg5Ojw7Ojs1Njg2Njg5NjU0Nzg2PDg4Ojo3NzY3
-Nzk6Ojc2ODUzNDg3OTc5Ozo6ODk/OTU3Ojo0NjU3ODQ2OTo9Nzk3ODc3NjY5OTo8
-Ojs3OTs6NTY1Mjc2ODo6Njc5ODg6Ozo6OTs+Ojk9QT48NjY6OTg5ODc5OTg2Oj07
-Njk6PDs5PDs9Ozs8PmabpauclZuEd04+Ozk7Pj49Pzw4Ozw4Ojo6OT09PTw+Oz49
-QD5JbWZRRUA9PTo7Ozg6Ojs/PTw9Oz0/PD46ODs7Oz87Oj1APkA/Pjo9QT5AOTtB
-RkU+QDw8PUBCQEE+RUM+Pzw8Pjw+O0E9Oj5AQD85OjxBPD9EQT5BQTs7Ozw+QT48
-PkJAPD9BQD1CQkJHREZGQkhJRUpFRUhERUdQSklISEhJS05QSklKSkpDRkVER0lH
-SklHTlBVU09MTEVOTElIRUdPT1BPTE1UTkhKVE5QTVBRT1FXVU5QVFVSUlxWWFxa
-W1VcUE9Kfr3M1dzg4+bo6urq7F9fZ2FYVltTVU9PVlpXWFRSUlRTUVpXVk9MTktP
-SkpHSUhNSkxJRklLUU1FRUtLTEJFQkJFR0VCQkRJSEtCSUdOSUA9QkY8PURDSEJD
-QEBBQ0FBQkJBQUBAQztCR0M9PTw8PTw2Nz4+QT09QkRCOzo8PkJBQ0BCQTw9OT44
-Njk8Ozs8OTk5OTg9PDg4OjtAOzg9QkA/Qjw5PDpCQD05PkNAOT9CNzw7Ojg4ODk/
-PT5BQDw4Nj8+QT1AQD05PT08PEI9QD5CQkNBPj0+PUE+OztAQT48PD0/QUNBPkA/
-QUVEPj87QEE/PDs6OTw8OTg4OkFBPz47Ozs8PT09Ozk5Oz4/Pzo3OTo8ODc9Ojk9
-Ozk5Ojg6OTs7OjY3ODs+Ojo9ODY0Oj86Ojg4NTk4OTQ1Nzg9Pj07Ozg6OTg9PDc2
-Nzg2OTw2NTg3NTc3ODgzNDk7OTc2OTY3OTc4NTc7Ojc3OTg4Nzc5PTk6Ojg6Nzg5
-PDo2ODg5Oj1AOjk4ODg3Ojo6Ozw4NzY0PDo4OTg3NjY4NTY7PT42NzY2NjU0Njg1
-Njc5Ozk5NjU2Njg6OTg1ODY0NDMzNDY5Nzc1MzI0NjY5OTk5ODc3NjM0OjY4NjU1
-MzQ2Nzk4OTk3OTU0NTQ1NjUzOjU6OjU3NzY2OTo5NjU4NzYxNTY4ODc2NTQ1NDY4
-NjM1MzM0MjQ0NDk4NTc3NjY1NjY0Njc2NDQ1NjIyNTU3Njk5NjYzODcyMzU3ODY3
-OTc5Njk5ODI0ODk4OTo3NDM4OTc5NTQ0ODY3NjU2ODU3OTg4Ozs3Nzo1Njg1MzY2
-Nzc5NTo4Nzg3NTY4OzU1OTo3PDc4NjU1NzQzMzY1OTQ2OTc2Nzg3ODo3NzQ2NTY3
-NTY1Nzk5ODMyODk7NDU4ODU1Njk1ODc2ODc5OT04NjQ5NTU2ODo7Nzg4NTg3MjY2
-Nzo6ODc2OTk3Njc4ODs6Ojc4Ojg3OTw5OTk6Ozo3ODk7Pjw6Ojs5Ojk7OjU3Nzc2
-Nzg7OjQ5Nzg0NTMxMzU0ODo4OTw8Nzc3NzU0OTU2NzQ2ODg2NDY6ODo4OTg4Ojc1
-NTY5OjY6OjUxNz43Nzw6Ojk5Ojc2Njs8ODo6Ozs5Nzg4ODg6ODU2Njc6OTc5Ozc2
-Nzg5OTg4PDk6PTw5ODs6OTg5ODg2OTk4OTs6OTY5ODY4ODk1Ozo3ODc4ODc4PD4/
-Ozw5ODk8PDk3Njc6Ojg7Ojs7Ozw5Nzg7OTs9Pjo5Ozo7OTtEhZujtritmGlRPzo4
-OT07Ojk2Ojk3Mzo9OTpDPT48Ojs9Qj9BTFtqZkk9Pj49Ojo8Pz0/ODk8PT08Ozw+
-PDg4PD5AQT9FPD08PkI+PDk4PD8/PT1BP0RBQURERTs/QDw+QT9BPTk6Pzw7Pzg7
-PDw9Pz49QEFCREJFQkNCRjw8PDo9PT09QUJHQkE+PkBERkdBQUdGRUZKSEtER0pK
-S0VFR0pLTU5JSk1JRktBQ0dKSElGRUNKS0tKTEtPUE5TU0xQTUlOTk5NVFFMVFNT
-Uk9NTktPV1pRTFJSWFtQU1hdXVBMUllZVE9US0iEvszV2+Dk5ujp6uzrYGZgaGJb
-TlVRS0pXUVZYWGBVWFlYWFhSUkxOUktLSklIRU5MS0xKS09OR0lIREpOSUdKTk5I
-QkFDQkU/Rk1GQ0BBQ0NFR0hHRUVDSUxFPDlAQDw+P0FARENDQURBRD9APkNBQDo7
-PT09Q0E8QT48ODpAQD1BSUE9Ozs+PDw5ODU+PDs8OTs9Ojc5Oj07Oz07Ojo5Nzs6
-PD46PDw9Oj4/RDo+Oz1BPDw5ODc5Pjw6PD1HPj89Ozw8PT5AOzo8Oz5CP0A8Pj0+
-Pz46PUBAPjw7QT89PDw6Oz08Pjo7PT1AQEA+PDxAQUI7PDo7PTs2Njc1ODk+PTk6
-OD09PDo7PkE+PDo8OTg0Njw9Ojc2Ojo5PUA6Ojo6ODk4ODg8Ozw7OTc4ODk3Ozw3
-Nzs6OT07PTo3NDc3OTk6Ozs5PTg5ODc4Nzk0OTM6ODM1NjY6Nzg3Mzg2Njk4NzY7
-PDo4Nzc4OTQ0ODw6NjY3NDg5PDk7NzY3Nzc5PDg8PDk3ODU1Nzc3Ojo4Ojo6OTw6
-Pjw4ODw+NzU1NzU1NTY3NDQ0Mjc2NjUzNjk3OzY3OTg2Njk3NjY2NTYzODo1NDU3
-NzU0Mjc4NjU2Nzc5NzUzNDY1NTM2ODY5ODc4ODk1NDc1NzY4ODU2NDM0OTg3Ozk6
-ODk0Njk5Nzg2NTc4ODY1NTY4ODQ2Njg2ODczMzEzNjg2NDEzNTY0NDY1Nzs6ODc4
-NzQ0NTUxMzU2ODk3OTc3NDYzMzU2OTk4Nzo6OTk2NDY3Nzg6Ojg4Njg0NjU1NTc2
-NzU3ODQ0MzM2NTg1ODg4ODg4NzU2NTQ1NzU3OD89NjI2Mjc6OTg5OT06NjY3OjU0
-NjY3ODY2NjQzNDc2MzQ1Njg3Njg5Nzc4OTg5OTg2MTU0NjIxMzY5OTk4NzY0MzMz
-NTc4OTc3NTg4Nzs7Pzg3NzY7ODg2Nzg4NDY6Ozg5ODY3Njg6Nzg4ODU4PTk5Nzo8
-Ozk4Nzs4OT45PTs7PDg6Ojo3NzU1Njg7ODw5PDs8NzU0NzU3NTs9OTk7PDo3Njc4
-ODk4NTo6Ozk4Nzg6Njc1OTQ2Nzk2NTY2MTc7Ojc1Nj47NDg2OTk7PDg2Nzg7OTk6
-Ojo9OTg2Njc3Nzg5Njg4PDY2QD08OTczNzg4OjY3NDU5OTc4Njc4NjU2Nzs4Nzk4
-Ozk6Ozs6OTc0Ojc1ODo4Nzg6ODo9Pjs9Pzo4Oj45ODk4Ojs1Njg4N0A7PTo2Njk3
-Nzg5NTg5OTpAQVN3iZyrq5+IVUE+PT46ODg5Oj06OTY3ODo8Ozw7PDo8OTtBP09f
-amlbQj44Nz48QD08Ojo5OTo9Pj09Oj09Ozg4Ozs+QkA+QD1BOzo+PD0+PD48Pzw9
-QEA/PkFBQUI8QD06PT4+Pj08PTs7Ozw+Ozw+QDxDRD8+Pz48PDs+Oj08Pj0+Oz9B
-P0FFQD5BQkZDQkNKSkE+REVEP0JGSEpJSUFESEhLR0hIS0pJSU1FS0pLSktGSElG
-TEVPT05NVVJSVFZRVlFSUFNQVVROTVJVWFNRUExMT1JLSlVeXVVXXlxZU1JQUVdX
-VlJGSYO9zNXc3+Tm6Orr6+xXWFdiZGVaXlJNWGBRUVFVWlBYWVVXXFdSU1JTU1BS
-S0lHS0hMTklMTkhPTktGSUhNTEtLRkhGQ0BDPkZHSEhDR0VIRkhHSkdER0hGQkNA
-Oz08R0NGREQ/PUE9RUE+P0FBQEdAOUBHQURBQj4/ODo7PUE/QT06QEJEPT1BQDo+
-PTk7Ozs9PDs7OTg8Ozo8OTs7PDk6ODg9Ojs6Ojo7PTs6Pjw8PkM+PDo8PT09Pjo4
-QEBEREJAPjs7PD4+PDo4P0BAPkFEPT47OkA+QDtBQj8+QDs8Pj09Ozs/PT1AQT46
-PD09PT4+PUU+PTs8Pjg3ODk2Nzk5Ozo7OTxBPz05Ojk6Ozg+PDU5Ojo3NzY1Njk6
-PTs5NTY7ODg6O0A9OTg4ODk+Pjs5Ojo4Njc6Ozs5Nzg2NzY4OTY4ODo4PDs/Njk3
-OjkzNzw4NTg5ODk3NDM5ODw4OTo9Ozc4ODY1OTg4Ojs7OjM0ODo7ODo3ODY1Nzk2
-Oz8+PDo9OTg3NDUzNTo8Ozo4Njc5Oj5DQDs7Ozk2NTY3ODo7PDs5NjQ0OTk3NTg5
-Nzk5OTo4Ozk6OD04NjY1NDM0NzI2NDU1Nzk9NTQ2NzY0ODk4NDY2NDc1NTc2NzY3
-Nzk0NDU0NjY2MzY2NjY3Njc2Njg5OjY5OjU1NDU2NzY5Ojk3NDU5NzY3NTU4NTU4
-NjM3NzQ4NzY1MzU4ODY2NDQ6Ozc1NDQ3NjY0NjY0NDQ3NjU2NzY2Njg3NTY5NTU2
-NTU3Nzc0NTQ3PTk1Nzs5Ojg3NzM1NjU2Njc4Nzc3NjM2Ojw4OTg4OTY0MzU3NTQy
-Njg0Nzk4NzU6Ozc6OT07PDY2ODg7Ojc1NTY2NzY2OTc2NzM1NTU4Nzc4OTk6ODc3
-NTU4NzU4NzQyOTM1Njg2NjY6NTg3NTI1NTQ0NDc3NjY2ODY1Ozk2NDE2OTk2NTg4
-NjU3ODk6ODk8Njk3OTo3PDc1NzU1NjU2Njg5OTs7Ojw7ODo7Ozo4PDs6ODs2ODk6
-OTc5Ojo4Ozo5Ozg6PTw3Njc6Nzo7OTk8ODY4ODs+Pjs4OTs3OTc2NjU0NTY3NjY5
-ODo4NTc6Ojc7ODc6Ojg7ODc4Pjs6Njs5OTk3NTU1ODg5Nzc5ODw6ODg4OTc3OTk4
-Ozk8Ojo4Nzc1Nzg4Nzc7Nzo2Njw8OT05PTw6Nj08Ojk5OTY4NzY5ODk9PDo4ODg2
-OD47N0I9OzY4ODg5OTo7Njc4PDk6ODw4Ozo+Ojg2ODw/VmRfYICQh1I+OTk7PD1B
-PDo4Ozk2Ojs6Nzc9OTg3PT5BQEZme2Zia1g9Oz06Njw6Oz0+Ozs5OD1WXkI8PT48
-PkA6Oz49Ozs8Pjk8P0FDP0FCPDw4PD1AOT8+P0A9PkA9PD49QkI+QEI8Nz08Pzk8
-QEFAPkE+QEI6Pj88Ojs7PUA+PkBDRURCP0JFQ0E/QkVBQkdIS0Y9REVGS0hHSERD
-RUZJSUdHSkxOS0lKSVVPSUxGSklITk5GT1BKSVBSS09QTE5QVlZUVFJSWFhTTlFa
-VVJNUVNZXVhXUVJTV1pVV1ZVUlNQVFVSUVJThr/M1tzh5Obo6urs7FZPXltdW1ZV
-VFZaWVpbVlNXUkxYW1ZXU1NQT09QTFBSS01LSUpLS0lLTUdOSEpKREVHS01HQURF
-SEI8RUI/QT9HTEpKRkVCREhJTEVAREdBPkBBPj1BQEJEQT06PkFBQD0+P0JGPDxB
-P0M/PkM9REQ+OUJBRD9DSEA9Ozw9QkA9Ozk6Ojw2Nzw7Pj08Ozo4OTo7Ojk5PEE/
-Pzs+OUE+PTw7PTtAPT49QDs7Pz89Oj1APUBEREU/PUA6Ozw8PT5CQzxAP0E8PD09
-PD5BPj49PkE8O0FAQD48Pz08PUA9PD48PkBBPTw6PUA9Pj48Qjw4OT47Ozs5Ozs5
-Ojs4PDw7OTtAPjs8PD4/OkE6Ojc3Nzs5ODk+OTo5Ozs8OTw9PTs5Njc5Ojo6OTk5
-OTo7OTo7Ozc6NTg3OTU4OTg8Ozk2OTk5Ojs6Nzk8PDs9Ojs5NTg2ODo6Nzc5PDg7
-OT0/Ozo6PTk7NTU6ODo8OjY5ODg0Nzs8PDw3ODo5OTY8Njc5OTk6OTY1Ojs7Ozs4
-OT07Nzc2Ojo7PDs8OTczOTc1NTY7Ojg2NTc4OTo5Ojo4Nzk3ODY2NjU4Nzc0NTY2
-Nzg5OTY4ODo3ODc2OTw0Mzc2Nzc4Njc4Mjg4NzU1ODY1NzY3Mzc6ODU2Njg5Ozg3
-ODY7NjY3NDQ3NzQ3ODk2Njg7NTQ0NzcyMjg4NTU0MTIzNTY0ODY2OTs6OTo4NjM1
-MzQ3NTg1NTM2MzY6ODg0Njg+NjQ3NzU4ODg5PT45Ojo4Nzg8PDc0NTM0MTU0NzU5
-ODc5NjYyNzY4Ojs1NjI0NDg4NzY3NTU0Nzs2Njk6OTs3Njg5PTs4NTk4Nzk9OzU4
-ODg6ODc5OTk3OTc1NTo6ODc0MjQ2NTY3Njo8OD47PTo8Nzc5NzU1Njc1NjY3Njc4
-NzY4OTs5Nzg1Njo0MjY4Nzg9Ojs4NzU2Nzc4Nzg5Nzg2Mzg5Ojo8OTY3Nzo3Njg5
-ODk4ODk6ODo4ODs4OD06Njc2Nzg6Njg5ODc5OjtAPTg5ODo4OTc3OTc6ODw4OTk2
-ODY5Nzo5Ojs5NDk3Njg1NzYzNTc1OTk7OTo3Njc2Ozk5OTY7PTo5OTc4ODk7Ozo3
-NzU7Ozo6Ozk5ODo6OjY1OTo8OTU5ODo4OTs6OTk2NjQ2NTU0ODU0Nzk4Njc1PDo7
-OTg4Nzk8PDs5ODQ2Oz49PDY6NjY4PT8/PTxBPTw8ODg5NzY5Nzg2Njc5Oz0+PD07
-PD47Ojk3ODxwZUlHVklMQDg7OD4/PDw8PT07ODk8Pj04ODs5Pzs8P0FNc31tTWRf
-TT09OTw7O0BBPDg3O0E/UZ+cUkJHRT48O0FBOz4+Oj1DQTw7Pjo+Qj8/Oj09PUJD
-QkRCQ0A7PUBAPj9BQkNFQT0/ODk6P0JCQkNBPDw7PkE8Ozo9PT1CP0NARUNDQ0A/
-QUNEQUA/PkFDQkVGSU1JS01KRk5OTElOR0pIRURJRkdESU9HTFBLT0tMS09LSVBQ
-UlFRTlJQTklOUVVRVldQVFZaWFdQTlNST1JQUVJUVVhVTk1RU1JTU1pYVk9ZX1FJ
-T1KJvs3W3ODk5+np6+vrW1VlbWJYYF1aVlpaW2RkWldXWFNUV1ZaVl1TUFRWVVdS
-T1ZVTEtNS0pLUU5LSkZITk1KR0REP0NGQ0JBP0JKSUdFRURCQURIQ0VGRkVLTExF
-RUdERUBCQT9FQD89QUFDRD48REA+Ozk+Qj1AQTtBPjw/QkY/PT88QD07Pj08OTc5
-PDo5OTs6Qjk5OTk5ODpAPjw5PDo5QT8+PT9APUE9PT0/Pz09Pzo6PTw7OTk6Ojk9
-PEE+QkRDPjs3Oj89OzxDPz49PzxBPzxAQj49PT06PEFBPkBBQT5APT07OztBPT48
-PkJBPEJAQUI/QT0/QDk7PUBAPTw6OTo6Ojk7QT0/Ojk9OTo+Ozo3Ojw6OT08PTs4
-PUA6Ojs4Ojc4OT09Ozs4Nzk5Ojg5Nzo5Ozk1Mjg6ODs7Ojk5Nzc7Ozg4OTk2NTg6
-ODw3PEI8OTk5NTo4NDw/OTs5OTw4Nzc3Pjs5Nzc5Ojo4ODY6PDs+PTo3NjY3Ojs6
-OD47NzY5Nzg5Ojo3OjkzNzs4Ojg3OTw6NzY4ODs6OTk3ODc2OT06Ojo3Nzg6Njc2
-Njg6Pjw7Ozg4Oz06PTg4OTs4Nzc3ODk2OTg4ODY2Nzk3NTc7OTs4NTo5ODQ0MzQ0
-NDc5Ozg2NzQ2PTw7Nzo4NTY2Nzc1NjY5OTg9ODw6NjY2ODMzNjc5ODY4Nzc0NjY4
-ODc2NTEwMTYzODY1Mzg5NTY4ODg5OjY3NTc1NDU1NDU1NTk3NjY1Qj05OTg3NTI3
-Nzc8Ojw4OTY3ODczNDQ3NzQ1MjM3Ojg4ODU5Nzg4OTc7OTo2NTIyNjY2NDU3NTcz
-MjU3NTQ0NTYzNTk+Ozk4NDQ2NTg5PDg3Ojo6NzU4Nzc1OjY2NTc2NTk2MzQ1NDUz
-NTY5ODs6Oj05NjY3Nz49Ojo5NjU4NjQ3NTo6OTc3ODg3NTc7ODY3Oj04ODk3NDI1
-Ojg5NTU4OTQ3NDQ0NTY3Njg5Njc4Nzc5Nzo0NjY4ODY4NTg5ODg6OTc3NTg8ODo5
-Ojk3PDo3Ojg8Ozo3Nzo4ODc7Ozw8OTc2Nzc4OTc0Nzk8OzY0Njg4NTY2NTQ3Nzo4
-ODg0Njo4Njo2Nzg2NTY2NzU3Njg5Ojk2NzY2ODY4Nzc4ODY3Njc3Njc4PDo5Ojs5
-Nzc5NTIzNTc1NTU3Nzs3OTw3Nzo5OTg3ODg1ODo4Oz06Ojk9Ozo4OTs4ODg6Ojs9
-PTw8Njg8PDs2NzY5Ozc3OTk6Ozo2OTg7Pjs7PT5AS3ZkQkE+QUA+Oz86Ojw7Nz1A
-Ozw7PEA7Ojs8OD5GYEpFZHeBd1NBZGtCPT5AQD8/PTw9Pz09PT53rZBMQUNDQDw7
-Ozo6Pj8+QDs7QUNCPEJCQUA+PD0+Pj9CQT5AOzo5OkE+PTtBQkI/QUI8PUFAOzpC
-RERDP0BBPUBCQkVEQkVGQ0NERUVHREVFQ0JCRT4+QkhJRUVLRUlNTUhIRklHSUZI
-SkhGSEtITU1JSEpHR0xRSklITVBJR0pPT1BOTU9QUFBUU1RWVVhYVVNTVlZTTFBW
-UlJRUlVZVlFXVVBTWVVSV1RXZF5UUEZHT46/zdXc4eTm6Onq7OtPWGdeT1NcVl5j
-X2BaWFtdWVpaV1NUTFJUVldTVllSTUtRUFBOS0RFSkpQTUxNSkxORUtJSEJDS0ZF
-RUdHRUNGR0VGRUNCQkRFRkRFSEpGTkhFREBGSkhFQT9CQkFFQjw7QEFDREE+Oj89
-PD48Q0I/Ozg+P0I/Pjw8Pz49Pjs9Ozw8Ojw5OTw5OTc4PDw8ODs9QDs5OTs5P0A9
-PDs8Oz9BPUFAOzg5Pzw7PkM9OT09OTs6Ozw8OTw8Pj8+QUA/PDlDRUE8Q0FCQD47
-OUBBQD49QkNDPzw6Pzw5PDs6Ozw8Pj89Pj49QUBBQUFCQj89PzxAPj07OTo5OTY8
-Ojo7Ojo8PTw7OTg8Oz88PDo6Ojk4Oz49Ozs5PDg1OTo7PTs7OTo3Nzs5Nzc6Nzk2
-Qjk1NjQ5Ozg5ODk3OTk5OTo5OTo9OTg5OTg6OTs6Njc4Nzs9PUA5Ojg5NzQzOjk1
-NjU2NTY1Njo5PDo6ODo3Ozc7OTg9Ozc0ODs+Oz06ODk8OTc3Nzk6Nzk6Njg5Nzk1
-Nzg3Nzk4Njs6PDk5OTg7OTY5Ozk7Ojo7ODc9Pz47Ojo5Ojk4OjYyNDY3OTk4Ojc4
-ODc3NTc2Nzg0ODc2Ojs6ODk6OTk0ODU2OTY1NTU0ODs5Njg4Njo6OTg5ODc5Nzo5
-ODs5ODY5ODY4ODc4Ojo5OTc4ODY0Njo4NTY3Njo4NDU1NTM2NTQ2NTc3OTg5ODU3
-OTk3OTw1Njc1NDMyNjY0Njc4ODY4OTc1Njg+ODc1NzY2ODY1OjYxOTU0NDY2MzM1
-ODg1OTg3NDQ3ODY3NTg3NDg2Nzg4NjY3ODg3NjY0NjIzODs9ODo2NDM2OTo4Ojw4
-ODo7Ojc2NDQ6NTQ3ODg2NTAyMzUzMTo5Nzk7OTY7OTc9ODY4NTM1NjU3Ojs1MzY3
-NDc2NzQ4Ojc8PTk5NzY4Njc3OTc1NTU5ODk5Ojk4ODg2NTk2Nzo3ODg6QDs2NTU2
-Njw7OTo3ODg4ODU3Ojc3ODc7ODs6Ojk5Ojc5OTg4OTY0NDY4Nzs6OTc3OTo5Ozk4
-PDs5NjM0Njo5ODc9Pj89PTk7Pjg4ODk4Nzk5NzY3ODg4Njc0MzY4PDc0Njc5OTk2
-PTo4Nzc3Ojk3NTM4NzY0NDg8PDs2ODc2NTg4NjY2ODg1Ojk7OTo3Njs6OTo5OTU2
-NzY2OTw5Ozk8Ozo7Nzk7OjU4ODg7OTc3Njg8PT06Ozg7OjU3Ozs5Nzg2Ozc6Ojo6
-Ojw/PD5yhmU9Pj8+PDo5Njs9Pjo5Ozs6OD1BOTs4Ozk/PVSXh4GMiIVgQT9hZkg9
-PT89PD49P0JBPz4/SZqzgEc/OTs9PUA9PDs9OjY9Pjw8PEBBQEBBQz07Ojk5P0RG
-QTo5Nzk6PkBCQEE8QEJDRUU+PD1APDw9PT5DQUM9QUdGRz08RENFQUM8QERCQz9C
-RUlLS0hERUdIREZFR0xPTExMSUlKSktERkdLSk9JRUhGR0RQTEdMUUlHR0pMT0lL
-T05NS1RSVFFPVVhSWFZZWFRUWVhSTktNVlVUU1NRUl1bUFlcWF5hWVtWVlVRRUpH
-f77N1d3g5OXo6evr61lSXltUW1pXVFNYXlxYXVdWV1xXWVZUWVRRU1tZVlNNSEpQ
-TUpRUFVQT01LUUlJTEpOSEVGRkNFRkdLSkpNSkhJTEdIR0RBPkRERkRGREJFSUU+
-P0RCQ0I+QUdFREVFRENBRkhDPkJEQj0+PTw/Pj49QERFRkM9PTw6Ozs7Ojs5PDs8
-OTo7OTY2Njk7QD47Nzo8Ozs6Ojw9Ojo7OTo5PD45OkBCQj8/QT8+REFBQj89PDo4
-Nzc6PD5BPkE/QT09QD9BQUVFQUA+PUBAQUNBQT48PkE+QD85Ozw8PDw9Pz48PEBE
-PTo+QD0+QkI9PDw+Pz87Ozw8OzY5Ojg5ODo2Oj45NzU1ODg6Pzw4Nzo6Nzg5OTg5
-P0A4ODs7Ojk6PDk5OUE5OTc3ODc6OUE6OTc6Ozk3ODs6Ojg6Nzo5ODo4OTc3ODg3
-Ojo3OT03ODg6OT06PDo9Ojk4Ojo7ODk6Ojg5NTg3Ojk7Ozk7Ozg3ODo7Ozs6Njo5
-Ozs+Pj0/Nzk4Nzg4Nzo3OTc4Nzc4ODk4NzQ1NTk3ODk7OTk4ODo6PTg4Ozs9Oz44
-Nzc4ODg2NTg7Njc5Nzk2ODo4Ozs4NTY2MzQ3NzY2MjY1MzU2OTs3Nzg6OTw5Ojs6
-Njg5NTc3ODY3Njo4NTc3OTs7ODc7PDw3OT03OTc0OTQ3Nzk4ODg4Nzc3Njk7PDg1
-NDQ2OTg3NjY6Ojs9ODk4Njk3ODc5ODg3NzU0NTY1NjQ2Nzk1NTM2ODY0NTY4NTU2
-ODg5NzQ0NjY1NjU1NDc2NjQ1Njc0NTQ0MzU3Nzc1NjQxMTM4Ojc1NTg3NTM0Nzc4
-NjU4PDk4OTg6Ojo5OTc4OTs2Njc2Nzc0Nzk6ODU2ODU6NzY3ODU3OTU3Njc1NDg3
-Ojs2PDs7NTczOzc5ODUzMzU1OT08ODY5ODc2NTI1ODk4OTc4OTg7ODU3NTQ2Nzc4
-Nzg3Nzk8Ozk8Ozw5OTg3Ojo5ODw5OTg4OTk7Nzg7PTw3Nzc6OT06Ozo8Ozk2ODc2
-Nzk2OTc2NTc2NjU3Njc3Ozk2ODg6PDo3NTY4Nzo6Ojo7Ojc7Ozw6Ozs7NzQ2OTY2
-ODc4OD08OT06OTQ2NDU1Njg4ODk7OjtBODg7Ozw4PT88PDc3ODg5OTk6Ozs7Ozo1
-NzY3ODc2NjM0NTs6OT04Nzs6Ojk5OTs2OTc3NTk5PTo6OTk6ODk4Nzk8NjU1OzY1
-ODg7Ozo5ODo8Ozs6Nzw4Ojg5PDtGRD09PD9DVIOCUz49OTg6PTw/PT05Oz9BQD08
-Oj08PkNDQUBJeK+znXZmUUZDRGFlUUJAOz09Oj08OEA+PDxCZ411SDw6Oz06PUBB
-Pz0+REA9PDw7Ozo8QUE/Pz05ODo7PkM+Ozw8Ozo+Pjw8Pj49PkJBP0A+QURBPDo6
-Pj4+QkVFQ0FER0VEPT9FR0A+QkM9QUNIRj5JTEdHREhHRUhIRUZNSkdJRUlJS09I
-S0hOSUlHTUtESUdCR05QTUtKRkdKTEpFTVBQTVdXVVhRUVVRTFRVU1xWVE5KS1FW
-YFlXV1dZWlxbXF1YWVtcWlVPUlJUSUd5vc7W3OHk5+jq6uzsUFJWUVhbXFlYWlVc
-XVhXWVJPUlpUVFFVWlhRUVRTUkxMRk5MSlBUUVFKSE9KTk5NTUxNSUhMSlBMSEdN
-TElGREdDSkZFRUJBQURFS0dEQkNCR0RGQ0dEQEBBQ0pDQEBLSENHR0dEQD1AQDg3
-PUFFQj47PEE7PDs8PTw+PTc3ODw8Ozk4Ojk1OTs4Ojg5Ojk4PDo5PDw9PDs+Pjs9
-PkA/QkFCPkFDQUM9QUNCREI6OD0+Ojo8PD8+QDo8OkFAPUFCPz47P0NERUNDSEFB
-QT5CQjs+Pjs8PD4/Qj1DQEE9PDs5Oz0+Pj4+PEFCP0I+PDpAPzo5Ojk2OTw9Oj49
-Pzs6Oz09Njc3NTg7PTo5ODg2NTUzODk6Ozs5Ojw6Ojs4Ozc6NzQ1Njg4Nzo6ODk7
-OTU5QTw2ODw7OTg1ODs7Ojk3OTU3Ojg8Oj89ODs8OTk4ODU2OTw5Ozg4OTo/PD48
-PTw6ODc3OTs6Nzk1ODY6ODw6Ozs4MzU3Ojs7O0A+Ojs4Ozk2Njk6OTc5Ozk5Ojc2
-NDc5ODg8Ozc3Ojs+Ozo8PD43PTo6Ojk7Njc5ODg3Njg2Nzg6Oz08Nzc6PDg5Nzc5
-OTc5OzkyNjg4ODY3Ojo5NTU4Ojo5Ojs4ODU2Nz07PT06Nzw7Ozc4OTk3ODc1NTg2
-Nzk8OTY1MTg1NTQzNzs2NjQ0ODg2NDY1MzY2ODk1NjQ4NjY4Ojo1OTQzNTg5Mzc4
-MzIzNDMzOTc0MTUzNjY1Njc4NzY4ODc5NDQ2ODc3ODk7OzY1NjMzODc1NTc0NTc4
-NzIxNDg1NTY1NDY4OTk0Ojc1MzU1NTQ3Nzs3Oj9DRUZFODg8ODU5Ojs3Ojg3NzUy
-ODw3Ojk6Ozg4Nzk4NDU5QTY5NDg3ODU0Njc5Ozc6OjY7Ojg0NDk4NTczNTc3Njk5
-ODc4NzY0NTY4NzU5ODc2OjU2Nzk4Nzc4ODU0NzY5PD04OTg4ODc8ODc4Nzk8Ojk1
-Nzg6Njk5Ojs9PTg2ODs7Ozg3ODU2Nzs8ODc0ODQ3ODU4Njc1ODg5ODw4NTQ5Nzg5
-OTs7ODg3Nzc2Nzo7OTc2Nzc5OTY3NzU4Nzc3ODY2Njs6PDg6OTg3OTk3Nzg6ODc6
-Ojk5ODg4OTw4OTo4PDU2ODk6PEA8OTk4ODg3OjYzNjY0NTgzOEA6Ozc3Nzg4Nj0/
-OTs5Ojo3Ozs5Ojw7OTY2NjY2ODw9Ozg4Ojw6PD06PDs8OTg4Ojs+PDw8SV1SSz49
-PkV2kIBCQT1CQUE8OjxBPj5HXUxAPUBBPTw9R1lRVnijp5yHWUQ7PUJHXlJIP0BB
-Pj86Ozs8PDc1Ozo7S0M+Pj89PDk6Pjw9Pj8/Qz47Ojs5Pzw8Oj09Pz44OT4/QUVD
-Q0RAPT5COj1CPjo9Oz09QEFAPD4/Q0FBQkQ+P0RFP0NFRUI8OTo/QERCQUFDQURG
-SEVDRUhLSUVDQEhJR0hOSkZGTUxPTkxJR0hKTUlJTEhLSEJGT05RS0lKSkhOSFBQ
-UVNWWVJPV1dWVFZYVFJQU1hSWl1XVFBOV11ZVFVSWllUVGBdXlpZUlRTUU5KRn67
-zdbc4ePm6Onq6uxTVlRXW1tbXV9eWlVdUFFTU11WUlJRVFhVUktLVVhXV1ZQUUlO
-S09NTUpNTEtGS0RIR0ZJR0xRTU5ISUlHT0pLRkRDRkxOSkVLQj5DRUVCRElEQ0RF
-QEZCREBDQj1BPz1JR0ZHREtPRUI+Q0VEQz1CQ0FBQj8+Pjs8ODs8Ozs9Ojw9QDs8
-PTs9Ozk5PDk6ODo4PD07PDs5Nzg5OTc5PD9APkNHRD1DRUA+QUA/Oj09OT0/PDw8
-PT48QD4/QD9CNzxBRkVFQkJCREVFRUVDRkI9Qz08PDlAO0BAQD88Pjw6OzY6Pz47
-PUFDPzg8O0A+QUJMQkI/ODQ1ODo5PDs8Pkk6PDo3Nzs5OTo+QkI9PD87OTk2Nzg/
-Pzw3Ojo6Ojo3ODY8OjY4OTo5OEA5Nzg6OTk8Pjs4ODY4ODw7Ozo4NjU3ODg3OTk5
-Ojw6Ojk5PDw3NTc5PDs5OTo6OjtCPjs7Nzc3Ozw9Ojk4ODU6Ozc1Njc5Nzo3NTg5
-PD48NzY5OTY2ODo7OTg8PDo8OTk4OTc2Oz09Ojs5ODg7Ozw5ODo9Oj44NjY4Ojc2
-Njk4OTs4ODc3Njs6OTo4NjY3NzY3OTs7Nzg2Nzg4Nzs3Nzc1OTU4ODU4Ozo6Ojo2
-Njk6Ozg7Ojo6PDs7ODw6Njo3ODo6OTk3OTg4Ozc3Nzs9Ojc4NzY1MzE0Nzg3NzQ3
-MzU1NjM0NTU4NTc4NTg6OTY2OjY3MzEyNDIzNTU4OTc6NTQ4OTg3NzQ2NTc6ODg5
-NzU3Nzc4OTk6OTg3NDk4Ozs0NjY2NTY3OzY0Njc0OTk2NjU3Njo5Ojc5OjUzNTM2
-OTo8QURFRkI6ODc3OTs5ODc7OTw8Ojk4Ozg3Ojk4Ojo3ODg5Njg6OTc4OTg0Njc2
-NTc0NjY5NjUzNDU3Nzo4NTg5NjY2OTk6OTc7OTk4ODg4NzYyNjQ2NjU6PDs6NjY4
-Nzk3MzM4Nzg2NzU6PT06Ojg3ODY1MzM3OTo4Njc6Njc7OTg4PTk5NzU3NTc6NzY1
-Mzc1Ozo6NjY4NzY2OTg4OTo8Pjo4NjY1Nzc6PDY2OjY1ODU4OTc2ODo6NTQ3Njg1
-MzQ4ODg6ODk5ODc3OTs5NDk4Ojk9PTk8OTc5ODk7Nzg2OTg4Nzg5Ojo5PDo6Ojw+
-OjY4Ojw2NzY5Nzk6SDs6Nzo4OTk7Pjo2OTo5PD05PD05ODo4OTc2NTg5ODY+PDs6
-Ojk7Ojs5ODw9Ojs6PDs6OzpcfYNoVkNBUoePXkA+Qj4+PkM9Pz0+RmR1Vjw6Pj9A
-P0Fvl4ueqKWZj19CQUJDQ0VgTUM+PT08Oz09Nzw8Ojw6ODw7PDw/Qz06Nzw5OT08
-OTw8PDs8Ojg5OTg3Ojk4Nzk7QD87PEA+QkE+QDo/PT8+OzxBQTdAQUE/Q0Q8O0A/
-QEA7P0JAQkNFQ0JDQkhCPkdERUVJSERCSElMS0pERUhLSUhHSUdISExKT09OUlFM
-SElISU1NT1JNS0pNSktPUEhJSUtPUE9TU1NSWFlRUFNVWFNTUExNXFtcWlhTVWFc
-W1VVU1NTWU1UVFZYW1dYV1VOTU5FgrzM1dzg5Obp6err61RWZFpVU1taX2BgW1hW
-X1hPU1JOVVRRUlJOTE1RUVVWT09LSU1OTkhFRkdDRlNMTkpLSEpMSkhDRkVFR0pJ
-SUpRRkZIS0tQS0lFP0FDQkZGPz4+RUY+R0VCQkNFSUNDRT8+Pz1ER0dIRUU/PztE
-QUFDQUFCQ0RCOjs2OjxAPTw9ODo7PTw9Qjs4Ojg4OT0/NTk5Ojk3Nzs6OTo7Oj0+
-Ojk+QUZBPz4+PT09PT87OT5CQT88Ozo8PTs+Pz9BQEVEPkREP0BDQUE+Q0JCQERC
-QT4/QD0+QUI8Ojw/Pjo4Ozk9P0VDOj1BP0M9Oz9BO0BBPkFCPz46Ojk5OjxEPz07
-PD1AOzk7PT06Ojo+QD9AQUJARD42Ozk6Ojo6PT89Ojk7ODg5Nzo7PDw4Ojc6NTY7
-Ojo5OTc3Nzg2Nzw3Ojs7OTg3NTg3Nzk3OTo5Nzk2ODs5OTU4Nzc5Ozw5OTk6OTs9
-Oz05OTw5NjY5PTk6NjM3Njs5Oj04PDs9Ozs6Oz46NzY8QTw4Nzo7OTg6Ozs5Nzg4
-Ozo8Ojc4OTk6OTs9PTo4Nzg5Oj45Ozk5PDc6PDg2NTc3NTk9Ojk3Nzk4OTY5ODY1
-NzY2Njc4Nzg2NzU2NTc6Nzg0Njk3NzY2Njg6Ozo8Njw+ODg8OTk5NTY3ODk5OTk5
-Nzc1MTQ2OTc6Ojg5ODc0ODY2ODU2Ojo6Njc4OTYzMzQ3ODg6NTY3NTg6NzY1NDEz
-NzU6OjY0Ojk5OTg2NTs7NTk4PDg2NDQ3NDMzNjQ0OTo6ODc3ODk6Ojg1OTs5ODg3
-ODc2NjU1NDc2NzY3Njc4NzY1NDQ1Nzc8Ojo7Oz4+ODQ5OTw9ODo8Ojw6Ojo6PDg7
-Ozg1NjY5Nzg2NDc3Ojs5OTo3ODY2ODc4NDY2OzY0NDg1ODw4Mzc8PUA3NTU4Nzg6
-ODY6PTw2NDs7OTc5Pjo5ODs7OTc3NTY1Njg7OTc4NzQ3PDo5OTo5ODk6PTo3OTc1
-Nzs4Ojk3Nzc4PTk4Ojo4Ojo9NzU6ODQ3OTk5NzU6Ojc3OjY3Nzk4PDo2NTY1NjU3
-NzU4PDs3NjY1Nzk4OTc5Njg9OzU0Njo7OTg5ODg4OTk4Ojc7OTtAPDY3Ojk2Njc2
-Njc2OTk4NzU4Ozo3OTs7Ozs9Pjs7NzY4NTY1NzU4NjQ1Nzo/Rjk4Nzk3OTo6NTs3
-Nzc5OTY6Oj88Ozk6Ojk2OTk4OTk9OTo4Ojo9QDs6Ojw6NzY7Oj07UoSUjoCBYlF3
-hW5IPzs4Ojo8PUFDPkRhjZdVPD09Pz07QXuJmaSwpI1aRj8+QD07RGdUPj0/Pjw6
-ODg9PDs8Ozs3Pj0+OTo9PT06OjxCQUBAPjo7Oj5DQDk3ODs6PTs4ODo8PD8+Pj8+
-Oj9CP0E6P0I9O0Q+OzpBQkRDPj07Pj9DPkA+PT49QURGRUhIQD9BPUVIS01KRklE
-R0hFRkdKS0xNSkRFRUNCR0lISUhKTExHRUpITElLSUtKTE9LTlNSUU1KSk1SWFVR
-UVRZV1dXVE1SVlNRV1xWWVhWXltiWFpXWVFZV1VaV1FOVlpZW1lZWVJTTEeIu8vV
-3OHk5ujq6ezsX2BiYVtYV1dcW15cXFpVUVFOTU1RUExPUk1OTkpVUE1YUE9LSExH
-SkRDRUdHTk9QS0lOSkVJRUNHRkVKRENGSklGRUdGSEVAQ0NIQkRAQERCQUVJSEQ/
-P0RHSURDQ0JDQEJEQD1JRkZFRD5CRUFBPjw+OD5BQkE8Ojs8PTs8PT85Ozk6Ojo7
-PUBEQ0A9Ozo6OTo5PTs7PTk6Ojg8OD09Oz08RD89Ojk/Pjs7PD08QUU9QD1DQkA9
-PUE8PEFBPD49QkFCPj4+Pjs4PkNGQ0E+QkQ9PUFDPkM/QT1APTo9OjxAPD8/P0NB
-PT47ODw5Pjo8PkA9PT09Pjg5Pj07Ojg3PT06PDg6OT49Pj0+QDo+Qzs7PUM7Njc2
-Ojk8Oz1APjs3NzpBOzo+Ozo4OTg6OTs4OTo4ODg3ODk5Ozc4OTc2OTg7OTk3Mzg2
-NzY2OTs6OTo5Njc4NDc2QUM8Ozs7Pzs5Ozg6ODo9OTo3ODk4NTc4NjU4PD05ODU4
-Nzg7Ozg3Nzw7Njg6PDw6PD5ANjs9PTg5PD09OTc5OTk7OEA6OTw7PTw5Ozk5ODw4
-PDg5PDo4RDw4ODw9ODc4OTk5Oz46NzQ2NjMzNTc1Njo2Nzc3ODk6PD05NTY3NzY2
-Nzc4OTk7Ozs7Ojc2NTU1NjU1NzU2Nzg7Nzk5ODo3NjQ6Ozg5OTY2ODg3OTY1NTU5
-ODk4NDc6PDc2NTg3NTc4NDU2Ojc1Njc2Mzc2MzU3ODk4ODo3NTc5Nzc5ODg4ODY1
-NDYzNzs5NTc2Njg2NzUzNTg6OTU2Njg4OTg4NjY3NjU6Nzc4NjY2Njc3NDg7Ojk5
-Ozo7QT06OTk2NTg7OTc6Njc5ODs6PTg5Ojg3OTk7Ojg1MTM0Nzc1MzM1NDY4Njk5
-NzM0Ojk3NTY7ODc4Nzw7NTM2ODs5Ojs7ODk4Nzs9Ojk6NjY3Nzk6OjY2Nzg3NjU2
-ODY5NzY2NzU0ODo4OTc7OTc1NTQ1NTg7Oz86Ojc3ODo5OTc6Ozo7Pzo7ODg4PDk7
-ODg3Njk8OTk1NTg4OTc4OTMyNTg3Ojg5OTM0NDU1NTY4Ozk3NTY4O0E7Ojo8PTo7
-ODg6ODg5PUE9Ozc6QD89ODU3NT09Oj06OTw8OTg8Ojk5Ozs6PDw6PTo8OjY4OzY3
-OTk6Ozk7OTk5OT1FQDo6Ozs7OTg6Ojw5Nzo4Nzc4Pjw+PTs6Ozw3Njg4NDg7OTc5
-Ojo+Ozk7PDg7PDU4PDpbl6aVnJybg395UkBAPjxAPDk9Pj48RJG1tWBEQUQ/OT1F
-UlppgainkUtAQUA9PTxIYVdEQDs6OTY8PT0/PDk5Pjs7Pjw+Ozs8QUBAQT49Pjs8
-ODo6Pj05QD42ODo4NzU1Nzs8PT89PT48PkFERDw+QUBEQUJKRkZCQD5DQUBAPD8/
-QkBAOjw8QUVISEI9QEBCQUNFRURHTkhNTUtKTFBOSkxMSkBCRENIS0tISklHUExI
-SFBLSElST09NT0xKSk1SUE5PUlpXUVNTVFhWWVVOUFBRT1FWUElPWVxbUk9dXFVX
-XllVT1tbVlJUU1lXU1RVU1RMSoS5zdXc4eTn6Orr7etgWV1ZWVxcVltZV1lZWlRQ
-T09dV1NPTUpMTVRVT1NSTk9WVFFRTEhISUdLUlFPS0tPTEhIR0ZJRkVGSU1MRk5M
-SUZEQktFPj1ERkVDQz9ER0JCRT9CQElESEZISkI7QklHQkQ9PUBBQkBAQD5BPT49
-OTxAQEJBQDk7QDk8PTc4OTs6Njs5ODpBPkJEQj88Ojs5OTo7OTo5PDs8Ojg9OzxB
-QUZEREE9Ozo9Qz06OT4/QENBPzxDQUBBPDo8O0M/QDs9Pj07Ozw6Oj5CQj5AQEFB
-RUFFQ0E+PUNCQUA/PUFDPD0+OzxAP0A8PEFAPj4+QEFBQTw7OTw8Pjw8O0I8Ozg4
-PT04PD09PT09PD07QD06Ojk9QT07OTQ5OTY8O0A+Ojc6PEA+QD87Pz04ODM4NTg6
-Oj05ODc6Ojo8OTg7PTs4Oj1APDs6ODU1ODk7Ojs6PTk3NjU5OT4+PDg3Ozo6PDo6
-PDw7OTY2NDY3Nz42NjY2OTo5PDk2Ojs7Ozs3OTo6ODQ6Njk7Ozk8Ozs8Ojo6Oz49
-Ozw5Ozc6PDw8OjY4ODc7Ojg4Pjo5Ojo3Nzg6OTo6PDw9QkE5Ozc4Nzg7Oz07ODY3
-NDc6PDo6OTo4NTk4Ojk4Njk7Nzg5Ojg3NTU2Ozs9Pjs6OTc5Pjo2NDY0NzY4NzY5
-Oj08NjY5OTQ1OTg3ODg6OTczNDU2Ojs5ODg4ODo4NTY2NTc0MDY3NDc5Nzw7NTc6
-OjY5OTk5ODc2NjQ0MjY4OTY4ODc1NDM3Njg0NjU1Mzc5NjY0NDc7Ojk4ODg5Nzg4
-NjYyNTU0ODc5Njg4OjU0NTQ2NTM2Nz0+QD08PTw5Nzg9Ozk5ODg3Njo4NDo6OTc7
-Nzo7OTU2NDQ4NTU3OTU3NzU4NzczODk3ODY3ODg0MzQ1ODc4Njk5NzM4OTg4Nzg3
-ODY2Nzo6OTk4NDs4Nzs3ODY3NjY3ODc6ODg3Ozo3NTU2OTg4OTk3NTY5Nzk5OTs7
-OkFCNzg7ODg5OTg5ODY5OzYzNzk4Nzk6Nzo4ODc4NjU5ODs4OTg6NTg2NzY2ODs4
-Ojk9PDo6NzU0Ozs6PTg5PkA7OTw6OTs1ODs+OTg6Oz04PTo/Ojo9QDlDPDg1PDk5
-ODk9Pz4+OTo+Pj04PDo6Ojc3ODo6OTs7Nzg+PTw2OTs7O0JOOzo5NDg2OTo3NzhB
-ODU4Ojg7PT4+Ozw9OD05PkBAPDs3PT07Ojk5Oj05Ozk9Pj8/Pm6Ngn6msq+dhVg+
-QD48Ojo8PTxBQTxCj727hUNBPz0/Qj1DQUJbkpyGR0BCQT47PUNiX0hBP0A9QEFB
-P0A9Ojo5PT09PD9APD0/PTs+OTtAPz89Ojk5OTo6ODw5ODk5Ozw7OkBBPUA+PEBD
-Qj8+Qj4+RURDQkNCPkZIRkREQTw+QUZDQ0E+PkBEQ0BEQUA+Q0dFREdHQ0VLTU9L
-TktLT0xGSFFITEtKSEpGSEhMS0tLS0tNTlFQS01PTkxJS01NTU9PT1BYVlFVUlRU
-UlNSUFFRV1lVT1VMS1JYWVdPVVhVVVdaUlhcVFxbWVFPU1ZZVllgWVBMhLPK1d3h
-5Ofp6ers61hcXVdXXGBcVlNVWVtaW1ZcV1NOTU5VUVZRUFVSWFFQTkpOUk9QTEhL
-SUlITUxGSE1IRkZIRktJR0tMS0tQTUtHSk5MR0RCREFHR0M/PURESUU/PUNDRkRH
-Q0NCQkFIS0NERERCQD0+QT08Oz09RUVFQEE7Qj0+O0E7PD5DPTo4Ojc8Ojw9OjxB
-QT08Pjs9Pjw8OTc7QD07O0BAPj1AOTxAQkFAPz04Pzo5O0BDQkNDQURBQT5GRj46
-QUNFPUE+Ojs8P0ZAQEM/Qj4/Q0NCP0A/OztCPj4/QD5CPT9DQD4/OTc6PDs6OjY3
-Oj5AQzw+Pj08PD0+PDw9QUM/OT89Oj09Pj89Ojk4PD05Nzk+PDs9PDY8PDs2MjI5
-PTw9QD09PT0+PUA8PD44Ojw5PDg7ODU1ODs1ODs+Nzc6ODczNDo6OTo6Oz02PT86
-OTw6PT89PD05Ojs/PDg7OjU2NDg3OTk8Ojc3Nzk4Ozg1OTk9Nzg6Ojc7PEA6Pjk7
-Ojs8Oj08Ojo4Ozw8PDw6ODk5Ojg8PD49PTc0PDs7PDo1ODk4Nzo4ODk5Ojc6Njo4
-OTk3NzQ4PTc3ODY5NTY5ODo6OTs3Njc5Ojg4ODw3NTU1Ojc3OD07Ojg4Nzo9OTk4
-ODY3OTU5Ozs3Njk5NkA5Njo0Nzo9Ozg6Qjo6NzU2MzU3Nzk3OTo5NjY1Njc5Nzk5
-ODo5Nzc4Nzg7PTk5ODg6OTQ3Ozk2NTg2ODg3NzU3Nzg4Njg1Nzg6OTk3ODc2Nzk6
-MzQ3OTk5ODQ0NzU2OTk7OjY1ODY2NDUzNTc2MTg6Ozg6OTg2Ojo6OTc5Ozc8P0NC
-QD08OTk4Njs6OTg4Ojs5ODY6Nzk5Ojs6Njc2Njg4Oj85OTg5PDg5Nzk3Nzo6ODg2
-Nzo/Njc3OTg5Njc1NTg2NzY4ODg4Ozo5NzU2OTUzNDk4NjY3OTo5OTY0NzY1NTg1
-OTw6OzY2Ojk5OTk7ODo8Ojo5Nzc4Nzg7Ozs5PTw4OTo2ODw6OTo5QD47Ojs8Pjk5
-Ojs6PD05Nzc2Nzw7Nzs9ODk5PTw7Ojc7Ozw/Ozs4ODY3ODg4Njc6Pjs9Pj4/PDk4
-ODo4OTw9OTg3NTQ1OD89PD9COjs9Nzg7Ojk4ODg8PDs7Ojk6Ojo7Ojo8PTk6Pj89
-Qjk9P0A/PTg7OUVCOTc5OTw6ODk8PDs4PTs6Ojk7OTg2PDw7OTk8Pzk9PEA8Ozs8
-ODk5PTo8Ozw8QD9QeXRPb6iypY9mREE7OTo7OTk9PEE9QFKov8COS0I/QD9CQkJE
-R0d/koZJQkI/Pj47RFxgREE9QUE+P0JEOjs8OTg6PD47PEA+PD88Pjw5PD0/Ozs9
-QDw8PDw5PDw8QEA+QEE/QEE9PTw5PTs/PUBEPjw+Qj1AQ0VBRUE9P0FIQj8/P0FB
-PD5AQD5BQj46PkFDQEdERkdER0pPTktKS0RBRkxKSURJSk9NTUxGRkpKTk5OTEpM
-UlFOTEdLTExOT1VJR0hMVVNMTFVOUk5UTktPUVlWVVdYUU5UVVtTVVJTU1hWV1dV
-YGBST1lRUU5ZW2BbZl9XSkyHsMjW3uHk5ufp6uzsWVteZmFbXVlYUk9UWVtYV1pS
-VlNWVlVTVlVQUVJNWU1PTU5LSUxOSVBNTEhJSUZNSklFSkhJRkhLT1FNQ0dMSElM
-SENGSEVGRUZCPj5GQ0JESEVFTU5EPEFFR0VHR0dHSUI8QEJCPUFDQ0E7PDo/P0NA
-Pj5DQj5DQTs6Ozw7PTk2Njc6Ojw9Ojc3PTs+QkM9Pj47PDk2ODk7Pj83NzxBPT49
-PEBDQzs8PEFBQUNDQkA6PDw+QD48OT1ARUI/PDw+PTw8QEM9REFEPTtBQjw9Pzw+
-Oz9BPT4+Pj0/QD4/QEFBQDo8PUA9Pz5ARD09PD09QTs+Pjs9PD1AQD8+Ozc4OTs5
-QTs+OTk5OT8/PT09Ozs6PTw6Ojs4ODpBOTk5PT4/QT88PD89Oj03OD05Oz00NjY1
-Nzs4ODk7PTs5OD89Pj07Ojk5OTo9Ozk7Ozg5Ojs5OTk4Njw+OTc9Ozs4Njg3Nzc6
-PDs6PD45ODU4Njk4ODc4Nzk7PDs6Oj46PDo6Ojs7Ojs+PTk+Ozo2ODk5PTo9PTo9
-Ozo6PDw7Ojg0NzY5OTg3ODk6Ozo4ODw5OTU3Nzk7ODk6ODg4Ojo/PDk6ODs1NTc4
-NjY2NTk6OTw4Ozo4Ojk6Ozg5Nz05Ojk3Ozs3Mzg4NzQ4PD08PD07OTo6Ozg5Nzc4
-Oz46OTU3Njc2NDY3Njg6Ojc1ODo5Ojg3ODs7ODk3ODs9PDg3Ozw8Nzw8PDs3Nzc4
-OTo2Nzc2Njg4OTk2Ojg1Nzc1OTU1NTU6Njc7PTw5Njg3ODU2NTc5OTg6Nzc4Njc2
-NDU1NjU8Ojk4Ojc6Oj06Oz46OTs9PT47Ojc6Ojg4NjQzMjk4OTo6OTw5Ojc1NjY3
-Njo6ODg1OT87Nzg9OTk3ODs5Njc5OTg3OTg5PDc5PDs4ODQ0NDU5Njc2NDU3Nzk2
-ODo7OjU3Nzg4ODg5Ojs6Nzk6Nzo5Nzo3Nz04NDY4Nzc1MzY4OTk1Ozw5PTs6ODs6
-PD8/OT0+OTc1Njc5ODY8Ojs5OjY4Ojs6Ojw6OTo3OTs3NzY5Ojg5OTg5PDs5NDM4
-OTo6PDo2ODc3ODo9OTg5Ojw+Pjw5Nzc3Njk7OTY6OjU1NTY2PkI9O0E/QD09OUI6
-OTo5OTw7Pzw5ODU5OTg3Ozk5Nzk8Ojk8OTs/Pjs6OTc3OEI8Ozo7Nzs5NTg7PD09
-OTo8Oz48PDo5OTw8PT08PDk7PT48PTk6PDw9PkE9Oz4+QV+DX0JQg5iEYUI8Pz46
-OTs2OTxBPEFWb6y8vbFsQz89PT9BQkY/RXGThE1FRD47Ojc/Wl8/PTs5Ozw9QD0+
-PDo4Oj07OkA+PD48PT06PT09PT09QD48Ozw8OT9APj08QEFCPUFCOj0+P0I9PT5E
-Qj49O0BBPj08PD49PT08QEFEQUFBQj4+PkQ+PEA9OEA/QkZFQkJCQkNJSUpGS0tL
-S0tISUdDSE1QUE9OUUtCRElKSUtOTVRTUU9PUExRUFRST01MUFNPTVFST0tHTVZV
-T1FYVlFOVVpTT1dVU1lUVldTWl5ZVFhWVElSVFNZWlpdW1dbT09GT4ixydbd4ePm
-6unp6+xUVlpcXl9UU1VUVlNWWl5iWVdaW1ZXUE9VUVFPS0pQT1JTV1dNS01MRkNE
-SklHSEhPSUtLS0xHRkhMTk1GSE1SS0pGTEM/RUhHRkJEPkNESEZAQ0BJSEdGSEpF
-QkVESEZIREA/Oz1BQT49Pj0+QDw8SEY+OjxIQUA8O0M6Ozo5ODg5Njg6PDs8Ojk9
-PUA/Oj07PTg8Ozk7PT9AOzU1Nzs/Pj4/QUFDQD9CREVAPT48Pj86PDg7Pj8+Pz1D
-Q0Q/Pjo8Oz0/Qz9BQj5APj1APEA9QT1APD8/Pj09PUBCQ0I9PUBCOzs/QTk8PkI/
-PD46Ojs8PkA8PDw8PUA9PkA6OTs4OT06Oj06PTg8Pjw9PD0/PDs8PDs5Oj5CPkA+
-Ojw7Pj1BPDs+QUBDOzlBPjo4OTo5Pzk8Ozs8Oj4+PkA9ODk6Ozo7PDo4OTs9PTo2
-Ojo2NTw8PDk4ODo6Nzo7OTo3Njs7ODs5NTo7OTo+PTs9Ozc3ODw8Pj0+PDk3Ozg4
-Njc4ODg1OTo5Ojk6OT44Oj1DQz46PDk9PD4+Pjs5OTk7NjQ3ODo6Ozo3ODk7QDk3
-OTY6OTc6Pjo8Ozg3PTk4ODc4OTg4OTY5Nzo7Nzo9Ojg5OjY2NzU3Nzo4Ojw7ODk8
-PDU3ODY5Ojk6Ojg6Nzo3NzY4Njg7PDg3Njo+OTo6ODo2OjQ0Nzc3Nzg3NTk7ODg5
-Njc5OTg2NTg8Ozo3Ojo6Njs9OTk4Ojk1ODg2Nzc3Njo4Nzg4Ojc2ODk1NTc4OTg7
-OTk5Ozk9PTk4NTU0NjU3NzU2NTc3Ozg1ODc4ODk8OTs6Ojo7Oz49PDY7PkBDPjo7
-ODU4OTs3OTo0NDg6Ozo9ODg7Ozs6Oz06Ojk4NzU3Ozs5OTo9PTk3OTo4PDY6Ozs8
-Ojc2Nzg2PTk3ODs6Ozk4Njc6Ojk7OTw4NjU2Nzc/Ojc1OTk5Nzc6Ozw5NzY5NjY8
-Ojk3MzI3ODU2Ojk5NTg2Oj08Ojo2NzU4PDs4ODg2ODc2NzU8PTw7ODY0NTc4OTw6
-ODc3Mzg2Njk5Ojk4Pjo8Pz8+OTk2ODQ2OTg7PTk7OTs5OkE9Ozg+PDw7Oz87Ozk3
-OTk4Ozs7Ojo2NTU6PTo7NzQ4ODY6Njc5Ojo7ODo7Oz47Nzc5Njc6Ozo4OTY3NzY5
-OTo7Ojg1ODc4OTc5Nzc2ODo7ODk2OTs6Ozw6Ojg7PDs7PDk9OTs4Ozk4Nzg4NTs8
-OT4+QD4+QEJHZnVSPzxLV087PTs7Ojw5Nzo6OzxDVnN3ka+8qG9DPEE6Ojg3QUJD
-cqSPVkQ+PTg8PD9ZVEE+PT89PT0+QUI9Pjw7ODw7Ojw7PUA9PT46PkA+PT05Oj9C
-QEE/Ozk9Pzw7O0BFPTpAPTo9Pz9CQkNCOTtCQT1EQ0A8Q0NAQERDQUJDQD47PEFH
-RUNBRD1BR0dFRUdIREFDQEFDQ0RFSU5RUk1KSExOTklOR0pLSkVJSUdMUUxOUFBQ
-UFFIUFFPSE1RUFBSUlBTU09PSUZLUVBMT1ZTU1xXWlZOVFVVUlJVUk9SUl1ZXltQ
-VVVWVVlbXl9UT1VbUk1SgrLH1t3h5efo6urs7FVaXVViYFxSW1taVFpdXV1ZWlxX
-V1NTTk9PTlJWU05NTVVSUUhES0lIR0VPSUdISE1KR0NLT1dIRUVGS0tFSE9NRUZI
-Sk1DQ0JHSUlKSUxLTEhDRUlNTE1GRURCR0VJRkFAREA+PDpFQ0JDPj1CQkE8Pjw7
-Ozk+Pjw3Ozs8OTw6Nzc5Ojo9QT1ART88PEA+QD5APjs8Ojk9PTs5ODo5PTs6OTw9
-PkRFREA+RUNEQkE9Pj09Oj8+QT9CR0A7QUBCPjs9Pj5CRT5BREVBPj1CPz88PjxB
-QUREQ0E/PkJBQkBBOTc7Pz8+Pz8+PkBCOj4/Pj09QD9APTpAPT04Ozs7PD4+O0A6
-PDw5Q0I9Ozs7PTo+Pj0+Ozo6Q0RCQTs8Oz49OkA/ODk5OD5BPTs8ODg4PD5CQDxA
-Oz09Ozw9PD07OTo6ODc4OTc4Ojg5ODs7NzI5PT89Ozs2PDw+Ojs6OzY5Ozw4Nzc4
-NTk5ODo5PEA8Ojs6Ozo5Pzw5ODk2NzM2OTk2Njg8Nzg3NzU2PDs+PD87Pj03Oz8+
-Pj0+Pzg3OTY2Nzg6Ojg2ODk2OERCOjk5ODc5ODw8Pjk5OTs5Njo5MzI4Ozs1NTg+
-Ojk4ODs7PDw5Nzs5ODY1Ojo2OTo5ODk6Ojk5ODY5Nzk3OTg7OTo2Njc+ODg6Ojg5
-ODo+Pjs6Nzs3Nzc5Njk3OzYzNDY5OTo3ODo3NTg3NzY4NTc2NTc2Nzo8OTw4Ozo3
-MzY3NTY4NTczNTU2ODk6Nzc1Ojg4NTc2MjM1Nzg2Nzk3NTY0NDY2Njk5ODk4Nzs9
-OTo5ODg1Njc4Njg5ODo7PEJBQ0A7PD46ODY2PTw8NzU2ODM0Njk4Ozs8Ojc6Ozs7
-ODs6OTk2ODk3OjhAOzg1Nzk4Njk7Ojo6OTc2NTQ1OTw6ODk3ODY3ODw9Nzc3NDg2
-NzQ3NDk2MzY4NjY2NDk5OTw8Ojk4ODo4Ojo3PDg3OjY3OTg2OTo3OTs9PD47Njo7
-PDo5Nzk3Nz0+Oj48Ojg5OTg3ODc6OTg5NzQ6NzY2Njs8Ojo6ODk7PD47OTs+PTk2
-Nzc5Ozg2Njg7PD49OT4+Ojk5Ojk4OTg8OTk5ODY6ODg4ODw7Ojk4ODc0Nzg1Mzk4
-Ozg5OTk5PTtAOTg4OTk7Ojw3NjU+Nzk7PTw6Njk9OTk7Ozs7ODk3OTo6Ozo5OTg7
-ODc6PTk7OTs6Ojg4ODc6ODQ1Ojg5Ozs8Oj09Pj48P2B8VkI7Pj1APzw+QEA9PTo6
-QT9ASF9zZlZGWoOgfUVAPT46PD46QUZdhX9pRTs9Qz4+RWRjQjs8Ozo/QT9CRkJB
-RUBGREY/P0Q/QkI/QUA8Njw4OTg6Oz9AQkI9Nj4+PDw+PT8+Pj88QEA9PT1ARkRD
-QURCPkBEPkFDQT1DPkFEQ0E+PT9DQj8/QkNEQj9CRUNBSUdIQ0NEQ0RFSk1PS05K
-R0lOTk9OSUxPSkhMTk9IREpOTE5OTEhNTkpTT01LS1JPU1JUTkxPUU1PU1FTT1BR
-U1FPVlNQUUxPW1pWVFBZVVNMUFFWWFZXXFlYX2NgSUpUVl5OREqBtcfV3eHl5ujp
-QkZGRENBRT09PD1BQUBAQERBQEA+PD9AR0VFRUNDPz9DRkFEQ0ZCQkBBOzlDRkE+
-PT1BRkFCQ0NCQ0FFQkhHRUZEQERDQTs5PkRFQ0A+Pj9CS0hHSUxISkZIS0hHRENB
-RkRBP0ZCQUA/PDw/PEJCQ0ZAQkZBREVFRkdDSmdZTUtJR0RERUVFRUZDQm6PiHuR
-n5ljSUdHSklDQ0REQ0ZFQ0ZAPkJFSElHREZHRklHSExLRkVHSEZDQD9DRUVGRUlG
-RkRCQDxBPUFGSExKPjxBTEVFR0RHRmCMm6OCU1FMRkRFR0lMSUI8ODc+QUVIRENG
-SEVCRkRERUJCQD5APz1DPT9BPDpDRENEQz5EQkNEQkNIS0ZGRElJSkpLUVVLR0RL
-Rk1OS0pKS0tGSFBNTFFPUlFUW1NRUVBRUE1VVVRWVVJVVE9LT0xSUVRWS01SUlJR
-UExYVFBaV1dXVVhZWVlSWFpUWFJaV1laV1hVV1tST1ldU1BTVlxXWlVdZ15cWFxf
-W15aVFhdXF9cXGJgYWFeV1hXV1dYVlNYVVhYWV9cZF5dW15jZmhjYWFdXFxNRX7A
-0dng5Obo6evs7e1aVmRdXWlsZmNiWlhZWGBcamdeVllcWFhaWFRWYGFcYmReW1pR
-UlhcW1hYWF1qW1BNV1RUVVRTXV5aVFRRUUxPTlFTWVtSS01NTkpLUk9OT0VISEZL
-UU9MTElGRkVHSUlJTk9QTkhSUE5PT0dJTElNRkNGSEg/QkdHSkhJSktHTU5OS0pJ
-REZGR0ZDRkdGS0dHSEhJRkJGQkVEQ0VJRkRFS0Q9QkdHRkFEQkNGPT5CQUQ/QT9A
-REZDRkNDRkNCQ0dJSUdHQUJBSkVCR0ZEQUNES0xOS0lFSUVHRUhJRkdHSUpGRUZG
-R0VJQ0lNSk1KSktGQ0NEQ0RHRkRHTFFKTEVDQ0FDR0Q/QUVDQEVGTkpERlBMRUZE
-SExQTUhKTFBOSERFSUZFRkVHRkdOT1FMR0ZERUVJQkNJRkJHQ0JESkZAP0VARkVF
-SEhHRERGSUpKQkNDR0VERkhGQjxDQUE/RENISUhDREZKRUg/PEFER0FEQ0E/ODs9
-RUdFSURJRUREQEA9P0ZOSktGR0xMTUpHPkZKRUZFRD88Q0VGRURISUNHRUdDRURB
-P0NFQ0E/QkRBQkNAQj5DRERDQkNDPjxCQkE/REhCQkxJRUJGRUJBPzs+RUNDRURG
-RERGRD09QEBBRD9BRkNDQT0/REE+PkBER0NCQ0NDQkNJR0E+Qj5IQ0FAQkQ+P0NF
-Q0FCQT1EREdFQz4/P0JBPj08Nzo+Oz5APDk9RkQ8Ojk4Ozs9RkNDQUZDREZCRUNE
-Pz9DRUVBQkRFPDs6Pj4+QEVBQkdHSkc/QEE+QkdHPz08REdEPj1CRD1BPz0+PkVE
-RU1ERERCQT1CREhFR0pERUZIRkZJQkRFRkZKSktHREVERENDQEJHQUhMQkJEQ0hG
-RktLRkpGRkE+QUVHRkZFRUZFQkI8PUVGR0dMRkNBPz5APTxBQ0JEQkNDQj4+QEJB
-QkA+Oj1BO0E+QkRCRklDR0hBP0NBQ0RGQkQ+Q0ZESkdCSkM/O0RDQkZBQUNCPD89
-QkZEREJDQ0BCR0VDP0RHREE8P0ZCPj4+P0BFQj1CRT5ERkZEQENBRklGSEFDPD4/
-P0I/QkNFRUNGRUdOTEZISUhKSEZESUhKS0pBPUBDSEpDPj9FQEZCSEhMRkZJREJF
-SUtab1xKSkdFQkNISUpIQUNIZqatr7GefVFKSkxLRkJGSkdJTElFQUI9QUVETEdJ
-R0ZHRUhLR0VERklJSEhFQkNDRkxHREdBRUE9PkJFSEhHR05GQkRFSUhJTUlJTW6U
-lnBNSklHSEdKSEdGQjs3OzlDREBBQEJCR0hKSUdHP0VCQT1CR0U8PT1BRUY/Q0M+
-RUVKSkRKRUhJSUZHRUZJSEhPSUtOR0dQTklITU1ISElJTkpJRU1OTEpRUFNWTlJb
-WFJOUlFTUVRUUU5TUVNWUVNZWFZYUlVUWFVTVFFVVVZTU1NXVktTWFRNVVZVWllb
-WlVYWlhXXFlaV1dZYVxgYWBjWldYYF5ZXFdeXF9ZV19cXltkXVtWV15iVVliXFRX
-WFtcWlpXV2NdYmJjYV9caWJeZE9MisLQ2eDk5unr6+zt7ltbW11jWmNkYGBiXF1W
-U19fWl9ZWmBcVlVYXFpaYWVeWVtZWVRVVV9bVVVYXFNUW1FaUldfWlZSUFFbVU5Y
-V1NVUE9RV05LT09OVFhRTUtGR0hPTk5GSkpKSUlDRUNDSEhIRElKTE5PTk5IREhH
-TUpKSkxGQ0FHSUJBRUVFR0hJT0tMSkxNRkNBSEpEQEpHR0ZKRkVHSkVDPkFBR0hH
-RkVGQkFEQz5DRUNDQzxDRT5CREFATEJCQkZIS0FCRUVDQkNKSUxJRkBDRkVCQUE8
-Q0dNUE1OUU9PTkdGREJER0hJR0ZLR0VHREdJQkVIR0ZFR0hBQ0JESUpISUZIR0lG
-R0VCQ0lKSUdJSEFES0pHSkdIS0xHSkRHS0dGSkVDRkpNS0VLSUNBRUlMS0hJRUZE
-Q0NGRkRHRUI8PTo+Qz1DQ0JHUkZERkpHRkRJREVHRkdGREVFRkpKTEZESD5BRT5B
-Q0BJREFAQ0VOSkFEQUJCQUQ8OkE7Oj9DRENESEdGQENGQEJBRElJS0xKUFJQSkNM
-TEtJQj5CRURDR0ZCQ0ZIR0pNSEhNSEZFSUdHRj5BR0BBREZBQEFARkpGRUJGQUNG
-TEY+PT9GS0xJRUZIQkFBRD08Q0ZEQkBEQ0JAQD9ARUE9T0lFRkRDQz48NztAREND
-RURAQkNCPj5CPzw+QklIREdEQT5JSEFAPUI/QT9ERkdDQj9CPkI+QTk3PD1AQj83
-OUI9QT1EQTw8PUZGQkJAP0NDREA+REFARkRDRENAQ0E/P0BAQkFDQkNFSElKSD4/
-QklCREVGRkJCSkhEQEVGQkFFQ0JFQURCQkhIRkM9OEJERkFEQkVJRkhJSUVGQENG
-Q0JERkdIRURAPkVDRUhGR0c+QEFCQkBARUhKSUhGR0VIRkRBRUdDQkJDQURCQ0FC
-QUJBOD4+P0FFRUNCQ0BGQ0M8Pz09O0NBQkQ+Pj5BRkBAPkVDREZFSD4/PUE+PUBB
-REFDSERESUdCQEU/QUBBQ0BAQ0NAQUJARUpFOz9BPEhKREFDTEk/QklHQkNCQEE9
-QERDQj88QD9CQz8+Q0RFQEJBPz9AQEZDRkZCPUFHQkFERElKREVGREdGR0VIRUFE
-QDtAOz1GRUFEQURCRUFARURFR0pQSENKVXF5UU5GSExKSUxUT0pEREpXlLe5rIlX
-TktESVBOR0tJSUhLR0hJRUFCRURHS0FCRkdFR0hMT01JRUVCQ0hIR0xJREpHREJD
-QT06O0NJRENKSEdDS01IS0hJSEtMT1RTRk5MSURJTEpHRUNAPTs6REhHRD9DR0hM
-TEhDQkVHRENEQUNDOz5BPT9EQ0RCQkVHQz9EREdKSENFRUZMTEVLSEpKSUdKRkhJ
-TElMSExOTlZPT0xPTlFSUVJSVFRXVlNOVk1STVNYVVJQS05aVFhaUlBVTk5SSktX
-VVJaXFNTUlBSU1FPTVNUW15eWFVWV1RWV1laW1peVVdYWltfYV9hYmBaW2RgYFlX
-WllcU1pVY1xeWFxeW1lbZ2BeWV5UU1paYGNcXFpZW2JcYGFjbGVjZ2hhVlSSwtHZ
-4OPm6err7O7tXGZWaWBkZGRiZGVfXl5gYWReV1lbX1RXWFxbVl9fXVpcWmBTWVtc
-W19TWl1dVlJOVVpcWlhXUlZVUk5QUU1YV1NXV1RQTlJPUFNTVFJRSERCRUpMP0hG
-RUZKREBFRERJSEdDTkxNUEpMTkdLUFFKTExIRUJFRklGTElIRkRIRUpKSERIREND
-QUdKT0NJRk5KRUdHRklGP0ZFSkNFREU7QENGRkZGS0RFQkZBQUlCQEFFREtNR0VF
-QkBGS0xGSklBREJDRElLRUZIQ0NETE9IRE1NTU1MTExNT0pFRUxJUE1OSkpNRkdD
-RENJS0lKSUpGQzs6PUpNSENDRkhKUUhGREVISkdGSk8/QUNHSUhMS05JREhFQ0hF
-QkZGQUJGSk1KSURDRERFRkZMSkdHRUNCQUJCQkRDQUhCOj1ER0RFR0NIQUpIRUJF
-TUlJRURHSEVCQ0JCSElOS0FESkQ+QEhISUdJQz5DQkVERkRNTENCQkNAQ0NKR0RD
-RkhDSEE9O0E9P0NLRktMTUhQTUhNUUxNSkpIRD5DSklKR0JCQURDQkpKQkRIRkFB
-SEZDRkdFRUBBQ0RCQ0NFSElLSElERkxKQT4+SEtJR0ZEQ0dDRENESkREPj06P0BG
-REBCPkNBQkFJRUVFQkNCREM4Oz88Q0VAPkFCRUZMPzg8OjpEREVBQkNCQ0NBQT1C
-REZJQUVFRUVDR0dHRkFCPjs9Qjo6Pjs9REJGQkJGRUM6QENGRkVFRkdCPUFAQEFB
-RT5BP0A+QEFARUFEPjw9REZIR0hNRUI+R0M9PkM+RERJR0RLRkFCQ0A/RkA7PUBC
-RkdHQ0JEQUFCP0ZKRUhHR0tMTFBMR0I8RElLREJCPENKRUhISEZJR0VBP0RGQ0VF
-RkZJREBJSUVEQkFCQ0VBPkBERkNDREJCRkI8QEJEREBDS0I/RUtFRj8+QkA+Pj87
-PT4+QkhESUhHRD1HRkZER0VBPD08PUFAOkFHSUpGPjpBPj5AREdERD1BQkVDPUFG
-SUVCQUFIS0xOSEdCQURFRkdAQj9DQUVDRERFP0NCRUJCRkdGRUNESURCQkRERkVD
-R0RBQ0JERERERkNFREhDQkFGQkRISEZEQUJBRkZCREFAPUNDRUFGRkhKSUlNS1Fm
-fWpNS0lJSk1STk5PR0NFUmulrbCOXk9OTkNFR0dJSEhLRkRGR0ZCREFHREFFRkZF
-RkVESEhGRkRDSEZESEtJTEpOSkhGREVIQzo9Q0ZIS0lIRkU+SElITE1NSkpNS0hJ
-SEpJRkJFTUhCPkQ9Qj5BQ0ZJSUdIREhJREVHRUFAREA8PzxART8/QkNFRENGSUVC
-PjpDQUNARUlFSEhERkpHRkVHRUdESVFSSUhNSUhQUlFPTk1FRUtNUlhaVVdUU1JN
-V1ZYUlNQTkxTTVNTT1RTV1FPU1BXT1BSUVFVV1FLUUpOUFFOWV1eX11XWVhZUVZY
-WFpZV1paXllXVl9ZWFpcYGVjY2FhW1tZWFdSXVhhZGZmYF9bW1pbXl5YVltbWltf
-XV1aXWRdWl5ZXF1RX1lWW2BXUpDC0drg5Obo6uzt7e5uXVtmbW1jZGRuaF9ZW19f
-W2BiZWBaW1haWVxWUllYUFZXV1dhY1lZXFZeW1pWTUlVW1VXWlxZWU5OTk9RWVZY
-WlpYVVNOWFRQUEtJSU1SWFhNT1FNS0RDQ0dEPkBISkhISE9RTk9SVE1MSkZMQ0dK
-SkdHQ0hJTUpFRkpFQ0RERktOR0FFRUxKRUtPSUVLSkhJSEJDRkRDQkZBRUM9REk9
-QkdJSUxISExGREVFQ0FHSEVGSUZGQ0JHRkdFQURGS05IOzxDT0xNR0tFQ0FHS0VL
-RkRDTEdHSE1PTUtKSUhMSkxOREtLRktLTE1IR0xJR0lKQzpDRkpGRUJFQkZGSlBN
-TEVJSElESUdGRERLTU9PTUtGQ0RDTEpLRUZJS0hLRUhHRUhDSURGRktLSklASklJ
-SEpEPEJGSEpEPj5ERUVKRUZJQkNCQ0RCQ0ZGSEhGR0pBQUFDR0dHP0ZEQUVNRkM+
-SkVCR0lGSEhKSERAQUM/QUJBQktIR0lFREtHRz5CQUJGSElFTkxQTk9QTU5QSURC
-RUNISUZHQ0JEQkREPkFGR0tISUdDPDxDQ0dHP0NCQEBCQkREQ0BBRkhIRUNAQ0NG
-SEdGRENHSUpFRURFREJGQUNERkM9PkFBQkFDQ0NDR0lKRkREQURHQ0NBPj1DR0RC
-Q0NHRERARkFAR0ZGRkI9PkJFQkNGQT0+Qj9ER0JAQkFBQUJBRUVBPz5BRENDQkA+
-PkdGQkVDPkU/Q0M/QD4+QUJAQUNDSURDP0JCOz5BQUBBQUJCPUFAQEZEQ0lBPUI/
-Q0NEQ0JCREJFREZERUI/PkVFRUI7Pz1JSktGR0pIR0RBQkRKR0hIS05WTUlMRkZH
-RkZJSURGQ0VHS0tLSUVISktHSklDRUhIR0pEQUdGSEdIREZFQkRIQUFFQkRCQT9B
-Q0ZCP0FARERCRUdKRkRER0ZFQ0A/QUE9PDxFREVERUdIRERBRUxIQ0I8Oz0/Qz9A
-P0hKR0VEQD9DQEZAQ0U+P0BKSExHRURDRUZCRUlGRkVGREVGTUlFQUNFRT1EQUVH
-REBDPkVHR0BCRUZFQ0ZHSEpCP0FETENBQkI+P0ZFSEJDQ0VISEM+Oz8+QkRERkRF
-R0ZDSEZESkRBQ0BDR0tMSUhGQkdTa456VUhGTEhLS0VIRkdJSlNpjrOpmGROTUtO
-Tk5IREZJS0dEREdFREZFTUhEREVHSURHSUdISEdHSk1OT0tIS05MTk1FR0ZKU09M
-QkVESUxLRz9AQkdKSkhFSEpJSEBEQkdFSUlJRUFDQz0+QkM9QUtNRUpRTEhJSUdB
-REJCQ0hEQkM/QT08QUFCRD49RUpGPD9DRT5FRkdCQktIR0VGS0hCQUVESEVHTEhJ
-SkpGS0tUU0xHRk5OTEpOVFRQTFhRU1ZXV11UTk9LVVFRVFlTU1hXVE1SVlJMTlBV
-U05VU1JQT05NUlZXXVtXVFlZWVxbVVVZW1ddXVVQWVlPS09WYF1bXVlYWFRZYV9d
-VmBbX2FjWmBfX2ZjXV1jY11iXF5bXl5kX2ZcYWFfXlheXlVUXllhUUpQjMPS2uDj
-5ujq7Ovt7mlkZWllb2hgXmJnY1lXWFtcV1pfYFxZXl9bWlxZWlNQVFVXWlxiYGBf
-U1daVVhRXFdhX1RZVFNUWFVTVFJTVFVOVFRYWVVZWVJSUU1LSlJRUlJTT09OSEdI
-RD47PkNKRkFFSkdKVExMSElERENERUVEQEFHRklLSUhLSEBCQ0RIS01KSERDRERE
-SklNUEtIRjhBR0ZFRkNHTkxDQEBFRUdERUhKQ0dGRj9ARUFCSERHQUJARkQ/Q0dL
-RkZIQ0tMTkVAQk1LR0hCQURISkNIR0ZBRUdJSUtMSE1OTUpKSUxISkVITUdLTExF
-Q0dKS0tLSUJISExHQkVCQENDRUVFRUdFSkZHSUlERklHR0ZHSFBRTUpKTEdIS1BP
-TklLRkRERUZGREVLS0RGSEhHRkxIR09ORkRCQkNHSkZMREBBTUpHQUlNSUlFQEVK
-R0hHRkVGSUJDP0ZHSEpLTEhFRENIP0NBPz9ISUhGSUlDRUhMR0RDR0pCRkpJS0lD
-SUtGSko/PkhIRkRITktMTEtMTU5JS0ZERkNHSUQ8QUZIQ0ZEQkNIQEREPkNKTEZI
-RUNDQ0FCQEJJRkY+QT46REtJQz1BSUpIR0hGSElHRUZHRkNFRUVHR0ZDQkJAQUZC
-RUlDQkNCRkNCQUJBQUFCRD89RkVESEdFS0ZHPz9ERERBQkRIR0ZGR0NFQjlDQz46
-QkFBPj5DQUI8PT5APD5HSURFQkJFRUI/REZEQ0VDQkFGQURHQENCRkhGR0RDQD06
-PD07RENAQUNFQzo8P0FBRERDREJFQ0BCQ0JIRkFFQkU/P0VEQz5CQkNHQkFDSEZK
-SkU/QUNHSkhHSkRBQ0hNS0xKSURDSEhJSVNXU0pJRUZFSEVGRkdGSEZERENBR0xJ
-R0dGSkRDRUdISURERkNISkdKSUVEPkFBPD4/PkRDRERESEVIQkJEREVIQUM9QUVB
-QERCQD9BRUJDRUZGSEtHQkZGQkVFREpERUJGREhEREU/QEdFQ0JISEpLSEtLSEdG
-R0JESkRCSEdDSkZBQ0dJRkZCS0pDQT08R0ZDSEhFRERIRERGRkpLRD9FSEZDSENH
-SklGSUQ/RUhBRD1CRD9AQUREREQ/QkRBQUBDSEdGSEdHSURDQkhITE5ISV5WZVtL
-SVBJRkhMS05HTk1ZdIaKnZZ7Uk5PTlFPUUlFS0hGRkdJSkxNSU5OUU1IREJDQURG
-RElER0hES01OSUhKSkhISE9IS1JLR0hKQz1HSEZIPkc+P0RJS0dJSUVGR0JCR0ZI
-SElJTEZDQT09P0FEQ0VMS0tLTlBGQkJCQ0VHRENBQ0NCRT46O0NAQUFAS0dJR0ZI
-R0ZHSEtJTktHQkVJUU1HQkNNTUlOT0tLR0tJSlNQUVRQTE9OUU5PVlVRVFZSVVJU
-WFNTVVBRUVpZT09QVlRWTlRTU0lLTVJKUEZOVk5RV1RQU1JWWFlVVWFeU1RfWlBX
-YmRbWFRSU1tgW2JcW2BbW1pWU1pXWl1gWltcW15gXGNjWV1iZGNhXWNdVlddZGBi
-YmNgY2RfYFxjYVNcV1lOSE+EwNHa4ebm6urs7e3tWmBpa2RiZmhiYGBgXF9eXl5l
-ZlpaV1ZXXFlcWVVUUFRaWmFdYWNiW2BgXVhXWFheYVNVU1VSVlhSUlZfWlZWWl5W
-UVNQYVtaWU9MTUxLSk5MUk5NTEZKR0dHQEREQj9FSklHQkFHQktISUJCRUVIRUxL
-REZFQkFBRk5HT0pNT0pHT05GQ0NCQkhJTE5TSkdHSEZKT0lLSUlKSEFEP0I9QkdF
-SERDRUdGR0VHSENFSkpBQkE/QERBSEdJQUdFQURERkRITExNSUlFRERHRUVJSUZJ
-S0dGRUdOTEtITExKSExJU1FOTU5KSFJJQ0ZES05OU0hHREJBREBERUxEQUZESURH
-R0ZHS0hFRkVHRkVHSkZIRElPS0lJSUhISUpMSUdIRkdHSktJR0dHRUtISkhHRkhG
-Rk9MR0NDSERCP0NDSUhDQkVDSEZHSktIS0pMTUZERUZFQ0JHREdLSEhJQD0+QkVJ
-Q0JGRERJSEdOSUpMRkhGQkVLQkdKSUZJRUVDRUNDRkVFQkdMSEdJS0xNTUxES0lJ
-S0dDRkNARUhKS0lFQkNIRkZAREZHSkNAR0pFQkRDRkRDRjw8REFISERJRENFREdD
-QkpPSkVFR0NESkRBR0lHRkVFQkVERkVFRkRCRkVDRUpHRz5BQkJBRUdOSUJGQ0JF
-SEM9ODw8QEVER0lFR0RGRkRCQ0I+O0FARD5BQT4+O0FCQ0VDQkFCQEJFR0VDREZF
-RENHRD9ARkFDQ0dGRD49P0JAREFCOzo7Oj1CPTw9PT07OT0/Q0VDQEZJQ0VERkdE
-SEVBQEJDSEdEPztDQkZCQENDSEhGRkJDREBNSkZITEtGR0dKRk1PS0tCQEM/REVJ
-SEpJTEpIQ0ZERENKS0VDREVHRz1BQkhJT0lMR0NIS0tJRkNGQ0pJRUJCREREQ0FE
-P0JGRkNDRUVFQkJJR0NERURFRURFRkZJSEVAPkNCQ0RFRkVGQURCREVCQURJRkZG
-RUNGQkVKSkhJRUZDSk1LSUlIQkNIS0VEQkRCRUxGSEVESUJBSERIRUZGQUQ6PUFF
-REVHRUZGS0hIQ0ZKSENFRkREP0VGSEpNSUBBQ0NCQz0/PUFDRUNBRkZCSERDRkVG
-SEhJQkFGSEdHSUVERkVIRUtMdGxjTU1LSE5JTk1NUVZVWnKNipqLf2FSTUtJTVBR
-T0pISkhLSk5NTE9PSklNR0dIREhGQkBFRkRFRkVDREhIS0lHSUpITkhNUU9MRUlJ
-RUlJTEZLUExKSUZKR0lISExNTkZFRUZMSkhIR0A+Oz0/R0hESkpMTk1VVEdFRUJC
-REY/OkE/QEJBQD06P0BHRURBQ0hIRExRR0dDQklLTktISUVJS0pOSUtLTFBLTkxM
-TUhJUExMSU9VVVJUU05YWVVPWVtYU09RUkxQTlZSYFhPVllWVVFVUlJRUlJNUlRT
-UU5JR09ST1RbWVJaU01UW1tWV1dYV1RbWl1YWFNVWl5aWl5gV1xaUVVXWmJYYGBd
-X2NhXl5XXl9baGNjWF1bX2BhYmBcYWBiZWBpaVxiXmJeXF1hYk9OVIbB0drg5efp
-6uzt7u5ZZ25nZ19daGdraGNdZV5baGZiZFtbVVpfW19eW1ZQYF9cXmJeW19dWVZc
-WVpXWVlTUE9XVFJYVVNYWVpYV01SUE5MTlJOVlNYU01QR0tLTE1HSktKSElDSUVE
-QkNCSEhFS01JTERHTkxIRUFGQ0RDRkZHRUNCRkNGRkhNSUtJS0ZPTEpIRkJBRkRF
-SEdFSk1GSEVESEVDQ0RAQ0dDREVFQ0VFQENDREtIRkdETEhLUkpFQ0M9QUZJSEZH
-SEhFSkhHQklKSUhIS0tIQkZEQUxMS0hMT0hCSUdFRkpHTElTS0xOSUpOTUdGS1ZI
-Qk1JT1FQTUhLS0Y5P0RCQkhMTU1NSkdMSkdJSkpGR0JBREhJSUxJSUhGQ0hJSUhK
-SkxMTUlFRkpHR01HRU1KTk9SSkpNSEZKSUlERkpKRUM/RUZGQkZHSkpJTUpVUUpM
-RkhJR0hIR0NLQ0VDRklOSkNDRD4+P0VHRURJSUpHSktITE1IQ0FKS0hKRkxJRUNF
-RkFGREhHR0VISkpMREdLS1JNSUVHRElKSEVIQ0JFRkdHTEdJSEhGREVGRkpGRT9G
-SEdIRkhLSUhGSkJDQEZFQ0FDRkNARUQ+RkxGSkRFSEhFREVEQUlARkZDQkpGRkFG
-RkZCRUNARD9FQkJDRk1JR0lFQT9CSURERENBPUJCPkNCP0RBQ0FER0tJST9DSkQ9
-ODxAPkFCRj5DRkRDRERDREJHSENDREVDQUVCQkJBPj5BREM/QUFEQkNDQ0I9Ozs7
-QENKR0Y+REtFQEZCRkRDSEdFSUVER0ZFREA8QUZIRkVCQTs8PERDR0tJRkVGSkdJ
-SVBKSUpJRUdHSUlGSUpLS0dCQUZJTUlKSkVLS0lKSkVEQ0RHSkhFRElLTEVDSUhJ
-SUdEQ0RGRkxDR0lIRUZKRkFBQUA+QEVIRERCSUtDRENHR0lDRUVGQkc8QUdERkZB
-QD8/O0FFSkVDR0Q/R0E9QkA9QERGS0lKSkFARkZDRkRDSUlKS0dDRktJRUNHSUdG
-SUpGSEhGSkVBPEJJRkBHQ0ZDPz89QUxFSEdKSkpLTEVDQURKS0lHRkpGRUhFRD9F
-RkJHRkBCPj06PDs/RUVGSUhKTUpDQUZCRUlKR0VHQ0JDRkVIUEdOSkhlaFNOTk5L
-Uk9LS1RrbnFsbnt+hXpzXFJMTUxLT01NRkhQTktJUE5NTEtIVE1HSktKT01ORkVM
-P0JJSkZGR0tMTE1OSkhIS01RTEhGSk1LTkxQUExJSUlLTEdGQz5CSUZHSkdHSUlJ
-SEZEQkFAPD1FRUZGS0tRTk5LRkdHQT5EQkA6OT4+PDs9Q0Q/RE1EQERISEdGRUVK
-SUFIR0lLQ0VHRUdRT0lPSkxSTFFWT09OTlFMS05PUVBWVVhYXVtXVVBRVVVWV1ZR
-W1ZSVFtVUlBMTlNYVVNNTE1WVldWVllXUlFUUU1PWVRRUFFPVF5WW1VZXmBfWFdY
-UlRbXV5gX11XVVdUW1tZVFhYW1xbXF5iYl9cV2BgYFpcWl5ZXVlgZGNhXF1nZGpo
-amBaWV1cVlpiWWBbU1ZYgsPR2uHk5+nr7ezt7lxjYWJbWF1hZ2ZkW11obF1cX2Bg
-YWFhY2NeWl1jXVhXWmBfY1JQVllaYmFSWldhX1ZWVVVRUltYTk9UU1VXXlFSV1xW
-T0pMTFJaTVBQTk1JS0pGQ0hGRUdIQUNCRERFRERDRUVHRUlIRkpLRkQ+Q0RCQENC
-Q0NFSkhMTUxJS1VRTUxHRkRFSkhJRkZMTEtJRUpMRUVEQEBEQEBGSUlHRklKSTpB
-QjpDRkZCQUpKSUlLQURFQ0hFRkpJRUFETUxLQkZETU5LRUlSU05GQUZGSUtFRkhE
-TkxGR0ZERUZBREpJRkpITFBQUkpMTkZISkhLTkhLTUdGRkNGSE1NSkVLT01LS0hI
-RUdHSktFREJDRkdER0lIQ0hISUtKRUhHRUtHR0dLTEtJS0pFSUlFSkxJSk1KSkRF
-RUZDREZJR0RGSkVHTUVMTU9JR0ZES0ZFP0FFQj5FRkdGREVFTEpISEI+OjxBP0VJ
-SEVCRUVDS0RDR0pGQ0ZNSk1MSUtHQ0U/QUVKR0ZJRkZLTUZGUEpISkk/QkRCQz9H
-RkZERUVBREZJS0RGTEhITk1QUEdGR0tJRkpKSUpIRUZHRkVJR0lFQkpFSUZFQ0JH
-R01HREBKRkdCQkhIR0VGRUNEQkFGRkZCRUhHRkA9SUVAREhCRURER0ZGRkREQUJB
-PT1CQkVFREZERUZFR0dFREZJQ0RBQjo8PUFEQkJEQUNISUVGRklGQT8+Q0ZHRkZD
-QUNCQ0NEQkJFQENAQkFBPkNHP0JEQ0BFQ0VCRUhAR05JREdLR0hIRkhIR0hKSUM9
-PDw9QUNGRD46QT4+PkNDRkVFRERDRkhLQkdJSURKSUVFS0dIS0pLSUNESEhMTUtH
-QENIR0pFRUVGTEVGS0dIR0pHS0BAQ0pISEhFQktORERISUVISkZBREJHQUVISEVI
-RUVGSkRDRkZIRkNERENKSUFEQ0RDRUE+SkE/RUJFSEhDREVKSU5GQkZESlJOTUxE
-Sk1LR0pJRURHTUlNSU1JS0lHQEFITEdFSktISEpISkZDQUZGRUpNS0dFRUZCREZK
-SUxMTEtDQkJBSEtPTEtHQkROUEdGR0ZBPz1CQ0NAQENCQUA8P0RGRUVER0hHRkVL
-S0lDRkdHR0ZIRkdKTFBLS09VTFFRUUtKSkxfk3tpXWZvZGdke21PUU5NTUtMSUhI
-S0xRUExJT1NMSFBOR0VJSk1PTExOSEdPSUVKUUpKSEpKS01ESUhJSU9MSUtLSlFQ
-SktOT05KSk1OR0lKRkdMSkhHR0NFRUdHRUdFQT1CQUZERUdJRkhKSUdGQT5BPTw8
-Ozw8QURCPUNCRUJCRkNFRkVFR0hGRUVJSkZFSExOTUpOTUtMTFBPUEpMTlFNVFBO
-U1NLTlNRVFVYVFlYVlpQTlVXV1VTUFRXWltZV1BRUlJfWFVUUlhVVFRaVVRXW1dW
-V1NSUUtTVVNRTlJWVVdaYmJfWF1fW2BhZ2RhY1xaX2FeWVhhX1xWWFhgZGJiYmJg
-XmNgWWNmWVVXWlxkXGFjYF1dYGNdXl1hXVhZXl1nY2JaXWFbUlOFw9HZ4OTm6evs
-7O3taF5iWFFXYWFoZWddUFdhZ21uamVhYFpjXmFgYF9ZU1dWW2JbVlRQWldXWldS
-UGNfXWRVVlZaU1RgU1VfVVxeWFtcXVpWUVFPTVBQUVBMSkpJSEpFRkhFQUk9RUZM
-RUVCRUNKSEZJREpITElHSkJFQENAQkdCSEZGS0tHQ0FHTU1LTk5MR0lIUFFOSlJN
-UU5GQkxKSkhAQUhGRExJQkNNSUNDQzw+REVGRUpIQkZEREVGQUVDRUZBQkNDRUdJ
-S1JKSEtJSUhRT09MTEtKRkBFSklESE5OS0lFRkVDQkVGQkZLS1BOUlBPTEdHTVBL
-TU9MSUhIRUVARk1PTEtOTU1MR0xNS0ZISEpMQkFDS0xIRERHREZBQ0hJRklLR01F
-Rk1MS0hKTExMT0tGS09HRENGSEhOSEVER0ZIREZLT0hHRkREQERHTk9KTEdDRkhK
-RkpKRUNITUhHRUdFSUdEQ0VAQ0A+Q0NGRENEQ0dOS0VPTkhHUUhKSkpLS0pDQj5A
-Q0NFQ0lOTkdJR0lLSlBHR0A+PkFEQkZER0hGQDtCQUNITkZFSUdKSEdITEZESkpL
-SERGSENQS0RAS0pKTUtFRUtGSkdHTkdCSEhGRUVHQ0FBSEtIRkhKRkRFSUdIREZG
-S0hMR0hJR0RFSExJRkVDRklHSENIRkdHRkNHSElGR0lGSEtGSEVIQ0FDRUVCSEtE
-REA9PkNERENJSkhFQklLSEVAQkBCRUE+RDxAQjs8PEFAQUE9PkBAQz5CRkFCRktH
-QENBQkZFR0dDQkNEQkVHQ0dHRkhHQkFAQENDREFERT4+PEJDRERERUY/QkZLUE9H
-QUBISURISElIR0VLS0pJSUdMR0hKRkZHS0dIRkVGSEdGSElGSEpFSUtFRT9BQkNJ
-R0hITEpOT0pKRkJDRUdKRkNGQkREREZFR0E9QEVIRUpGQ0RGQ0VIR0hFQ0JCSEVA
-Q0M/Q0NGSkdHSklLT0tIRElKTEpJTUxGR0hJREVHS0pFSElJTEdHSEJCREpPTEpE
-QkNFRkZGSExNS0hFQ0ZETE5IQkZHRkhHR0pHSUlMR01IR01KRkxLUk5LSEVLRUg+
-QERHRT5BREFESUpDREZBRU5HS0lERE1PTE1LR0hISUtFSktIRUVFS0RHQ0dKSU1G
-SFh5bGlkaGRcX11hV1BMSURHSktPTlFRUFFPTUtLR0ZGSUtLSUhKR0dMUE1KRUpF
-SVBOTkZIRUxPSU1KTUhESkpMUUxJSUhFRk5PS0ZKTU9KSUpQTUxGTEpJTkRFQ0NB
-RUFBQkFAREVMS0lNSUdBOzw6QURHQjo9P0NEQUBDQ0BCRUA/QUJHREpGSklJQkNC
-SUZHR0dKS09QS0lKTFBTUVFLTk5MUVFUUlFOUlpVSlFVUFRWWU9QVVRXVlJWUVZe
-WlhXVVpXUVZZUVNSUFNVWFRbWV1VVldXUVZQTFVXVWFdWltcYmBiXlpSWFddYV1Y
-WFlgXltfYVlgWVxhYWFaY11dXWNeXF9bXGRbX1lcV1VWY2JgZWZeW1pYXFlbW2Jf
-YVpTWV9aYWZgY1ZZToHB0Nng5ebp6+zs7e1bWFVWW1pXZFxqXmJbUF5hY2llaWRh
-X19YYGdVT1ZdWFxQW1lYWldSWFRSU1ZgVlpkY2leW1tVVlZXVWBeXlhaVlBYWFdW
-U1xcU1FTUEZHQUVBRUlHSENDQ0VHRUpEREVJSEpJRUdORkdJRkVGRUVMSEJER0dI
-RUZGT0lBTFFMTEhNVFRUSEpQTUhNUFBQUEhDSUpJSklBQkhBRUZJRkhCQ0VGRkJG
-REdFRkY/QUlFR0pGRkhFSEZORkhEREdKSUFBREVGUU9QUElKS0VFSUlHS0pMTU9I
-TEVIS0tJTlFPV05KT0xOT1BRSUxHRkpOTktFSU9ITkRDR0pMUFhSSkhHTVBQSUhG
-R0tEQkJKRkVNTEpNR0ZDQ0tNSkZNTEhLS0lGR0hGR0VGSUFLSUlHQ0pLSFFWTk9I
-RkpKSk5PT0pGTEhCRUlPTUpITkdMRUZLT0lGRlFMS0lMUEhFSEdIRkVDPkNDR0RD
-QURFSUxNSEZMS0dFRkFGRUVHSUdER0pFSEVFR0xHSENHR0tLTEhFQkJBQUVHRkdJ
-REA8RUdDRUVHSUNAQUpLRkdHRkRDR0VHRTw8QEdGQkZHRUdKTElHSUZESkdKTklB
-Q0dGSEpIQEBGSERDQz9BRUZFSktIREdGR0tIRD8/QkRCR0dIRUVFQkZGRkZHQklF
-QUVHR0hDQUZKS0xIRkZGR0NAQD0+QUVJQz1CQkI9REpDPj1BQ0VHSEQ+P0RDQ0NA
-P0BAPjtGQEA+QUBEQUFESEJFRkRNR0hHRExKRkdAPDtAQUFBPUJFQUZFQ0RJSkxI
-SEhBQkNIREJAREVCPj89Q0hDQkdHSUdLSUxJSUlLTktKSkhLUFFPTUxNS0lLSE9I
-Q0RGSUhHSERGSUhGRkRKRkNEQz1FRklIRkdGQ0ZHS0xLRkZJSkZLUkVGQ0hFQkdE
-RkFCQkNDPT1HSkdGPkRGSEdIRUdHRkVEQkJFQkZIS05MTUlGSU5NSUlFSEdJUkdJ
-SElHQ0pMSUtOSkhMTEpFRUdETkpJSEpQS0dBSUxMUU9IRkZGREpGT0lHRUZGRERF
-RUdMTlNORkdISUxOUlJQS0tOR0lBP0ZCSEVFPjo+RUVLS0pDP0NNTVJOSkZESUhN
-VE5MSEpGSElKR0VEQ0NHRUdJTU9PTkxJTExOW11aVVlbTExLTVBNTVRMSE9MTVFN
-TExLSENGTFJOTE1OSklMS01MRkdJRUdGSU9LTUlNSUlITVBOTEdJS1FQRUdKSExI
-S0tPTU1LSElIRkpGSEpLTUpIQ0NEPzs9RUZCQ0VDQkZKSURFQ0NCQEBGQkRDQzpE
-Q0JAOzo7Oz8+QUZDRUdLRUFJTUlHQ0lDR09JSU5KSkpKSUlOUE1RUktLVFJaVFVT
-V1RPWl1aVVpWUVZTVFNTVVNRUFdfYF5cUVZTUk9QVVJYWlJPUVJUVVZVWFpTVE9L
-TkxSVltUWFteXFxiYWJcV1VZYFpbYVdVWF9bXF5dWmBeXVxeWmVoY1dQW1tdYldV
-XFtdWWBgV1xgYWNjXlddYWNfYVxgYGVmX1hZVFhbXl1gYlVQhsLR2eHk5ujr6+zs
-7VhWYFZjY1xcal9sYWdlXGRbWl1oaV9hXVtbXk9QU2BXTlJXXV5bW1dSUlRRUF5Z
-U15gWl5fW19aUFhZWmRjX19XVlBSUlVVUVFOUUlPRUVLSk5GRklEPz9CR01MSENH
-S09PTkxFTEpEREZDREJFQ0NEQ0VDRkZMSktHTlpaTUxPS09OUlVMT01MS1BYUUdM
-UVFLTkpLSEhSTEpLSlNLS0dJR0RDRUxIRkdHRkdARUZISUZDQkVEQ0REQj5ARUZG
-Qz5DREJOSkxJQkJFRkdOS0NGTElNTE5LSUlOVFNLS01UUlRST01MTVhXVk9JSUxO
-S01IS09LQ0NJSk1QT05NSUZHSkpKSUNIS0VIQ0hHT0xMU0xHRElHSElHS0hGS0tI
-REZDR0xOUUlHQkZDSFBNSURHRkNHRUlMSUdLTE5NUU9KS01JSElITU9RS0lLSU1I
-R0hJTEdGRktJSE5MTEZFQ0dKSEpHTEVEQ0pJTVFOTU5MQz9HRUNDRENKTExJQkNA
-REdHR0ZFSURDSURISklIQklIREZNSENER0NCQUhISkVDSExSTUZHSEhBRURDSEVD
-QztARktER0Y+Rk1QS0ZMTUZKR0hIRENHRkpJT1NJRUVDSEY/Oj5CREdIREhISEhF
-R0FGQ0ZHRkZJSUlMSUhHREVFQkRFRk1NQkJCQUVJSUdFRkZJRkVEQUhIQTxES0hG
-QUJKRkRBQ0FCP0RAREhHREdIR0ZIR0dLRkRGSkpHRkJDRENAPkNDSEZDSEZIR0RD
-RkdEREFBREVGQ0RDQUVMR0RISkpKRURGRkRERUhIQkZMQz9EQEBFSUtGQ0JESElL
-S0xHTVJOSElMS0xLSU1NSUlMTktNRUBBSk9JSEpJRUxQTE1PTElFSUdHR0pPSktI
-R0dDSVJPR0dGSUdERURFRktGR0RKSUVHQj87QEhAPTxAR0lMREtJRERFREhMRkNB
-RUNBRURHSkZGRUZKS0tJR0RFRk9MTUZJSUdNTE1IR05GS0xHSUdFRkpJTE5OUE1K
-R0tMSkdJUE1HS0lJS1FNTkpISEpHSkxKTUxRUU9EQkZISEtQT0pITlFLRkVHS01J
-R0M3Oj5CRklMS0dHSUtNT05KQT5ETFBQTUZFSUlKSkVBRUdDRERGSEpLT05MUlBP
-U01NWFdYV1ZWTE1PT0hUUk5PUE5SVkxLUE5FREZLTk1GTktHSEhOSkZJRkhNTVFO
-S0tGSk5PUUtNUFBNTk5MTVJRTk5MTExMTEpOSklHTEtLSkhNT1NQTkhHQz5APTtE
-Q0NAQkRJS0lLSk1NRUFBRkdJRkJDQkVFRkREQDs0NzxFSEhIRktJSUtCRUZGRElL
-SUtJSElFQTxHVVNOTlJKUElPTlVWTllXU1thXFlbU1RTVFNVVFJTU1FRVF1dWVVO
-U1dVVU1KT1lSVFZVUlVdWFlUU1ZbX1ZOUWFcX19eYV9iXVxiX2RcVFteXF9cZl1a
-XmFfWFZWV1peYGNgXmJgWVNYXVpZVF5eYVVNWVpdYl1cYGBfWmBZWVxgY2JkZ2Nf
-WF1VXVdUV01SV1mFwNDa4OTn6uvs7O7tWVlaWVdZWlphZGZlY2FcXGFgYV5lYFVd
-Xl5UTE1OXVdRVVZTU1pYWVpZWVdUW1tZYmNiW1VTVlRcVllbWF9cWl9YVVBPV1dU
-UldQTE5PSU1HQkRIRURFREFGQUhITEpKRk1NS0lIRUhCREJBQUBAREREQz9DQk1M
-SUxXVlJXUEtNUUpOUExPTkxPTVFSTlRMVEtMTU1KSkxSTk1JSlRJSkpIQ0VHSEpK
-SkdGRUNFP0dERkdISExKSEZERkJMRkVAR0hGRERISkdFRUZJREZRUU1LRUVHTUhF
-T0xLSk9NS0dKTFJQR0hJTlBJTVFMU05QTE1QTU5JQklHTU5NTUlMVE9OR0lMTkhI
-S0RPS01TU1NQTU5HR05HSk5PSU1LSUJERkxJSk5HRENLS0pKTEhGR0dITEhES0pL
-TEpLSk1GSkpJRkVHSEpNTEpTS0pKSEpKR0pJREpJQUdJRkVFRERGTVNQR0NIRktP
-TEpKT0xRU09RTUZFSEtNS0xMSklCREdNSUNDR0ZERERDSkFDRkJER0pMRkRETVJM
-TExPTUZFSENGRkhIREFESkZFQ0VJSkNIQkBMTVNJTEZFSEdNSklHS0ZJREBEQ0tK
-SEpKSEhFRERDQkJDQ0dHRERIREZFSEtIRUVDRUVJTU1MR0pHRkdHQkRISEdHSkdI
-SEBISUtGQURHR0ZHREhERklKTkZFQkFCPUNGQ0ZER0NEQT49QUhMR0tLS0NGR0VJ
-SUlKREJEQ0VGQ0JCRURHRklJTERDRUZKREM+QURFSEpMRkRERUhGR0dPSEhLR0lF
-RkpJSEhHRERDQUNKR0hHSU5IQ0xMSEhNREVKS01JRUVGSExLSktJS0xMTk5JRUlE
-TElFSUZJS05OUk1IR0VJSURESUVHRUdGREZJRkZCREhNTUZGSEpERkhGRUlCRUZN
-RUZEQkE+OkZGRU5MSElGRklGSEhLTUlIR0hLRkVGTEpER1FQRkVFQkBFSElGREdI
-RkVHSEtMTlFRTExFR05JSk1LS0pPUVNPUkpFSEtMREpLUkpJSk1LRkZISUM8R0lH
-SE9MS0lKSEFCTEZISElLT0lJS0pGQkVHQUJAPUJERkZLTkhFSElJUExNTk9OTkhH
-SURER0RFQ0VEREVHTEpOTU9OTk1ITUtGTE1TVFRTVFRQVlBOU1BPUVFRUk5PTklL
-TFVLSEtJRUNCR0pLTk1OTktOTVBMT1FJSklNRk1PS01OSExKSUtRUVJSVUpJTUpL
-TEpLTk5NS0tKSU5OUlROSkpKRDo5Oz0/QEVIR0lMSkhLTUpKQ0VHRUFFRENDQkdL
-RkM+PD08QEJJRUtJR0hFSUpFSktKR0ZGRkdETEpKUlNPTUtEQkVCQk5VUlJWWE5T
-WVRYVlVVWFdVUlRbVVtTTFNaXlNVTU1OV1pPT1tTVFVYU05SWVhZVk1QVl1iYFlZ
-XV9gW1xbX1diY2NgXFhjYllZYmNgXFxeY1hYY2FiXlpgXmRdXlpdX2JeXlFVV2dw
-YlZbUl1lXF5gYmBcX11YVmFpZWJdXVdRWWFgWFVVXlNTTYjB0dng5Ojq6+zs7e1X
-WlhcXVVcYlxeXl9hVVdaXVteX2FdVFJWWlhbVl5cVldOUFdeWlRWYVhgV1heVVte
-X2VcWVNYYV5aWVZUWVZYWVxaV1ZZWlZUU09OSEdGR0hGR0NGSUtFQ0NESEtLRkdJ
-S0hISkhMSkNGREU/QkNMR0lNQ0FCRU1LTU9PS0tQTUlNS01PVU1MR0tKUk1OWFVP
-SUhFR0pKU09QTk5NTEtISU5OT0xKS0pMTEdCQ0RGR0VHT05IR0ZBRkpKQkRFQ0RK
-REZLR0dJSEVNTEdMSk1LSUtLRUVGS01NSElNRklIS1BOUE5XTUpGRkxNSEZBSVBK
-SUpMU09GSU5GRUtFSEdQTk9TS0pMSUpITEpKTE9MT09QSkVFTUpMSk5PTkpLTlBM
-SU1KSU9CUFBMR0pQS0pMUFBKRkVFSU1QSEVGSEpKSk1MTE1IS0pHSUpIS01ERUlJ
-S0Y9QkpFQ0dFSEZESUhJUFBKSUdJSUxMT0tMS0tJSkZOTUxQT05HREpJREhGSEpF
-SUpCRERHQ0BCRUJARUVKTExISEpJSUlKTElQRENFREVGSElDQkJFSU1HSk5MTEpJ
-S1JRT01NS0dAREdGTEdFR0hERkRGREhGTUhJSktEP0JDREZMSEREQ0tJSU1KQkRJ
-QkhKTE1PS0dJRkNFSUNEQ0NIRk1OTU1KR0dJQUpER0RIR0I7PkRKSklMRkFESERE
-QkJGRERCRkRDSkNHREVGRkVFSURBQENESEJBPjxDP0NDQUBBREFHQ0RFR0VNTUM+
-QT9DRERESEZHSkZLTUpLS0xLR0lJRUZGSURHQUA/R0JHUEtIRUVJR0hJR0dGR0ZD
-RkpJS0RETUxPTUpFRkVDQUZHSU1RSlBLRk1MR0pHS0xESUpKTEVCQEJFRUZJSElI
-SkpKRUZDS0tMSU1GSERITE1IRklJSUtHR0pFRT9FRkZGRUpMSkhHR0dIR0pOTkxN
-R0dHQ0NGR0hKSkxFRT9ER0RITE5PTkpGSU1QTklLU05NTk9QTktHSUlMVVFOTlRS
-TlJMSURFRUpOTkhITElGRUVGQ0NESERGSUFFTUpIT0pHRkRHREtPSEhLUExNTk9K
-SEZGSk9NVkxMUEdFRklNUFBOTVBLTEhIR0pKTENGRkpHSEpFSEtQTFFMS0pMSktR
-T1RQUU5TT1RRTEpKTVVZVk9PUU9STExOTlFOSEpEQkhLTUtRUE1LTUpNS0tPUE5N
-T01QUEtIS0hMVE1PWFVSUldQUE5KSkxKR0lJRkpMSkdJTE5ISUtMTElEQ0hFQ0BE
-RUtMTk5LSk1PU0xIR0hERUhJSD5ERURFS0lEREdIUUxIS09KR0BARkpHSUVIR0pF
-RUlHTVBOT09STkRERUZKSFBQSVFVWVpdYF9bWVFQVFZXVVpZWF1WVl1WV1dVUVNN
-SElRWFZYU1VYU1deWV1fWVZVVFtaXmZgX15eYVdYW2BdXVhYXFpja2JeZ2VcWV1e
-YGBYWlZRWV9hXWFeYF9fXV9lW1tjZWRtYl5hYV1aW11bYmJjX1ZbYGNlY2dhWmZe
-YV5gXFVUVlFVicHR2+Hk6Orr7evu7llgXlxbX1ZYVFJYW11bX1xeXFZcVllbW1lZ
-Y2liYmBaWlNSVlxdV1xaXFhaWVVaVlhUXF1dXV9XWFlhY1xMUFddVldVWFNUW1RK
-TExLSUNCRUhHRE1IRkVDR0hNTEVGRklIRUJHSUlHREJKS0VDSUdGQz9IS0lLTktF
-TU1KSlFTUVNOU1JLTElJTkhNTVJOUFRTUk1JTE5UVVJUV01KTk9SVU5PVk9KS0lF
-SEdDTkhOS0dJSUZISE1LTE5JRUhJSEVKSEtHSUlLUlBOTVNPTk9PTU9IUk1MTk1P
-TUlFSE1JSk1QUFNERkpKSUdQTVNRSklQTVBOS0tLTUhNTUNDQ0lJTUpQRkhKR0JH
-Sk5MTU5LTUxKRUBISUpJT09MUk9VVVBRS0pGTklLUUpKT0tKTlBQUlJLTE1MS0pE
-Q0VGSEdLUU9LTlJITExPTEtNSUtGQ0ZNREFGSUpCRUVMTEtOR0ZLTk9NT0hJR0hF
-S0xKSklHRkVEQ0VMTkxHSEhJSkhKRUpMSEtJQkA+PkdIRUVAQUJHRkZJRUVCSkdG
-S1FRS0RLREpKS0pFREVBR0hCQkxKSUdSTVBUUkpISUlJR0xDSUhISUlFSEdJR1BH
-QklOSEFAQkxFRExKSElMRkdLSkhISEpJR0pMSUtRTUlLTUhHSUVAQkdHR0hIR0VK
-TUtJR0hFPj5AQENGQUFGREhGRkZBRUNFTElESEhHRlFJUE1CQ0pLR0dJSEhFQUpH
-R0JAPkFBQkZHQkBDRUZGRUhQTkhJS0FHRUhIRURGRkVFSklOTE1PTExLRk1DRUBC
-SUhJSVJCR0pNSkhMVk1DS0xIQkZISUNGQEZIRktNTkdJS1NKR0VPUUpLTE5TUk5L
-Uk9OSk1JTEZHS01IQERHQkNFR09LRkVCRUZOTUxJSEhMSktMQkdHR0VISEZHSEdJ
-SUpHQkRHTEZIS0dHR0lKSkVISExMSUlFQUVIR0FHR0hMTEpHSUxGRkdFSElMTUZG
-RkFJSk5ORUlGSEVJSUhJSE5LU01ITExOTFJLRUJISk9MSklFSUhIRkRDREROTk5M
-S0ZLTExISUdGQ0dLS0dLTElKS01RUU5GQ0ZKS1FRT0pNREJIS0pKSExMS01LSUZJ
-S0hJTU1LTEZGSUZMTlJSTkpLSU1LUFNTUEZCRExQUElLSktUUVlWVlBUUk1PS0xQ
-TVJISUpNTUtMTFJPTUZFSUtGR01OT1JSUFNSTEtITExSUlZRTU1SUE1MT0pMTk5N
-S0hJS1BVTUdJSUhMSUxLSUtJR0RJRkdJSExOTUdPUExMSkZFR0hDQkRGQj9CRUNE
-SEZCRUhHSERKT0lLTU9JR0hGSUhHRkpBQUNHS01LSU9NSktPTVJPVlBST1FYV1ZT
-UFZVUEtQVFVYWlNbWFdbVlhYU09OVUxMUE1NU1RZX1BSU1lcXlxVWllfW1laa2Fg
-YF5eW11WWVdXYWNeV1lhY2RlZGZmZWFaVmJeVlZbWlhdXFxhYWJhXmdiW11dYV5e
-YGFeYWRmXWZmYmdjXF9fYWZmamdoZmJfZGFlW1dUTWKHv9Hb4OXn6ers7e7uVVph
-ZWlmWVBWW19WWVxbZWBjWlZbVV9jYFtdXmBcXFldZGZlYWZbY15hWldYXlpbXl1b
-WlpeWVpWXVteXVBSU1NnYVxaWFFSUEtJSEVIQUFFSEVFRkJLRkpGSEpKRERMTE1E
-SElERUREQ0FCRT9DRUFDRkpMR1JTUk9PVE1SVk9PTVhTUlJOTklMTkxWUk5VTVNR
-TEhKUFNRWVRSWVBTTk5MSEpLUU9OTkpORUtOTENDSEpNREVER0pMTE5ISUlPS1BS
-TVBPTFBQSFJLUU5NSUpJSFBITVFJSUtNR0tITlBQTk5TUVBGSk1ITlJTS0tLSkpO
-TkxNSUdMS0pIR0lCRUtNUlRQRERKUU1FSkVJUE1MT0xLR0ZOSUpJSUtTUUtOVEpD
-SUdJSkhITUxRUE1FSE9YUklOUFJSSUpJSUVHTFFRTlNQU05KTExOUEtJSVNNS0tI
-SU5JS0VKTk9JRkNISExLTElKTEtESkpMT0NOTUpJS0hKRElOT05KRkxNSUVER0hI
-RkNAP0JGR0hKRj1DQ0hGRklMTklLS0ZQS0dJREpFTU1PSEdISEZJSUlDREZHTE1P
-R0xJREpMSUpHSEZGSkpIR0BDS0hBTEpMREZEREVES01JTUxOTkpNU01HRkhJTlBJ
-R0lJRUxLTE5ITEpKRkhGRUJIR0RITEtLS0tJSD5CRUJIRkRAPzw9R0lGQT9DRUpI
-SEdISkZGSUlFREZJR0pHRkZJRUFCREhORkdDQURFSEZCSEVEQkVDS0xLSUlDQ0FF
-RklIS0lISkpESEhMU0pNTUxQT05MQj5LTUxMUFNLT01NSktMTEtHSUtKTEpKRj8+
-RE5LTUtISUxFSkdLSURPT09NTklMTkxKRERMQ0tKRUlOTUZHS0VRTktKTUlFREVI
-Rk5MSUdHR0lGSkpLREZKSUdERkpER0lKTkpGQ0lLTU5OTEtKR0dEQ0hISENLR0dC
-RkNER0RGR0ZIS0dERUpLT05KR0hHRkdITUtJSklIRkNHS0dOSUtNSktMS0pST1JN
-Tk5JQUlKS01KTElJRUZFQ0VGRkhNS0xHQ0hHSU5KQUdKRUpJSENESUxGSE1LS0NL
-TkZHR0hQTUpGRENARExLTk1MSU5RUktQTk5KS01JTktFS01KSUxNRklKTktLTkRG
-RUJKUU9MSk1MT09MTFNRVlNNTE1OSVJTT05PU05OTEhLUFNMSEtRUUtJSklKT1BM
-T05OTE1LSE9STU1NTlFRTExNT1BQTUxMS1JVT01PV0tKSEtOS09QTUtLTElKQ0VL
-SEtNSkxKSUhTSkpISkRCQEVMSUI/QUE+QEZGSEpGREJIS0hOR01ISENCRklGSUtL
-SkhOT0tMS0xJRURCRU5OT1RXWFdWU1VUTlFdX1hgW1NRUFNXWVdXVFdUUFRRUl5T
-T1BTWWFZVVhbXFxaWWBgXGBZWV5hX1tdW1lVWF1eXVxeXlVTWmBlX1lcYF5gYVpR
-V1xoYF9uXlZgZGdgX2FkW2JiXFteYV5lamNeZWFiXFxbX15dZWhlZmBfW1xhZmVl
-Z2pfU01ZX4q90drh5ejp6uzs7uxpaWBvamZhXVxbV11hYWNiaGRZVVZTU1ddYVpg
-amZgX1xeYFxjaGVqY2dbXGFdXmBgWFdXVmBdWFxbV1tYXmBYT1FRU1dTT01JTEhN
-R0ZJRkVAPkdJR0VGSkVGR0ZFQUZGSElSSkhHRURDQEZLRUJFREhHRU5WVVJTUFdX
-VFJNTVNVU1NUU1hTTkxNUFBSUVZWTUtLS05QVEtKS09LSkpLR09SUEdHSktPTUtN
-TU9MR0ZLSEhMRkRISUpMTEtDPkBJR1JTUUlIUEdIUUhIUFJJSEtMSUtKT0tESE9J
-TExLTkxHSEdMR1JQSUdOT1RNSkpISEpNUVBRUk1RTUlJR0dDQkpNTUpITlRYUkhJ
-S0ZPT1FMSkpKS0lJS0lDRUhJTU9HSkhHRU9OTktMSVFOTk5JSktUU1JPT1FPSU5K
-Q0tRUVJRVVRUVlRUW1dOSUtMTk5JSUpLWE1SSktISEZRUkpJSU9PUVBMT1FJR0NF
-R05DRE5OS0xMT09PU0tMTUxCP0VNTUdAQUlBRUVFRklJSEZJSkZMSkZFSE1PT1BL
-Qz5GSEpMS0VGR0ZDRUlOS0xPTExOUExKR0pIR0dJSUhKSExNS0RNR0ZKSkhKSUxM
-S0dRSEtHUkxLQ0VLS0dLSElGRkxLSkVGRkhGSUxMTVpQR0VFRktHQEBFS0ZFQklJ
-R0RDQkVJR0ZFRUNGPkBDQ0NBRkJHSUZLS0RESUlPTUhFREtHRk5KRUJCRUpLRktN
-REZGSEhKSUtBQkRGQ0lKS0pGR0hJRElGQk1PTEtJS0hJSUhITUxKRUhJSEhIR0hM
-TVBPSVFOS0pFS0xQUEtQTEhISEhJSElKS05NUUtDSEtRVElJUk9RUE5KS0lHTU1I
-SUVGSUlFQ0ZJR0dPS0pMSk1ISEhJREZNS0xNSktJSERIS0dJSEdIRUVHSkpGQ0RF
-SklHSktNT0xOTEdFQ0RLRkVARERHSFNKRD9ARUZJSkc/QEdGRUhIRkhIRUQ+PkRG
-QkNJRkpES0lOSkNLSktER0xJTExLTU5KSklERUpEP0pLTERJTUlDQEBDSUlIRT9B
-Rk9PTkRES0tOTU9KT0xISk1LSkdLRUhMTklFTUtLS0dGSUVHS05QU05LTlFLSUtL
-UFBLSkhNRkFITU5MSUpJRU5NSktNS0tCSVFSUlBMSUtQSElMUFVUUlJQUFJHUk1P
-TU5QS01LSkxPTU1NW1tXV05SUk9MTUtPUk1PUlROTlRQT01NT1BPTU9OUU1QT01N
-S1NNS0lLSUtRUFFRUU9NRktLSEhLQ0dLR0pQSU1PTk1LRkpHQUNGRUlGREhDQ0JK
-SkhLR0hQS0ROSUdIQElIR1BNS0lLS0dJS01KSEhITUxKQ0VKTFNVU1VYWFNST1NS
-VVpcXVRZWlJWWVZXV1RaWFdRSVJVVVBPT1BbXVReWl1bWlxiXl1WVlVbYVxfYF1c
-YFdeWlpjYV5cU1JQXF5YUUxcY2JZWVdWWlxlZl5fVldbV1pgYF1bWl1iWFdkYF5f
-aF1gYFlhYF1dXmFkZGZmWllcW2BeV2RqY19aU0tMjb7Q2+Dl6Onr7e3u7WhoZGBd
-W1VXWlJYWF1gWlpgZ1xaVE5VU1dXX2BuaGRmXmNiXWZhY2FaW15hYFpgYlxYXVxj
-WWJjW1xZX1lbVmReWVpaVVFMS0ZHUEpGTkVJR0NBPEJDRERCQktGRUxPSktNUU9N
-SEpKRkhBSktHTEpNUExLQ0lNT1lbVU1QWlRLT1ZRUVBVTlBRT01PT1NRTE9OSU5P
-UEpOTU1MTVBRTVJUTk5PSVNQTlBXXFtMSUdHREZITUxLSURISEhKTkhLRUdOTklG
-UU5JTElKSUlRUFFLRUhQSUVCSE1LS05NVkxNS1BQT1BLSkhKTEtQS0lQSUdKSk1P
-VFFTUFBRU0tFR0VCSUhNS0xQUk9QTklHTk1HSkpIRkxNSU1SS0FEQ0lPUFZQS0RF
-QkRLUU9JRktQT0xPR0pJTU5KTk5GQk1FSkpHRkRDRUdIUEtJSldOTEpOT1JLTk5P
-UExPTk5JUlBITUZMTU1NVFJSTEtCRUtLSkRJTVFISlFSTktDR1JMT01BP0hFRUpP
-RklDR0pHREhBPkZKS05LQkNCR1FKSEZISEhHTUpHR0dERk1JR0tLSExLTEpITUdJ
-R0dGSEpKSUhOUk5HSUtMSEpMTUpMUVJKUE1LVVNMRk1PR0xLS0pMTE5LS0ZAREhL
-S0tJRUlKSkxGRUhNS0tEPj9ISEBKSEhHRklMQkNCRkI8SEpIQj9BSUdKSUZHSUZL
-SUdQSUlEQ0NGRklJTUlJRURBREVJTVNLSUlISkZGS1FPSkJERkxNSktISVBRSEpM
-S0dJRkxIR0JBRkdJS0pFRktKR0VHSUxIRklSUFBQS0RISk1MSkhFRUNGSElITk1L
-SE5KRUZERktOS0tJTk5NT09OSk1MRkpHTExQSEtFSkxJS0xLR0ROSUhFTUpHSUtM
-R0tLS0tKSUVKSEVLSUxKSEhJSklGQ0VKR0pLSUxLS01LQ0ZJSU1ERkNESERIT0xI
-RENFTEhKSUhFSUtKSEtHS0hLRz89Q0hIREZQUVFPTEtORkhHTUlQT1NOSklOTExM
-SExMRUtJTE1MSUxMSkpGQ0dJRURFQkJJTUpISUpKS0tLSkpMTkdESEpLSUhMTVBL
-RkRHSU1MS05RTExOTEZJSlVPTk5STlFRTFZQSUtGQkhNUFFNSEpLSUxHTEpPT1VT
-UVNSUUZIUk9NVFJSUlJVU1JMTkxKSEtOTUZJSUlFTU9MSFJPVE1TS0pVUlZWTlFP
-SUdWVVBOTUxOUk1JTU9PTkxJS0hISUxLTU9SR01MSkxLTk5QTUtNTE5NUlJUTk5Q
-TUxKRU1OTUxHQkNBP0NDQUBBQEVIQ0ZJUU1JTEpMSUlCSUdNSkpISUpIS0hGREVF
-Q0VISkpJSklHSktRUVBTWVZUTk5ZVlJTUVVUUlRZVVdYVl1VXlhZWlBVTU9RVFJL
-Ul5cXmNfXF1eVVxgWVVaW2JhY2FcaGRgYltWVVxbX2JcYmBjXmFdVV1kZWVcXltl
-YmBkYFpUT05aXGhlYlxYW2BfV2RgXW1nXGRjX2JeYWJhZF9nbGZhaGNoZGZdYlpe
-XGBeVVCNwNHb4OXo6evs7O7sYWVcW1pbW2JVYWBeV1RZWl9kXFlWV1NcW2NgXmVo
-Yl1gV11eV1pcWVhZV1pcYF1eYWFZXFxWWVlZYVpZWFdXVFpiWE9RVU5MSUdGR09N
-S0lGRkdESEVAREFGS0dCSUZFSEtMSExGRUpKT0dNTU5OTkZMUklJRklMSVVVS01U
-VU5RV1FSUFBXUVRYWVRPTE9MUU9NS0hQUExOS0pMSlFRUU5KR0dNUE1QUE9VVVdN
-Sk1MR0pLSk9LRklHR0pMTUtVR05NT01LR0dKR0dMSUtPUlVOSklNSEtOUE5OS0ZL
-TE9OT09MT01KTk1VU1JKR01LTExLS0pQTlJPT1JSTU5MPEFOT0hMTEtKSkhGS01S
-TUdMUEtFTkpST1BNRklERk9QTUpEP0JDRUxSU1NPTVBTVFFWUExVUEpHTUtFSUdG
-REhGQ0NDREhNRkRDQUZGR0tLUVBQUExMUEpHRklKRktNS05NTVBOTUxMUU5NS0hI
-TE9PRkpLTEpJSUhRS0pITUlHS0pKTFJNS1FIRUZLST87PUBISEdITk5QTEdFREhQ
-Rk1PS0pFREZMSElIRUxPUE1HSktOSERNS0tKTE5JS01MRUdOS0tJT05PTEtPUExI
-UVNSTk5LT0lGRExOS0hQUFBJQz8/RklLS0xJR0lJUk1NUVFIR0M/RElNTUlJSUdM
-SElHRkVDQ0hFSERFRkNDSERISkxKSU1MSktJR0xLR0lIR0VJSklHSkNFQ0hHSlRR
-SElLRkJGTkRISEdLTEpMRktLSUpESEhOTUpLSUxHREVHSEdFTEhJTEtJSUpLTFJS
-TU9QTk5ST0xPTEdDSEZCR0pFSUxKT01JUkxLSEVFS01LTE1SUE1MS01QTFBLR0VJ
-T05LTVVSSUdKSUxGSEtMSklGUVRPSklKSklMS0hESEZDR1BPSEdHSU1MR0NLS0ZM
-SURGQkJMSkhGR0ZMTUdFS0lFRk5MUlJJSUVITUhLUk1KS0xCQEpMSU5LR0VJSkhK
-Sk5QUEdKS01RUExPS0tNTElPSlBOUk1PTkpSUE1NUFJTVFBQSUhOS0pIS0ZERE5Q
-UUtJS05LSktLTUxKTFBPU01PTEpJSEpRTUhLRkhJSk1OTUpKTE5SUFFPTktLUlBP
-YVNHSEtNSkhERUxLQz5HSkdJUlRNU1VWVExNSUtMUFFTUU9MTFFST1VSTUVIR0tN
-R0NFS01DS1BMUUxUUVFSUEhLUk5SUFBSTU1RT01MSUpRSktIS0lQUU9MTExOR05Q
-UFFNT05PUkpPT0lMUE9OTlBWVVJUT0xKTk9QSktLTUQ9REVHQURDQEBBRERDQkJJ
-TExLSUxJSU5LTU1MUElJSUVHSk1LSUNFQ0FJSExRTUxLTEpUUlJWU1ROSVJTVFxY
-TVFWV1dXTFZVUVRcVVpYVFtWVVRSVlZbV1hcVldcYFhWWFZaWV1mXl1WVFBSXlxY
-WFNeWmFfXV5eYmFeXFxaWGVeXFxdXV9hYmFeYlxbVV9lYF9iXVljZGJhXWFhbmJh
-Zl5lYWNgY2ldZWpoXWFpZ2JlYmddY2VfXFxTVorB0tvh5efp6+zt7u5nYFlUUlVb
-aGljZVxaVVNWX2pjYVlUU1VXYF1fY2hnXmVcX2BhV19dWllXX1pnXVZgVVVZW2Fe
-XmhcY19SUlpVUFBYV1NQT0lHRVBKS0tKSUZGRElDREFGQkdPUEdHSVBKTU5VTkhK
-Sk5ST0xRVFBSWU1RUFBNS01ZWFNQUVdaVVlXUlFTV1RYVVFNSkxQTVJQVlNWWFJV
-WFJMSVBcVlRST05MUE1NUlBQV1RSTExOT0pLSkxJTUlLSkxLSkxQUFFOS0xMTENG
-SklJSE5TUVJXU1FMSUdLUkhRTUtJS0xLU1BKUEtMSENDUE1SSEdNVFBQUFNSTk9S
-T0tSVktJTU5QUEtLRk1PUFJPSURGSU5VUUZRUUdMTUxUUk1MSk5JTEtKRUE/PkVH
-S0tHTk9JRk1SU1RZT05MS0pMTVJPRUNFS0pGTVJPS0pQTU9WTEtFSElCSkxSS1BR
-T1JUTUxLR0xGSEVJT01PU1FTT0pKTFFPU09QTU5STktLSU5RTUtQVlBPUlFMTExK
-RElHSEdKSEVESU9QTlJLS0tHSktNS01NSkpKRk1ER0tMT0dGR0pKR0xJS0RGR0NG
-R0NNTEdQTEpNUFJJTkZKS05QUExHSExJS05JSExMSkRGQklMTlBSVU1JSUpJRUhG
-RElPUFJSU1BOTUZJQ0FESE9PTUtPSEpLR0hLRUVITVFHSEhNRT9FRUNIR0hJRkVD
-R0VLS05HSkdES0lLS0lHQ0pHRUVISUxHPkRFQUdJSkdLR0dIREVLRkVGREVNS01N
-S0tGSU1ORUdGR0pHTk1LSktHS0pDSk5LTkpHS01SVFBNSUlNTEhIQktLR0hGSUtL
-RUZFSE5SVVBNUk5OTEpOTUpSU1JRUU9QSkNHUU1MTVFPTlFMTElMT0pOSUxNSUZG
-SlBNTE1SSUNFRkZJRUZGTFFPSlJLSEZITUZKSUxKS0lIS1BOSEdHS09OSEhQUE1I
-SEpJSUxLU1lRTEdGSE1MTE1MTE5PTk1KSE5NSERHRktKTkdLSUlPS0hRTlBSUktK
-Uk5OTlBNTklUTk1QS0tNS1BQTU1UU09TTkxVTUhIS09MTktJT1BKTUlITklMUk9Q
-SEVFSEZDRUxUU1JMUFJRT0lIREtPT1BSUUhITE9KSExKUFBEPUhQUExNTk9OT1FS
-T1dTUEtLSkhHTU1PT09PUk9NREpMSkhMSklITEtVV1RPTVJVUFFPUFRWWVJMTk5R
-WlVUTUxHSU5NSU9JTExOTU9QU1BVUVJOTUtNTUxMSktSUUtJS09SSkpUVFBNSE1S
-UFVQSk1HREA8PkdJQ0NFSEdCPkBBSElRT0dMSkZISUhER0RES0xHR0lLT1JORk1I
-RkZFSE5OTEpKTlRVV09IU1JTTVJRU1FXTlJTU09UUFFVW1NQWVhYVlVTUllVU1lf
-VVVcVlVZUVJPTFhaXltUVFleWVtZYGhhXFpeYFxWWVheYWBiX1dcWlVdX11fZGBt
-bmJjZGtkYmRiZGFeZmxmWVlfXmZhYWFrZGFhZ2NpXmRpaF9mYmFmZF1fX11WYGlX
-WGlhjsPT2uHk6Ors6+zu7V9XX2xtWGVldWlgW1dTVVprYmFbU1RVYWFhXmdlYF9Z
-WVVbXWFjWF1YWV1gW1VaW1hZWl9hYmBeXV5ZXFtaUFBTUlBTU05LSEZHRkdKR0JF
-Q0ROR0tIRUtQUU9TUFFLRk1SRktOSE1PUlJPUFJPT1ZRSU5MSkxMTVVXUkxQTlBR
-UlNWT05NSVJSUldYVE9SS09UUlVWWldTUlNSVVJSWldYUE9STUtNTVdRVE9NTFZU
-VFdLSUhISkpISEtLTEhLTlZUTElHS1JPTFNNTk5QVlVRVVJPTlBMTlBSVVBWT01S
-UlNOTktJQkVNU1BLTFBMTVNKUE9OSk9TSUZKSUxPUlBKTEdMSFFMUVJPRk1NTU5O
-T1FSV09QVlRSUlFIUExSU0tMSEVEQkRHR0ZNTkxNUlNPSktKT1JUTk1NUE9OT1BI
-S01FTlBNS0xLTk1JS0hISFBISEpPUFVRVk9GRkpRUktMUU5IUFFNRklHPT9GTU5P
-TE1RS0pITU1KTklFRUpQTE1SU1FPTlJOTU9OSk5OUVJOU1BQS0pJUE1LTU5KS0pM
-UExLSkhJR0VASE9LS0ZLSEdHRUxGR0lLSU1MS0hHSFBLS0xIS0lNTVNQSkxKTEpG
-SklJRk1OTUhJSE1LSEtIRU1SUUxJRUNFS0tKSUpKTU5MTEpKQ0NESE1QS0xORktJ
-S0pHSUhKS0tFQkdGS0hQTUlNTElKSEZHQEVMTEhISEZIT0ZHSkZFPUJFPUVLTklK
-R0VISklKSURHR0dCQkZERk5PTUlKR0dDREFHTUtPSURIR0dGSkZJSUdIRkZLS0pN
-TUtOT1FLTU1JS09HRkRHRktGR0xISkxJRkpMUFxUU1JSTktLSlRUV1FQTk1NTUtN
-Q0lITE1TUExPSUhIRklKS0dKTEhKSEhMS0dGTU5LRUhFSEdQUE1NUVZWS0lISkpN
-R0xPSExLR01MREdKSUxKSlJUSUlMSUhNUU1KTFBQRUZLS09NT1BKS09STU1UT0tL
-UlRQSEpJTFVQT01ITkdPTE9RTkxPUFBRTklQS05NSlFQTktRTUtJSU1OUEpNUVBN
-T05MS01KSkxOTlFPRUVHSElSUVRST1FPRkVKRERKTExLR0pLSkxNSkhFS1VWV1RX
-SU1KRk5PTExLTUtFSlNPS0tMTFBRT1FQUU9IRkhLS1FRTVBNS0xKTElISkdKSE1O
-S1BKUFBVVFBSUk9MU1NQTVFRSlJXV1VUVVNRSkxKS0tOTExMT01NTk5OUVFPT1RV
-VlJMREtNTVRXUU5NTU1LVk1QUUtOS0xQU0pTTk9MR0JHRUdGQUJFRkNDQ0hITExL
-T1FKR0hKR0RGSk5OSkdKRkZNUk1QTUlJSEhISExQT09NV09SUFJLU1FOVFNQVFRQ
-Sk9QTVNXUVRSVFJSV1JXVlhWWFdhX1lPTVJVUVBPUVRIUE9QVFZTU1hcWF1tXldf
-Yl1aWVpdXVhdXF9iY1xZVFZiaWZlYWBlaGptZmNjZGFnY2loZWBdVVdTW2NkZmNi
-YGVmamZhYmVpXV1eXl5gYldUXFdXXF9eZ1CQw9Pb4ebo6evs7O3uX19ebFtgZGpi
-ZGFfWF9UZ2VpXVlXWlhWXWhkaWFbXV9fW1dbXGBZWVldXmFWU1heYWBfV15fXF9b
-W1doWFFXTktNSk5OSUdJRkdKR0dDP0RIRElHTk5OR0xMTVJOSUlOTEtQUVVUWFVM
-T01UUlBXWFJVW1VUT01ORkxRT1FZWU9RUFdTUk5TVFFTU1RTUE9MT0lJVFdUUk1R
-TE1RTk1MU1RTUUxHT05UUVBUUE9RTFFZVlNNSVBNR0JMVU9NTklITUtKR0lRU0xN
-VVZPTElJU1JPS0xJS0tKTlNZUFRUTE5XUU9KTElNUU9HT1JNUk9XV1RNUklTVFRZ
-VVVYTlRVTU1LS0NMSUNLT0xLRE5VVFRNUlhXWVtZXFJSTVFQTElITkJHRkRDQEpN
-S0tLSFBUTEpLTlFJTE1IS1FOTlNOTUtKTUhQVE1PTlBPTkxES0pJVVJITEdKTEtO
-SlBGTlFITk9RVFFRUU5IREQ8QkdLS01JS0tIS0VKSk5GQk1NRUhOSk1QVVJQUU9P
-Tk9QUktMUUhNTExNT1hUT1FRV1BMS0pNTkY/QEtEQkdHSUlITEpJRklHSEZNR0lI
-SU1HRklITExNTklJSURKTFFWR0tIR0xQTU1KSk5ORUtKSU5JRklJRktMR0VKSUZH
-RkVIRUpLS0tMSEtHSElJS0hESUdGREVOUFNLS0tJS0lHSUVKSUdKSEpLRkxMS0dJ
-R0ZLR0ZFRkdLR0pKSUdIRkVASEhHSEpER0lKR09MR0lLSEhJRUNLUFRSU05LS0hJ
-SEVHR0lFPj5IR0RHSkZFR0lHTUpHSU9MTk1MUFFITVJNTk5HSU5QTE5MSklJTEtL
-TkxMTk1RUVJSU0xJTVNUTE9LUUtNTktRUk5QTE5PS0tJSEpMREJOS0lJR0pKSUhJ
-SkxNUE9JR0dMTFBPT0xLTlFLSEVHS05LTUxHTkhJSkpHS0xOT0xQUVJMTVBYS1BO
-TVBKS0NHSk5OTElLUExOVVZTT1BWVU5SUExKS09QVVpaUk9RS0pKSlJNT1VSUk9T
-VE5PU01PUlJRUFNXV1JQTFNOTUxRTU9UTUpQUlBLSUhJUE9JT09LS0pKU0lHSUxI
-TE5OTVBNU0hITE1IT09LT1BNUlFNUlJIUE5MTU9TVE5QUE5OUE5LSktLUU9NTFFT
-SUJLTElRUUxOUEhKTkxLT1FPUE9MT0pUUFdRTExMTE9ITlFTUU1NTVFTWVVSTFFW
-VVZPTklJS1BLSEZHS1FRT05NTVJUVVNWU01RTEZQT0xRVlZSSEdMS0tQVlJKSEdL
-UEtKSUlJRExIS0ZGSUNCP0NKS1BMTFFSUVZWUExHSExLUUhLSUhOUEtQU05KTE9N
-SEhLSFFOTkhMVk5PTk1WUlJOU1JRV09NTVFTUlBWV1NUVVBUVFBXV1VZXVxcXVZU
-VVlWVFBRVU9VVVFQUl1dXlJZWlxiaWBlYWZYUlhaV1ZXYWVnZFxyZF5jYWFaXmFk
-ZmNeYGNha21raWRhXVpeW1xcXGRkYl9fWl1cXWRnXWttYl9iYWRZWVhlaGFfaFlH
-ZpXE0drh5ujq7Ozs7+5aY1xZVlxhW1ZZWFtdVE5eZFtfXl9dVl5dY2VdXFdYXVVb
-X2ZkXlVWWV9eXV9bYGFiX19XV11VUFVYXFhYVlVPTEhIRU5MSk1QSEhFQ0RDSEtI
-QkNKTk5USUZKTUhPUUdLWVtUVlZWT1hcUU9YU0xNTVBNWFBNUFJZU1JTVVVdUVFO
-VVlVVFdPU1RUUVJOVVdRVltWVVBQUU1PVFRNR09PTFVWSUpNUFFQTVBQUEhJUFNU
-VFBSTkhJPEtMUU9STE5LT0lGS1JRUFJSUFdQS0NISkhGS0lMVFZRUExRUFRZVldV
-VFtOSlBNTFNTUFdXUFRRT1RQUk5UT1NXVFFITkxQTktOSkZOQ0pQTEVPUk1RUFBT
-U09WUlVZVlNSTExITE1KS09KSUhKTE1ESkdMS09OUlRVU1JSTVFUU1FNS1NSTVBP
-T0xLTVRST1BKR0lMSEZRTkpESEdOTUxMUFZLTk5KS09RUVBPSkxEQ1NNSUpMRkZM
-S05NRERESEZHSU5NTE9OTlJTUUlJSFJUVVBMS05ST0hKTE9PUk9OUU9ST1JRUVBP
-U0xDSE9NREZLSk1RS0lLSEhJR0dHQ0lOR0xNS0tFTVBLUUhHSElLTkpVUVFKTVBR
-S0xLT0dHSk5SU1JOTU1LR0VFR09QSURHQUFJSk9PUlJOTE1KRUlLSExNSUNHQ0dP
-TU9LTUxGQkZHR0xISkxCSUxMSUdLUkxKTEdNTUtKS0lIS01HS0lKR0xKS0tOR0dM
-UlFNTkdGTU1KSUdJQUVJTk5NTElKRU9MSk5MRklNRkZJS0ZLSE5NUEtNS05ITEtG
-TUxOT0pMTk5MSk5NSE5JSkdLT0xMSU1QT0tNUFFPUEtQUlNQVVJPUVJPTEdJSllS
-VFhRUVNLSktQTVJKTU9LSkVERUhKS09MTE1JRUlIRkhMUFNNT0tLSUhATEtISkdJ
-SExOTUpISklNTE1NS0hLT0tLS0pMT09OVFNPRk1HRlBRR0xOUk5NU05PU1RVUktL
-UlNQUUtNWF1gWllPT1ZQUFNPU1BLTFBJTFBPVE1TVFVPUVBSTk1RVFJPTkZPU1JL
-T05XUExKTE9PVFBPSk1MSk5NWlFOT1BQUE1JUlFSUUxKT0xUSk5OU1RQUU1LT1dS
-VFJMSlRYUVFLRkdMTk1ISlJSUU9NV1JSRExQT0hMUE5HSE1SU1JQVlRVUVZTT1BR
-U1RTT0xMT1dPUVBITlFRUlZXSExXVldTTlBKSElLTUxKSEdJTlFVUk1JUE1LTFBP
-U1BLUlBOTFlVUkxRTkpPUlBRUFBISEZHV09MTUxERkJESkpHQ0BETUpKTUZERUZH
-Uk1ISElFRkZIR0xKR0ZLUU9OUVRTTkxNSUpOUVdRS0xNVVFWXlFRUldQTlRUUU9P
-TVRYVVRQTkpVUFRQUlhZVFtdWFpbWlldVFxVWFpbVVdXT0tUW2BhXFpWXFxbYmNl
-X1pXXVZeWlhWW2NsbWhjYGdmYWlkYGJiZF1dXFpobHRtaGZkX1lgYVZYYGdpY2Fk
-ZFpcXmRcY2VraF5dYVhhX2RdZWVhUUdNi8PS2+Hl5+rr7e3t7V5aWVlbXlhXW1xX
-VVVZXFZXVlRWX15gZVddaF1fWFNYWFhcV19lWFleW2ViZ2NnZGJjaWNbWFRPV1ZO
-TFpYS0tIRkVISEhNSUdGS0hGRkVNR0JFTUtJTFBHTE9PSU5SXVRcWFZYUlRgW1RX
-WVJTUlBRS1RRUVhVWV1ZWlVPTlRdUlNKWltVUVVQVlVYU1RVV1dbXU9QUExPUlVM
-TVBPSklKUkxIT01PTFFSS0pNS0pNTUxOU05PUFBITVFTUU1JTE1KSEhPVVROT1FL
-T1FRT0pITE1MS09SUFpTT1VRU1NUUE9RUFBKTldbUVhbVlZQTlBJUExSUlRSUlFQ
-VVNPTU5TVk5MSEtXVVFOUlZTTUtRWVNPUlRTVVRPT09RSU9KT0xJSkhKUE9ISUlX
-SUxLSFNRUVFRVEtPT09PUk9QU1dST0tMSExLUVtTT0xNUVFLUVFJR0tMSFNQS1BK
-TEtJTUxOUFFQUEtNSk1IUVFOT01OSE5PUEpETE1GUFFOUlFRSkpMTlJPUk9RT09V
-UkxJTFNVU09OTU9QUVJQTk1NSlJWVE9KTUxOTkhGQ0ZPT1BKSEtIRkVLRkpGRktN
-SU1MS1FXTExRSEdKRUlNTUhFTk1NUVNVVFJPSkhKSkpKSk9SSkZIR0ZLTEpHRkJG
-SExMSkhOTkpMTElHSExMSUxIR0ZGRklPS0pESEZKSkZKSExNSktNTktJRUZMTUVM
-TkNER0dESUpITFBNSERISEVKS05KS09RS1FPS0RKTUtJS0c9QUpMSk5JS0hHSU1P
-TU5STUhFSUZLRD9JTExKTUVLUkxOUEtMTUxOU09PSUtLSkZFTE5OSkhOTEpMTkdM
-UExMT1JTTVRZU09QUlNRTU5UTEtNTk5UTVFQUklKSUtOUlVRT01PUk9MSU1KTkpJ
-RklFSERIS0lNU05ITElFS09PS0xKUE1HS0xLTFVPSklLSEdOUkpNS0dMT0lMTU1N
-TExRTkpOTE1MTE9QS0VKS0pPWFRYUlBWU05LT09YUVJbU09TU01GSUtOUFBRUVZS
-TU5JS0tOT1RQUlNPUVBMTEtMS09OT1FQUlJQT0xHSU9STUZJTUpKRktPT0xTTE1N
-SUhHSU1RTkxQVVJZWVFTT01NR0pPU1RPSUpKTVBTT05MS0xOTFJSVldXTU1PT01U
-UVJVTkZMSEdQUlJOUVVVWFRYVlFPTkxOVlBSUlJQW1RQUU1KVVVXVlBTWVBNUFFP
-Vk9KSUlLTU9OUk1NTk5OUExJSkxPVVNPUU9NUE5WUVRcWk9QUE9KSkpPSkZGRklQ
-U1JLSENAR01HR0VDRUxMTk5KT0lKTFBOSUlKSU5JS0xITFBJSVBNSkpOTUlGSkxM
-TEpTUFNcVmBYX1hWU1dWV1tWTkxOU1BPTlBRU1NMTU5TT1RPWFdcXFlXWVtWVlNd
-V1ZUW1VTU1FUWmVmV1pdX15jaWNrXV9YX1teWFNZYF9mYWZsbW5lYWtkXWtsYWFb
-XlpeWFxlZmZramVhYmJkXVtcWmZiX2FmXVtaYV1jYVlbXGBfY2pdWFddYl9RTEqN
-w9Lb4OTo6ezt7e7uWVJdV15bYFZcYGZVV1RXWldVV1FUXF5oamFkZ2ZdXmJpZGlo
-Y19cXVdWX19faGdwcWRfX1hSVFJTUFNPTFJPTlpOUUtOTkhKSEpLSk1JRUdLUUlH
-SEdITkxPUVJOUVdOV1RUYFpTVk9XVlpXXVhOUFRPUFZNUlRUVlNUVFVWUlJRU1hU
-UVBMUVJQV1RdVVJZWFVYVFJNTlhQT1FUUVhQUlZPUVNMT1BOUE1ITUxRUVNNSktN
-SktLS05MTUpNUFBMUFJNVFFXWlZOSkxTUlJVUU5RUlNPV1JMUllbUUtNUk5LTEdH
-TE1QVV1UU1VRVVdOUExNUE5PTE5OT1BLQ1JRUE9RVlBDSVJOTUxQTExNUU9LTFJR
-UlVWUU9NU1VPUlBRT0lLTlRST0lSVEpOUlBNTlZVUlNaUVBUTE9LUE5MVFJJSEtH
-SUtPTlVQT1NQU1VQUExMTE1NUUxMVlFRUE1MTkxOVlRRTElIVVBQT1lNTE1KS01J
-TE5RWFJLWVlSWFJJR0xKS01OU1hVTlFQU1RRT1VUU1FNTlJST09FSlBRTlFRVE1J
-TUpMUUxHS09OSEpGR0hHQ0hER05JR0ZLRkxPT1BMTkxCRUdNRkJFS01JS0tJTVZY
-TFFRSkdCR0pMSk1MUE1DSEpNTVBRSEdNT09MS1FLS0lOTkdCTE1OUkxSSUhJS0tJ
-R0NHRkdJRktISElGSU9PT0xMSE5TUU1OUExISU1JSEhHRUZDQ0NDSU1JRUlLUEtN
-SkhMTkpISkdHRUpLSkxIUEZHTUNJSkdITEtLSUdEQUZFS0tJREdHQkVLT0xQS0pL
-SkJKT09OSkZMSEhITU9EQk1NS0tPSktESE1PTlFSTlRUUUxUUlNRS01OS0dHS0tL
-T05NS09LS09OTElGSk1KTExSTlVMTEdISklLSUtITUlLUVhOS05VUFBRUExJSkpO
-Uk5SUExORElOU01NUk9UTkpLU01KS05STkdJUE9ST0pIS01LT05MVVdTUk9RT1FY
-VFBPUlNRUVJXVU5QVU9KWVFRUFNNVFhVUk1NT01QTlBUWlNPUlFMTUtNVlFUUElP
-TVBRT0pGSk1OT0pHTExITE9PTE1RT1FOT05LS0hST1JUUFlcVlFSUUdDSFVTT0xK
-TExJSk9WVldSU1hTUVBSU1ZYT0pOUlhQTFNNR01TVlBQUU1PVVBQW1dVUVNVUE1R
-VVVXUltZUE1MS1NRVlhXUVVcVUlLUlRXUE5TTExMUFFQUFNPUk5MTE1QTVJQVFVT
-UVFUVFlVVVhXUk9QSkpMS01PSEhHVFNLTVFJSktHREhHRkNDSkpFREVHSktKSlNT
-RUhJUVJTUlBNSlBQSUlNRkZISUlKUFJQTkpJTFJYVVRQUFJUU1pVWF1UUE5VUFFS
-UVFOTVFNUlZNU1BXWF5bW1VfVlZUV1VQVVVSTlBSWFtfXllaVFVaWV9eXWFrZl5X
-XlhcXF5cYV9eWV1fXlxcW1dnbGRiXFxaYmZmY2Fram9mZGNfZGlhYWFZYWNmZV9d
-W15jZmBlXWBjY2leVWFZXFhdX1ZPTIzD0trh5efq6+zs7u9YYGBYXWhhX11eVFhS
-WldST1JcU1JTX19jZWVbYF9hXmNsbnFvbmdhXFtdW19aZGFiX2BYVUpQUU5TT0tO
-S1BLUFBVT0pJRUtKTEdITUtNTUlNUUlHTU1OVFBSV1lTWFdjW1ZVWFJRUUpRVFpT
-WFRQTVJQTl9WVFRUUlFPV1FWV1VTUFRWVFdTUllRU1dWUVRTUVBRV1BPS09PSFFU
-TVRUT0tOUFBSW1NSTE9PRlBSUFJSTUhGSUZHS05MTE5OTVBMTFFTUU9VUEpLTlRW
-VVJOTVZVUlNLUFRNVGFgVU9PUVJGSU5TTFJTVlxYVV9ZWFVVVFBPVFFTTEpQUVRM
-S0tKTk9LUVJSWEpFTFRWU1RVVFJSWVRNTlFUU1ZRVVRPT01KUE5RU1JWUExSUExO
-UFZSVE1SU09VVk9RUk1PR05OTE5RVFNPTEtMTVBTWFVWUFBOUk1MTUpMSU1NT01N
-TU9JS05NUlVQTkxQT1BQUVFQTkdFSU1MTUpQUFdPWFVNUUZBT1NQT09PTkxSUVBP
-U1BOUk9WU1hSR09LTk5OV1RPUElJS0tPUU5OTklLTVNTSkRDSU1RUEdCQ0ZGSUZI
-TlBLTFBQTUZFSU5OSEtQVU5OS0xIREtNT01KR0VHR0lKTEtHSUI/SkdHS09KUExJ
-SkVIRUhISUhMSktMSUtLSkhNSE1PSkxMSkVERktKTEZQSkxHSUlDSkpNTUpNUk9P
-R0hCR0ZFRkhBPEJGRERITE9JSExLTE5JR0hISElIS0dOS0pJR0xOUlBLTE5HSUlD
-S0tJSElHQ09OS0NISUdETkxNU05US1BXT0ZFTE5RTEVGRE1JRkRISUpQUVBUTUxP
-U1JZVVRQTk9OT1FLTE5TWlRSUEpJUVFKSUxPS1NUTU1OUk5MSkpMUk1NUUtJTUhN
-RU9MSElLTUtNTlBNT09SUU9JSVVOT1FRUExITVJSTlNRUVBUTUdNTUVQVVBPVVNU
-SU5JSElMSUZPTlFMTFFKTEtQV05VUlleXFxYXFpWXVhbWFNXVlNWUlFUVFNSU1BP
-WFlYU1NQT01RUlJTTlNXU09PU1FQTU1WTUxNSUVISkpUUUdPTlReWFVUTEtXVFNY
-Uk9OSlFUVVVQU1NUWE1OTENISU5KSU1NUVJLTk5RWllWXFVWV1RTVFJQUVFUUVJS
-SUpOSkpXVlJSU1VWVldUVFVSWFFKTVNOT0pSVE1MTlxhU1ZWVE9QT1BNSlJMU1NU
-WlNWU1NdWVVPUFFRU1JWV1ZQTE1NT09TVFNVU1RRTVdTU09LTlBRU0tJSUlOU0tH
-TlJKTExIQkJGREpKSkhJR0ZISU5MU1dSUElRTktNSklOUElHS01JRUlFSEtQUFNX
-U1BUVVhZWVxWUVphWFJbXFdWUlJRUVVQT09RUVRXVFJHSlJgV1NSTFZaU09TUVFQ
-UFJRU1ZWVVVbWlpZVVpZVFZfWl5eY1ZfXFlWWGFgZF5UXVpeVl1XaWhlXGBlZV5e
-VWBmW2dlZWtdWmBmaGteXGFiYVhcXl5dWl9YX2ViWGJpXlVhZGpZVWRpWWFZj8PS
-2+Hl6enr7O3u71dXWFheWFxdW1lUVV1WV1haWF1VUVlZWVphXmNcaGhiX2JlZmpo
-ZWFdXVtgX2JaX2FaWlNMTUlIRkhMUlJUT0hOVFRPSk1OT0ZDRkhLVkxSTUlPTlFQ
-TVNVYmJbWl9ZYVpWWVJWVVVPWllSUVhWXF5gWVBQV11ZVlRSWlJKTVNZV1ZOSUdO
-U1ZVWFhcVlVUU1RXVltVVVZOS09cU1RVUlhWV1lRUFBVWVNSVE1LSUtOUllRUFJN
-SUZQUUpJT09RTUpTVlFOT1BSS0tOTE9XVFRRTVJPTklLU1JQVltVTktGTFNSSEhE
-UU5XWF1UVVhZXldVV1hSVVZRWVFSUElISVFRSUdRUFNKSEtLU1RTWE5RWlNPT05P
-VFNQUVNOU1ZTTkxKSk9OUVNQVVVST1RWUlFXV1NPUVJUUVRQTktMSU9PT09NT05J
-REVMTVJSVFRSTUlKS0lQTUtVUU1MSkxQSENIVFVRTVVTUlNXUEtNT05SWlhOS01V
-U1BUUFJNSktPR05PU05QUlRRSkxOVlNOTE5PU1BUU05TUE1GRkxUUVBNUVRUUVRR
-TU1HRUxNUVdOSktNQ0dJTEdGSEhITE1QSVFUUkhLSk5MS1RMTFdTWFFPS05NS1ZW
-UE9JTUZFRUdLSEdJSUxLTktOU09LTVZSTkRJS0tITU1KTkpMTklKTkxIRUpJSElJ
-Tk5KR0hESk5HSU5NSkpIT0tKTElNUFJJSElITERMS0hARENGR0hKS09RUEpIRUdF
-R0VEQ0pUUUtLREZLSUZNTU1LR0RFQkZJQ0ZMRkNHT0pLSEpJTE1LTU1NUVhMRkhM
-SElUU1JNSU9UTUtJRElKTk5QVFRYUU5QUlNSUUtLTk5NSlRTT0xTS0xKS1BPR0hM
-TFJNT0tMUU9TTlBPTExOUUhGSUpJUVBLSE9KRklOTktDSUpMT1NOUElJSEVKS0pJ
-Sk1RVFZTVFNVV1FOUUlMT0lIUU9SU1FUT0pNTU1KTU1RUVFRVFNPTE1VWFhQU1VZ
-XV9YV1NUW11eWlNRTVVZU1lTVVBQT05VU1JSUkxNTlNUVlJVUVVUUE5QUlRSUFFR
-S0dDSEhHREpOTk1TTk1NUVNTV1VPT01QTVBPUFVWWVJPTlBRTkpISURGR01LT1BU
-UVJRTU5WV1peWFpWTUpQVFZVVVhOSUxSTE5OUU1NVFdVU1VVWVZXU1FUU1ZWTUtL
-S09QUkxTVFFPUk1STVNPUlFNTlJTVFVaUVRVUVRRV1JQUFdVWFdfVlRPSkxPSVVP
-UFBPVFdXV1NXU09PTVNSU1RRT1BNTEdGSk1FS0pHQkFHSENHSktLS0ZHSE1OUlFM
-RktSTUtIR0pLSUpJR0xJRlFMTE9YU1dQUVBWWVpYUlFVWlZaW15lXFlaWFhOT1BN
-T1dWVFlYU0hITVVVVVZNVVVcXE9QU09PTFJUVlplYl9eYVVZXlhQUVhTVlpZVWJi
-Zl1aV2NiWlpiYl1gXWVmaG9aXWJiYFtXZF1gY2tmamBdW2RnY2VkZGZjZV9hYmJY
-WmBcW15dZ2hfV1xfZlxTX2ldVVaOxNLb4ebo6uzt7u7tW19ST1hraGBZWllXVV5d
-WGNjYFdVW1xfZWBnaG9kWltiYmVjZ19iaWJgZmFjX1dTVFRPTk1KSUtNU1NVVE9S
-UVJPVFNQUk1MR0RJTVFTT0lNVFlcTk5YVV5dYmJcY19ZUlVbWFtaW1xaWV9bWVtb
-Y19eU1dbWFZQU1pdXF5dWldYVldPTE9ZXVZQU1BUVlJVVlpdXlpYTVNQVFFNU05Z
-V1xgWVpVUlFPTElLS1JJSElHUVVPS01LUlFLRkVJSklOVFNOUExOUUlHS05RTlBP
-Uk9LT05MVkpMUVVYWldLTVFTUlRRU09SVlhUVk9aWFVXWFRXWFlXWVVSU1BPUFRN
-Rk5KTU5QTUpKR0hLU09QVk1NTEtTV1VVWFZbT1BWUktLS0pJTktJTVFQTk9PUk5S
-VFFTVFdVUE9SXVhUT1JSUE5QR09OS0VHSU1RUFBPTk5PVE9PTVFUTk1QVFNNR0hE
-Qk1NUFFLUFJUT1FTTUxNTlJTTUpLT1RYU1BTT1BRUkpFT1BSTk1OTktUT05VUU1R
-VFVSUlJTUFVPT05PUVBRUElRVExLUlFMSEpIRktVUE1OT01PSkNKTkU/S0xMS0tN
-TlFQSkZLTU9STVJQU1RQTkxRTU5NRU9JTk5MTUxLTU1LSUhMTVBNUExRTUxRUVFK
-TEdMS0pGSEdJSUtLSElOT0xLS0ZBRUxKS0lOR0pKS0JHT1RJSE5MSUxOTFRTUEpI
-S0tKTEpIQ0BBRUNHTk5KTE1NSk5JQzxCR0RGRkZLS0RJUkpJSEVISkxHQENISU1F
-RUhFQ0pKS09PUExKTUlPT1JTTEpNRUBOVFJOTk9PTUlOTktKTVJSTlFTV1JVTkxR
-UE9OUlFVUU9OT01MUlFKTFJPTEpGRkZLS1FQTUlMTElLUExLT0tJS0pKSVBNSklO
-SkZIRkhNUFJNUE9MUlBPTUpOTk9KR1BQUlxaVVJPVlJUVFhRTE5MSlFRUFJPVExG
-UU1XVFZTUlVaVlRVUU1QUlJRU1NPU1lYVVhVVVNNVlVYVVNQVFJSVFZTV1RSVE1O
-T01PTUxNV1tVUlNQU1NVTU1LT05OTUlHSElNSUhLTkpOU1dRVFNVVVZZTE1UVlNT
-UFJUUVRSWlRTTU1NTEtLS1VWU09TUFdVVFJPRUhWXVxaVFVYWVtSVVdQUVFMTlFT
-UVJSUVFUVFNST05VVldaXlRRV1dVUlFPWVlWVlNSUlVYV0xQVVNPUEdOTVFRV1NL
-SUtNT1FRTk5ZV1VPT1FKTFFPTEtRTEtRU1RYUlVSUVJMS1JWVFBMTU9MR0ZHSkdF
-R0hJS0Q/PT5DREZISUxISkhHUUpGTkZJS1FRVUtRWU5ITFZOS05JR0pVXVtZUltd
-VlBXUlVTUk5RWVVaWlNRWFtUTk9PUVVRUlVVTVZRT1NUVlZXWFZTWFhZWVpSUVBP
-T1VXXWNdW1lcVltdT1BQTFFVW1hbamNmZGVqX1VcWV1iYWJkYmhnZmVbZ2RfY15k
-ZFhSXWFoaGdfXWBcYl5bZGpkYmRhW1lXX2NbW11eX1hUXV1gW1ZWW19XUn3D09vj
-5ujp7O3u7u5gXltjY2BjYV9aVlVTV1xjZWxoWmBYZF1gZ2JraW1nbGFkXlxkaWVf
-X2ZrYGFYWlpVT0tJUktMUVZWU1dUS0tLTlNSWFFMVE1ET05KTk9KTFZcWFtXW15f
-XV5eWGNjW2RjZWFWVVpYZ15dXFxfWl1VVFtfWFlXWlhXUWBcV1tiZV1XUElRWlBX
-UFFRUFZhVFNZVVVUWVlaXFlXV1VUWltWXF1TVVdUU1JIS05PTU9QT01MU1JSUlZS
-S0hLTUpNTE9OT01OSklKUVNPT1BQUU9MTlBYVFVNU01QVFNaVE1MS1ZRT1NPVU5U
-V11WXFdVWFVcWl5ZW1pUWFJWTExQS1dRTEpGSkpSUUtJTllSTU9RUVBSTUtLTVRQ
-VVNQVU9PTFBVTE5HQ1BTSk9PUVRRR05NUFFQUE5RVVFVVlhXUFJOS0ZIUFJVUVJU
-T1NOTE5NUVNSTk5TT1JORUxNUFJPTUJDSk5WWlhSV1ZYV05SU1lYTFhUUkxTUVNV
-TEtYTlJQSUtVTEtPT1RTSFFRV1lWVVNOUFRZUFNWUVBLSUxKUE9STVNOSU5RTlJS
-TU1GRklKUllSTU5PSkpLQz9JSUxNT01KUk5OSUtPUE1OS1BQTVNYVFZPTU5FTU5F
-TU5PUExKTk5MSkdMSEhMRUxJTktLTUxKS0tJSUlGQ0RKS0lHRklLSk5JREJBR0pL
-S0pISUdJSkZFRkhIS1RPUFBITElSWE1KTUdJTks+PEJDQ0hKSE5MUEtLSkg+PEJF
-SUdFSUpKTUdKTUdDPkZGREZIRUpOS0tJP0JDSkpMTVBLUEdJTVJPTE5QSk5MTEpI
-U1BNUFZPU1BPUFJITExRVFRSUFFQTkxPSE9UUlROUlhOS0lOUlNISkpFTEtHSU1R
-U1FLTk9QUVNMTUxRS1BFQ0tLTU9PUk1NTU5PTkpSUUxNSE1QTUxHTExVVk5QTk9a
-WlhWWVZTUlVUU1VVUUxPU1RXWVhPTkxQU01VXFNXVVBPVlRTVFZSUFBQVlNQVlJL
-VlVaVFZUU1JVVVVZVlpYVFZWUlVPU05MUlNWVllUUVBST1ZUUlJTU05QT0xMS0tI
-TkQ/RE1WUFhcWVZPU1tZVVNPTVNSV1pSVlBSUU5QVFJRU1xRV1NUVFRaVlJVVVBU
-UkZCS1ZWVFZVXVtXUk5OSk1PT0tLUFlUU1NST01NT05RUVVSV1hZVVFVVVFUVFZV
-UVpVU1ZSTlZTUVJSTFBSUk5OTlFPUlhTT1BQVU9GTUpQTk9RTU9NUlNTUVFFSE5M
-U1FOT09PSUZITUtIUE1PTUpOSUpKRUhNSkdEQkJGQkJEQ0pLSEtMS0hGQ0hKTExL
-UVNPSU9ST0tRUU1QU1NQWVVNUlVRVltOS1RcXlxTU1ZWXlxUUlZVVFVQVFVPUE5W
-W1VXVlVQVFRbXV9aWFVSWlpTVVdVT1daUFJVWVlSWFtaV1hVWFhSWF5bXFhjZ2Fj
-Y2VlWlVdW15gYWNnY2hmYWBkZ2llXGpoZWNkZG1kXmZdYl9dZmBbWWNjZmRhXV5Z
-WlpbX15fZ1ljZl9fYFddZlpghMPT3OPm6evr7e3v71pjW11dW1pfZ19bUlJXXF5e
-aWVqXV5nWlpmZWNdZmdhZV9eYWNfY2NgYmFgYV9UUk5MU0pOTk9UUVRMUEtJTklG
-Tk1UV1VMS0pMT1JSXF5ZW15XVltaXGBbYGVfYWBdXV5cXVpcXFlYVFlaX11bYVxb
-XWldWV5cVVRcXFdZVlZZW1xaZFlNTldTTk9RUlhYUVJRVFtSV1dUUk5RUlVXWlhb
-WFRaWlhSUU5SWFZPTU9PU0tNS1FRUE9LTElMTU1OT0pKTkdJTFFLT0hNVU1MUVJO
-T09SUE5JSk9UU1JRRk1UUVhTU1dTU1lWWlZaVVJXWlJWV1RSVElRUktKRkdOUFNM
-TT9FUVJST05PTU5NSkxRUExNTVNPSFBPVFZWTUxUVFZgT1BRSFBQTU9TUlFZUk9T
-VEpLU1leV1NSU0xLTk1GS01JS0tQV1dTUk9GS0tRVFVRUEpQTUtITE5ST0tLS0NI
-VFFWW11WVFVRVlRVUldVWVtZUklRUFRWUVJUUExJTFJVTU1SUFVSUFVYWVlSU1FN
-S1BSU1JSUU5OTFRYVFRNTUxQUk1OUlJRTEtERkpLSUtJS0xSSk1GRU5JUFNPS1BP
-TEpNS05OUUxJTlJRWVdbUVFSUFBNR0xOVEpLT1ROS0hHSEpMSEhNS1BLSktJSUpL
-TE5KTEk+RUdJSUtKSkVLSUhGQUNFRklKSUdLTElGS0hKSlBUUlBOT09QUlBMUlFN
-Sk5HRERFREFITklKSEZHSE1JRT1FQURLSUdGTUhJSE9QS0ZAP0JCQlFISEpNTUtL
-RT9NSUdSUU5OTVNTUEtFUExNU05RTlBQTUxOS01MTVBTTkhRUUxLTVNSU1FQSU1Q
-UlNPVEtOVFFOVU5MSk9PTEhQUlJIR09TU09MTU5QUk5OTVdRTU1GR01OT1BPVUlK
-SUxJSExJTVJNSkxMTVJSTE1OWU9OV1tYWVNWV1dVWlpTUlZXTVRUVFFPVVZNTVBP
-UldTWVZQTUtWWFtdV1hQVF5XWllZU1ZSWVpYWldWUVJUVFVOTlVUU09XVkxHSEtV
-VFhYW09QUEtOUE9ZVFBTUVFWWlxaVElCPTxJUk5OWFpZXVpVWFVXV05QVFZXSkhS
-Uk9HS0pMTVdWUkxRUlJVU1NVU1BSWVZQUk1QTlBRUVxZV1dWUVFXTlFSVFRPUVRV
-UE9TTk1PUE1NUVpXWFdTUVVSVVVVWFpZU1BRV1dWVVZXWlVTV1JSTUtMTk5OUE1P
-VVVNS1BbUU5NTk5QT1JSUFRWWlJRUVBVV1ZYV1RMRD9CS09LTVBQVE5OTFFORk1K
-Rz5DQ0ZFRUlJSU5NTUtGR01JS0hLSkpMTU9MT1RTUU1QVFlUUk9QVlVUUlRaXFdP
-VVVPUVRPVmBZVllXU1tcUlBVVFJVT09XWFNUWVxcW15dYmBVVFdTUVRUWVZZWFdY
-UV1ZUFZdVlFXUFRTY11kW1taYGJfW1xVWV1aW1xZVldbYFpeYF9bX2ZhZGNbYGpz
-aV5jZWZlY2BfZGVlaWZqYWlmYVxgXF1WXWNgZGlpaGNeYV5bWFttX2edxNPc4+bp
-6+vu7e7uZVtdWVtiYlxeZF1SX1xbV2RdYGVhWGdeX2BkX1pYXV5gamFmZm1rcG1l
-WmNhYFlXWFJXUlFSVVdOWFdOU1RTTklTVFVWUU1PSktOUlZVVVZZXF1aWWBfYFlW
-W2RkY11iXVteXltaWVdeYFtXV1hYWl1aWltaWVZWWFdcXl1SWV1dUlNhXFdZV1ZQ
-VFNOVFFUWllUVlhdV1RQU1JVVVNQU1ddVldWVU9PTlVWUVFMUVBMRkVKSU5KTk9H
-S0tOUkxMTEhIS0hLUlRHR0tKS01RUFJXUk1QT0dRWE9QVFhYU1laWFdWWVpSUlNV
-Uk9aV1VWV1VXWFJOVVFIRkdDSEZQT0hGSURNUlVQUE1RTENDUFZcVlJXU1VMVFJP
-WFxQUFdaW1hNR05XVldVUE9QUlhYSVJWUE9TV1tbVVJYXFZLR09OUUxSTVFRVFFN
-SktOTVBOUFRPT1BRTVBLTk5LS0pKR0tSVlFTWFJXU1VVXFpaUFJSV05RUFNVWFxb
-V05TVlNNUVNUVlRTUUxVW1RUU1ZWU05MVFJSVVROUFVRVVdWU1VWU1RTTUtTUk1L
-UE9LTU5VUFJSUlFQSVBKUFRSVVBTSVBSTVBQRktPUE9RV1dYUE1UXFZYUk9JSUtS
-UExMSE9PUFZXUVZSSExSVU5MSUxHQ0pLSElJUE9MR0RGTElFSktLTUM9Q0RESUZC
-RUZKRUdGRUhTU01OUE1RT0xLSEZJTkxKRk1QTFFFS0tOSUlKS0xER0VFREtGQkVH
-SUpKSElESklKQz5KRURISkxKTUtRUVFOSktQUktJTkhOUFFMSUpKUFFPTU1LTk9T
-UUlMTUlMSEVITlJOTkxLVFZTUEpLUFVRUktMUk9PVVNRUlBKT1FRTkpMTk1ITk1J
-SkpLUU9OSFBOTVBNTUlPVE1ZU1FTU1BNUExIR1JKS0hMSUtPT1JWTEtLUk9WWVdY
-XVxcW1lUTVdbWllVV1dSVE1NUUpPTk5QVlRWV1hJQ1BcWVZPUlpZWVxeVFVZU1lU
-VlZTVlBSUlRWT1FOUFVaWFZMSUpOT01TUFhXW1VRU1NZX1tVUFZYV1xaV1tVU0E8
-PENEQ09UWVlUVFhVVlZTW1NUVFVUT1BNSkxSU05PVFBRXFdVVlZWVFJNSk5SUFRZ
-V1JYVVFWVU9PVVlWVFNSUVRUV1JcWFlWUkxMUE9XVlhZV1FbU0xWWlVVVlFfWExZ
-WVFQVFRWU09TU1NMTVFPUVBMSVBTUFdVUFFVVlNUS1FRS1BQT1RUVFhVWVJRT09V
-XlVSVUxGPTlCRk5PTk5RSkxGTk5NUkpIRktJREZITklNUUxMTk1RT1NOTk5NTUlM
-TEpMS1FPTlRRTklSUltcVlZVT1BTU05NTlVUVVJRTVFZVlZbWFpXWFRSUVRVXFZS
-WFheWWNfXFpVXF1aWE9RUldXVVpdW1hVWF5XVlVUXlleWVJZYWdfWFtja2NeV1la
-WFVVVVdWXmBhYmhfZ2RmYWRhZmdlbWllaGtrYVZdZ11iY2pqZF5haG9tZl5caV9e
-YGJdVl1aVVVbWmJkXWBiiK7H1d3i5unr6+3u7u5sZmRjWVdYV2BhWVJWW19kYWpn
-cGZcXl9gYF9hX2FdYmdhZGRqZmRtaWlXW19gY15VT1RWUFVRUVFOSk1SWFRWTU1V
-U1JOUFNWUVFSWFxcWlhhWFhWW1tYVlxdX2JiZGJpZWJgX1tVXnFmYWljZGdlVlRf
-YFpfXllZV1tfWmFYX2FiYV5cV09ZXFZXWFVSVFNXV1ZVVFVVUltbVlhWWU5QUE5R
-VFpZUlJNTE9QT1pNT0xMUExOS0lKUExPVk5MSEhMUFFNS0lKQktOTVNTUlNWWFRX
-VllOUVFTVVBUVVpXUlJLTlNZWlVYUVVRTlJSWF1WVlFPTlNTUUtQRUhLSkhNTERF
-REZOT01RR1VTUElMWldXU1lWU05LUlVWVk9VVlRYUUlGUldYX11VWFNTU09TTE1N
-TlFSVVZVVFxaVU5LTkxOTFNLTFdST05LR0tOVEtPWlhRVlFSTkpUUFFNUU1MT09V
-Vk9PTlNXWVteXlteUlNSVlNUUVFZXVVTU1JUVVJQT2BcXFVUVFtfV1BYW1dRU1Fa
-UFJYUVJSW1NQUlZaWVRWU05XUk1QVVdRUFNSUVtUU1RQVE9LUFBTTkhMSU1WTlNZ
-U1JRUlFWYmZeXVdNTVBXWVdWVE1KS05PTVFQT1JQT1ZNUEtKTVBMTUtNSklDSEhE
-REJGS0xKS0NIRktIREVHQD0/QkVHSUpKVk1NUElNR0pSUkpNVVpQTUtRTU9KSEVI
-S0xOTk9MTE1PTU1IRUhFSE1OSUVFR0VITFBMSktLTE1FR0ZHS0lFT09QVWhUT1BS
-U1FNSUtKSktJU1FORUZNSEpPT1BNTlBLTVNUS1JRUlRRUlBQSk5NVFNWUE5TWFVV
-TUtNUFJQUFhYW1VPUFBPTktNSUhLSEtMU1BSU05KR0pKUVRRUU9SUFZXTFFUUVVO
-TU9LQ0hRUk1LSlBSTUtITVNOUExSW1BYVFVRUVFSU1NSVVVbVFBWVVJTUE9KVFJS
-U1RWU1RTVFlSV11ZUE1VXl5aWVROV1VTUVpaXFhQVFFOVFpXVFRTVFdXU1NVVVNY
-U1tbVlNWV1dUVFFUWFlVVFdZVVFPSDk6QUhFS09bW1VTVVNVVVFUVFFTUVRQTExO
-T09SUVFQUk1SVVdQT1NWTFZVUFJPWFhQVFZcW1VOT1ZYVFZWT1VQVlpUVVlWU1NS
-U09UU1FTVlVVVFdeWlZWVlZNTE5UUVlVUFFQVlpSUFNLSU9ST1JUWk9PS1FTUElL
-WVhYVFJTTktFTk1WWFJLUFVVU1BSUlVUVlNMT0xLQkZITlFQUlRSS0xVUktJRUNH
-TE5LRkdGSk1NT0tNV01QTk1KVFdUTEtTTVBPRkpKTEhKUk9MUU9OT1FPT1BOS0tV
-VVpXXllOTFFVVFhaW1tXV1dVVVtdWFhYU1VdYVxdYF9ZWVpWWVpdW1tfV1leWVpd
-X1tUVVVeV1ZSWl9nYWBVVF9iWlNdV1ldYFlXYVlXXVplal9jZV1gZGZuZGlsaWhm
-YmZpYWVoZ2BfYGNsaWVjZ2lmYFpiaWJjYmNlW19cW19ZW2VoXWePssfU3OPm6Ovs
-7e3v7mNpY2BfXVxbX2BdYGdcYmxjYmdkX1laYWJhXmhjZ21kY2VsY2NmYWVkaGZi
-ZmBcYFlTU1ZSUE5NTEtOUlJXXlxUUE9WUFFQW1VXV19hYV1dXVliZGZcXGRmXV1i
-YGRgXVtaWl5dW19mYmRfYWZgY1xYW11hX1xaVVhVWlhgZmFYXFhXW1pQUVNWU1ZV
-U1dPUVJXXl5jX19YUFNWXF9aWFFYWlZYWF1cWVZVTk1UVFFbWVhQUEdPUU5MUVNT
-VlNVVFJSU1NcTUlJRU5OTVhZUkxNTltUUFRRUktQUFVbV2JaUFRYW11fVFFQW1pT
-WVRRXlxZVlVRVVRXV01MS01MSU1MREJCQ0lLTVRbVFJPWVBPT1FPVFFQUk9TWFVP
-U1hTVVNRUVRVV1dZUlRVV1BRUlNOTFBXVlRUVVNWUlNYTkpNVVhRV1FST1NTU1JU
-Q0hMTU5PXWJYWlJUTVJUVFVUUU5LUFNQVFdVUFNaV1NZV1dVVlFUUFNRU1VUVU5S
-Vk5TVlRTW1xaVlFSVFhWWlNSWFVXYllaXVpeY1ZXVVFYVlVRUFBSU1RXWFRSU09O
-TFJZUVNWV1tTTVVSTU9QR0lNVlZYUlZOSlRcW1tYVVlYVktMU1VYU1tUUFFUT1RW
-T1FOT1RSUVNPUFJNT0xQVU5MS0tEQEFEQ0NIR05MR0lNTUdEQ0U9Ojs+RUdLS01M
-S0xUTk1JS0xNUlZVU1RUUE1RUkxPS0xOTkpNSkpNRktNTU1LUEpKSkRGR0dKSU5L
-S0pKSUpNSk9KSk5PTVBNSk5LS0pTUEtKTVBMTk9QS1RLTVFNR0lGS1NQUFFOU1JS
-Uk1QVFlXUktLR0ZNU0tOUFNTT0pRWFNPTk5LTFFYV1ZYWFRQUk5OVE5KS0xHSkxQ
-UUxSUktFQ0xTV1NVVVBSUk9MUE9ST1BOTUtPTFJTS0xKSE1OTkxVWU9OUVVUTUpN
-S0tPUU1MVVFSUFlUU1RSVVFTS0hOVldOU1ZlZFhZVlJbYFpOT1ZgVlhZXV5XW1xi
-W1ZcWVdYTU1YVldZVVZVU1VeXVBNUE5PVFdTVVNOUVJRUlVZVVZXVVVXU01NQjw/
-RExPT1dYYFlYWFZbWFZYV1NTTFBSU1JTUFJUTFZRTU5UU1FTUVRUUk9SVldRUk1N
-VFJXWlpRT1BMWlhVTlJUWVtXUVJTTlRUU1JVV1xXV1ZXVVdiY1xaVVRPTlRXVFRU
-T1BXWV1YVFRWUlBMTEtNT1JRTFRUS1BTVFVYVFRNTFxWVFhUVVVYVlpYWF5cWllS
-TFBNSExLSVFWTUxKT1ZVTk1PTklHRkpOUExDQ0dJQ0ZHS1BPT1FPSUpRV1ZNSE9Q
-SlJOSk1GSkpMUU5QVlxUTVBXU1JOUE9QTUtNUVhSUFFQVFZaWmFTUU9QXVlVVVRV
-Vl5jZVxZXmBgYF9bWlZeY2RiV1VXWV9aWVpPU1VcXVxdWWBeXFVaXF9iZV1XW1tb
-Yl1dXFZfY1lgVVhkX2ZmZmJla3FjamliY2hoYV1fYmReZGlxbWpkaGpaXl1gZGJq
-cmtfXF1fY2JiX2FjXpayxdPb4uXo6uzt7u7vZGVtaGdoYVxeWlxgZmFuaWlnWlxk
-U1pwWFdeXmhrbWpuZl1jZWpqYmllYGBgYGJmY1dXVExLTldcUFVTU1dXVF1WVFNT
-WVldXVtgXF5aVVtfXWBfY2BdYmNgXl9dZGZbZFpYXF1dYGdlYmBdY2FeY2VmYVlg
-W1peYWVZXFtdY2FeW1pYYlpUT1JYVlVWX1ZWWVlVWVpZVlNWVVNUX19XVVBTV2Jc
-XF1fWllWVlxVXV5kX1dVUFFXXVFVUVRYW11fVlVVUE5RU1BSUlBISVZRTEtTSU5R
-U1VWVFBOUVJeWllVWF1cX1pbWF9gWF1XXl5ZXVpXWFpYU1VWTktMT0hMTUtMUlRS
-UU5MVlhPTk1ZYF5UVlRaT0xRVFhXW1hPVVRZXVlXW1ZSU1BRU1JWWFdbXlpSWF9a
-VFlSVU9SUFNdVVFUVVFRUVRXU1BQUFFLUVBTWFtiXl5hW1VQUFtWUFBVVFBPUFNV
-WFpRWVpaW1hYWFRVVFNOS1JRUFBaWFJYVFJRUFNZVVhVVEtTU1RXWVhWVVZcXmJc
-VVlfXVRZV1xYV1VWUFFVUVNTTkxSV1NRUU1UVFZXVVNVUVNOUE5LTlVdWU1JT1NT
-WVNYVU9TV1pYUlJWWFVWUE9RUVJTW1hOT1RPWlFVUVdSTlBRTU1KT1BVTUhDR0VF
-Q0VBTEpQTU5IREdNRkBDR0dIR0lHSkxNSlBOTkpCS0xQVFVSTU9OUlJNTktPT09R
-T0tLSVFPSU1KTk9NSUtFREdFRElKS0pLSktJR0xLTk1SUFFTUVBIS0xPTlBLS01M
-S0pOT1JVV1VHUEtFSUxOVVJPVVFQUFdST1VXTlJUV09NUU5SVVFUUVdWUlFRUVFS
-UU5LVFVaUlNYWU9NTU5UUlJOS1NMUVJVV1JTVVFWW1hTU1RRUFZTVFFUUk1JUFNS
-WFRPU1JRUk5HTlRRUVJTVE9RV1NPSEBUWE5GR1VXVVZXUUxVVVNYVExGUlVPUFhd
-YVlUVFBVV1dZV1NYWVxgWlpeXFtaWmFhX1tYVlhdWlJTVVZSUlhWU1BSU1ddVlZW
-V1FUTlVZVVFVW1ZVU1ZUUlZVU1dLQj1CTU9UW11cYV1bWltVWV9bUk9QUllTUVJQ
-TU9UVFZRT1hUVlFTUVNKTE9TW1RQUVBTU1hYW1hXUlNcV0xLUFVZUVlaVE5UUVZa
-WVdXW1xbXFlbVllcX1JWV1dUVldbWlRUVldUVVJSWVxWWFBSS0xNTVJWVFNQT0tT
-VVVUUV5WWl5YWl5WWlNXVllgXFhbVFBSU1RMS09HSlBQTVRNUFBTUFJMUFBMTE5Q
-SkdGQ0FDREVESkpMR0lKS0pJT0pKR01TUUtQT09SS01OVlRQVFVZXVBQUFVUV1RW
-Vk5KT1JTUE1WWVZYVVFSU1ZVV1VSVVdaXF1YWltaYV9mYV9hY11bWl1dVlFRU1ti
-YFVVWF5fW1hWU2BhW1RWV2BaYWRcWVNbXmVZU1ZoZVxcXV9dYllYZmZoa2RfZWNi
-XVtgWVxfYF5jYWVlXmZhWWFiXFxganJnXWtiXmBcZmBgYmV/p7TC0dng5efq6+zt
-7u5eY2JeZGdmXVpZX2VgXWloaGZlV1NhXVxUU1ZUXmBzb2hlYVxeaGhxbWplY2Fe
-ZGVhWVFRS01UWVVSWFVPXWBeWFtgV2NgY15bXl5VYGliXltgZGNea2heYmhlZWFh
-ZWhaXltVYFxgZmNaU1hcW1lcX2ldXV9nW1tkXFpgX1RZW1liX1pTWFxUWFVRUFZS
-U1dTWFRUWFtVVVRYWlZWWlxgXFtZV09XV15hW1lkXV5aW2FfXlleWlVQUVNUVVhS
-WFZYUFFUWUtPVFZJSEZPT1NJUVlVWVNVXGJdVE1PUlNXW2FeW1daWF1bYWlkZmFY
-XV5dWlpXZlxXVllMSkpKSUZLU1RTW1pgU1BRUE9QTVZcXFhWXFxbTVFQW1RZXFFN
-WFtaXFRVVFBSV1hXXFpaVVZUVFhSU1ZXU1lSTE5OVFRWU11dV09JVFZWTlBUUVdS
-UVFXWltUVFldXFZYXlJWWFZXW1RTWVZbVVZTXFNYXF1YW1JQUlZVW1dXWVVTUlBN
-T1dVUVFVV1ZNUFBMT1pbWlthYWFiXlpaWVdTUl5XVFtcYltaVFZTVFBRT01TWlRS
-UFdUVFVRU1JVV1NVVFJRUlNWVFJNT1VRVlJOTFJTVldUVVhSWFxPVFBVUk5aWlJQ
-UlRTUlBVVUxRUk9UTEtPU1BMT0pKSUxMSU5KTFFITEtMREdLSkdKRExLRkpSU0pK
-TlJOTU9LS1VVVldPTVBKTFBQTk1PUFFPSElLTE1NTElMT0pNRkdKSEVLTEpPUU5I
-TFBLRk5LT1FSVU9MR01ESE1MTUtJTUpPS0ZNSUtPSktOT05SUEpMUlVVVVVUVlVT
-V1lWU1JPSEhKVVpWTE1OVllYUE9TUU9SVFdRWVZUVlpZUFRZVFNSS1VZVVVaUVtW
-V1hXVFdZVl5cYVNRVldbWVdZU1FVVFVRW1pSSU9bVE1OUlVQT09RVVhdWVJNTVRZ
-WVVWTlJOUVBTT01SVlJVWE9RS1FSVl1ZXFhUUFdQVVlfX1dcX19hXF1hXF1iZ2Bg
-X1xbUlVjXVlTUlZVWFxUT1hZW1FSWl5eVE1QV1lSWl5aWltZVFtXWFNTVmBVS0xP
-VFhZWV1aWGJYVllfXVJRSU1QS1RQTkpNWV9TUFldVFlWTE9RTlBRUVBQTlVSUlJR
-WV5XUlhaWVxZVFBPXlhQU1NTUVZQUVZVWFlaVFNcV1VTVVdWV1hTV05PUllcWlhc
-WVBaW19bXVdTUVNTUFRRV1ZRV01PUUpRVVlZVlFSWVpYUllXU1VaZF5WUldTXE9S
-Vl1SVlBJTU1bVVRKS09STktJSk5PVVBNSkdAREVIRkdIRUVKSU1LS0pITEtIU0xO
-TE1ITE5WVk1ST1NSWVlbU0ROUlNTS0tRVlBJT1JRUFBRUFBVXFVQUlFQTU5YV1VW
-WFhZWVlZWVpdXl9cWVxkZl9YZVlcZGBhXVVWXFlaXlVOVVZXT1BUV2JcWFNUT1FQ
-UlpYV1lebF5nWVVgZV1YYGJgZ2ZhZGJmYmFZXWFYVVddXltYYF9YYWRfZnJnVVdV
-ZmNbYGl2aV51l6WorL7N19/k6Onq7e7u7l1bXmlkbGhiXVluZGxsY2FjZWBXX2Jo
-X1ZbZWNhXGVmYGZpbWllZmRmZmxjZmZnYFlaXldZUVVYWF1hYGBdX15gXVlUYF1l
-Z2FaYWNjb2NiaGJhYF5nYWBcYGZoXFpaZGNiYF5cZmhfZWdfZFxla2NoYF5fYGBo
-X11aXGBlaV5gYFpbW1deX2RhW1RTXFpaUlNRVFJVU1dhWFVUVE1aVFdbVllTVFhX
-VFhZVlxfWlJTVVdYVVpcUlBXU1ZfXlBTU1FTWVZVWVlWWFdTTVNTWk9UVE9YXWFX
-UlhYVVpQU1VYYVlXWFlfVlNaVlhbX2JcXVpbYl5dXF1UVVFSTlJYV15VVFdXVldW
-V1NJT1RTVlpXWVhUWVlWU1JWWlVZW1tZV1ZTVVNQT05UVFJcU1JSUVVVVlVVU1dU
-UE5NUVdZU1tUVExRT1xQU1JVUk5RVFJSVVVXXllcXltbW1RcV1dWVVhbWVZTVFBR
-U1VSVFdUXlhVWVZVU1FOU1hZU1JOUU5SVFFYVFlWU0pLUE9NT11mX2JfWFxcXGNd
-WlpZXFpbXF1hYFhYVlZOS01QT0xUUE5PVlhYV1RaV1VXXFlYWVpYV1FUV0lLU1ZW
-WVdZV1NUUVZOXF5XUVVVV1NYWVFNS1RRUVBNTU9KSVFPTUxITFBOTE1NTkpNTk9L
-Tk9PSk9IT1FJS1BOT0lKU05JTkxSUVBSUEtNTlNUUVVSUFNOTlBRUVFRV09QTVNK
-Sk1QUE9OSEZHS0pGS0dJTkpHTVNTUVBMSUxLUE9NS0pRTExHRD5BS0pJUUpKSUxG
-RkhJSEhQUU9OV1hXSkxTUk1PT1FQUFRVVFJSUE9LTE5TVlJQTFJPVFVYVE5OUEtK
-TUlKTk9VVFVQUldWVk9PVVRQUlBVWVxdVVlZW1ZYXWRdWFNbVldbVFZWVVNXYWNZ
-UkpKTVNTUU5TUlJWU1JWW1ldWFFTV1VPVVhWVVVOVFhcVVdUSU9bX1dOVE5RU1Bc
-YFhbWFhUW15bX1daZmRmW1NUV11lZV1YWlNZXl1dYF1dW1lcU1pWUFVSUFZYV1ZT
-UFVYT1pbVldZV1NaW11fXVVaWVxVVFdeWlpXWl1YT1ddXVhVUE1NV1pWUFBSVFtf
-WlBTVFleV1FSUU5cWVBNUVBOUVJUUFRXXldWWV1YWltXVlBSU1VdV1JUWF1aV1JL
-UFhVW1xVVVVXWE5OV1RRUFZUU1ddWVZSTlZZYlpeXltVWVlWVFZWU1BWUUxOUlFR
-WV1YWWlUUVlJT1FNU1hbWlhVUlFNUFdYV1RRUUpKSlRWU1BQTE1RUk5OS05OUVNO
-RkhISEtCPjxARUlESUtMWEtNSUlKSE1QUUhJT1NNT0xQVVJaT0xNREdPTk9QTVRL
-TkpMTVBWU05OTk1VUE1RUE1UV1BPUFFVUldYW11WWFtdWFZbXlxhX1haV1xaU11a
-XVJXV1FSUkxNS01QU1BVU1BXWV1TUEVMVVBfTFRjXFpaXlxoY15fXlRaW1xhY2dv
-ZVJWWV9XVF1iVExTXVhZXGRhVV5XVl9jYFtlcVpohKSfmJuqvc3Y3+Pn6evt7u7u
-Z2RmYmxjYmhdYWNja2tgVV1pW2BmaGFdZWJoZGJpbmVlXV5oaGphYWloZmlqaGJg
-WGBjVlpYWVhZYmRlZVpiYmdiVl5oZ2hfYV5jZ2NjY2NqZ19dVVxtZ2NhZWRpXmFj
-YV1ZWVxkYWJfY11maWZpY2ZjaWhhW2FmY2VfXl1dXlZdY2BkaGNhWVpcX1VTWFpX
-VVxUUFFUU1dcXVlRVVddWF1PTlVYWl5ZX19ZW1daYVpXVVNXX2BjXVVVVlZPVVNS
-VlZRVVhfW11bW1pWUVFQV1NUV1ZUV1RbWFNZV1VUTlhYVVFbVFdYVFJPVF1fWlpW
-X1dbVltSVlFRV1ZYVVhVTU5TWlVXUVlYUFNPU1dRU1dbXVRSUExMUlVZT1BaXWJc
-UlJUVlZVUkZWXFlcWFZQUE5OTlJWXU9TWE9RTk5PWVRSW09PV1dVWFtVUFdZWVZY
-U1ZYWVZcYlthY1lXVldWVFdTWlhZUFRWVVRVV1ZZWFJRWVpXT1FSUldZXlhOU1ZZ
-W1ZVT1BUUk5LT1NcWVdXWFpcW1lWWlxTWFlVW1xWXF9bWFVaWlNUUktQVVpWUEtQ
-VldaWVRUUlpfW11aWVRYU05ORkxcVlhaWFlUUU5UWldYWFpTUFhNS1FRUk9OUFJM
-S1BOSkVKTU1RV1hTT01LSUtPTk5SUlBPTk5VVVFNUU5NT1NUTUlNUFBUS1BWUE9P
-UFBOU1pcWFFPT1BZVFBTUkxJSU5SVFBSUlRMT0xHQUZKT0xCQkRDRUVNT0xMSklM
-SUlOT1BOUEhKTERCREJFSUpIT09LS0dJSEpKSU5MUU1PU1lWTkxLS0xLS05NUExU
-VFBOR0tTU1FUU1ZYTVVXVVJUUlRUTk1OVlFPT1dcSUpRUUlKVFNWVUxQV1pYU1pl
-WlhcVllcWFJUVFVVW1hbWFhTVVlcX1RVTkpOTE9XVExOUlhZWFRWV1lWTFhjYGVY
-W1RYVU9OVlZbWVlcW11dWFROU1hPSk9VXGVeVlNWVl1gWVFWXl9TU1dXWl1jX11a
-WFxhXVZbYVxcXV1VUVRWUFdcXllZU1dXWVhbVVpVV1RVVVVaV1laU1dcaGFYVlZW
-WFhbTFFWWV9cYVpVUFlaXl1VUlRRVFhSTlBOUVRUU1lWUFVXVVRSU1JVU1BVV1dW
-UVVUVlNVUlpVU1BXWFFKUVNXWllXUEZNTlhXV1JXWU5LS1BQUE5PV1FTRk1UWE9N
-RExWUFtXUVpYVlFOT1FOUVVOVE9GUlRYVGBOUk5QU0lMUk9PT1RSS1BGSlJTV1JO
-U01SVEpGTlJQS0ZPUU1NTUxMTkpOT0dIQkRFREVDOz89RklIRUpLTUlJR0ZITExO
-SkdHUU1PTEhSVlNJS0tMUVJQUlBSTUtES0pKSklQTlBOUFdRTE1NTlNSTU1KTU5M
-TlFWVFFRUlRdX1ZVXFtQTVVUR01PSEtKSk1OS0xKSUxJR0tRUU5OS01TUFRWS01R
-TUtMUF5TT1hbV1hWVlFPU1BUVVdiZWNmVFpjZ11cXVpRT1JIS0pEUlBMT0dMWVZU
-X2ZubIebpJB/jqzAztff4+fq6+3t7+9mZmljX2ZjY15nZWJiZ2hfYWRlamVeXVlm
-a2lhZ2RmYl9fXFhiZ2duZ2plZ21hVGNhaGReY11kYF1hZmxqZWNdY2JlaGVfYl9g
-Y21tYGBnZF9iZmFgV1tiY2ptZmRqZmRcWVleXl1mZGhhYGdkbWtiYmJgYGFfX2Nk
-YmBZWF5cX2BZWlxcWWJpZlxcYFleY2BXV15dVldRUlVWXVlWVVNTUlJXW1tSVV5g
-WlZbXGBbVlNbVFRXVVxfWlJNUlJPTkhLVltWWVxaWlVXV1hRU1VeX1paW1lWYVxT
-VFpUUFdbW15cWVFcVk9ZS0tNWFdcWltYV1tgVVdhU1VYWVRWWFRYVFNaX1haWVxU
-VFVYWVtcXlxZVlJRUFFUVlNOV1RXWVdYWVhWW1NRUFNeXFZQWl1RVk1QVltSV1JS
-U09QT05XXlhYWFlZWlRWVVZUU1NQU1daW1tYXFVWVldcWlJXV1RTV1xcWVtUVlhU
-VFdbVVVPT1BVV1NZV1xeX1pZWlhdXVJYV1dXV1ZRT1VdVVBaWl5bW1dXV1xbXFhf
-XV5bWldYXVpbVFVTUlRSWFJVT0tLTFBTVlRWVlNQT1ZSU1diVlNRTUxNUVRXW1hc
-YFpWVlleXVlZV1ZWWVdZWFlWWVlNTktOUkxJR0tSUU9PUlNUT09RUFBVUlVRUFhS
-UFJRU09OUFFMT1BQS0tMUE5MTlRST1NWU05PUlRUVFVQVlVRUlZST05PUU9QU1BP
-TVJMSEZFTkpHSkVJTUpMSU5OTEhLUUpGSk1MT05LR0hISkZHTEdCTVBKT1BKR01N
-UVFSSEhMVk5WV1ZRT1FSU1JLR0lJS1JOUU5LUFNTVFBSVFdTVV9SV1BPVE9PUFJS
-WVdXVVRSVFZQUE5SU1FWSEpRV1hbW1tbXFNVXmBgYFxVWFNQVFdVVFZWWVtVUlZX
-UFNRVFZTS1FZVFJRVFhTWFFPVFxgXl1cUlVZWVJeXFtcW15cV1hbYF9YWFFUVFFT
-VFdTUlNdWlRYV1RYVU9UWVhTWFVZWFxcWl9VWFhbWFxbVldUVlVZVlVSVlVXXWFb
-VFRWVFNPTlBOWFxYU1JRU11aV1RWVldYVlRTV1FTT1VbWFFTWFVRU1lZUUlNTkpK
-S1NRUFBOT09RTlNSWldXWlNSUlFQT0xMVFFGTlRSS1JUTk5VVFNPVFNPVVVMP0FI
-SFFOVE9NT1RDUE9MTFFMSUtIRkZHSkpGRVlORkNHT1FHR05NTkxTU0xMSktLTFJP
-SUVHRkhFQ0ZJSUpOTVBRWkpLTE1OS0pOSUlKTUhJR0hHRktNT0pHQ0VDPzYzOjo5
-OkNGREc8NkREO0BEPDxPSElIQUNOUkVKS0dGR0dMSUlKTE9OTUxDQUBIR0RERUBC
-RkJAPklOUE1LTEdCSUdGS01HRUtGRklHSUtOUUxSS09TX2piVFRZTUhERkU9Qz1C
-RkROQExQRT5JTkc/SkpHRFBKRkdGR0dGQUFHTUdNUF9mVFVOR0xRUUpKUFpcWmZ0
-boB3aXqCenhPRT1BQEA/Pz5BPz9DSkpPb4KEeoSKgo6Yrb/N19/j5+nr7e7u72Rn
-ZWZjWl1gX2ZiZGRlYlxiaGlhYWJaWltkbGxqZFtbXFxdYGBmb2hmZWltZVlcVmNx
-b2dkYWNpYV9gZmltaWJgX2NvcmthaWZhaWFgZGFgW2RpbWlkYF9jZWhoaGdrbWdi
-XmBjYmdoZ2NgYWZjY11eY2VjZWJjZmVnZGBcV1lZXltWXVxcYV1mW1ZbXllXVFZd
-YVxcXlZTVlZVW19cXlhcXF5ZWFRVXFhWUl1cXmBZW1hZV1RYXltTWFZWUlRPVE1M
-UVVfXlpVUFRSVk9RV11aXlteXFNXWlVbVVtbYFtZWFVZXFxaVVVVTVNjXFtaW1Va
-Zl9fW1hXU1JRUlpbYF5bUFZcX1pWWVdTVVhVWVtmXFJVVlpUUFhWWVZYWVhaVllc
-Xl1XT1JdV1ZVWF9dW19YWlRbXFVWTk9RUlZTUVdYWVVXU1lQVVlVVl1UVFNaWl9c
-WVxdVVFRUVVWWVFdXVhaYGBcYFlbWldQV1RaVFJQU1ZbU1RVWVZcX15WVVpaWlRW
-WFFUV1VSWl1UTlhTXWRbVlNZXGBeXFpaV1ZUXmFgYFxRUlRWVFlfXFNOTE1KUVNZ
-W1dVTlRRUVROV1VRT1JOUVdXVVlYWF5cVFhUXl9cWV1cV1haXF1ZV1RUVVVOSUpS
-Uk1ST0xMTUpKT09QUU5SUlJRU09QTU5OTk5QUExPTExPTU9XTk1TWExPTlJQVVtV
-UVNQTFFWTlBYW1ZVWlZPUFFRUlJUT0xKSk9JSUpERkdMSUtUUEpNTEhLUVBPVFBO
-UlBTT05KSk1MS0RNTklKUE1LTk1KS0xKTU1MRFBXVVJTWVZTWVRPUU5SU0xLSlBM
-TU9NUFFRTE1NUVFOWWFVUUxTVVZWWVhZV1dZTlFUWFJQUU5NU1BXVVFXWFtZWVtW
-UVNRXWBaV1VUU1JSVlJTU1NTVVxYVVFWVVRaW2ViZF9YU1BbYllZVF1UV1lQUU9U
-UllbWlhdXVteX11iVF9eZ2JZWU9QU1taV09JVlpaXFdZVFZNWFhbVUtSUldXVVda
-X1pUV1xeXltTVlFOUlpaVk1LUFJbY11VV1NSUVFSV1JQWllOVFFWW1NNUVhPU1VS
-SlVVSk1NTlJQVFVPT0xPVE5PR0lOUUlKUVNNSkpNSEhMVVBQT0xNS0ZFTk1GR0ZQ
-SkBDSktDS0ZCPUNISkhLTEJFQkJDOzlBPUdLTkhMU0NFS0dERkdBRU1DQEI+PT0/
-Oz1AOj5CQD9LQ0hOSkdDTkhGSj9FP0Y8PEM+QkJBQTY2O0E+QUlGRUpOS0hHSE5H
-S01NRUJCSUhGSkxLPDc3MjMzLy4rMS45TU9EQDdBOjU1PDg+QkdCRUNFTF5rVE9W
-QTg3PTs5OD9ERkRDPTo5OTo+Pjc3QD46Nzo8TkpISERLTjw4QEFGQ0A9SVBFPkFH
-SUlJSExOUlteXGFSUVZXRUNCQkNFQUNDP0ZHVFpHQkFERkNERklHSUdES0lRSERD
-SERDQ0RKXVdNUE1HSFFTS01MUk5MUlpWX1ZPWFtdYk5IR0dHR0hHRkVHREZHR1FZ
-XmFaY3J8j5isvcvX3+To6uzt7u/vXWN2cm9kYGlnZ3BqYl1YWV9gW1xiZWRnYmFm
-aWplYGFbX2NjY2BeY2tiY2huYV9eaGdyaWFcXmZcZWRiY2ptamdmY2VqcGpnX1xg
-ZGVgZWFsYGpmYl9ka2Zka2x0ZWNiZWpmXl1kZGJjZmJnZmpraWVgZ2RjYFxmamVg
-Z2NhXGNkaWNnY2NkXFhQWVphXlVVW15YXmNdYllZWFxcW11cYFpbYl1ZX19hYGNk
-Xl9gX2JgXFZVT1haWVReXmNiWFlWVFhZVllbWFdZWlliYVpYWl9aYV1fW1ZVVllP
-WFteYGFcX2hhX11gX15gZ19YX1lYV1ZZWVhaW1hSTlBYVlhdX1xWVVNOTElPVVlc
-WlRVVVdTU1BTV1pXV1dSV1VRVktQVFhaV1JPVlpZWFdbX1tVV1RXVVVTVk9VVFZX
-UlVTV1NSUVVbXl9cWlxTWldYVVlaV1VcZmJdWFhWVFdWU1haW1tYWVtYVFhaV2Bg
-XlZNTU5VWFlXVF1cV1pfXmBWU1JSUlRSU1VNTVVUWFlZWltZXFdRWFpYXVxaXVdV
-W1NWV11aWF5XWlpbV1pWVU9PUFNRTkxSWFRZWFhdWEtXVk1UUVJbXlVYU01UUlFT
-WmJdWVVeWVdXWFNXXFpQUldWUVJWT1BRUVFQS01NT1JNTExTV09ZVk9OU05NUU5P
-T1NRUlNFREpKUFRUVVNSVFNVVFZVUVVUVVZRVlhRTk1TVVZXU1JUT09OTFJRUlNR
-UlJTT0tNU1VVUFRSTFFQTUhKS01KSEtKTk9TUE1MTUZITVJOSUdQUE9OUE9WTVFU
-WldST1JWUk1UU1JOTE9QUlRRUE5PT1JPTUpOTlJMS09QUVBLVFVSTVBRUlNRUVRT
-UFVPUlVUVlpUTktJSk1UT1RUUUtRV1VPVVFTV1dUU1ZWUFJOUFVNSU5UV1ZQU1VW
-WFhZWl9eU1RTVVhUSklTTE1XWFRNWVpWSVVSTVJPUU1PTlBQUl1eZF5dUlNUWVpT
-VE9RU1ZRTU1XUlRNUExPVUlNSk1PTk5PVFFQVVZVV05MTE1XW2NYTkpIT1BOSVNY
-UElJRUxQTkROSkBISVNRRU5ITENFRUlCQkFER0pMTEtFQ0JCSklORUdDSEJBQUFC
-SERAOj08QEdFP0A7QkVDRERRU0dEQEFGO0VERTxHRkY5NjlASks8Qj5CQTY6OD00
-QkQ6OTpBPzk6PT48NjY4Qjo2OTQ1NjQzNjk3NTk9Ozc7Pj07PDo/Ozs5NTY4Nzg9
-Q0Y/Ozw5NDM0ODk8QkFCRUtLYEc+QkJLTUxCP0M/QERFY19PQT9AQEVCQ0NBQEhb
-eG5CQkVCPTw8PkFGSURHRUpYZVtSWFtMQ0REQ0NDRkRJSUlHR0dGSUlIRURFSkZF
-SUpKRkZISEdGRERGRUdHR0dJSUhHR0hISElGR0dHR0dHR0ZHSkxIRUZGRkdGRkZH
-RkdISEhGRkVGRkZHSEhJSlJMUEtLSktKSkpKSkpLTU5OTk1NTU9OT09XWFNVWFla
-W1taWVhZVlRTU1NWVVVVVVVWVVZWV1pbXmNpcn+TorTD0Nnh5efq7e3u7+9mX2Vs
-anNmY2JmbGlnY1dcYF5gYmpmbGlfX2dlaGloWVxgZWNhY2RdY3BnYGVZYGdjbnNs
-ZWRoY15eXV5gZmtta2ZhbW9ycmhjYl5pZ19eYmxraWdodG5laV5fYG1mZ2tpZmFj
-a2htZ2tsaGZkYWFub15bW2BfXGBhZGRlXmJmY2Voa2FgaGhmUFRYYGRmWVRZXFhe
-XVdWYV5UV1xZYl5aWVxbW15fYVhZYGVgXVddYGFfWlNRV1liX2FbWV9ZWVVXU19g
-WF5ZWFhcWWFjY2BeXVxdV1hZVFZTVVJbX1pcZV5iaWdhWFtfXWFiYF5aWl1aWV1Z
-VFVWV1RNUlZTXFtXX2BVUlJLSFJUW1RPUlVWVVxbWFVWVmBWUlhUUk9VV05RU1lX
-WFVWVFVTXl1bVldSXFZaWFRXVlpZVFlfW1ldVVFVVlthYVRSUVZVVVlWVFRYV11a
-Wl1dWFlZVVpcX1hbVVRbWV5dW2FaYGBYW1VNTVJeWVdUWFtiXl9dW1BPVE1OVFNY
-XFlaUlZZYl9dYF5bW1ZUXGBbWmFfWVVeV1ZUU1heWVNUU1VZV1NOVVZVWmBWVFdc
-XFlUV1ZYVlZWTlBSUlVcV1NRT1VYW19eU1hSV1ZaWmJbWF5gWFRVWlVYU1RXT0lL
-UUtKTlhaWFZJT0pVUFdUT0pQWFBQUEpQUU5TTUVIUFJNUFdVU1ZSVFdYUk5PT05Q
-VU9SWVRQTVFXV01STFJSUkpLT1ROUVVWVFRPS01RUlROTFRVUEVRU05NT05NR05R
-UVVRREZMS1NOTUtGR0pQUlVaUk9SUFVXVVZTUlVOUVJNSEhHS0pQTk1PTk1OU1RO
-UEhMUFJQTUxOVFVMSkxOWldcSkpGUl5SUFJQVVZRUVZXSlBQSk1VTE1JSE1PTlNQ
-TlJPWFtcVk9NPUtOTFFNVVVUWE9LS01RUlhJVFxOTVJZS0tHSEpZSU5NR0xNRERC
-S0VDSUtPRkxRT0pKU1JLTU9KTlJYUU1JREFJSEBARkdKS0NCPUNFQEVBTk9OTk5J
-TFFPSVBOTUpTTUxKTU1JREk/Nj1GSj1CPTs7PDpAPjw8NzpAQ0A8REVHQDg5QExC
-ODpARVFJPkA3Nzk4PUM5Oz1BPzs8Ojw+OD47Nzg6PEBEQjY4Q0Q/R0BDNzQ2ODY5
-QURYR0dDQkQ+Qk9hYFlNTEhDPT9APUFBPT9FSEtIR0dDQUNDRUVHQ0NDQEJDQ0RI
-R0NERENFSEFDQUBAQEFDRUZIR0VEREVDRURDQUJERURFSElFQkFBRUpMREJCREVE
-QUFAQEBCQ0RNSUVFRUZKSkpNTE1PUE9TUU9NTU1MTE1NTUxNTUxMTVJXUVFSU1RV
-VVVVVFNSUlJSUlRUVVZYVVZXV1hYVlVVVVVTU1NSU1RVV1ZXWFpaW1xbXFtaWlla
-WVpaXF1dYGNjYWFgX19hYWJkZGVlZWZnZ2loaGdlZWVmZ2hpaWpscm1tbm9vb29v
-bm5tbW1vcHFxc3R2dnZ3eHl9fICDh4qMjoyJg4iGhoaGh4iKjIuLi4qJiIeHhoeI
-i4+Unqq5xtDa3+Tn6ezt7u7v8HVfXlxoZGlkY2Nnc21rZV1VWmFcX2Voa2RhZmNg
-YGJbY19icmxtZmJmYWVdYFxdbGZiZmVaZWBjXWBiX2VsbWxlY2hjanJoZWdnYmJq
-aWNnYmhoZ2xsbG1qbWZkY2BcZWpqX2Vma2ZqaGhkY2NgYGVmZWpna2RdZ15oaWpg
-ZmllaWNkbWpiYWZwZWFeXF9eZWFYWFVRV1pdVlpcWVRXVl9aV19kWltfYmBfXmph
-VlBcX1hfYV9ZWFxgW1lTWFlVVFBQTVdcWVhZXVZaWl1eXFdTWlVYVlxWRklSW2Ne
-WVBbZWJkYF1jZl5ZWFhYWFVgXV1cYl9dWVFWVVNNU11mX1tcWWFZVlJRV1RfWE1Q
-VlNQVlpbWFVaWFJZWVlTVVNVV1RaWFdZW2BZXWNfWVhST1FYWFZTUU9UVVdVWmBd
-U1hSTldYWl5hWFNbYFFTVVpVWVhWW11bVFZbWlpZUFNbWVpaXVZUWlhZXWJeWVVW
-XltWWVhaWVhPWFtbWlhVUlFVTkxaZGFfYVFQUlhaX11ZXFhVW1dWVllYXFhYVlJS
-TVRTUVhbTFBUUFNUVU5LT1NWV1pTVVZaWllaW01VUlFXWVpZVlZcV1dSWFNYWldQ
-UVNXXl5VVFRXWllcUUxRVVVSVFFNSUJFS0ZRVldSYFJITE1QUFJQSUVNUVJQTUZM
-VFFPTk9SUk1OUk9RU1RRT1JPUVZOS1dRUFBLUk1LVFlQTk9WUE5QSkdNT1ZSTk9N
-UFBLSk5KSktOTUxSTUdHTU1JS0tJRERGRUpGREBHTVJMT0tKRE5OTE5USk9ITEtP
-TlBRSk1MUUlGQ0M9RURERklKR0xKU05KSU9JRkdGTk9RTD5CQUtGSUVDRUlJT0xK
-TUNCQUJBR1JLR0lNRkdAREZJRklKQ0hMUkdHSUpJQDY9RD9CRUpPTFBESUZLTE5F
-STk6PUZGSEZFRkJCR0RGRD87Oj46OTk2QD5CRT9BQD0+REY+P0NIS0JEQ0BCQkE9
-NDc0OkJFQUdIPzo8OjY3Nzo+PT08PD5QVUZITEZHSUpIRUZCR1dCPTs5P0I+PT4/
-QT47PD5ERERAQkVERkdRXE5HRUxSY1tIRERJT0tIRUdIRkVCRkdIR0ZISUpJS0tG
-SEhFRENDRk1HQUNDRkFBQUA+QkRGRUdLUVFHRUNHRkE9Q0JHSUdBQEBAQkBAQEA/
-QEFAQkNCQkFBQUJCRERCQkJCQ0NDQkNDQkNCQkJDQkJDQkJERERERUVFRkZHSEdI
-SElJSUlJSkpKS0pJSUlJVFRLSUlJSktMTU9PUlNVV1pcXWBjZ2xwd3x/goOBgYB9
-e3l5e31+f39+fXt7fHp7fH2Bg4aJjZCRkY+OjIuJiYiKjI6PkJCRkZOSkZGRj4+N
-iYeGhoWGh4mLjY+QkZGRkpSTkZGNjY6Njo+PkJGTlJWTk5OTlpaZmZudnZ+fnp+g
-n56dnZydnZ2en6ChoqOio6Snpqeoqaemo6Sko6SkpqeoqKqqqqqqqqytr7K2ubu9
-u623ubm5urq8vL2+vr6+vLy5ubm5uLm6vcHEytPZ3uTn6uvs7u/v7vDwgGpnXmBh
-Y2RicW5yaW9ucWZnZl1ZW2FlY2BbXl9kY19YWm1rZGRibGBkYmRpamVdYF5gZWJg
-YmZqYWVgZ3NpZV5iZGRmZV9TYGhnZ2tva2JgYm1zcmxrb3JubXFucmhqbGdwbGll
-a2hkXl5fYl9bXWVhYl9hZ2ZoZGluaGxgZGNaYmJiZV9cY2FgXlhcWVtYYGBjVlpZ
-Xl1hXl1TWVRTW2BgWlZYV2BdWlliW15YWFxdWF5ZY19gW1hYWFJMUVNST1hZU15V
-V1tbVFxiXV5bWFdSXFpaVV5ZVlRcX15bVVxfWlpaa29mY1paVFZZWlpeYV5gZmRY
-WltWUlVZXFhYYGBiXllXW1hbYl1dVVlZV1ZXW11XVldcWmNdWWFbWVhSUVRZXFla
-VmBYYF9eV1JfWU9UVVZWWVZbU1tXY1dVU1BPUVNYWFteXVpWV1hWVlhWU1lQT1de
-WFRSUU9OVlVWVl9gW1hZW1hcXldSVlpdXFhdXV1XU1JZXWBXVldaU1RUVFxcW1Rc
-XlVaU1daXFxXWVRSTUxSYFtbU1NOUE9LRlFXVldXTlFSUlRSUVZWVlVRTVNaVlNU
-Vl1bXFVQVE5KSkVLVVpbU0hKUk9TTEhNUVdRVVVWVlhYWlRPSU9NTlhaWlRLPEBF
-T05PVElNTkpLUk5OUUtOU1FNSEdMR0pPRkhPSk5SUk9NTVBUTUlOUFlNU0lETkpO
-TElKSkpSTkVMSkNNSERERUZITElJRUhFR0lGSlBIRkNHR01KREJIRERERkE9OTk8
-QkE9PUFAR0ZGRT5HRkdCPTxEQ0tGTVVQUVVOQTw9PTs9PDg1NzxBPkI7NzY6Nz87
-Pjs5O0NJR0A4OTY5Ojo7OTo5Oz07QT84MzIxMzU4OTs6PEVCPT02PENCOTo7Pz08
-Pz89OTg1MTE0OT9EQkA8QT1AQENFSEM5Nj1CRD1CSEpESERERkZDQ0A9Ozs4PDw8
-R0dGQ0FEQURERUNCQUZGRkI9PkZJS0pCQUJHS0xFSEJERERESUVFQ0dKRUJAQk5L
-SkVPRD5BQkNDPUBKTkRCQD4+QkBAQkA/Pz4+Pz5AP0RHQkJIbWJORUJBQUFVXEhD
-QkFBQkFCQUtEQUFBQUFBQkFCQ0NHS0ZFRUNFREVFR0VFRUVERUVFRkZHRkhIR0hI
-SElISEhJSktLS0xOTU5PT1BQUVJTU1NTU1RUU1RVVldXWFlZWVpbXV9gYGJjY2Jj
-Y2JhYGBgYGJiZGVmaGlqbW5vcHFxcnJyc3R1dnh4eHd2dnVzcnFwcXBwcnR1d3l7
-fn+DhYeKjI+TlpufpKqvsra5ubq6uLe1tba2t7i5uLm3tra1tLOztLe5u77BxMTF
-xcTDwsDBwMDBwsLDxMXFxcXFxcTDwb68urq4uLi5u72+wMDAwsPDwsPBv7y8vby9
-vb29vb6+vr2+v7/Cw8TGx8fIysrJycrJyMjIx8jHyMrKysrKy8zMzc7Pz9DP0M7O
-zczMzM3Nzc3Pzs/Pz8/Q0M/R09TX19jT0NnY2djZ2drb3N3c3dzb3Nva2trb2t3d
-3uHi5Ofp6+zt7u7v7/Dw8fCCZ2hhYl5dYWVoa3VybGdkYGJebGNnW2htZmJfaGNl
-ZWJqZmtxcmlwYWt0a2ppYmdlYWlpZ2VlZ2FlampjZmtiW2ZgXl9mZFpma29paGtp
-Z2ZiY2tyanR3cXNya2xoamRkY29uaGlqYF9fYmhnc2heWWVpZWdkYmRoYGdlamhi
-ZWNpcW9gXmBkaGZoYFVUXFleXVRVVGJjXGBkXWRjWVdeXV1gX1RaYmdfWVhgX15T
-WGNcZ2VfX2BaWlZSWVJSWGJiVk9XWl9eXWNnYlxcYV1VYF5YYF9fYWJaZGFfXV9Z
-XlpdZGVkZWReXVRZVVRaX2FiWFlhX1lYV1ZaXF5cXmFfV15jYVdVWVxdYFdYXVtb
-YmFhYF9bXGZkWlxkYVhaVlpcTU5TXFZYXl1TW1ZYV1tbVFlZWV5cXFRQT1lbXlpT
-Uk5RVFZSWldZUE9YYmRaW1lUWFRSVVpXU1RRUVdaUlBYXFVYXldYW1lWWVZZXVVV
-VFdZW1dTTlldV1xUVlVUV1tYW1pUUlNaV1JTVVhRUUtIRkNCR09ITFJPTEVFTUxU
-W1pRTFNYUFROUFhXVlFOSk5NUE5TUFBVVVJPR0dOVU5JRUhEQkpJSkBER0VAQ0NI
-UVJQSkdKS0pTUEhKR05HTVJSVlFHTEdMTlRORkJESEhISkxMS0RKQ0NHPURFPDs8
-QENAQUdOUUtDRUpMSEhDRkZDOjpAPEFDRUdJPjc/QUdBO0M6PTk3ODk4Pz89PDhA
-RUtIRTs+PDtBOzxERDw2ODozMzEvMjc2NjY3OjU7NTUyNDg1MDAyNjo6P0VPVmFu
-bF1MPjw5Ozc4Nzc0ODg8NzQ4OT04OT1BQEFDREVFRUJDQD5DRUZFPztAQUZIRUZD
-Q0VGQ0VGRkRDRkVDP0NDSkxFRUdGQUZGREZGR0RDQkRGSERDRkhCRklEQEFBQ0FD
-REVEREFGRj88QEBDQj1BQD08QTw+PTw8PDw8PD08PD4/Pjw8PDw8PT4+REdGQT4/
-Pj9AQEFEQUFAQEBBQEFBQkNDQkNDQ0NEQ0RERERERUVERlFHR0dHR0hHRkdHR0hH
-SEhJSEhJSktMTU9cWFRSUVBSUlNTVVRUVFVVVldYWllZWVlaWVlaXF5eYGFiY2Rm
-ZmZmZmVmZWVkZWZmZ2lrbW1ub29vb3Bvb29xcXJyc3R1eHd3ent8foCAgoKDg4SE
-hYOEhoaHiIiKi4yNjo+RkpSWlZaXlpeWlZWVk5SUlJSXmJqbnZ+foKKjpKSkpKWl
-p6ipqquqqqiopqako6GhoKKjpKWmqautr7K0tri6vb/CxsnMztHU1tfY2djY2NnX
-2NjY2NjZ2dfX2NfW19XV1tjY2dvc3d3e3N3e3dzd3Nzc3d3e3t3d3d3d3dzb29ra
-2NjZ2NjY2dnb3Nvb3Nzb3NrZ2NnY2NfX19fY2NnZ2drZ2dvd3N3d3t7f397e397g
-39/e3+Df3uDf39/h4d/h4OLh4uHi4uLh4eHg4uHh4eLi4eLh4uLj4+Hi5OTl5OXl
-5OTl5Obl5uXm5+fm5+jn5+fp6Ono6err7Ovt7e7u7u/u8PDw8fDw7513bWlhXmJr
-ZG1tdG5sZGRdWVViZmhhZ2lrZ2xxaWVfY2ZhXmNnZmlscGlrdG1mZWNkZGdlbWpu
-bmVfYWNoZ2pcW1djbmdqamJtZ3NtbmtoZWJoa21wb3RyaGx2aWdpaHBvcHhzbWts
-amhsZGJoZ2hiYmVnZGViWWBpa2tsamdpZW51cWRqZVxcYWNkXWFYU1tdZV1hXWRg
-YmBeYmRkVVxfXVtbWFpYWldgXFtTXFJYYWBlaGBfY11gV1dgWUtRUlNRS0pUUmFe
-YWNhYmljamtaXFtbX2BcWmNhWFtbXFpZXFldW19bVVteYGFdW2BeXmJXV15eXV5S
-UlRXUVNZUmJhW1laVVlUXFxWW1haV15eW11aVV5bXV1fYFZhW15cU1pOVlJbXVJU
-WVtWWVtbVllWUFNYVlZVWFRZUldVVVdXVEtUU0tUWF1VUVpgYFpaXE1TVVZYWVlY
-S1FOV09bXFNYWlZQU1BTWFVSU1JQUEhXU1FRTktNS0dWWUtJTlBRVVZQTUxHRkdK
-TFZOSkJMTklRREtESEtEQEg+QUNDSlJSRkJDRUtLSktEQUhNVEdESkxLSExEQkZF
-SUpFRUVIR0dEQD88RkdBPz09OjxEREpITlBUSkFMRj89Qz8+QDk9P0REPERER0lJ
-R0A8QD1JS09JRVRGPDs7Pj08PDo7NDQ0NT84WHtwSTo7Pz9APDs5MjEvMDI0Nzc2
-Oz45OjlCRTw6OjxDQT45OTg6Pz08P0VNSUhCQUI/QUNDRkdEQT48QUBBPz5BREdB
-QkJGRUNBQkJAQklFREdDQkNESUpHSlBKSENFQ0JERUFBQ0VEQENAQEVCQUNDQ0JC
-P0A/Pz49QD1BREdNXFFNTEdBQ0A9PT09Pz49P0E9PT4+PT09PT09PD08PT0+PT4+
-P0BCQEBAQEFBQEFBQUFCRkRBQkJEQ0VERUVFRUZIRkdGRkZGRkdISEhHSElJSElK
-SkpKS0tKSkpJSkpKS0tMS01OT1BSU1RUVFVVU1VWVldYWVpcXF1eXl9gX2BgYGBf
-YWFiZWVmZmdobWpqa2tscGxubm9vb3BxcnJydHV3dnd4eHl5eXl6ent8fXx+f4GC
-g4WHiIqLi4yLjIyMjI6OkJOTlZaXl5iYmZmYmJeXl5aXmJqbnZ2foaGioqOio6Kj
-oqKjo6OlpaWmp6iqqausrq+vsLGxsrGxsrKzsrS0tba3uLm7u769vr7AwMG/v8C+
-v7++v76/v8HBxMPExcbHx8jIysnKy8rLy8zMzc3Ny8vLysnJyMjJycnJy8zMzc7Q
-0tLT1dfX2drb3d7f4eDi4ePj4uPj4+Pi4uLj4+Lj4+Li4uLj4+Lj4+Pk5OTk5eTk
-5OXk5OTl5eTk5eXk5OXl5eTk4+Tj4+Pj4+Tk5OTl5OPk5OTk5OTk4+Lj4uLi4+Pi
-4+Pk5OTk5OXk5ebl5Obk5ebm5OXm5ufm5ebm5+fm5ufn5+fn5ufn5+jo6Ono5+jp
-6Ofo6Ojn6Ojo6Ojo6Ono5+jq6ejo6efo6Ojo6erp6enq6+vr6+zq6+vt7O3s7e7v
-7u7v8O/w8PHw7/Dx8PHxoJZuZ21tbW55bm1waGxhZWZdWlleXWRpa25xcnBpZ19h
-ZlxfZ2pqaW1qaHBnbWdtcnFtaWJhZ2ZtaGBvdG5ubWRqZl9na29rbmpnbmlsbWxl
-bnFtamdpamZkamtyam1rbm1wbXV5cF1bZmJnYmNjYmhpZGNdXGRkZ2tnaGlpZGJk
-amppZl9gY1dkYFxmZ2BZZFxaWVlSXFtfXF9dWGBhW2VgV11uYltfXWNuaVxaXF5l
-ZmNoZmRiXVleXFNVTUlJTVBEOD0/PUlWYGZjalxaXl9hXVpZXFdWWlhbXFxYVlZS
-UVpcV1hMTlhcWlFeXWlbUVBdWFthYmFWTkxOTVdeWmBXVFJWWlFTWE9UWVVSSlZU
-U1NWV1dUSldVTVBQTllYWlxZXFFSWlNPTVdZWFZPT1ZVVk5QTU1RWFtZVVhdVVJV
-UEtHQ0lPVU5SVVpdXVNNTk9YUEtIRj9BTUpIQklWV0xNT09GRUpLSktWSURHSkZM
-RkpNT0Q9QkhAPkBERE1LUEhGQD09PjpGSUhGQjtFPUE/RUtLRzxBQD9DPDc7OEBD
-QkNHSUlIR0lPOj9AOjxDRUFIRDo7ODo3PUA/Qj9EQ0RCPUA+PTs7Ozs6NzpKUkxG
-R0hHQUA+Pzo4Nz1DRkVCPz87O0JJSEtMTEhJRExWVk5LVExEPkhHRkhCQ0NBQkJE
-SEJWXmdNREdHRERCREBAP0A8RkNGR0JBQUpDREVGREFBQ0FAQkJFQkJCQkI+P0I+
-Pz9AQT1BP0BBQD9CQUBCSERDQkFDQkFDQ0NERURERERGTUZHRkRERERDQ0RER0hI
-RUVFRUVGRkdGR0dHRkdHR0hISUhISElJSUpKS0pKSktNTU9WV1JOS0pKS0tLTU5O
-UFBTU1FRT1FPT09PT09QUFFRU1RWVldZWVpaW11dXV5eXl5fX19gYGJjZGZnaWpr
-a2psa2trbGtsbWxtbW9wcXFycnNzdHR1dXd1d3Z0dXR1dXR1dnd3enx+f4CCg4SE
-g4KBgoOFhoiKi42Nj4+QkJCRkpGRkJCRkpGSlJWWlpeZmZqbm5yen5+goKGhoaGj
-o6OkpKWlpqalpKOko6Wlpqenqamqra6vsLK0tLW3t7i3uLe3ubm7u76+v7/AwMHB
-wcHAv7+/v7/BwcPDxMbGx8jIyMjHx8bHyMjIycnJysrKy83Mzs3Pz9DQ0NHR0dHS
-0dLR0tLT1NTV1tXW19fY2djY2dnZ2NjY2NjY2NnZ2dva29vb29zc3N3d3t7d3d7e
-3t7e39/e3t3e3d7d3t7e3t7e39/f4ODg4OLh4+Li4+Tl5OXk5OTl5eXl5eXk5eXk
-5OXm5ubl5ebm5ubm5ebm5ufm5ebn5ubm5+fn5+fm5+bn5+bm5+jm5+fl5+Xm5+bm
-5+bl5+bm5ufn5+bo5+bm5uXm5ubm5+bm5+jn6Ofn5+fp6Ofn6Ofn6Ojo6Ojp6ejo
-6Ojp6Ojp6ejp6Orp6enq6unq6urq6erp6+rq6enr6uvr6unq6+rq6erq6erp6erq
-6err6urr6+vr7Ozs7Ozt7u7t7u7u8O7v7+/v7+/w7+/w8PHw8PCmr5GBdXJqcXV5
-dHNubGNkbGRiaWBdZmNpZm1vamhmc2NfaVtjaW5lYmlmbWtqamhrZ2ljZ1xhZWtl
-X2djZnRrZW5rcmtqdH1zcGVqbmhraWV0bWpobWtycW9zaHBrb25oaGpuc2ZjZV9k
-Y2NgY2JdZW1pYGVjZ29kcGpWZGhqYF1jZ2RhZGdbXlxnZF1kZFpYYVxcU1dfX1pd
-VFZfXVpZWVlgZ19fZ2BhaGhWWGRkYV1pZXV9XlhZXVhWSU9PUlJPU09IbIpnUElU
-VllfWFteYFhRXWBiXVVZW1laWVRQVE1WWVxXUFBEUlFQRVRWVE9LUlNQV1ZSUlFV
-TlJLT2BaUFlOU09RUllRUFJTVlNQUlJVV1pYTk1MT09GPkFBQ1BUVlNRT09KS0tI
-TFFUTU9PS0pSUU1QTUxTVVRMS0xKSUlNQkREPUBARElLT0o/QkdERD8/Oz04NzY5
-Qjk7QkM9RUJIRE9GQkBGSE5LP0M/PkhGQDw8RUM/QUdHPTlAOEFLSERAPTs7Oz1E
-QkNAPkBAQj1ARERLQkNBR0RBQD09P0NDREZHTElDRUhCQkE+PT9IRkVEQ0JBQkFB
-REJERkNIR0VFRkRBRkZBQkRERkZHS0dIRkE5Ozw7Oz5DQ0RBQjw8QDtAPTk6PD8/
-PTs7PDo7Ozo6Ojo7Ojo5Ozs7Ozs7PT09PTw9Pz09PDw8PT09Pj8/QEBBQUFCQkNE
-REREREVFRkZGR0lKTEpMTE1OUFFPUE9QUFJSU1VUVVZWVlZXV1laW15fYGBfYGFh
-YmJjZWVnZ2lpampqamloaGZkZWZmZ2dnaGlqamttbW9vb29vcG9wcXJzdXV0dHV1
-dXV1dnZ2dnZ3eXp2c3Nyc3R2eHl4en1+f3+Afn9+fn59fn59fn5/gIGChIaGh4mK
-i42Oj4+Pj5CQj5CQkZKSk5WWlpiZmpubm5ybnJuamZqcnJ2dnp+hoqGioqSkpKKk
-o6SkpaOioqKjo6OkpKaoqKqsrK6ur6+vrq6trrCvsrO0tbe5ubm4u7q6ubq5uLm5
-ubq7vL28vb+/vr6+wcPDxMTExcXGxcfGyMfIx8fHxsXFxcXEwsfIycjJysvMzM/P
-0NLT09TU1dTU1NTV1tbW19jZ19jY2dnY2dnY2NfX2dnZ2drb3Nvc3d3c3dzb3Nvc
-3dzc3d3d3d/d397f39/g4N/f3+Dg4eDg4OHg4eLh4uHi4uHi4uPk4+Pj4+Pj4uPi
-4+Lj5OTj4+Tj4+Pj5OPk5OTk5OTj5OXl5OTl4+Xl5OTk5eTl5OXl5OXk5ebl5ubl
-5ubl5ubn5ubm5+bm5ubn5ufn5+fn5ufn5+jn5+fm5+fo5+fo6Ojn5+fo5+jn6Ojn
-6Ojn6Ojo6Ojn5+jn5+fn5+jn5+jo6Ojn6Ofp6Onn6Ofo6eno6Ojo6Ojo5+jp5+jp
-6enp6Orp6erq6Ojp6ejp6ejp6erq6unp6erp6erq6unq6urr6urq6uvr6urq6+rr
-6+vr6uvr6+zr6+vs6+vq7Ovr6+rq6+zr6+zr6+zt7Ozs7O3t7O3u7u7v7u7v8PDw
-7/Dw8PLw8PHx8PHw8a2oo5p1b2pxdW5wbmpqaWVmZV9nY2JraGdkbmxoZ2FYXlxc
-XWBkY19qaGZnZGtrZWdkYGVbXW1jYGBdYmhyenpxeGpra2xlc3Nta2xqamdqa25v
-bGhoZmhwbGxlbXBza1xdZ2VjYmJpYmFgW2VlZ2VkYWhrWWVfZWhdXl9daWRcWVZf
-W2dpYmFWWmhkW1ZeW1FgW1BcWVZgXFdXV1hWWlpdVVpfXmZrWlVaXVZUVmVhZGBq
-fXd3fWVaWFZbT0hTUlFhZm1peIJ1W1tNTE5WVlFORUdPVlRVV1ZYVFleV1RXVVFO
-VFRPTU5FSU5EQExMSkxMU1VQUFBVUk9OTElIRkpPTlRQS0hOTU5OUlNQS0dGSlNe
-WFRFRUlPTlA7Ojg/Q0dEQTxBR0hDQkI+Q0FEP0FIQURAR0RES0hGSEZGQkNAQEE7
-PEE/Pz5DQ0VHQ0NAQUNFRENAQj48P0FARD5BR0BBQUJGRkZERklFRUVFRkdDQkFC
-REBBQEBCRUNAO0FEPkZAQ0M/PUA9PT07Qjs5Ojk7OTs+PDs4OTg4OTg3Njc3Nzg4
-OTo7ODc3ODg4ODc3Nzg5ODg4Nzg3ODg4ODk4OTk6OTw6OTc8PDk6P0VAQTs8PD9B
-PDs7Ozs8Ozw+PDs8PT09PT8/P0BAP0FBQUFCQkNEQ0RERURFRkZGR0hISEpJSkpO
-SktMTE1NT1FSU1VXWFpbXF5fYGJjY2VmZ2hqa2xub3FzdHd3ent9gICBgoGBgoOD
-hIWHh4mJiouMi4yOj5CSk5aWmJeXl5mYmJqanJydnZ2enp6enp2cmpiZmZiZmZia
-m5ucnqCgoaKioqKioqGko6SmpaSkpKWkpaSlpKWmpKampqWjoqKjpaamp6ipqqqr
-rKurq6urrKurq6urqqytrK6usLGxsrS1tre4uLe5ubm4uLm6urq8vL2+vr+/wMDB
-wcHBwMHAwcHCwsPDw8bGxsXGx8bGx8fGyMbGxsbGxsbFxsfHycnKy8vMzc7Nzs3M
-zs3Nz87P0NHS0tTT1NTV1tXV1dTU09TT1dXW1tbV1tfX19nZ2dna2dnb2tzb29vb
-29vb29va2tnZ2dna29vd3Nzd3t7e39/f4ODh4eHh4eHh4eLi4uPi4uLi4uPi4uPj
-4+Lk4uTk4+Pk4+Pj4+Pk4+Tk4+Xk5OTl4+Xl5OXl5OXm5eXl5ebm5eXk5eXl5eXl
-5eTl5eXl5eXk5ubm5ebl5ebm5eTl5eXm5uXm5eXm5ufl5+bm5OXl5ubm5ubn5ubn
-5ubl5+fm5ubm5+bm5+fn5ubo5ubn6Obm5ujn6Ofo5+jo5+jn5+fo5+fn6Ofn5+fn
-5+jo5+bo5+fo5+jn5+jo6Ojo6Onp6Ojo6Ofp6Ono6Ojo6Ojo6Ojp6Ono6ejp6Onp
-5+jo6Ojp6Ojp6Ojp6ejp6enp6enp6Onq6+np6ujq6urq6erp6err6unq6urp6urr
-6uvr6urq6uvq6uvr6uvq6+vr6+rr6+vs7Ovs7Ozs7Ovs7Ozt7Ozt7Ozt7Ozt7e3t
-7u3s7e3u7e7t7e7u7u/u7u/v7u/v8e/w8PHv8fDv8fHw8PHwoaOlo4N8cHBycG5w
-dHJ0YWNfWWBnbWVmaWloZWNdYl9eWVpfXltdXGNrYV5bYVpgWVxgY11dV1pYWV5f
-X2pycXNkYWloYmBdYVxiaV1fYmZjZ2ppYF1dZ2RuWVpeYGdmYFtVV1RZX2ZeXF1Z
-W11lY1RTamlkXl1bXl5eV15oY1RWVFJXW1pcWltXXFFNV1VLT1tWUFJPWE9MR0RP
-UUlTV1VeX1tjY2VWUU9MRltbTmFmbHuGdXeQinRcVU1AP0dERG9tXF5eZHdaRUJD
-SlRNRz09PT1CQkxVS1JTUk1IUFBGRkdMT0lNREdKRkE6PkFDR01TTE9KSkJGSUhG
-R0RBQ0VBQD1CPj9ER0dJTExLREFGSElLR0hHQ0dJSkQ+QT5AQ0RBRUBERkZARERD
-Q0VEP0JGRD9DRkNBQkRFRkFAP0NBPTtAPjw6OUFAQDk9OTg6O0FAQ0FERkdBPTs9
-ODk9Qj85Ojo/Ojk5Ozg3ODk5OTk4ODg3ODc3ODc3ODg3ODg4ODc4OTk4Ojk5OTk5
-OTk5Ojo5Ojs7Ozs7PDw8PDw9PD09PDw9Ojs9PD09Pz8/Pj9AQUFCQkNERUVFRkdG
-R0dHRkdGRkhHR0hJSUpKS0pLTE1OTk9QUVFTVFVWVlZWVldWVldXV1hZW1xdX2Bh
-YWNjZGVlZmlpamtrbG1ub3Fzc3R1dXZ2dnd4eHp9foCDhYeKjI6QkZOUl5eXmJqb
-m52dnaChoaSmpqiqq62ur6+wsLGxsLGysrW1t7e3uLi5ury8v77AwMHCwcHCwcLC
-w8PExcbExcXGxsTFxMLCwcC+v8C/wMDCw8PExcbHx8jHxsfHyMfGyMjIycjIx8fI
-x8fGyMfIyMjIycbHyMbHyMfIycrKy8rLzMvLy8zMzMvLy8rLy8vOzc3Nzs7P0NLR
-0tLT09TT1NPU1NTU09bW1tfX1tjZ2NjY19nY2NjX2NnY29va2tva29ra29zc3Nvb
-3Nvb29zb29zb29vc3d3d3d7e3t7f3t7e3t/e3t/f4OHh4uLh4eHh4eLh4eHh4eHi
-4eHh4uHg4eHh4eHh4eHi4uLj4+Pj4uPj5OPi4+Ph4uPj4uLj4+Tl5OTk5OPk5OTk
-5ebl5OTl5OXk5ubl5ebm5+Xm5ubm5uXm5ebm5ubm5+Xm5+bm5ubn5ubn5+bm5ubm
-5ufm5+fm5+fn5+bm6Ofm5ubm5+bm5ufm5ufm5ubo5ufm5ufn5+fn6Ofm5+bm5ubl
-6Ofn5+bn5+fn5+fm5+fn5+jo5+jm5+fn5+jn5+fn5+fn6Ofo6Ofo5+fn6Ofo5+fo
-6Ono6ejo6Ojp6Ojp6Onp5+jn6Ojo5+no6Ojo6Ofo6Ojp6Ojo6ejp6eno6Onp6uno
-6Ono6ejo6Ojp6enp6unp6enq6ujp6ejo6enp6enq6ejp6erp6urp6enq6ujq6urp
-6+rr6urq6+np6urq6+vp6evr6+rr6uzs6+vr6+vs6+zr6+vs6+zr7ezs7Ozt7Ozs
-7ezt7O3s6+zt7ezt7e3t7e3u7uzt7O7t7e3t7e3u7u7v7+7u7+/u8PDv7+/w7/Dw
-8fDx8PDw8fHx8PGipqaqppKIiIKBdXxvdmJfYl5cX1paZFphaWhpXlZTXFtXW1hT
-X19XVV9aXF1hXWRbWVZWXVlbVFBTS1deXFZgXmlgX2NeW1hgaVhaWl1dYGNlaGtm
-YGJfXWZlX2hhZGRfUFVOSllMTFZPVllQUFRQUVRPVF9cX2RbWlBMV1phYFRTSk9X
-VlNUVFFHTExISUhLSUtNTU1CQkFEQUZER0pIUlZPXllgZ29XQ0BCUFFSUVdwZXVp
-b3B7cG9QSEVARUhOXFtRTk9KTElLRVNHSUhHR1BHREZER0tJRkRIRUpIQkZHSEZG
-REZFQUJDQj8/PUBEQUNESkRAQUJBQDw7PDs9QTw7OTk6PDo5Ojk5OTg3Nzg4ODg3
-Nzg3ODc5Nzc4Nzc6UDo5OTo5Ojg4OTg4ODk4Nzg5OTk6OTk4OTpBOTk5Ojo6OTk6
-Ozo6Ojs7Ojs7Ozs6PTw8PDw+PTw7PTw8PD0/Pj8+Pj0+PT8+PkFAQEFAQUBAQUFB
-QUNDRERFRUZHR0dHSEdISEdJSElJSktLTU1OTk5QUVFRUlJVVFZXV1lZWllZWVlY
-V1dYWlpaW11eX2BhYmVlZ2lpbGxtbm5vcG9vb29wcG9vcHJxc3N0dHZ4eXp6e3t9
-f3+AgYKChIODhISEhYWEhoeHiouMjY6PkI+QkZGTk5WWl5mZmpucnp+foaGio6Ki
-pKSmpqiqrK6wsrK1t7m6u72+vsC/wMLCwsPDxMXFyMjJysvNzc3Ozs/P0M/R0NHR
-0tPT1NTU1dXU1tfW1tjZ2drZ2dra29vZ2tvb29na3Nra2tna2NnY2NjZ2NjY2NjZ
-2tnb29vb3Nrb29va29vb29zb3Nzc29rb3Nvb29zc29zc3Nvc29vb3Nvc29zc3N3c
-3d3d3d3d3d7d3d3c3d3f3d7e39/g39/f4d/g4eHh4ODg4eLg4eHh4uHi4eLi4uLi
-4+Pi4+Lj4uPk4uPj4+Pj5OPj4+Tj4+Pj4uTj4+Pk4+Pj4+Pk5OTk4+Tk5OXl5OTl
-4+Tk5eXl5OXl5OXl5ubm5ubl5ebl5uXl5OXk5OPj5OPi4uTj5OTj5ebm5uXl5ubl
-5ubm5ubm5ebl5ubm5ebm5ubn5ubm5ubo5ubm5ubm5+bn5+bm5ufn5+fm5+bn6Ojn
-5+bo5+jn6Ofn5+fn6Ofn5ufn5+fn5+bn6Ofo5+fn6Ofn6Ofo5+jn6Ojo5+fn6Ojn
-5+jn5+fn5+fn5+fo5+fo5+jo5+fn5+jn5+jo6Onn5+jn5ubo5+jo6Ojn6Ojn6ejn
-6Ojn6Onp6ejo6ejp6Ofo6ejo6Ono6enp6ejo6Ojo6Ojp6Ojo6enp6eno6Ojo6Ojp
-6ejo6enp6enp6enp6enp6Onq6erp6enq6ejp6erq6uvq6erp6ejq6erp6enp6ujp
-6urq6unp6unp6urp6urq6erq6erq6+rq6urr6+zr6+rr6urq6+rr6uvt7Ovr7Ovr
-7Ozs6+vt7Ozs7Ozs7ezs7ezr7O3t7ezs7Ozu7e3t7ezt7e3u7e7t7e7t7u/t7u7u
-7u3v7e7u7u/w7+/v8O/v8O/w8O/w8fHy8PDv8PHw8fHy8aGfp6usrquqq6KVgn13
-aGdZWVVTW19XWFlhXVxeVlFRUFJeWVBOVVRbVFZYUFpXU11ZXlxeV0xDR0lISUdO
-TU9SVFhWWFtXW11YVVZOTFFTXFpWXl5TUFhcWFtYTFRXWFRSTUFGR0BBQUJEQ0ZF
-SkVUUUxBRE5OVFFMSkpNTklNUkxHSkdITlFKRkhJSEZIRUZDQ0hJR0lHRUVIR0pK
-SUVHSEhKS0xMS0xJRkVJR0NEU09JUFBPTU9MT0pISEhHSEhLUU9SU1NTT0xJSUdC
-RUBAP0JEPT08PDs8Ozs7PDw7PDw8PDs7Ozs6Ozw7Ojs8Ozo6PDw9PDs7Ozo7Ojo6
-Ozo6Ozw8Ozs7PDw8Ozs9Ozs9PTw8PD08PD09Pj4/Pz4+Pz9CQUJBQkFAQUBBQUBA
-QEJBQkREQ0VFREVFRkZHSEhJSUpJSkpKS0tKSktLTUxMTk5PT09PTk9QUFBSUlRT
-VFVVYlZWV1dXWVlaXV1dXl9gYWFhYmNjZGdoaWprbW1wb29wb3BwcHJxc3N0dXZ2
-d3p6ent8fH5+f4CAgoKDhISFhYSFhISEhIWFhoeJioqMjI6PkJGSk5WWmJiZmpqa
-m5qZmpuam5uanJyenp+foqGho6Okpaalpqepqaqqqqurq6usq6ytra+wsLKzsbOz
-tLW1tra4uLm6u72+vr7BwcLBw8PDxMTGxMbHyMnLzM7Oz9DS09TV1dbW2NfZ19nZ
-2tnb2tvb3Nvc3d3d3t3e3t7f39/g4N/f4ODh4uDi4eDi4uHh4uPh4uLj4+Li4uPj
-4uPi4eLj4+Pj4uLj4eLi4uLi4+Lj4+Pj4uTj4+Lj4+Pi4uPk4+Pi4+Pi4+Tj4+Pj
-5OTj4+Tk5OTj4+Pi4+Pk4uPk4uLj4+Pk5OPj5eTk4+Tk5OPj5eTk5OTk5eXl5eXl
-5OXl5OXl5OTl5OPl5eXl5eXm5ebl5ubl5ubm5uXl5ubl5ebm5ubm5ubm5ebm5ebm
-5eXn5ubl5+Xm5ebm5uXm5ubn5+fm5ufm5ubn5ubn5ufm5+bo5+jn5+fm5ubm5+bm
-5+bm5uXl5OTl5ebl5ubm5+fm6Ofn6Ofn5+jm5+bn5+bm5+fn6Ofo6Ofn5+fo5+fn
-5+fo6Ofn5+fm5+jn5+jn6Ojn5+jo6Ojo5+fo6Ojn5+jo6Ofo6Ojn6Ojn5+fo6Ono
-6ejo6ejp6Ofn5+jo5+fo6Ojp6Ojp5+fo6Ojp6Ofn5+nn5+jo5+no6Ojn6Ojo6Ofo
-6Ojo6ejn6efo6Ojo6Onp6ejp6enp6Ojo5+no6eno6eno6eno6Onp6enp6ejp6urp
-6eno6eno6Ono6erp6enq6enp6eno6enp6enp6unp6enp6eno6urp6unp6urq6urp
-6evp6erq6urr6urr6enq6uvp6urp6enq6urq6enq6urp6+nq6urr6uvq6+rq6uvr
-6+vr7Ovq6uzr6+zr6uvs6+zr6+3s7Ozs7Ozr7ezs6+vs7Ozt7Ozt7e3t7ezs7e3t
-7e7t7u3t7e3t7e7u7e3t7u7v7e3u7+7t7u7u7u7u7u/v7u/v8PDw8PDw8PDx8fDx
-8fHx8fHx8fLygYWFgY2VnJeLgouTmolxWVNOREdMUU1UU1VTTVFQS0ZDRExNSUZD
-R01IS0tJTU5DR0xMTE1OS0tLR0hERUVDRUpLRUhJSklLS0hJR0ZHSUZKR0dHR0RD
-REZISURFR0NEREdISkdGRkZIRkdMSEpFRUdHR0dGR0hGSFJJUFJLSUlHSEhJSElK
-SktLTU1OT1FQUlJSU1JSU1NUVFVUVFNTUU9OTU1OT1FSU1VWVlZXVlhcXmBlZ2ho
-aWdnZWRkZGVnam90e4KGiIN8dm9nYVpVUlFPT09QTk9OUE5OT05NTk9OTk5OT09O
-Tk1OT09PUFBRT05OTUxNTU1NTE1IS01OUFBQUFFQUlNTU1RVVFVUVVVVVVVWVlRV
-V1lYWVlaW11dXV5eXl5eXV5dXV5eXl9fYGJjY2VlZmdnYGpramprbG5vbm9vcHFy
-cnNzc3N0dHR0dXV3d3d4eHl6e3x8fX59fX9/gH+AgYGCg4SGhYiIiYuKi4yMjY6Q
-kJGSkpSUl5eXl5eYmZmZmpqbnJycnp+eoaChoaKio6OkpKWnqKipqqmqqaioqqmp
-qqyrra2vrq+xsLK0s7a2t7i4ubq2u7u8u7u7vLy7u7u8vb6/v7+/v8DBwcLDw8PD
-xMTGx8bIx8jHyMfIyMnKy8vKy8zNzc3Ozs/PztDR0tLT1NTU1dbX19fX2NjY2NnZ
-2tvb3Nvc3d7e3t/f4ODh4OHh4OHi4+Hi4uLj4uPi4+Lj5OTj5OPj4uPj4+Tk4+Pj
-5OXl5OXk5OXk5eTk5OTl5eXk5OXl5OXl5OTj5eTk5eTl5eXk5eXm5eXm5eXl5eXl
-5ubl5eXm5eXn5ebl5eTl5ebk5ubk5OXm5eTl5ebl5ubl5eXm5uXl5uXl5OXm5ubm
-5ubm5ubm5ebl5efm5ebl5+bm5uXm5ubl5ebm5uXm5+bm5ubm5ubn5ufn5ufn5+fn
-5efn5+jm5+fn5ufm6Ojn5+fn6Ofn5+fn5ufn5+jo5+fn5+fo5+fo5+fo5ufo5ufo
-5+fn5+fo6Ofn6Ojo6Ofn6Ojo6efo6Ofn6Obm5ubm5ufo5+fn5+fn6Ofn6Ofo6ejn
-6Ojn6Ofo5+jn6Ojn6Ofo6Ojo5+jo6Ojn6Ojn5+fo6Ofn6ejo6Ojo5+nn6Ofo6Ojo
-6Ono6ejo6Ono6Ono6ejp6Ono6enp6Orp6Ojo6eno6enp6Ofo6Ojp6ejo6ejp6Ojp
-6ejo6Ojn6Ojp6ejp6Onp6Ojp6Onp6Ojp6eno6eno6ero6erq6erq6Ono6ejo6enp
-6ejo6urp6enp6erq6unp6unp6Onp6urp6erp6urp6unp6enp6enp6urq6erp6unq
-6unq6enq6urq6erq6urq6uvr6unr6urr6unr6uvq6+rq6+vr6urq6+rq6erq6urq
-6+nr6+rr6uvr6urr6+rr6+vq6+rr6+vs6+vr6+vs7Ovr6+vr6+vr7Ozs7O3r7Ozs
-7O3s7ezs7ezt7e3t7O3t7e3t7e3t7u3t7u7u7e3u7u7t7e7t7u7t7u/v7u7v7+7v
-7+/w8O/v8O/v7+/w8O/v8fHy8fHx8fHx8fHx8fLy8vF5b2RcVlZgUEpNTUpLS0RB
-Qz0+PD5BPT8+Pj88PDs3PD0+RD49Ozw7PkM+PD47Pzs7PDw9PDw7PUA8Pj5APz5A
-QEJAQUBBQUNCQ0RDQ0RFRkVGRUZGSEhJSElKSktMTE1NUFFSVFZXWVpaXWJdXV5f
-YGBhYmNkZGNlZWVmZmdpam1vcHJydHZ4en1/gYOFhomKi42NjY6NjY2Ojo+NjIiG
-g4B+fX1+gIOGioyOjo6Nj5CSlZmcn6KhoZ+enZycnZ+ipaqtsLGxraiimpONh4SA
-fn18fH18fH17e3x8e3p6enl4enl6enp6eXp7eXt6e3p6eXl3d3V0dXV1dXV2eHl5
-eXt8fHx9fn5/fn9/f39/fn5/gH9/fn+BgYKChISFhoaHh4eHh4eGhYaHhoeHiImJ
-iouNjY6Njo+PkZGSkpKTlJWWlpeWmJmZmZmZmpqam5ucnJ6dn6CgoaKioKKio6Sk
-pKWlpKSkpaamqKmpqqutrK2tr66wr7GxsbO0tLW1t7a3t7e4t7i5ubu6ury7vb2+
-vb6/v7/AwcHAwsPCxMPExMTDxMXExMTFxsbGx8jJysrLy83Nzc7Oz8/R0M3R0dLS
-09DR0tHS0tLS09PV09XV1dXV1dfW1tfX19jY2NnZ2dnZ29ra2tvb3Nvb3Nzc3d3d
-3d3e397e39/g4N/g4OHh4eHh4eLi4uLj4uLj4+Pj4+Pi5OTk4+Tk5OXl5uTk5eXl
-4+Xk5uTl5ebl5eTm5eXm5eXm5ubl5ebl5ebm5ubm5ebm5Obn5ebl5ebm5uXl5ufn
-5uXm5uXn5ufn5+bn5+fm5ubn5ubl5+bm5ufm5ufn5ubn5ubm5ubl5uXn5ubm5ubm
-5ufn5ubo6Obm5ebm5ubm5+Xm5ufn5ufm5ufn5ubn5ubm5uXm5ubm5ufn5ubm5+bm
-5+fn5+bm5+fm5ufn6Ofo6Ofo6Ojn5+bn5+fo5+fo5+bo6Ofo6Ojn5+fo5+bn5+jn
-6Ojn5+jo5+fn5ujn5ujp6Ojo6Ojn6Ofo6Ofo6Ono5+fo6Ojo6Ojo5+np5+jp6efo
-5+jn5+fo6Ojo5+fo6Ofo6Ojo6Ojo6ejo6Ojo6Onp6Onp6eno5uHo6Ojo6Ojp6Ojo
-6Ofo6enp6ejo6ejp5+no6eno6enp6Ojp6eno6ejo6Ojo6enp6Onp6eno6unp6ujp
-6enp6enp6Onp6Ono6ejo6ero5+Pn6ejp6Onp6Ojp6Onp6erp6ejp6Onp6ejp6enp
-6enp6erq6+rq6enq6erp6ejo6erp6Ojp6enq6enp6urq6enp6ujp6unq6uro6enq
-6urq6enq6unp6unp6enp6enq6unq6unp6erq6urq6urq6urq6uvq6urr6urq6uvr
-6urq6uvq6urr6urr6+vq6erq6+rp6+vr6+rr6+vq6uvr7Orq6uvq6uvs7Ovr6+vs
-7Ozs6+vs7Ovs7Ovq7Ozt7O3t7ezt7O3s7Ozt6+zt7O3t7u7t7u7t7u3t7e7u7e7t
-7u3t7u/u7u7u7+7v7u/u7+/v7+/v7u/u7+/w7/Dv7+/x8PDv8PDx8fHx8vHx8fLy
-8fHy8vLy8n91a2BZU1BTUE5JQD8+Pj09PDw8Ozk6Ojo8Ozw9PTs8PT4/QT5AQEJD
-RENDRERFRUhISEhJS0tNTk9QUlNTVVZYWVpbXV1fX19hYmRlZ2doaWpsbG1wcHFx
-c3R3eHh5e35/goWHioyQkpKTk5SVl5mZmZqbnp2dnp2enZ2dnqCjpKmqrK2usLG0
-tbe4uru9v8DBwsLBwsLBw8HBwb69u7m3trSzsrK0tbe6u72+vb2+vr/Bw8XHyMnJ
-ycjGxsfGyMnJzMzNzcrIxcG8uLSxrquqqqqpqampp6inpqempaSkpKOjpKKjo6Oj
-o6SkpKOko6KjoqChn5+dnp+en5+goKKio6OjpKSkpaSlpqWkpKWlpaSlpKWkoJ2k
-pqWnqKipqqqqqqqqqamqqaqqqqurrK2srayurq+vsLCxsrKxsrKztbW2tre3uLe4
-t7m3ubm5ubq7u7y8vb29vr6/v8DBwMDAwcHBwsHCw8PDw8TFxMfHxsfHx8nIycrJ
-y8vMzMzOzc3Oz8/Oz8/Q0M/Q0dHR0tPT09TT1NTV1dXU1NbV19bX1tfX2NfY19fY
-2dnZ2tna2tvb3Nzc3Nzd3d7e3d3e39/f3t7e3t7e39/g3t7e3+Df4N/f4ODg4eDg
-4OHh4uHi4uLh4uLj4uLi4uLj4uPi4+Pi4uTk5OTj5OTk5OPj5eTk5OTk5eTk5OTl
-5OXk5eXk5OXl5ebk5uXm5ubm5ubm5uTk5uXl5ebm5ubm5ufm5ebl5ubl5+bm5ubm
-5+bo5ufn5ufm5+fn6Obm5+fn5+bm5ufm5ebm5ubm5+fo5ufm5+bn5+fm5+fm5+fn
-5+fm5+bn5ufn5ubn6Ofm5ufn5+fm5+fn5+fn5+jn6Ofn5+bn5ujm5ubn5+fn5+fm
-5+fn6Ofn5+fn5+fm5+fn5+fn5+bn5+fn6efn5+fn6Obo5+fn5+fm5+jo6Obo6Ojo
-5+jo5+jp6Ojp6Ojn6OXo6Ojn6Ojo6Ofo6enp6ejp6Ojo6ejn6Ojp6eno6ejo6ejp
-6ejo6ejo6Ojq6Ojo6ejp6enp6ejp6Ono6ejp6Ono6enp6Ono6Ojp6ejp6enp6erq
-6enp6ejq6enp6ejo5+jp6ejp6Ofp6efp6ujo6Onp6Onp6enq6Orp6Orq6unq6enq
-6ejp6enq6enq6erp6enq6urq6erq6erp6erq6urp6unq6eno6enp6enp5+nq6urp
-6ejq6enp6urq6enp6erp6enp6Onq6urq6unq6urq6enr6erp6enq6unq6erq6erp
-6enq6urp6+rq6uvq6urq6urq7Orq6erq6urq6+vq6uvq6urp6urq6urq6erp6evq
-6uvr6unq6urq6+rq6+vr6+vq6+vr7Orq6urr6uvr6urq6+rr7Ovr6+rr6uvr6+vr
-6+vr6+vr6+vs6+vr6uvr6uzs6+zr7Ozr7O3s7Ozr6+zt6+vs7Ozr7Ozt7ezs7O3s
-6+zt6+3r7e7u7u3u7e3u7e/u7e/t7u/v7u7u7u/v7u7v7u7v7+7w7+7v7+7v7+/v
-7u/v7+/u8PDw8O/x8PHx8PHy8fLx8fLy8fHx8fLyiX92bGVcVVFYTUhERUNDRERD
-Q0NEQ0VFR0hJSkxMTU9QUlRUVlhbXF5gYWFjZGZnaWpsbm9xc3V3eXx+gIKEhoaI
-ioqMjY6PkJGSlJWWmJiampydnqCgoqKkpaaoqausr7Cxtbe5u7y+v8HCw8PExMXF
-xcjIyMfGx8nHxsbJysvMzc/R0NHT0tTV1dXX19jZ2NjZ2dnb2tnZ2djY19fW1dTU
-09LS0tHT1NTV1tbW1tfY19nY2dna29va2tna2tna2tra2tvZ2NfW1NHRzszKysnI
-yMjIyMfHxcXFxMTExMLCwsLDwsHBwcLBw8HBwcHBwcC/vr69vLy9vL2+vr6/v76/
-wMDAwcHBwcHBwMDAwcDAwcDAwL++vMDBwcLBwsPCxMTExMPDw8PExMPFxcTDxcbF
-xsfHyMfIycjIycrJysvKy83Nzc3NzszMzs3Ozc/Oz9DP0NDP0dHS0tLT09PT1NPU
-09PU1NXV1dbV1tbW19fX2NfZ2djZ2dnZ29na29vb29zc3Nzc3N3d3d3e393d4N7e
-3+Df39/f3+Dg4ODg3+Dh4eDi4OHi4eHh4uHi4+Lg4uLi4uPi4uLi4+Li4+Lj4uPi
-4+Pj5OPj4+Tj5OPj4+Pj5OTk5OTj4+Tk5OTl5OXk5eTk5eXm5eTl5OXk5OXk4+Xl
-5eXl5uXk5eTl5eXl5eXm5eXl5eXl5ebl5ufl5efl5ebm5+bm5ubm5ebm5ufm5ufm
-5uXm5+bl5+bl5ufm5+bm5ebn5ufm5ubn5+bm5+bm6Ofn5+fm6Ofn5+jn5ubn6Ofn
-5+fm5+fn5+fn6Ofo5ujo5+fo5+fn6Ofo6Ojn6Ojn5+nn5+fn5+jn5+fo5+fo6Ojn
-6Ofn5+np6Ofo5ufo6Ojn6Ojn6Ojn5+no5+bn6Ofn5+fn5ufn5+fo5+fo5+fm6Obn
-6Ofn5+jo6Ofo6Ojn6Ojo6Ono6eno6ejo5+jo6Ojo6Ono6Ono5Ofo6Ono6ejo6Orp
-6ejp6enp6Ojo6Onq6Onp6ejo6eno6enp6eno6ejp6Onp6enp6unp6eno6Onp6unp
-6ejp6enq6ejp6unp6enp6Ono6eno6uno6enp6Orp6enq6uno6unp6enp6erp6Orq
-6Onq6eno6unp6unp6urp6urq6enp6urq6uno6urp6erp6erq6urp6urq6erq6uvr
-6erq6enq6ejq6urp6uvr6erq6urp6unq6unq6Orp6+nq6unp6err6unp6+rp6urp
-6ejq6erp6enp6erp6erp6eno6ujp6unq6+rq6urp6+vq6uvr6urq6urp6+rq6urr
-6+nq6unq6urq6uvr6+rq6uvr6+rq6uvq6+rr6urr6uvr6+vr6+vq6+vr6+zr6+vr
-6+zr6+rr6uvr6+rr6urq6+vr6uzr6+zs6+zs6+zr6+zs7Ovr7Ozr7O3r7Ozs7Ozs
-7e3s7ezr6+3r7Ozs7e3t7O7t7O3t7ezt7u3t7u3t7u3t7+/t7e7u7+7u7+3u7+7v
-7u7u7u7v7+/w7u/v7/Dv7+/w7+/v7u/w7+/w8O/w8PDw8PDw8PHy8PHw8PHx8PLx
-8fLx8vGVjoV7dW1kYmJYVlRVVFRWVldZW1xdYGJjZmlqbG5wcnZ3en1+gYKFh4qL
-jY6OkZKTlJaXmZqdnqGjpqipq6yur7Gxs7S0tLa2uLm5u7u9vr7AwMHCw8PDxsbI
-ycrJy8zNz8/S09PU1dfX2dnZ2dja2tra2tvb2tra2tvb29rb3Nzd3d3d3t/e3d/f
-3t/f39/f197f39/f3+Df39/e3t7e3t/f3d7d3t7e397e39/e3uDg4ODg4OHh4ODg
-4d/f3+De3uDf39/g3d7d3Nza29va2tja2tnZ2NjW1tjX1tbW1tXU1dXV1dXV1dXV
-1dXW1dTT09PS0tLS0dHS0NHR09PS09LT0tTU1NPT0tPT09TS09PT0tLR09PT09PT
-09TU1NTU1NTU1dXV1dXU1dXV1dXV1dXW1tXX2NfX19fX2djZ2dnZ2drZ2tvb2trb
-29vc29vb29vc3Nzd3N7d3d/e3d7e397f3t7e39/f3uDg3t/g4OHg4ODh4d/h4OHh
-4OHh4eHh4eHi4uLi4uLj4ePi4+Li4uPj4+Pi4uPj4+Pj4+Ti5OPj4+Xj5eTk5OTk
-5OXj5OTk5OTk5OTk5OTj5OPj4+Tk5OTj5uXl5OXl5OTl5eTl5OXl5ebl5ebk5eXl
-5ubk5OXn5ubl5eXl5uXl5+Xk5eXl5ebl5ebm5ubl5uXm5ufn5uXn5ubn5ubm5+fm
-5ubl5ubn5ufm5ufm5ujn5ubn5+fn5ufm5+jo6Ofn5ubm5+bm5+fn5ufn5+fo6Ofo
-5+bn5+jn5ujn5+fn5+fo6Ofo6Ojn6Ofo6Ofn6Ojp5ufo6Ojo6ejn6efn6eno6ejo
-6Ojo5+fn6Ojp6Ojo6ejo5+no6Ojo6Ofo5+fo6Onp6Ojn6Obn6Ofm5+fn6Ojn6Ojo
-5+jo6Ojo5+jo6Ofo6Onp6Ojo6Ojn6Ojo6Ojo6eno6ejo6enp6Ojp6ejp6unp6Onp
-6enp6Onp6Onp6eno6Ono6enp6enp6Ono6enp6OXp6Ojq6ero6unp6unp6enp6unp
-6unp6eno6enp6urq6enp6enq6unq6enp6erq6urq6unq6unp6Orp6enp6erq6erp
-6erp6urq6urq6enq6+rp6urq6unp6ujp6unq6enq6evq6unq6unq6uvq6unq6urq
-6+vp6ujq6unq6urp6unq6unp6+rq6evq6urp6unq6+rq6urq6erp6urr6+nq6urp
-6unq6+nq6erp6erq6erq6erp6urq6urq6evq6enp6enp6ero6unq6+nr6urq6+rr
-6urq6uvr6urq6+vr6+vq6+zq6+vr6+vq6urr6+rr6+vr6urr6urq6+zr6+rq6+vr
-6+vr6uzt6uvr6+vr6+vr7Ovs6+vs6+zs6+vs6+rr6+rr6+vr6+vr6+vr6+zr6+zt
-7Ozr7Ovr7Ovr6+zt6+zs7e3t7O3s7O3t7ezt7O3t7ezs7O3u7ezt7O3s7u3t7e3u
-7ezu7e3u7e/u7u3u7+3u7u/u7u/v7u/u7+/v8O/v7+/v7+/v8PDv7/Dv7+7v7/Dx
-8PDw8PDx8PHv8fHy8PHw8fDx8fHw8fHy8vLy8qOdlpGLhH56dXRycHFxcnR1d3l6
-fH+ChIeJi42PkpSWmZyfoaKmqKqrra+vsLK0s7W3uLm6vL2+wMLExcfGyMrKzczN
-zs/OztDQ0tLT09TV1tbW1dfY2NfY2dra2trc3Nzd3N7e3t7e39/f39/f4ODg4ODg
-4eHg3+Hh4eDg4OHg4ODh4uHh4OHh4uHh4eHj4eHi4OHh4uHi4eDh4eHg4eHh4eHi
-4eHg4eHg4eDi4eLg4eLj4+Li4+Li4+Hi4eLg4eHh4uHi4uLh4OHg4eDh4N/e4ODf
-4ODf397e3d/f3d7f393e3t/d3t7d3d7d3d3d3d3d3tze3d3c3tzb3d3d3Nzc3N3d
-3d3e3d3d3d3d3d3d3t7e3tze3d3e3d3e3dzf3d7e3d7d3t7d3t7e3t7e3t7e3t7f
-3eDg3+Df4N/f4ODf4N/g4N/g4ODh4OHh4eHg4eDi4eHi4OHh4uHh4uLh4eLi4uLj
-4eLj4uLj4+Lh4uPj5OLj5OLj5OPk4+Tk4uPj4+Tj4+Ti5OPk4+Pj5OTl5OXj5OTj
-5OPk4+Tk5OXk5eXl5eXl5ubm5eXl5eXm5uXk5eTl5ubl5eXk5Obl5eTj5OXl5OTl
-5uXm5ubl4+bl5uXl5ebk5uXl5eXm5uTm5ubn5ubm5uXn5ubm5ufl5ebm5uXm5+Xm
-5ufm5+fm5ebm5ubn5ujn5ubl5ubn5ufn5ufn5+fn6Ojn5+fn5+jn5+fm5ufm5+fo
-5+fn5+nn5ufn5+bm6Ofm6Ojn5+jo6ejn6Ofp6Ojo5+jo6ejo6Ojn5+jp6ejo6Ojo
-6eno6Ojo6Onp6efn6ejo6Ojo6ejo6enp6Ojo6ejo6eno6ejp6eno6ejo6Ojn6Ono
-6Ojp6enp6Ojp5+fo6Ojn6Ojo6ejo6ejo6enp6Ofo6ejo6ejp6Ojo6eno6ejp6erp
-6Ono6Ojo6eno6eno6enq6erp6uno6ero6Orp6unp6erp6unq6unq6urp6uno6ero
-6Orp5eno6erq6urp6unp6erp6Onr6erp6+nq6unq6enq6+nq6urp6urq6erp6urp
-6urq6urq6urq6urq6+rp6+rq6uvq6Orq6unr6urs6urr6urp6unq6urq6urq6+rq
-6erp6urp6urq6urq6urq6err6err6+rq6+rr6urq6+rr6urr6urq6+vq6uvq6+zq
-6urr6uvr6+vq6+rr6urr6uvq6uvq6uvq6+vr6urq6err6evq6uvr6urr6urp6urq
-6+rq6unp6urq6urq6urq6uvq6urq6+vr6+vr6+vr6uvs7Ovr6+rq6+vq6+vq6+vq
-6+vq6+vr6urr6+zq6uzr6+zr6uvs6uvs6+zr6+zr6+zs6+vr6uzs7Ozs6+vq6+zr
-6+zs6+zr6+zs6+zs7Ovr7Ovs7Ovs7Ozs7Ozs7Ozs7O3s7Ozs7Ozt7O3s7Ozs7u3t
-7e3t7O3t7e3t7e3t7u3u7e3u7e3t7e3t7e7t7u7u7u7u7u3v7u3v7+/u7+7u7+7w
-7+7u7u/w8O7v7+/w8O/x8PDv8PDw8fHw7/Dv8fHx8fDx8fHx8fHx8fHw8fHx8vLy
-8vLxs6+qpqGcmJWRkI6Oj5CQkZOVl5mbnaCipKepq62wsrS2uLu9vsDDxMXGx8jI
-yszNzc7P0NDS0tLU1dXY2NjY2dnc3Nvc3Nzc3Nzd3d3d3t7e397e4N/f39/g4eHh
-4OLg4eHh4eHi4eLh4uDh4uHi4eLi4uPi4+Hh4ePh4uHi4+Lh4uLj4uLi4uPi4uLj
-4uPi4uLi4uLi4+Pi4uPi4uHi4uPj4uLj4uLi4uPh4eLi4uLi4+Li4+Pj4+Li4+Pj
-4uLi4uLi4uPi4+Lh4uHi4uLi4uPi4eHi4eDh4eHf4eHh4eLi4OLh4ODg4eDh4eHh
-4eDh4eDh4eLf4eHg4N/h4ODi4ODg4OHh4eDg4OHh4eDh4uDg4uHi4eLg4OHh4eDh
-4OHg4uHh4eHh4eDg4eHi4eHi4eHi4eLh4uLi4uHi4uLi4uPj4uPj4uHi5OPk4+Pj
-4+Pj4uPk5OLi5OPj4+Pk5OTi4+Pj5OPk5OPj5OPk5OPk5OTk5OXk5OTk4+Tl5eTk
-5eXk4+Tk5OTk5OXk5OXl5eXk5eXm5eXk5OXk5uXl5ebl5uXl5ubn5eXm5ubl5ubm
-5uXl5eXm5uXm5uXl5eXl5eXk5eXm5uXl5ubn5ubl5ubl5+Xm5ubm5ubn5ubn5efm
-5+fm5ubn5+fn6Ofn5+fm5ufo5ubn5+bm5+fn5+fn5+fn5+jm6Ofn5ujn5+fn5+fn
-6Ojo6Ojn6Ojn5+jo6Ofn6Ofo5+fn5+jo6Ofo5+jn5+jn5+jn5+jn6Ojo5+jo6Ono
-5+no6Ofo6Ojo6Ofp6ejn6Ojo6Onn6Onp6enp6eno6efo5+jo6Ono6Ojo6enp6Ojo
-6enp6enp6Ojq6Ojo6eno6Onp6ejp6unp6ujq6unq6enp6Ojp6Orp6Onp6enq6enq
-6eno6Onp6eno6ejp6eno6Onp6erp6ejq6eno6erp6enp6enq6urp6Onq6unp6urp
-6+nq6enp6enq6unp6erp6+nq6enp6unp6enp6erq6unr6uvq6erp6+rp6erp6unq
-6urq6urp6urq6err6uvq6urq6urp6+rq6uvr6uvq6+rp6urq6urq6uvp6+nq6urr
-6unq6+vq6+rr6urq6urq6uvq6urp6urq6urr6urq6urq6urr6+rr6urq6uvq6urr
-6uvq6erq6uvr6+rr6urr7Ovq6uvr6+vr6+vq6+vr6+rq6+rr6+vr6urq6+vr6uvq
-6uvq6+vq6+zq6uvr6+rq6uvq6+rq6urr6+vq6urq6+rq6urq6uvr6+vq6+rq6uvr
-6+zr6+vr6uvr6+vr6+vs6+vr6+vr7Ozr7Orr7evr6+rr6uvs6+vr6+rq6+zs6+vs
-7Ovs7Ozs7Ovs6+zs7Ozs7Ovs6+zs7Ovs7Ovs6+zs7O3s7Ovs7Ozr7Ozr7Ozs7Ozs
-7O3s7O3s7e3t7Ozr7Ozs7uzt7ezs7O3t7uzs7e3t7uzt7ezu7e7t7u3u7u7u7e7u
-7e7u7u/u7u/v7e/u7+7v7+/v7u/u7u/w7+/v7u/w7vHv7u/v7vDw8PDw8PHx8fHx
-8PDw8fDx8PLx8vLx8fDw8fHy8vLy8vLy8/MAEAEAAAMAAAABBJcAAAEBAAMAAAAB
-Bt4AAAECAAMAAAABAAgAAAEDAAMAAAABAAEAAAEGAAMAAAABAAEAAAERAAQAAAAQ
-AB+FwAESAAMAAAABAAEAAAEVAAMAAAABAAEAAAEWAAMAAAABAG8AAAEXAAQAAAAQ
-AB+GAAEaAAUAAAABAB+GQAEbAAUAAAABAB+GSAEcAAMAAAABAAEAAAEoAAMAAAAB
-AAIAAAFTAAMAAAABAAEAAIdzAAcAAASkAB+GUAAAAAAAAAAIAAH9gQAD+voABfhz
-AAf17AAJ82UAC/DeAA3uVwAP69AAEelJABPmwgAV5DsAF+G0ABnfLQAb3KYAHdof
-AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15
-AAH9eQAB/XkAAf15AAGq2yWAAAAAIAAAJYAAAAAgAAAAAASkYXBwbAIgAABzY25y
-R1JBWVhZWiAH0wAHAAEAAAAAAABhY3NwQVBQTAAAAABub25lAAAAAAAAAAAAAAAA
-AAAAAAAA9tYAAQAAAADTLWFwcGyYcjd2/nI/x5EwPxA3BfUzAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAVkZXNjAAAA5AAAAEF3dHB0AAAAwAAAABRrVFJD
-AAAA1AAAAA5jcHJ0AAAEYAAAAEFkc2NtAAABKAAAAzZYWVogAAAAAAAA81EAAQAA
-AAEWzGN1cnYAAAAAAAAAAQHNAABkZXNjAAAAAAAAABVTY2FubmVyIEdyYXkgUHJv
-ZmlsZQAAAAAAAAAAAAAAFVNjYW5uZXIgR3JheSBQcm9maWxlAAAAAG1sdWMAAAAA
-AAAADwAAAAxlblVTAAAAKAAAAw5lc0VTAAAAMAAAAYpkYURLAAAAPAAAAjZkZURF
-AAAAOgAAAeJmaUZJAAAAMgAAAMRmckZVAAAALgAAATBpdElUAAAALAAAAuJubE5M
-AAAAKAAAAnJub05PAAAAKAAAAbpwdEJSAAAALgAAArRzdlNFAAAAOgAAAPZqYUpQ
-AAAAGgAAAV5rb0tSAAAAGgAAApp6aFRXAAAAEgAAAXh6aENOAAAAGgAAAhwAUwBr
-AGEAbgBuAGUAcgBpAG4AIABIAGEAcgBtAGEAYQAtAHAAcgBvAGYAaQBpAGwAaQBH
-AHIA5QBzAGsAYQBsAGUAcAByAG8AZgBpAGwAIABmAPYAcgAgAEIAaQBsAGQAbADk
-AHMAYQByAGUAUAByAG8AZgBpAGwAIABHAHIAaQBzACAAZAB1ACAAUwBjAGEAbgBu
-AGUAdQByMLkwrTDjMMowsDDsMKQw1zDtMNUwoTCkMOtjg2PPVmhwcJaOgnJfaWPP
-j/AAUABlAHIAZgBpAGwAIABHAHIAaQBzACAAcABhAHIAYQAgAEUAcwBjAOEAbgBl
-AHIARwByAOUAdABvAG4AZQBzAGsAYQBuAG4AZQByAHAAcgBvAGYAaQBsAEcAcgBh
-AHUAcwB0AHUAZgBlAG4ALQBQAHIAbwBmAGkAbAAgAGYA/AByACAAUwBjAGEAbgBu
-AGUAcmJrY89O6gAgAEcAcgBhAHkAIGPPj/Blh072AEcAcgDlAHQAbwBuAGUAYgBl
-AHMAawByAGkAdgBlAGwAcwBlACAAdABpAGwAIABTAGMAYQBuAG4AZQByAEcAcgBp
-AGoAcwBwAHIAbwBmAGkAZQBsACAAUwBjAGEAbgBuAGUAcsKkzpCxCAAgAEcAcgBh
-AHkAINUEuFzTDMd8AFAAZQByAGYAaQBsACAAQwBpAG4AegBhACAAZABlACAAUwBj
-AGEAbgBuAGUAcgBQAHIAbwBmAGkAbABvACAARwByAGkAZwBpAG8AIABTAGMAYQBu
-AG4AZQByAFMAYwBhAG4AbgBlAHIAIABHAHIAYQB5ACAAUAByAG8AZgBpAGwAZQAA
-dGV4dAAAAABDb3B5cmlnaHQgMjAwMyBBcHBsZSBDb21wdXRlciBJbmMuLCBhbGwg
-cmlnaHRzIHJlc2VydmVkLgAAAAA=
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0001b.pat b/third_party/boostorg/algorithm/test/search_test_data/0001b.pat
deleted file mode 100644
index 0ec191c..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0001b.pat
+++ /dev/null
@@ -1,2 +0,0 @@
-TU0AKgAfhPqScHN4dnZ2e3p5e3h7eXl4dnd1dnV3enp5dnd3dHV1dHNzd3l3eHh5
-eXZ4dXd2dHNwcHFwcXBxc3h0dHN1eHVzcXV1dXV2c3h5dHV3eHVwcHF
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0001e.pat b/third_party/boostorg/algorithm/test/search_test_data/0001e.pat
deleted file mode 100644
index 2a86572..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0001e.pat
+++ /dev/null
@@ -1,2 +0,0 @@
-iBJbmMuLCBhbGwg
-cmlnaHRzIHJlc2VydmVkLgAAAAA=
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0001f.pat b/third_party/boostorg/algorithm/test/search_test_data/0001f.pat
deleted file mode 100644
index 621f395..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0001f.pat
+++ /dev/null
@@ -1,2 +0,0 @@
-q/y9PZ3uHj5ufo6UJDQ0NFQz8+RUZBQEBAOzo6Ozs4Nz06Ojs4Ojo9PT47Ojk0
-Nzc7OjQ6NzU6OjgxMzg1OjY2NjU3NTU1Nzc3NTU1NzU
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0001n.pat b/third_party/boostorg/algorithm/test/search_test_data/0001n.pat
deleted file mode 100644
index 7a6966f..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0001n.pat
+++ /dev/null
@@ -1,2 +0,0 @@
-TIzMjIyMjM0MjM1nTQzNTc3MzY1NDU2NzQ2MjEwMjU1MTQ2NzU0NDI1
-NDMyMzQxMzQ0NDU1MjU2NTc5NzU1NDc4ODY1
\ No newline at end of file
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0002b.pat b/third_party/boostorg/algorithm/test/search_test_data/0002b.pat
deleted file mode 100644
index 1d19b5e..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0002b.pat
+++ /dev/null
@@ -1,170 +0,0 @@
-TU0AKgAfhPqScHN4dnZ2e3p5e3h7eXl4dnd1dnV3enp5dnd3dHV1dHNzd3l3eHh5
-eXZ4dXd2dHNwcHFwcXBxc3h0dHN1eHVzcXV1dXV2c3h5dHV3eHVwcHF1d3V0dXJy
-cXNzcHBwcHJyc3R0dXl7eHJycnF1dHV2d3h4eHV0cnRycXN1dXN0c3R0c3R0cXJx
-cHNxb3B0c29scHFybm9sbGpscXJ1c3NycXN0cHFvb3JzdHBycG9vb29vb29ubXBt
-bG9vcW5tbGptb3Fwb3Bwb25vbXFtbGtvbGlrbnBuamxsbW1rbW1vbm1ub3Btamxw
-bWpsbG9xcG5ua2tpamhqZ2hnam5saWhsbG1nZmZnZ2lnamtxa2ppZWZmZWlrZ2hp
-Z2hraWRmZ2psa2pmZWVkZmplZWZiYF1gZGVlYmJiY2RhY2NiYVxdW1xgXl9iZV9d
-X11hXF5gZWJhYF5dXWFmYl9hXl5gXmBhZGFhYV1cYWFiX2FgXFxgYF5iYmNiYWRk
-YWFjYGFgXl9jYmVlZGZiYV1cXFtbW1pYV1dVV1VWVlFQT1FRUVBRTkxLSktNTVFP
-UU1RUVBSUlBPUlNTUVRWWFpcVldXWldXVVhYVlpbWV1dX1tbW11fYWFdXV5iY2Rg
-YFxeYmNmZGJgYFxdYmdjYWFhYGJoZGReXF9gX2BhYWJhXltdXVtaXGJlYV5gYF9f
-XVxcX2JiYWJkZmNjZGNhYmFcW15dXmBgX1xdYF5fXF5eX15bWV1dXl9eWlldXGJf
-XmFgXVpcXF1dXlpdXl9dXWRlYF5cXFtfYF1eYl1hX1xbXV1ZV1hZWltbW1peW1lb
-XlteW1xeXlxbXVpYW11dY19fW1xfX2NhXV5hZWNfX2FjZGJlZmVoaWtlY2FkXl5f
-YWJjX19fX15dXWBjYmJeWlteXVlaXmBeXF1fXVxdXFtaW19dXFtfWllZWVxeXlxb
-XV9eW1xcXF5cW1paVVdbWVteW1pbWlZeYV1ZV1tYVlVWV1hbWVhcWVhVWlhbWFtX
-V1ZWWFtXVlZaVlRVVVdVVldWVlNRU1NTUVNXVVlZWVZVVFNWVlRTU1JVV1ZWVFRU
-UVFVWVpZWlZWWFpYWllcXVpYWFdWWFxbWFlYWVhVVVRVV1dZWllYV1lcXVpcWVhY
-V1VbW1tVVVdaXFpXVlZZW1leW1lWWFdaXVpXWlZXW1xdXFxgX1xYXVxcXFhaW1tb
-WlxcWltdWldXVldXWVZVUVJVWFlcW1tcWVpgXF9hX11aWVpdXFxeXFtbX19aW1lb
-W1paWlxdXVpaXVxeX1xdX2JjYV9gYl9cW1xbWFpeXl1gYGBgYFtdY2JhYmBgYGBe
-YGJkY2NjYmNjYGNiY2JgX19hYV1gX2BgX19hZWRiZWJjZWVgYmFiZGRmaWdmYWJj
-YmJjZGFhY2FhY2RkZmVmY2FhYWVnaWVjZGNhY2RlZWZqZ2doZ2RjZGRmaGVlZmZm
-Z2dnaGhoZ2hmY2VmZWZjZWNlaWdlZmVoZ2hpamlra2hram1samtqZWpuc2tpa21u
-bmtraWtqYWNmZmZmZGVpZ2p0kMLk8vv+/////////5drcHZ1dnd7fHt6end0c3V1
-dnR0d3l3d3Z3fH16dnV2d3h0d3l2d3h4dXZ3d3d3dnBucW5vb3BxdHR3d3Jzd3Vz
-dndzc3V6eHVyc3JydXZxc3Z1d3V0eHZ1c3Bvc3V0dnR0dXR0dHd4dnRxc3J0c3d3
-dXNycXd1dnVzcHJ0d3h0dHFwcXFzc3N0dXFxcXFwcHFwcnFycHJwcHJzc3Fwc3Nx
-cnRyc29ucHJ0cnBwb29ub3Fwc3R0cnJubnBycG1ucHBxcXBuc3FwbGpoamlsb3By
-c25qaW1tcW1qbXFua25xb21vcG9tbG5qa2doaW1sbGpqamxsaWpmZ2VlZ2tqZ2Zn
-amVoa2tvZ2ttamxpamlmZmdoZ2dnZ2traWZmaGlpa2loaGdmZ2ZoZmRnZ2ZmZ2Vl
-ZmJiZWBhYmBhZGBhX2BfXVxfYmJkZF5dY2JjYWNhY2BgXl5gXl5dXl5cXFxgZGVh
-Xl5eXlxeXl5iYl9fYF9iYWFhZWRkY2RgX2BhX19gY2VlZWRkZF1gYV9bWV1dWVxX
-WldXVlZVU1BQUE5QUU9SUU5NUE9OTk9NT1FRUk5PUVRUUVJSVFVYWVpXVldVVlRV
-V1dZXl9bW1pYWVhaXl9gXl5bYGJhYWFhX19jZGZmZGJgYF9eX19fYWRmX2RgYWBe
-XmFhYWNhYmBeXV5eW1xdYF5dX2NiX19hX11dYmVoY2RlZWFeYF5fXV5hYGFgYF9i
-Y19dXl5dYGNgXFZbXlxZWVxhYl5gYF9cXV1cXFxdX19gYVxZW11dX2NjXF5eXF5e
-Xl9eYGJfXl9eW1xcX11ZW1xdW15eW11gXl1dW11cX15dXl1cXF5iX2BgYV9iYmJf
-XF1mY2NiamZiYWNiY2JiYmVpbGdkYGBmY2VoY11dXFxcX15iYmFiYmBeW19hX19h
-XV5aYlxZWlxcW1xcW2BfW1hZXF5cXVtcXlxbWldYW1xcXVpYWFlYWllbW1paWFpe
-XFtYWFdUU1ZVWFhVWFteWFdYW1pYVlhVU1RVWVZXVVdXVVFSU1ZWVFNSVVtSUlJT
-UlNUVVVWVVVWVVJWVVVTVldXXFlXVVlbVVNUWVtXWVtVVVZVWltZWltYVVVZWVlY
-V1ZXWVVUV1dXWl1dW1tcX1lZWl5cXVtcWVVXXFxZXVxZVVRUWVlcWVZWWFhaWFtd
-WV5YVlZZWVxbXFxdXFtbXl5dXVtbWV5dYF9cXl9eXF9iXl5bWlhbWltZWFpgXlpa
-W15iXF1jYVtcYGBfXVpbXFxcWlxbXFtZXFxaWllaXl1aW11gXV5fX19fX19hXl5f
-YGFgYWBhXmBiXlxfXFtcXmBgYmJkYWJgYmNgYGJiYWBfX19jZWBeYWJgYFxcY2Vj
-ZGJfX2JiYWNmY2RjYmFhYWFjYWNpZmNjZ2ZkZGBiYmVmZ2VjYmNlZV9iZWhlZWJi
-Y2JmZWdoZWVoZmdrZ2VmZmhmZGRkZ2VmZmVkZmppZmVnaGhqamhiZmZnaWdnamZm
-ZmtubGtpZWhna21rZmRpaWVlZ2l0bGtrbG1obmdmamZnaGVkZGNobYekwePx+///
-////////l2pzdnh3fHt8enp5fnl2d3Vyc3d5eXd1d3d3eXx+eHp3eHR0d3d6enl1
-dHNzc3d1dXNwcHJ0c3Byc3V3eXVycnZ5enl3dnh3dnJxdHZ3dXh8eHl1dXZ3dnZ1
-dHV0ent2dXl3dXV1d3h3dnZ3enV0d3d0dXN3dXZ1dnd3d3h6eHZzc3JwcnN0dHJy
-cXFub3FycW9vcHBvbGxsbXFzcHF1c3N0dHR0c3Bvc3N1cnFvcW9tbXFzcm5ub3Bw
-b21ucXBxcXBvc3Bvbm5sa25sbW1vbnJ0b2trbWpqbGtsbnFxcG9vbm1xbm5tbGpp
-amtqbm1rbW9sa21taWhoZmlqaGlramxqaWdraGdqaWluam9ua2psamhkZmxpaWpq
-aWVlZWdnaGZjY2RpZmZnZmZmZ2ZnZGFhZWNgZGJhY11dXV5hXF9lZGRiY2JhYV5f
-YGZgYF5fYWJgYmBgX15gZF9eYF1gYlxeYV5fYV5eYF5gYWJkX2JhZGNlZGJjYmFh
-YWNgY2VhZGZkY2JgYWBjY15fXFtaWllaXFlYVlNRUVNTUU5PUlNSVlRPU09PUVJS
-U1FNT1JVUk9RU1NRU1NSVFRWVllWWFtXWFhaWlpeXl1hYV9cXV5fXl9gXmFgYGBh
-ZmJjYmJjYV9fX19dYWFiZmRiYWBeX2FcW15fYWBfXmNkX11cXF1cXVxfYl9hYWJd
-X2BhY2JjX2BkZGFgYWFfYWFgYF9eX2JiZGVjY19dYF9eXV1eXF9hXmBeXF1fXV9b
-XGFhY2FkX11dW1peXl9hYWJgX19jYV5dYGFgYGBdWV5cXV9hYGBcX2BhYF5gZWBb
-X19dXWBeXl1eYWNhXl1eYGFhYGFgYWFhZGNiZGZoZWdnZWNgXWFhX2NlY2NjYGBl
-Y2NjY11bXl9eXmBiYGFgXF9kYF5iYF9iYl9cW15aWV5fXFtaWVpdW1xfYGBfXV5d
-XFtcWlhXVlZWWFhYV1ZWV1daWlhbWlpcXFhaWFdYVldYWFlZWWFYWltYWVhSVFdV
-VlpWV1RYV1VTVFRYVVRWU1RUVVhWVVRUVFZXVFVXV1VWV1ZYV1lcbltbWV5ZWlpX
-V1VSWFtfW1laXV5bWFpZWlpYWVhYW1hYV1daWVlZV15dWlxgXVpZWV1hXFpbW1dY
-WlpZVldXWlpWVlZXWlhaWVlZWVpZW1xbWltcWVhaXVxdWVxZWVtaX2BfXFlaWVte
-XV5dXV9eXF9bW1xbXl9hXFdcXlxbWVpbXFpcWF1gXV9gYF9aW11cX15dX19gXFtY
-WVtZW1tdXWBiYWBfXV5cXV5dX15eXV1iZWRhYWFiYF9eYmBeXV5dYWBdXWBlY2Fh
-YWRfYF9hY2RhZGJjZGZjYmJfYV5fZGJkYWBgYGBjY2RlaGNhY2BjYF9eYWJgY2Vk
-Y2JlZWRhYmRlY2FjY2RjY2RjY2NjYWBkY2NjZGVjYWRnaGhramtqZmdnZmhoZ2pn
-aGhoZmhpa2lpaGhoZ2hlZmdqaGZsbWhpamlqamppa21qaGhmaGxqZWVra25sbW5p
-aWhraWprbW1pZ2ZoZ2hsfKXG4/D7//////////+UbnR5e3t6eHmAfH58eHZ2dHd2
-d3x3dHZ2dnl4eHt8fHh4dXVydXR3eXd2dXR4d3Z1dXZ1dnhydHd5d3ZzdHV4dXR2
-eX13dnV2dHN1eXh4eHd3dnd2d3Z3dXZ3dXZ3eXh5dnR4eHV2eHt8e3l1d3d1dndz
-dXV2d3Z1eHh0dXR2d3V0dnNxcXFvb29yc3FucnJycW9ycnBvbG5ycXF0cnNyc3Nz
-cnFydHFvcHFycnNucXJvb3FzcHBxc3NxcG1ucG9wb3NxcXJwa2xtbXFwbnJtbnFv
-b25uamtqbG1scHJwa25tbXBycG9tbW5vbnBvb2xtb2xxbmtsbGtpamtrbGtnamlq
-a2pnaGhmZmptaGhoaGxqaGhpaGhpaWlpZmVkZWdoZ2lmY2RgYmhmamdlamdhYWNg
-X2BgYmNhYV1eYWFmZGRkZF9cXWBgX19fY2NdYWFiYWJhXl5kYWJhXF5eX19hYmZi
-Xl1gYmJiYV5iY2FhYmRhX2BlYWFiY2FfYWJhYmRjZGJeYmNmZGBhYmFaWVpaXFtZ
-V1ZUU1NSVVVXWVNSUE5NT1JPTk1NT09OUlVOTk1PU1NRU1JTVFRRU1NWV1dXWlhb
-W19bX11cX15fXFpeXV1dXV9fX15hYGBhZWJhX2FhYGBeXmFmZmJhYV9fYWFeXl1b
-XFxcXWJgYGFgXV5eW1peX2BgYF5iY2RiYV9gYmFhX2BhYmNiYWBhYWJfYWFfYF1f
-YWBgY19fXV5kYVlcX2FgYF1eW11gX15dXGBgX11hXltbW1teX2FdX15cW11cXF1e
-X2BfYGRmW1tfYFxeYWNiX15dXmBcXF5gX2RfW2BjYmFhY19bWl9hXl9hYmBeYWVk
-YmRiYmNjY2NjY2FjZGNgZGJjYmBfX19eYmJeXl1eXF5gXV1dXV1cXFtbW15fXV1e
-XlxdW1xeXmBbWVpYVltgYF5dXV1eWVlbW1xZVlZVWFhcXl1aWllWWllbXVxbWFha
-WFpbW1lZVldXVVZdV1daW1hYV1hVV1hTV1VUWFlXVVNVVVJSVVVWVlJWVlZWVFRW
-U1VVUlVUV1RWUlJYVldXVFdcXVlYV1JQVVlTVVZXV1ZTU1dYV1VUWV5bV1RWV1pY
-WFlbWldZWlpaWFdZXVlcXlpcXFlcWltfW1hZWltZXVteWFRSV1hZWlpZWFhXWFla
-WFheWVtZV1laW1tcWFlcXF1eXVtYWlpbXF1dX1xbWltcXl5hYVtZWl5ZWVxeXV5d
-XltaVlhaWVxcW1pbXV5eXlxdXV9eWllbW1tfXlxeXWBeXGFeXl5gX2BfXlxgY2Ji
-YWFhZWJgXWBfYV9dXl1cX2BeXl9eXFtcX19gX15hY2NgY2NnZWFjY2FkZGJjZGJi
-Y2BhYmNkY2NhYWJjZWJiYGJkY2BhZWVmYmRkZmVmZGNiX2FkZGNkZGJjZWRlZGJi
-YmRjYmhjYGJmaGlmY2NnZWhlaWtramhmZmZqZGhpaGZnZmRnaGloZGhqbW9tamdp
-bGtubGlsbmxpbW9wbGtra3ZoZGdpamhnZmhpaGhtbW1ra21qaGt+psnk8Pr+////
-/////3ZnbnV6fHh2eHp7fHx6d3h3dXV6eHd5dXNzc3h5eXl1d3d3d3h2dnd0dXR0
-d3h5eXh6dnV1cXR1dHV1dHR2d3h3d3p4enp5c3Z3dXV0dXl4dXV4eXt5dnZ0dnp5
-eHZ4dnd1dnNzdHJ4fHx8eX16eXh2dnR0dHV2eHV0eXRycnB0cnNwc3dzb3FvcXNv
-b3Fyc3Bxc3JwbmxucXJycnRzcXBwdHV1dnVzdHJvcnFycnZxcXFxcXJwb3BvcXRw
-bXFvbXBubGpwcXFtbG5ub25xcW9rbG1tbGxta2xrcnRxcm5qaWxra25wb3Byc3Zw
-b2xsbWtub21ra2xpam5tbG9ybGxrampramlpbHVza21maGxqaGhnaWxrbHBsa2tp
-amhnZ2ZmZWViYGNjZmRjZWRoZ2VhX2JjY2JiYGFhYGBfY2dnY2FiY2BiX19fX2Fh
-YWJgY2JjYGFgYV9dXF5fYFxfYWJkYmJeX2BiYmFiYGFkZGJfX19iZGlhYGBkZGJj
-YmRlZWRhYWBhYmBcXV1dXVxbW1xbW1ZXVFBXVFNUUFRUUk5RVldQT1FPTkxMT1JR
-UFFRUFBRU1NRUFBRU1RRUlRVUlNWWFhaWV1eW1tdX2BgXllYXV1bXWBgZWNgYV9g
-X2JfX2JlY2VmZ2RmZWZiYl9fX19hX2BeW15hYF9dW15gYWRiXVlbXF9hYmNjZWBm
-YmFdY2RkYWFhXV9gZGFhY2JeY2FeXmFgXl9iYl5dYGFcX15dYF5dXVpZXGBfXl1d
-W19gYF9cXF1cXl1bX2JeXl9aW11cXF5fYGBhY2ReYF9dYWFjYmFfXlxcXVtdXlpe
-X19lY2NgYWBeYmJdXl1hY2JiYl5fYGBhYmRkZGJjY2JjYmJiYGFiYmBfXV5eXV1g
-Y2VgX1xbW1teXVpZW15fXVxeXVteW1lZW1pcXFxfW1tZWmBeWl1eXV1eXlxYWlxc
-WlhZWFlYWFdbWltdWVlZXFlaW1xaWFdYXFtaWFhYVVZZWVhYWVdWVVZZWFxXW1lU
-WVhVU1dXU1JVVVZYWVlaWllYWVZWVldWV1hWVlhXXllYV1ZYVlZVW1lYWFRUU1RX
-VlZYWVlYWFhTWFZYW1xaW1lYWVtZVVZXWVpXWltZXFxaWFpXWV9fWltYVVhdWlla
-WltZV1hYWVxbV1hWWFxbWl1cWlddWVlbW11aV1daXFlZW1xbWlpeW1pdW1lXW11e
-W1tbXFhaXl1bX19cW1tcXF5cXF1aV15eWlpbWl5cXlxcWltcXFxbWlpcXFxdXV1i
-X1tcX11gX2BhYWNhYF1eYmJfXmFnZWBeX2JeX2FfX2FjYWNeX2BfYmRkYWBfXl1f
-YmNgX2BiYGFhYmNmZmZlZmRjZGVlZWZkYF9kZGNfXl9gX2BjZWVlYmJhZGJlZGJl
-Y2JkZmRjY2ZnZGVkaGZlamJiYGRlZGNhY2hoZGNiaGVpZmtpZmBkZ2plYmRnaWdl
-ZWdpZGZnaGdkZGlta2pqaGlnZ2RoamlmZ2pta2lqa2tqbG1sbGtqZmtsamhqbGlr
-aGloa2dnbmpwZ2lra3uly+Hv+P7/////////cHR9e3x6d3Z3eX16eXh3d3p6dnh3
-e3l0dnVxc3Z3dnd5dHV2eXh1c3N0d3Z4eXl8fXyAenl4c3RzcnJ2enZ2dnV0dnl+
-e3d0c3V2dXR4eHh2dnZ1eHZ2dHh4d3V3dXR1dnZ1dHR0dHd3eX97eXl2d3R1dnd2
-dnd5c3JzdXJ1c3Z3dXZ2eXRwc3FycHBwbnBvbnB3dXV1c3RwcW9ydXJzcnR3eHZ1
-eHR0dXRzb29wc3FwbXBtb21ubWxtcnNxbm1rb3Fubm9ubm5vbmlqbmxpamxvbW1s
-bm9vb3FycHJsamtra21rbW1vcG5xb25tbGlpbWttbmxsbGxpa2hoamtrbWxqamxt
-bW9ub21rbnBoamdlZ2ppbW5tbW1vbmxqZWdmY2VlZGNjZWZlYmVjZGBiZGBfYmJp
-Z2NkZWNgX2NjYmNhX15lZWBdYF5iXVxbYWBgYmJgYF5gX15cX19jY2BeXmBfYGJi
-YV9gYmFfYWJjYF5gY2RiX2FdYWJlZGRkZWVmZ2NhX2FhXmNiXVtcXV1dXVxaWFZX
-WFVUUlNTU1VTVVNVT05QUFFNTU5MTU9RT1JTUFBRU1BRU1VUVVVTU1NWVFZaW1pY
-WlpaX11eX15cXVxdXlteXF5hZF9fX2BiYWFgXmBhYmJiZGVkY2RjYGBfXWBiXVxg
-YWJhX19jYF9hYGFfXFpbXWJjYmFgYGFiYmNiY2VhXV9fZmNkYWFiZGNjYF5cXVtb
-Xl5eX2RkZF9eXF5dYF5hXlxbXFlYW2FiY2JfYlxZXl5dXV9dXV5gXl1cW1tbWVpd
-Yl9fYl9eY2FdXV5eXlxdXFtbXmBgYGJiYmhlX2FeXl5fYF9fX11fYGBiYGFiYWFl
-Y2NiYmBiYWJnZ2FfY2NgX2JkYmNlYl9fYFxdXV5fXVxZWlpdXF1cYF9gX19eXlla
-XWBiXlxYWFxcXVtdXFpfXmFgW1lbWlpYW1pbW1tZWFtdYF1aWVpZXWFfWFZUVlZX
-WVtaWFZWWVZUV1dYWFVUVFZVWFxcXVtaW1hWVlVUU1ZXWVhbWFlWVFhaW1lYVlVU
-V1pYVllZWFdaW1pYWFtVVlpcWlRUWFdXWVlWWVtbW15YWVtbXFtXWllWVVdZVlZY
-V1hZWFlYV1daVVZZV1tcXF5cW1paWVheXl5dW1xcWVpbWFhXWllYW1hZXlpWXFxc
-W15aWVpZWllZW15dWlxbW1lbXVxaXF5cXFpaWlpbXVtcXFxcXl1aWFpcYWNfXFxc
-XF1dX2FaWVtaWVpbXVxbVltaWVpfXF1cYWBeYWFeX19dX2FfXmBfX2NqZGFfYGJg
-YGBjY2VhYGFmZGFgYWJgYV9eXl5fYmNiYWJiYF9eX15gYGBiYGFjZGNjY2JjZmdj
-ZGFhX2JfYGJhY2RjYWRjYmNiZGBkZmZmZWRkZmVmZmVlZGlmZ2lpaGZmZWdkZWRl
-aWhjY2FjZmNmaGlnZmtqZ2dkZWdkZWVkZGdkZ2hpamhoZ2ltZmZnZWVlY2dpamtr
-a25wbWppbW9vcHFxb2xxbGxva2xoamlpaWprbGprb21mZ2h3gqDN4u/3/f//////
-//9ueHx9fnx5eHl8eXp7fnl4eX14e3l5d3l3dHV0dXZ4dHZ1c3N0dXRzdnR3eXd4
-dHV6e359eXd4dnZ5d3d3dXZzdHZ0eHh3d3h3dXh1cnB0c3R2d3Zyc3R4enp3eXh4
-dnd2d3p7e3x6dXh2eHd2dHd5dHR0dXd5d3V0dXR1dHRzdnZ3eHhzdHd4eHJxbmxu
-b29wcnJzdnV0dXBydnNxcHFwcnV2dHd1dHV3dXRycnRzdHFtb3FxcXBubm9wbG9w
-b29vbnFycXBsbW1xc3Jtbm9ubW9ubWxtb25wdG9tbm1wcWttbW5sbWtwbnFsamps
-bGtoaWtvcW9tbGxsa25vbWtoaWtsa2psa2tpbWxqbGloZ2loaWtsbG5tbGxraWlq
-a2lnZWVlY2JiYGJnY2JkZV9hY2JjYmBga2dhYWNkY2ZkYWBfXmBgXlpdYl9bW11h
-ZGRhX19fXGFcXWFfX19hXmBhYGBiZGZlY11cXWNkY2ZmYmJgXl9eXmFhZGFjY2Nm
-Z2hmZGJjYl9eYGNhXl9eXFpcXVxbXFhWU1FRUlNSUE1OUFFNTE1NTUxPT0xKT1JY
-U1JSUE9SVFZTUlJTVVdZWlhZWlhZWVhdXF9hXV9cXVxeYl9dYWBdXV9gYl9fXmBg
-ZGNfX2NjY2NkYl9jYGFjY2FfYV5gYV9gY2JhYV9lXWBiXmFjX2BfXV9eXF5dX2Bf
-ZWNkZWhmX19dYF9jYmFfY2NhXF1dYV1dXFxdXl9gYGFdXGBeXmFhXFtdXVxcXl5g
-YmFeXVtcYGBcXl9dYFxfXl9fXl9eXV1eYWBeXVxdYGNhX2FfYWFgYV9eYGNgYWJj
-X19gX11cXl5dXl5hX19hYWFkZGJiZGNiYmJjYWFjY2FfXmFkY2RlZmdhX2BhXmBi
-ZGRiY19dXVxZXF1eXV1gYV9eYl5hX19eX2JdWl1cXVxcXmFeXF5eXV5bXmBkXFla
-WFtbWltbWllbW1hbWVlZWltZW1hXWFdaWlpZW1tVVldYV1ZXWFRWU1NWV1hYWVpY
-V1hWVlRTV1ZXVlVTUVNaVlVUWFdVVVNUWl5bWldYWFpbWlhZV1ZYVldcWltaWVdY
-WVdVV1lbW1hXWFpXVVZWVlVUV1dZVVVVVVZVWFpXVlhaWlpYWFtaXWFdWllZWltb
-W1pbW1lYWVxZWFhUVlhZXF9bWVtYXFxcW1pdXV5YWFlXWltcWldaWVlbW1pXV1pa
-W15hXVxgXVlZW11aW1laW11bXF1ZWmBcXFxdXF9cX15aWlpZXFteWl1gYl9hW1la
-Xl1eXV1eXV9dXl1dXV9hYGFhX11dX2BgX2RlZWNlYl9hYWNgX2BfW1tdXV9iZGJg
-YF9gYGNiYGFjZ2FgZGFgYWVhY2ZkZWdgYGBmZGFgYWFiYmBfY2RlZWRkZGVkYmVn
-Z2RnamdoZ2VjY2ZnZGJiZmplZmVlaGNkZ2ZmZWFiY2ZmZ2trbGtpZ2hqa2xqaGZn
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0002e.pat b/third_party/boostorg/algorithm/test/search_test_data/0002e.pat
deleted file mode 100644
index 9629c97..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0002e.pat
+++ /dev/null
@@ -1,120 +0,0 @@
-5eXl5uXk5eTl5eXl5eXm5eXl5eXl5ebl5ufl5efl5ebm5+bm5ubm5ebm5ufm5ufm
-5uXm5+bl5+bl5ufm5+bm5ebn5ufm5ubn5+bm5+bm6Ofn5+fm6Ofn5+jn5ubn6Ofn
-5+fm5+fn5+fn6Ofo5ujo5+fo5+fn6Ofo6Ojn6Ojn5+nn5+fn5+jn5+fo5+fo6Ojn
-6Ofn5+np6Ofo5ufo6Ojn6Ojn6Ojn5+no5+bn6Ofn5+fn5ufn5+fo5+fo5+fm6Obn
-6Ofn5+jo6Ofo6Ojn6Ojo6Ono6eno6ejo5+jo6Ojo6Ono6Ono5Ofo6Ono6ejo6Orp
-6ejp6enp6Ojo6Onq6Onp6ejo6eno6enp6eno6ejp6Onp6enp6unp6eno6Onp6unp
-6ejp6enq6ejp6unp6enp6Ono6eno6uno6enp6Orp6enq6uno6unp6enp6erp6Orq
-6Onq6eno6unp6unp6urp6urq6enp6urq6uno6urp6erp6erq6urp6urq6erq6uvr
-6erq6enq6ejq6urp6uvr6erq6urp6unq6unq6Orp6+nq6unp6err6unp6+rp6urp
-6ejq6erp6enp6erp6erp6eno6ujp6unq6+rq6urp6+vq6uvr6urq6urp6+rq6urr
-6+nq6unq6urq6uvr6+rq6uvr6+rq6uvq6+rr6urr6uvr6+vr6+vq6+vr6+zr6+vr
-6+zr6+rr6uvr6+rr6urq6+vr6uzr6+zs6+zs6+zr6+zs7Ovr7Ozr7O3r7Ozs7Ozs
-7e3s7ezr6+3r7Ozs7e3t7O7t7O3t7ezt7u3t7u3t7u3t7+/t7e7u7+7u7+3u7+7v
-7u7u7u7v7+/w7u/v7/Dv7+/w7+/v7u/w7+/w8O/w8PDw8PDw8PHy8PHw8PHx8PLx
-8fLx8vGVjoV7dW1kYmJYVlRVVFRWVldZW1xdYGJjZmlqbG5wcnZ3en1+gYKFh4qL
-jY6OkZKTlJaXmZqdnqGjpqipq6yur7Gxs7S0tLa2uLm5u7u9vr7AwMHCw8PDxsbI
-ycrJy8zNz8/S09PU1dfX2dnZ2dja2tra2tvb2tra2tvb29rb3Nzd3d3d3t/e3d/f
-3t/f39/f197f39/f3+Df39/e3t7e3t/f3d7d3t7e397e39/e3uDg4ODg4OHh4ODg
-4d/f3+De3uDf39/g3d7d3Nza29va2tja2tnZ2NjW1tjX1tbW1tXU1dXV1dXV1dXV
-1dXW1dTT09PS0tLS0dHS0NHR09PS09LT0tTU1NPT0tPT09TS09PT0tLR09PT09PT
-09TU1NTU1NTU1dXV1dXU1dXV1dXV1dXW1tXX2NfX19fX2djZ2dnZ2drZ2tvb2trb
-29vc29vb29vc3Nzd3N7d3d/e3d7e397f3t7e39/f3uDg3t/g4OHg4ODh4d/h4OHh
-4OHh4eHh4eHi4uLi4uLj4ePi4+Li4uPj4+Pi4uPj4+Pj4+Ti5OPj4+Xj5eTk5OTk
-5OXj5OTk5OTk5OTk5OTj5OPj4+Tk5OTj5uXl5OXl5OTl5eTl5OXl5ebl5ebk5eXl
-5ubk5OXn5ubl5eXl5uXl5+Xk5eXl5ebl5ebm5ubl5uXm5ufn5uXn5ubn5ubm5+fm
-5ubl5ubn5ufm5ufm5ujn5ubn5+fn5ufm5+jo6Ofn5ubm5+bm5+fn5ufn5+fo6Ofo
-5+bn5+jn5ujn5+fn5+fo6Ofo6Ojn6Ofo6Ofn6Ojp5ufo6Ojo6ejn6efn6eno6ejo
-6Ojo5+fn6Ojp6Ojo6ejo5+no6Ojo6Ofo5+fo6Onp6Ojn6Obn6Ofm5+fn6Ojn6Ojo
-5+jo6Ojo5+jo6Ofo6Onp6Ojo6Ojn6Ojo6Ojo6eno6ejo6enp6Ojp6ejp6unp6Onp
-6enp6Onp6Onp6eno6Ono6enp6enp6Ono6enp6OXp6Ojq6ero6unp6unp6enp6unp
-6unp6eno6enp6urq6enp6enq6unq6enp6erq6urq6unq6unp6Orp6enp6erq6erp
-6erp6urq6urq6enq6+rp6urq6unp6ujp6unq6enq6evq6unq6unq6uvq6unq6urq
-6+vp6ujq6unq6urp6unq6unp6+rq6evq6urp6unq6+rq6urq6erp6urr6+nq6urp
-6unq6+nq6erp6erq6erq6erp6urq6urq6evq6enp6enp6ero6unq6+nr6urq6+rr
-6urq6uvr6urq6+vr6+vq6+zq6+vr6+vq6urr6+rr6+vr6urr6urq6+zr6+rq6+vr
-6+vr6uzt6uvr6+vr6+vr7Ovs6+vs6+zs6+vs6+rr6+rr6+vr6+vr6+vr6+zr6+zt
-7Ozr7Ovr7Ovr6+zt6+zs7e3t7O3s7O3t7ezt7O3t7ezs7O3u7ezt7O3s7u3t7e3u
-7ezu7e3u7e/u7u3u7+3u7u/u7u/v7u/u7+/v8O/v7+/v7+/v8PDv7/Dv7+7v7/Dx
-8PDw8PDx8PHv8fHy8PHw8fDx8fHw8fHy8vLy8qOdlpGLhH56dXRycHFxcnR1d3l6
-fH+ChIeJi42PkpSWmZyfoaKmqKqrra+vsLK0s7W3uLm6vL2+wMLExcfGyMrKzczN
-zs/OztDQ0tLT09TV1tbW1dfY2NfY2dra2trc3Nzd3N7e3t7e39/f39/f4ODg4ODg
-4eHg3+Hh4eDg4OHg4ODh4uHh4OHh4uHh4eHj4eHi4OHh4uHi4eDh4eHg4eHh4eHi
-4eHg4eHg4eDi4eLg4eLj4+Li4+Li4+Hi4eLg4eHh4uHi4uLh4OHg4eDh4N/e4ODf
-4ODf397e3d/f3d7f393e3t/d3t7d3d7d3d3d3d3d3tze3d3c3tzb3d3d3Nzc3N3d
-3d3e3d3d3d3d3d3d3t7e3tze3d3e3d3e3dzf3d7e3d7d3t7d3t7e3t7e3t7e3t7f
-3eDg3+Df4N/f4ODf4N/g4N/g4ODh4OHh4eHg4eDi4eHi4OHh4uHh4uLh4eLi4uLj
-4eLj4uLj4+Lh4uPj5OLj5OLj5OPk4+Tk4uPj4+Tj4+Ti5OPk4+Pj5OTl5OXj5OTj
-5OPk4+Tk5OXk5eXl5eXl5ubm5eXl5eXm5uXk5eTl5ubl5eXk5Obl5eTj5OXl5OTl
-5uXm5ubl4+bl5uXl5ebk5uXl5eXm5uTm5ubn5ubm5uXn5ubm5ufl5ebm5uXm5+Xm
-5ufm5+fm5ebm5ubn5ujn5ubl5ubn5ufn5ufn5+fn6Ojn5+fn5+jn5+fm5ufm5+fo
-5+fn5+nn5ufn5+bm6Ofm6Ojn5+jo6ejn6Ofp6Ojo5+jo6ejo6Ojn5+jp6ejo6Ojo
-6eno6Ojo6Onp6efn6ejo6Ojo6ejo6enp6Ojo6ejo6eno6ejp6eno6ejo6Ojn6Ono
-6Ojp6enp6Ojp5+fo6Ojn6Ojo6ejo6ejo6enp6Ofo6ejo6ejp6Ojo6eno6ejp6erp
-6Ono6Ojo6eno6eno6enq6erp6uno6ero6Orp6unp6erp6unq6unq6urp6uno6ero
-6Orp5eno6erq6urp6unp6erp6Onr6erp6+nq6unq6enq6+nq6urp6urq6erp6urp
-6urq6urq6urq6urq6+rp6+rq6uvq6Orq6unr6urs6urr6urp6unq6urq6urq6+rq
-6erp6urp6urq6urq6urq6err6err6+rq6+rr6urq6+rr6urr6urq6+vq6uvq6+zq
-6urr6uvr6+vq6+rr6urr6uvq6uvq6uvq6+vr6urq6err6evq6uvr6urr6urp6urq
-6+rq6unp6urq6urq6urq6uvq6urq6+vr6+vr6+vr6uvs7Ovr6+rq6+vq6+vq6+vq
-6+vq6+vr6urr6+zq6uzr6+zr6uvs6uvs6+zr6+zr6+zs6+vr6uzs7Ozs6+vq6+zr
-6+zs6+zr6+zs6+zs7Ovr7Ovs7Ovs7Ozs7Ozs7Ozs7O3s7Ozs7Ozt7O3s7Ozs7u3t
-7e3t7O3t7e3t7e3t7u3u7e3u7e3t7e3t7e7t7u7u7u7u7u3v7u3v7+/u7+7u7+7w
-7+7u7u/w8O7v7+/w8O/x8PDv8PDw8fHw7/Dv8fHx8fDx8fHx8fHx8fHw8fHx8vLy
-8vLxs6+qpqGcmJWRkI6Oj5CQkZOVl5mbnaCipKepq62wsrS2uLu9vsDDxMXGx8jI
-yszNzc7P0NDS0tLU1dXY2NjY2dnc3Nvc3Nzc3Nzd3d3d3t7e397e4N/f39/g4eHh
-4OLg4eHh4eHi4eLh4uDh4uHi4eLi4uPi4+Hh4ePh4uHi4+Lh4uLj4uLi4uPi4uLj
-4uPi4uLi4uLi4+Pi4uPi4uHi4uPj4uLj4uLi4uPh4eLi4uLi4+Li4+Pj4+Li4+Pj
-4uLi4uLi4uPi4+Lh4uHi4uLi4uPi4eHi4eDh4eHf4eHh4eLi4OLh4ODg4eDh4eHh
-4eDh4eDh4eLf4eHg4N/h4ODi4ODg4OHh4eDg4OHh4eDh4uDg4uHi4eLg4OHh4eDh
-4OHg4uHh4eHh4eDg4eHi4eHi4eHi4eLh4uLi4uHi4uLi4uPj4uPj4uHi5OPk4+Pj
-4+Pj4uPk5OLi5OPj4+Pk5OTi4+Pj5OPk5OPj5OPk5OPk5OTk5OXk5OTk4+Tl5eTk
-5eXk4+Tk5OTk5OXk5OXl5eXk5eXm5eXk5OXk5uXl5ebl5uXl5ubn5eXm5ubl5ubm
-5uXl5eXm5uXm5uXl5eXl5eXk5eXm5uXl5ubn5ubl5ubl5+Xm5ubm5ubn5ubn5efm
-5+fm5ubn5+fn6Ofn5+fm5ufo5ubn5+bm5+fn5+fn5+fn5+jm6Ofn5ujn5+fn5+fn
-6Ojo6Ojn6Ojn5+jo6Ofn6Ofo5+fn5+jo6Ofo5+jn5+jn5+jn5+jn6Ojo5+jo6Ono
-5+no6Ofo6Ojo6Ofp6ejn6Ojo6Onn6Onp6enp6eno6efo5+jo6Ono6Ojo6enp6Ojo
-6enp6enp6Ojq6Ojo6eno6Onp6ejp6unp6ujq6unq6enp6Ojp6Orp6Onp6enq6enq
-6eno6Onp6eno6ejp6eno6Onp6erp6ejq6eno6erp6enp6enq6urp6Onq6unp6urp
-6+nq6enp6enq6unp6erp6+nq6enp6unp6enp6erq6unr6uvq6erp6+rp6erp6unq
-6urq6urp6urq6err6uvq6urq6urp6+rq6uvr6uvq6+rp6urq6urq6uvp6+nq6urr
-6unq6+vq6+rr6urq6urq6uvq6urp6urq6urr6urq6urq6urr6+rr6urq6uvq6urr
-6uvq6erq6uvr6+rr6urr7Ovq6uvr6+vr6+vq6+vr6+rq6+rr6+vr6urq6+vr6uvq
-6uvq6+vq6+zq6uvr6+rq6uvq6+rq6urr6+vq6urq6+rq6urq6uvr6+vq6+rq6uvr
-6+zr6+vr6uvr6+vr6+vs6+vr6+vr7Ozr7Orr7evr6+rr6uvs6+vr6+rq6+zs6+vs
-7Ovs7Ozs7Ovs6+zs7Ozs7Ovs6+zs7Ovs7Ovs6+zs7O3s7Ovs7Ozr7Ozr7Ozs7Ozs
-7O3s7O3s7e3t7Ozr7Ozs7uzt7ezs7O3t7uzs7e3t7uzt7ezu7e7t7u3u7u7u7e7u
-7e7u7u/u7u/v7e/u7+7v7+/v7u/u7u/w7+/v7u/w7vHv7u/v7vDw8PDw8PHx8fHx
-8PDw8fDx8PLx8vLx8fDw8fHy8vLy8vLy8/MAEAEAAAMAAAABBJcAAAEBAAMAAAAB
-Bt4AAAECAAMAAAABAAgAAAEDAAMAAAABAAEAAAEGAAMAAAABAAEAAAERAAQAAAAQ
-AB+FwAESAAMAAAABAAEAAAEVAAMAAAABAAEAAAEWAAMAAAABAG8AAAEXAAQAAAAQ
-AB+GAAEaAAUAAAABAB+GQAEbAAUAAAABAB+GSAEcAAMAAAABAAEAAAEoAAMAAAAB
-AAIAAAFTAAMAAAABAAEAAIdzAAcAAASkAB+GUAAAAAAAAAAIAAH9gQAD+voABfhz
-AAf17AAJ82UAC/DeAA3uVwAP69AAEelJABPmwgAV5DsAF+G0ABnfLQAb3KYAHdof
-AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15AAH9eQAB/XkAAf15
-AAH9eQAB/XkAAf15AAGq2yWAAAAAIAAAJYAAAAAgAAAAAASkYXBwbAIgAABzY25y
-R1JBWVhZWiAH0wAHAAEAAAAAAABhY3NwQVBQTAAAAABub25lAAAAAAAAAAAAAAAA
-AAAAAAAA9tYAAQAAAADTLWFwcGyYcjd2/nI/x5EwPxA3BfUzAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAVkZXNjAAAA5AAAAEF3dHB0AAAAwAAAABRrVFJD
-AAAA1AAAAA5jcHJ0AAAEYAAAAEFkc2NtAAABKAAAAzZYWVogAAAAAAAA81EAAQAA
-AAEWzGN1cnYAAAAAAAAAAQHNAABkZXNjAAAAAAAAABVTY2FubmVyIEdyYXkgUHJv
-ZmlsZQAAAAAAAAAAAAAAFVNjYW5uZXIgR3JheSBQcm9maWxlAAAAAG1sdWMAAAAA
-AAAADwAAAAxlblVTAAAAKAAAAw5lc0VTAAAAMAAAAYpkYURLAAAAPAAAAjZkZURF
-AAAAOgAAAeJmaUZJAAAAMgAAAMRmckZVAAAALgAAATBpdElUAAAALAAAAuJubE5M
-AAAAKAAAAnJub05PAAAAKAAAAbpwdEJSAAAALgAAArRzdlNFAAAAOgAAAPZqYUpQ
-AAAAGgAAAV5rb0tSAAAAGgAAApp6aFRXAAAAEgAAAXh6aENOAAAAGgAAAhwAUwBr
-AGEAbgBuAGUAcgBpAG4AIABIAGEAcgBtAGEAYQAtAHAAcgBvAGYAaQBpAGwAaQBH
-AHIA5QBzAGsAYQBsAGUAcAByAG8AZgBpAGwAIABmAPYAcgAgAEIAaQBsAGQAbADk
-AHMAYQByAGUAUAByAG8AZgBpAGwAIABHAHIAaQBzACAAZAB1ACAAUwBjAGEAbgBu
-AGUAdQByMLkwrTDjMMowsDDsMKQw1zDtMNUwoTCkMOtjg2PPVmhwcJaOgnJfaWPP
-j/AAUABlAHIAZgBpAGwAIABHAHIAaQBzACAAcABhAHIAYQAgAEUAcwBjAOEAbgBl
-AHIARwByAOUAdABvAG4AZQBzAGsAYQBuAG4AZQByAHAAcgBvAGYAaQBsAEcAcgBh
-AHUAcwB0AHUAZgBlAG4ALQBQAHIAbwBmAGkAbAAgAGYA/AByACAAUwBjAGEAbgBu
-AGUAcmJrY89O6gAgAEcAcgBhAHkAIGPPj/Blh072AEcAcgDlAHQAbwBuAGUAYgBl
-AHMAawByAGkAdgBlAGwAcwBlACAAdABpAGwAIABTAGMAYQBuAG4AZQByAEcAcgBp
-AGoAcwBwAHIAbwBmAGkAZQBsACAAUwBjAGEAbgBuAGUAcsKkzpCxCAAgAEcAcgBh
-AHkAINUEuFzTDMd8AFAAZQByAGYAaQBsACAAQwBpAG4AegBhACAAZABlACAAUwBj
-AGEAbgBuAGUAcgBQAHIAbwBmAGkAbABvACAARwByAGkAZwBpAG8AIABTAGMAYQBu
-AG4AZQByAFMAYwBhAG4AbgBlAHIAIABHAHIAYQB5ACAAUAByAG8AZgBpAGwAZQAA
-dGV4dAAAAABDb3B5cmlnaHQgMjAwMyBBcHBsZSBDb21wdXRlciBJbmMuLCBhbGwg
-cmlnaHRzIHJlc2VydmVkLgAAAAA=
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0002f.pat b/third_party/boostorg/algorithm/test/search_test_data/0002f.pat
deleted file mode 100644
index b0b07a3..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0002f.pat
+++ /dev/null
@@ -1,136 +0,0 @@
-TUxOT1JVUFNUVlVRT1VYWVdXUlJUT1FPU1VPT01PUE9WVlFSUFJRTkxKTU1PTEtJ
-R0JDR0RDQEBBQD8/PDs/PD0+Pz44NzY1NjY5ODw5OT08Nzk5ODs8QUNBQj9BQUND
-QklLSk5KSU9TXlhLT0xWUlFaVFdMTVdVW1VgWVdWXV9dWFtWUlFSTkVERTk1NTUz
-MzUzNjU0ODg1ODY2Nzk6PTo6P0NETE1JR1RSTFBISkpKVVVLSUpUTU5STVZRVFpT
-Vk5XXGBYUFhVWFJTV19kXVdVUFdVVlZUW1xbWlhZV1pcWE9TWlRWWVlXUE1MTlJD
-PDo8Ozo+P0pOTVhWVFJWW15oYFxbXlpUVlpZXFhYVVdXUFlcYFhUVFdaW1RQXVxc
-XltXUVBSTU1RUVRYVVNNSU1RUFFWVFFST05QUk9OSU5QTk1KS1BVSkxLTE5LT1RO
-VlRZWlZYVE1FRkVHR0ZLSU9LTlFOXWaJo7CspZ2WlZOJfXFmYmFqdG9eS0E+OTk4
-ODtBQTw9Ojg5PD5EQT8/P0BAPDc6OEE+Pzo+QENAPDtBQENDRkNDR0A6QEQ8ODU0
-MTQ3NTg3Mzc2NDg0OTs/RUpTS05JT1xYWVlUXFZWVVtdWVlXUE5TUlJQT1BUVlZR
-VFVYW1NRTkxMUlZUUEtMTk9PTE1MSERESUpLSkxJRklJSUtLRkRJSEVCRENERUZE
-QkNBQ0JEQ0FAPjk6PDo3OTc3ODU2ODY2NTc+Rk9YYmdqaGhna3B3e3x+gYGBgYaH
-iYuKi4uKioWCfXdwbmpnaWlsb3eCiIuMjYmJjo6SkJCTl5ORjIaBfnZnVEc/Ojo6
-ODo7P0RJSkxPUlVWWVhWVlVSVFFRT05JRkVARD5BPkBCRERDQ0ZGRUdFSUpKSkpK
-SklOS0xNTkxOT05MS0tMTk9NT1ZWU1dZWl1eXWJkZ2hqbm91dXV4fH5/gYB/gYOC
-hYmGiIiHh4iGh4qLi4qMjY2Li4uLjZKTlpeWlpSPjo+SkZOQkpGRlZiXlpaZmZuZ
-mZibl5mZmJiampual5aYnKCpr7KqmImGiJWosbzAvLu5u7i1tbGwsbKzt7a8vcC/
-vL29vr6/v7q8vLq8vrq4ube1tLe3t7S0ta2ysrK1tKyginBfXVhTUlBOTVBMTU9M
-S0tLSUpLSkhJSkdPT05KSUdNS0lJSE1LSkxKTk9MTUtNSk9OTUxPUU5NUE9OT0xM
-Tk5QUVFQUU9QUVJSVFVSUlNTU1JRTlJTUFBSVFRSUlFTT09RUlBSUVNTUU9QUVNV
-VVVVUVFSU1FVU1JSUFFSU1hUUVFPUVRQUVJRU1NUVVZYWFZWWlhYVFFVWFhXVlhW
-WFhaWltbWF5bV1dYVlhbWFZYWVlaWVlYV1laWVpZV1taWVlaWlpaWVtcXV1cXF1d
-YF9dYF5dXV1dYF5bXV1cWlxWV1dYV1ZWWFdYV1ZZXIO9ydPa3uHk5efo6el7enh5
-ent8eHd1dnl2dnh6enl4eXdwZ1pMR0VFSkVEQz09Q0hKQ0A/P0I/PEJCQT4+PTpA
-RURFQ0RHQ0RBPT5ARkhJSkxMTk1JS05QTklKTk5QUVNUU1FXU1ddWVVRUFFTU1JQ
-UlBQT0tRVVVVU1JSUFJNSktPT0tJSUhHREVFQ0dEQkNAPj0+PTw7Ozs6OTw2Nzk4
-ODg6PDs5OTg3Nzs4Ojw7Ozw9PURBQkQ/RktGS0tISUpRVU5RTUpTT1lTV1VOVlNc
-WV1cWVddY1xdXllPT1JVT0pAODc1MzU5Njc6OTs3Njg2NTc7ODg4OTg+Q0dPUEpH
-VVRMUUxRSkpUTklOSVNSTU1KV1RSWlFYVFRVWVdNUFJeVFRVYWFdXVZQUVVYV1VZ
-VFdYWVhZXGVhVlRbW1pXUU1LTFBMTkhAOTg6OTk7Q1BSVVldWFpeXmFhY2VjXlNR
-U1BXWFVVUVZYWFdWU1NTVVZaV09WWVlaWlNQTUtOUE1OUlhZVFFXUE1NUFBQTlFS
-UFJWTU1OS0pRSkVJUFBNSkpNUEtPVFVXV1lXVldXU0hGRUdJSVBJSUhOVE9RWnWU
-qauroZiVkYl8c2xmY2VrdWlSQTs6Ozg7OTw8Oz08PDw7Ozw7PTtAPj09Pj1BQUJA
-OTo+QUQ9OD1FQUM+RUJCOzY9Q0E4ODU0NDY2NjQ0MzI1Njg4Oz1ESkpISkpTWlhY
-W1ZZV1hTVldOUVNOTE5OUlFQVFlYU01XV1dWU09MSktOUlRTUU1PT0xQTklFR0ZL
-TU1LS0dHR0dJSkpGRUNGSExHQ0RDREVBQD9APz5AOzs+QD88Pjw4NjY0Njc5Nzg7
-QUpTXmRmaGdoaGxwdHp7fXmBh4eHhoyQj5CNjYuJhYJ8dnN1b2xtbGtze4SJjI6M
-iImOj5KUlpWVk5GQi4B3a11RRj5APkFAQUFCSEpLTVBQU1hYWFZWV1NRUlBQTkxJ
-RkdDQT5AP0NDQkNARERJR0lJRkhHSUtQUE1MS0tNTExNTE1NT1FSUFBVVVVXWF1f
-XmNjZGdsbG5zdXZ1dnt9f4CDg4F/goOFhYaGh4uHhoWJi4mKi4mIiouNjIuMkZGR
-lpSUj5GQkpKSkpWTkpaXl5OVlpmbmpiYmZmZmJqWlZeYl5iZmp+jqrO5s6aTgXeB
-mK64vby7urq9ure0srSztba1t7m9vsDDwr69vb+9uru+wL69vbi3tbSztbq1tLe4
-rrOztrW1saylknplXFZUVVNPTU9PTk5NTU1NSkhISkpJR0lKS0lKUEpKS0xPTVBL
-TUxKS01KTExMTkpPTktQUE5PT09QTU5NUFBSUlNST1BPT05ST05PUVFTVlJSU1NS
-UlRTUVJTUVBRUU5RTlFSU1NQTk5NUFVTUlBSUVBRVFRTU1NSVFJQUlVUVlNUVFFS
-UFNVU1JUVVVWVVZXV1VWV1dWVVVYWVhZW1pYWVtdXF5aW1hZWFZZWVhYWlpYV1dY
-WFZXW1tZWFlYWVhWWVlZXFxcW11cXltcXltdXlxdX19dXl5bXVtbWFlYV1VXV1ZU
-VlVVVVdgfLvJ0dre4eTm5+jp6nl7e3x7eHp6dnh5dnd4dXd4dnZ5cmlgUkhCQEJH
-QEM/PkFCRERBQERDQz4/QkU7PD1CPkBBQUFAQEFDR0I9PENCR0dISEdJTklMT01P
-UlFNUFBRUlJZVVVWV1ZUU1hSVFZVUlJTUlBRUVNTVFVSUFFOUE5NTExPTUtIREhH
-RENDQkJFQj5APkE/Pz87Oj08Oz04Nzc4NzxBODQ1Nzc4Ozo6Ojg2OTs7Ozs+QT9D
-SUdISEZGRkpQS01LRElHTEpLVVRaWVhZXFtZVlhgX1xXV1BVblVUSkA5NzQ1NDc1
-NTQzNDQ0NDg1Njc2NTg7PD8+PkpJTUZNT0dPS1JNSFBLRkxIS0hLSk1UVFlgVl5X
-UVJUVUxRT1ZVUlFZXVpWVFJTVllZV1dUVFtaWltfZl1VWFtcVlZVT0tTUElIR0Q5
-NzY3Njk/SVJWWmxlWlxdWWBpYV9kV1JUUVJcWlhTT1NSV1pTVFNTVlVXVlZVWFJU
-WlNPS01VVVpaWVVYV1VWUU5QTk5PUFVRTE1LS0xIRU5KSEpOTk1JR01MSVBVWlxT
-VFhcWFpZTkdDQEFFS0VISElTUU9WYniWpqiooJeOhn92a2NgYGZwbV5JOzs/QkI7
-ODs9OkA+PDw6OT1APj4+SDw6Oz4/QkA7O0A9Pz5CPDs8QTxDQkJJPzpBPzo8Ozg2
-NzQ3NjczMjQ2Nzg7PT9HS0RJR01SUldSUE9OT01VWFJWVE5LTlFTUlBSVldSVFhW
-VFJWUVBNTEhLTFJUTExNS05OSUZITE1LS0pKT0pGREVGRkdIRklKSUlERURCQ0NC
-Q0E+PDk9QD9BPzw6ODo4OTo9Ojg5PUdQW2VqamlrbWttcnd6fH19fYKDiI2Ojo2N
-jo6LjIiIhX58eXVzcW9sb3d/hIyOkI+Lio6QkZOYlpSTjYuHgXJkWE1IRD89QEJF
-RUZMT0xPUVNUVlhaW1lXVVNSUlJPTUtNS0dDQUJEQkJDRUZGR0hFR0dHQ0dHSEtI
-SEtJS05LS0xSUVNQTlFUUlVZWFlcXWFlZ2hsbW9zcnV8fH9+fX+AgYGBg4SEg4KD
-hYOCh4eHh4eJi4qKi4uOjI2KjpGSlZWUmZaSkpeWkI6TmJWYmpiXmJiampuXnJmY
-lpaZmJqYmZmbnJ2jrLGwsbSwopSDfIeXrLu/vr28urq4s7OzsrK0trW3ubu8v8G/
-v7++vr27ur27vLy7ubW2uby7t7e1ube1tbe2tLW0sa+mmIFmWldXUlBRT0xOS0xO
-TVJPTUtKR0lKTEpKTUpJSUlLTUxJTE1PUExJS0tNS0xOTU5MS0xLS09PUVFTUE9R
-UFNSU1FRVFJNT09OTlBQUlNSUVBRT1JUVFFPUVRUUVNRUE5QUVFSUFFPSk5RUE9P
-UVNTVlFRTFBPUVJSUlJRT09SUlBQUE9RUlJTVVVTVFVWV1ZYWFZYV1hVVVRTV1ta
-W11bWVxaXV1aWlpcWllZWllaXV5bV1dXWFpZWldXWVpZVlVXWVlbXFxaXF9cX11f
-XFxcX15iYWBdXFhaW1pfXFtZV1RWVllXU1NXW2Z9usjT2d/h4+bn6OnpeXp7enl7
-d3t7enl6eHp4eHl1dG9rZFtRSkdDQkBAQUY/PEVESUREREM9QUFEQUVBPT9BPT8/
-Pz8+QT5BRENHQkNFRkhJS0xNTkxQTE1SU1NQUlNRUlVWVVVZWlZVU1VWU1ZWVFJQ
-VlJPUFNXWFZVU1RUUFFPTkxISEdJTEpGREJCQUBAQENHQj07PTo8OzU5Nzc4ODg3
-Nzg1ODk4ODc5Ozg6ODk3OTk4PD5FQj5AQEBFREU/QlFPSklIR0dHSkxUU1xZUVVZ
-YFhPU2BaV1RYUU9UWFZKQDo3NzI0NTAyNjQ0Ozg4Njc2Nzk5ODw6Ozo+S0ZLRUdF
-Q1RMWlVMUU9KT0pPTFVWSlVTVFtTWlNUVlJSTFNOUlRQSlRUWVdaWVVSUldZWlVT
-U1dUU1pdXFZXW15VVVVOTldSTUdHRzo2NzU2OTk+TV1eYmJYX11aYGleX2lYTVJR
-Vl5gXFhRUVNYXFJSWl1ZVlFWWlJSU1JcVVhTTlhYWFNTU1lYVVhSTU9RUVRTT05F
-SEhJRk9NS01OTU9KS1FJTFFRUlNUV1FTV1xdX1lRSERBQkJHRkpJRVJRU1dVYnqW
-pKekno+Jg3pybWhkZW1yZ1BEPDw+PTs3PDpEQ0E9Ozs7PDs/PEdCOjk9Oz1AQkA9
-QEFEQj06Ojw7PT9ASUhAPEFCPDg4OTczNDM2NDQ1MzU2ODk6PD5FRk1JUE5WXlJU
-UkxMTFVVT1BSUFRVVk5ISlNVVVRWWFRUVltYWE9LTUtKSkxJSUhLTU9NSUlMTkxI
-TU9NR0NHSElLS0xISkhKRkNBQ0RCREJCQz49PDtAPjw6ODg5PDs5Ozg2OkBLWGNo
-am1tbWtubXB1enp7e36Bg4aGiZKRjo2NjY2LiIiFhoKCd3Vua25vdH2DjI6NjIyO
-kpGQkpOVko+NiYN4aV5XU01LSUI+PUNHTFFSU05RUlVUWVtcXFZUUlFUU1FPTkxK
-R0ZCQUBAQUFEQz5CR0pIR0hESUhHRElKSklLS05MUFJVU1JRU1VaWFxdX19iY2lr
-bW9ycnR3enp5e318fX9/gH6Cg4SFgIOFhoeGh4mMiouOjo6PjY+MjY2RlJKSlJuX
-lZSUlZaQkpKWl5qTk5aYmpqbm5yampyam5mWl5uenZ+lqK2ytby9tqmcmJiRjJit
-uL6/vru6uLKuq62vs7a2t7e6u73BwcHAwcG/vcC9vLm4u7e4t7i4ur29urq4tre6
-uLa5trWzs62onohtXFVUVFJRT01NS0tKS05RS0tKS0xPTklJS0pJSUdLTEtJTEpO
-S0tHSUpNT05QTk5LS0xQTlBPUFJRTVBRTk9OT05PUlJPUlBQTk9QUFFRT1FRVVNU
-VFRTUlFWU1JQU1JRUFNVV1RPT1BRT1FSUVNUU1JOTlBPUFBQU1RSUVBSUVNSUVNR
-UVNVVFRTVVRWVFJTVlhXWVdXU1NTWFtbWl5dXFlZWFhZWFxYVlpYWFlbWVpZWVZX
-V1dYWVhYWFdXVVRZW1pZWlxdW1peXl1cXGBgX19gYV9fW1pZW1lcW1hWVFVXVVlW
-U1NVaoS8ytPa3uLk5+jo6el4eXh1eXh9eHt7e3t4eHZ1eHh0cm5jWEtLTEY/PTs8
-P0NGSklGREVBOz0+RD9BRENAPkE/Oj0+P0A9Pz0+Q0NCQkRJSUlKSUtMTEpLTFBU
-VlVUVlZSWVteXFpYU1RXWFlVWFlXV1VTVVJPUlZaVldTUVJRUVFOUE1KTE1LSElF
-QUVEQkVCRUNERD8/Pjs8QDw6Ojo6Ozw5Nzk5PDc2NDQ4Ojg6OTk4Ojc4PEQ7Oj0/
-PUNCRkNBSUtHSERJTEdKR1BLUVlVV1ZiV1BLU1JVVVZWU1RbWVBJQzo9NzY3NzYy
-Mjc3ODg0MzU3OTk/Pjo8Oz1ERk9FRkVJSklXUkxWUVBRSlFPV1ZHTU9SXlVdVVdc
-VlZMUk1QWVJRUFNZWF5eWU9PU1VWVFRSWFBPVllbWlZYWVdUVFFYXFhTUkxKQTo0
-MjQ1OT5DVFhPV1paYFtgX15gX1FPUlRUWltbVlNPUVdWUlhXXVZVVldaVlNWVllZ
-VFNPTVNRTVFPUlNUUVVUUlhVU1ZRS0lLRk1LSFFPTVFNTUpLT01LUlRMTVNVU1BU
-V1tiWFVQSUNDQEVDSEhHVVJYWlBYXn6Ypqiil4qBfHp4cGttcHNtYFBDOz47Ojo3
-OT07ODo7Oz88OjxAREA8Pj07OjxBQTw4PUFEQTxAPTtBRUVGR0U+QEI+ODUzMzc2
-Njc4ODQxNDY3OTs8P0RKR0hSTlVfVVtRT09RU1VSUVJTU1JQS05OTVJXVVZUUVNY
-VVRSTUxOSUtMSktKTEpJTEpKRkdLTE1NT01NSklJSktOTkpGREdGQUFERUREREI9
-Pj4/Ozc5ODo5Njc6OTs5NzpBSlhja21vb3RxcHBucnp9eXl6fH+EhIWGjo2Njo+N
-j4+MiYuLhH54cm5tb3BzfYaLjo6MjpKSkpORlJeWkpCHfnNrYVdWVVRMQj0/RE9V
-UVJQUVFRVFtcXF1dXFlWVlZXV1FRTk9NS0VDQ0FBQUFAP0FER0dHR0dHSUhKSkxL
-TEpJTlJOUFFUVlNWVltcYGBfZGhnaW5wcnR2dXh6eXt8f4GBgoKAg4GBgoODgoSE
-iIiHio6Ni4+Sk46KjY6Qk5WVkpGUlJOSlpKSlJWTkpOYmJiYmZial5iZm5mZmpye
-nZ+foKGip7Cyt7e5vLyznpKcn5aVorO5wcC/wLm2sa2rrK+zuLa4uL2/v768vMDA
-wsG+wL68vra1tLK1tri8vbi6ubi2tre3tra1tbW4trGroY1tW1hUVFNNTk5KSktP
-T0tLSkpMSktNTExLS0tLSkxKSUlMTUtLSk5JSE1OTk9MTE9RT0xNUFBRU1BPTU1P
-UFBTUlFPUlFOUVNRUFFRUU9OUFFTU1NTUlNUVFJTUU5QUlJUVVRUU1ZST1BPUFFS
-UVBOUVJPUFJSUFJRU1NRUFFSVFNSVFNUVFFRU1RXVlVXVFlYWFhZWFhVWFZUWFdX
-XV1cWFdYWVdaWFdaVVVaWVlaWVdaWVlYVVdaWVlXWFlbWFdYWVpdW1xcXlpbX2Fj
-YmBhYV9fX15cXl9aWFlVUFZWV1lYWVlXU1VvorzJ0tre4eTm5+jq6nl3d3l4d3h6
-ent8fHh2dXN1d3NxbGFUSkdGSEM7Pj89QUhJRkZFQ0RFQ0JCQj09PTg8PUE7Oj4+
-Qz0/QEFDQkVHR0pJSUxLTU1MTExLT1FSVFNTVVRVWFdXV1ZRUlNWVlVXWFlXWlta
-VFZXWlxZV1VTVVBTUU1NTEtNS05MR0pMSEVER0ZISEVERD4+Ozs6Ozs6OTk6Ojc1
-NTY5ODY2Njg2NzY4ODk3PDo4Pjw8PD9CRUFBPT5GRkVLTEZLTVFKSkxSW1daV11Z
-SlJWVlNSV1pUU1dXVlZQQjo7OTU1NDMyNjc0NTZINzc3OTo8PT07QUJDUkpOS0hN
-R1RRTlRQTlFIUFJVVEhSUVJgV1ZSWmFeYFJVTk5VUlJQV1lUVVZQSk5OUVZYVFBP
-UFNUWV1XUlRZWFlXVFVcXlpWWFBCOTIyMzc2PUNNV01TWlpbW1paXl1VVFZXVVZZ
-V1lcUk1OU1BVXVlaVlNTUVNYVVVbUk9VT05JTE9SUU9QUVZVUlNVWFlUS05LTFFU
-V09SUFNSUVRNSkpNUk5OV1FNVFpSTlRcYVxYW1FKQ0JER0NFR0hTTFFUTlVWbYma
-paOelImBfXx7dnRxcXBpWkY9Oj1COTQ5Ozs7QTk6Pzw7Ozs/QEJDPDo6PkBFPjtA
-QkRFQjk6PUJGREZEQkFCRj04MzQ1ODk1NTM5NzQ0NTk6Oj49P0JFSFRRWFlSXFNS
-VlZXVVRTU1ZTUFNNTU5PUFRUV1lRTk5SU09MTU5PUVBKS01MSkhIS0pJR0hDR0tL
-TUtFSk1OS0tIR0VHRkhEQ0NGRENBPj89PT87PDs4Ojw4OTo4Njk9QUxZZG1vb3By
-cnFzcnR2fH99eHh7gISHjJKJjpGRk5WSkpKOiomIgHx0cm5vcXV+hIeLjo+SlZiY
-mJWUlZmSjIJ4cG1jVE5NTkxIQ0ZMVFhXVldUVldYXWBiX19gXFxbWlhXU1FPTk1L
diff --git a/third_party/boostorg/algorithm/test/search_test_data/0002n.pat b/third_party/boostorg/algorithm/test/search_test_data/0002n.pat
deleted file mode 100644
index c23eb28..0000000
--- a/third_party/boostorg/algorithm/test/search_test_data/0002n.pat
+++ /dev/null
@@ -1,136 +0,0 @@
-TUxOT1JVUFNUVlVRT1VYWVdXUlJUT1FPU1VPT01PUE9WVlFSUFJRTkxKTU1PTEtJ
-R0JDR0RDQEBBQD8/PDs/PD0+Pz44NzY1NjY5ODw5OT08Nzk5ODs8QUNBQj9BQUND
-QklLSk5KSU9TXlhLT0xWUlFaVFdMTVdVW1VgWVdWXV9dWFtWUlFSTkVERTk1NTUz
-MzUzNjU0ODg1ODY2Nzk6PTo6P0NETE1JR1RSTFBISkpKVVVLSUpUTU5STVZRVFpT
-Vk5XXGBYUFhVWFJTV19kXVdVUFdVVlZUW1xbWlhZV1pcWE9TWlRWWVlXUE1MTlJD
-PDo8Ozo+P0pOTVhWVFJWW15oYFxbXlpUVlpZXFhYVVdXUFlcYFhUVFdaW1RQXVxc
-XltXUVBSTU1RUVRYVVNNSU1RUFFWVFFST05QUk9OSU5QTk1KS1BVSkxLTE5LT1RO
-VlRZWlZYVE1FRkVHR0ZLSU9LTlFOXWaJo7CspZ2WlZOJfXFmYmFqdG9eS0E+OTk4
-ODtBQTw9Ojg5PD5EQT8/P0BAPDc6OEE+Pzo+QENAPDtBQENDRkNDR0A6QEQ8ODU0
-MTQ3NTg3Mzc2NDg0OTs/RUpTS05JT1xYWVlUXFZWVVtdWVlXUE5TUlJQT1BUVlZR
-VFVYW1NRTkxMUlZUUEtMTk9PTE1MSERESUpLSkxJRklJSUtLRkRJSEVCRENERUZE
-QkNBQ0JEQ0FAPjk6PDo3OTc3ODU2ODY2NTc+Rk9YYmdqaGhna3B3e3x+gYGBgYaH
-iYuKi4uKioWCfXdwbmpnaWlsb3eCiIuMjYmJjo6SkJCTl5ORjIaBfnZnVEc/Ojo6
-ODo7P0RJSkxPUlVWWVhWVlVSVFFRT05JRkVARD5BPkBCRERDQ0ZGRUdFSUpKSkpK
-SklOS0xNTkxOT05MS0tMTk9NT1ZWU1dZWl1eXWJkZ2hqbm91dXV4fH5/gYB/gYOC
-hYmGiIiHh4iGh4qLi4qMjY2Li4uLjZKTlpeWlpSPjo+SkZOQkpGRlZiXlpaZmZuZ
-mZibl5mZmJiampual5aYnKCpr7KqmImGiJWosbzAvLu5u7i1tbGwsbKzt7a8vcC/
-vL29vr6/v7q8vLq8vrq4ube1tLe3t7S0ta2ysrK1tKyginBfXVhTUlBOTVBMTU9M
-S0tLSUpLSkhJSkdPT05KSUdNS0lJSE1LSkxKTk9MTUtNSk9OTUxPUU5NUE9OT0xM
-Tk5QUVFQUU9QUVJSVFVSUlNTU1JRTlJTUFBSVFRSUlFTT09RUlBSUVNTUU9QUVNV
-VVVVUVFSU1FVU1JSUFFSU1hUUVFPUVRQUVJRU1NUVVZYWFZWWlhYVFFVWFhXVlhW
-WFhaWltbWF5bV1dYVlhbWFZYWVlaWVlYV1laWVpZV1taWVlaWlpaWVtcXV1cXF1d
-YF9dYF5dXV1dYF5bXV1cWlxWV1dYV1ZWWFdYV1ZZXIO9ydPa3uHk5efo6el7enh5
-ent8eHd1dnl2dnh6enl4eXdwZ1pMR0VFSkVEQz09Q0hKQ0A/P0I/PEJCQT4+PTpA
-RURFQ0RHQ0RBPT5ARkhJSkxMTk1JS05QTklKTk5QUVNUU1FXU1ddWVVRUFFTU1JQ
-UlBQT0tRVVVVU1JSUFJNSktPT0tJSUhHREVFQ0dEQkNAPj0+PTw7Ozs6OTw2Nzk4
-ODg6PDs5OTg3Nzs4Ojw7Ozw9PURBQkQ/RktGS0tISUpRVU5RTUpTT1lTV1VOVlNc
-WV1cWVddY1xdXllPT1JVT0pAODc1MzU5Njc6OTs3Njg2NTc7ODg4OTg+Q0dPUEpH
-VVRMUUxRSkpUTklOSVNSTU1KV1RSWlFYVFRVWVdNUFJeVFRVYWFdXVZQUVVYV1VZ
-VFdYWVhZXGVhVlRbW1pXUU1LTFBMTkhAOTg6OTk7Q1BSVVldWFpeXmFhY2VjXlNR
-U1BXWFVVUVZYWFdWU1NTVVZaV09WWVlaWlNQTUtOUE1OUlhZVFFXUE1NUFBQTlFS
-UFJWTU1OS0pRSkVJUFBNSkpNUEtPVFVXV1lXVldXU0hGRUdJSVBJSUhOVE9RWnWU
-qauroZiVkYl8c2xmY2VrdWlSQTs6Ozg7OTw8Oz08PDw7Ozw7PTtAPj09Pj1BQUJA
-OTo+QUQ9OD1FQUM+RUJCOzY9Q0E4ODU0NDY2NjQ0MzI1Njg4Oz1ESkpISkpTWlhY
-W1ZZV1hTVldOUVNOTE5OUlFQVFlYU01XV1dWU09MSktOUlRTUU1PT0xQTklFR0ZL
-TU1LS0dHR0dJSkpGRUNGSExHQ0RDREVBQD9APz5AOzs+QD88Pjw4NjY0Njc5Nzg7
-QUpTXmRmaGdoaGxwdHp7fXmBh4eHhoyQj5CNjYuJhYJ8dnN1b2xtbGtze4SJjI6M
-iImOj5KUlpWVk5GQi4B3a11RRj5APkFAQUFCSEpLTVBQU1hYWFZWV1NRUlBQTkxJ
-RkdDQT5AP0NDQkNARERJR0lJRkhHSUtQUE1MS0tNTExNTE1NT1FSUFBVVVVXWF1f
-XmNjZGdsbG5zdXZ1dnt9f4CDg4F/goOFhYaGh4uHhoWJi4mKi4mIiouNjIuMkZGR
-lpSUj5GQkpKSkpWTkpaXl5OVlpmbmpiYmZmZmJqWlZeYl5iZmp+jqrO5s6aTgXeB
-mK64vby7urq9ure0srSztba1t7m9vsDDwr69vb+9uru+wL69vbi3tbSztbq1tLe4
-rrOztrW1saylknplXFZUVVNPTU9PTk5NTU1NSkhISkpJR0lKS0lKUEpKS0xPTVBL
-TUxKS01KTExMTkpPTktQUE5PT09QTU5NUFBSUlNST1BPT05ST05PUVFTVlJSU1NS
-UlRTUVJTUVBRUU5RTlFSU1NQTk5NUFVTUlBSUVBRVFRTU1NSVFJQUlVUVlNUVFFS
-UFNVU1JUVVVWVVZXV1VWV1dWVVVYWVhZW1pYWVtdXF5aW1hZWFZZWVhYWlpYV1dY
-WFZXW1tZWFlYWVhWWVlZXFxcW11cXltcXltdXlxdX19dXl5bXVtbWFlYV1VXV1ZU
-VlVVVVdgfLvJ0dre4eTm5+jp6nl7e3x7eHp6dnh5dnd4dXd4dnZ5cmlgUkhCQEJH
-QEM/PkFCRERBQERDQz4/QkU7PD1CPkBBQUFAQEFDR0I9PENCR0dISEdJTklMT01P
-UlFNUFBRUlJZVVVWV1ZUU1hSVFZVUlJTUlBRUVNTVFVSUFFOUE5NTExPTUtIREhH
-RENDQkJFQj5APkE/Pz87Oj08Oz04Nzc4NzxBODQ1Nzc4Ozo6Ojg2OTs7Ozs+QT9D
-SUdISEZGRkpQS01LRElHTEpLVVRaWVhZXFtZVlhgX1xXV1BVblVUSkA5NzQ1NDc1
-NTQzNDQ0NDg1Njc2NTg7PD8+PkpJTUZNT0dPS1JNSFBLRkxIS0hLSk1UVFlgVl5X
-UVJUVUxRT1ZVUlFZXVpWVFJTVllZV1dUVFtaWltfZl1VWFtcVlZVT0tTUElIR0Q5
-NzY3Njk/SVJWWmxlWlxdWWBpYV9kV1JUUVJcWlhTT1NSV1pTVFNTVlVXVlZVWFJU
-WlNPS01VVVpaWVVYV1VWUU5QTk5PUFVRTE1LS0xIRU5KSEpOTk1JR01MSVBVWlxT
-VFhcWFpZTkdDQEFFS0VISElTUU9WYniWpqiooJeOhn92a2NgYGZwbV5JOzs/QkI7
-ODs9OkA+PDw6OT1APj4+SDw6Oz4/QkA7O0A9Pz5CPDs8QTxDQkJJPzpBPzo8Ozg2
-NzQ3NjczMjQ2Nzg7PT9HS0RJR01SUldSUE9OT01VWFJWVE5LTlFTUlBSVldSVFhW
-VFJWUVBNTEhLTFJUTExNS05OSUZITE1LS0pKT0pGREVGRkdIRklKSUlERURCQ0NC
-Q0E+PDk9QD9BPzw6ODo4OTo9Ojg5PUdQW2VqamlrbWttcnd6fH19fYKDiI2Ojo2N
-jo6LjIiIhX58eXVzcW9sb3d/hIyOkI+Lio6QkZOYlpSTjYuHgXJkWE1IRD89QEJF
-RUZMT0xPUVNUVlhaW1lXVVNSUlJPTUtNS0dDQUJEQkJDRUZGR0hFR0dHQ0dHSEtI
-SEtJS05LS0xSUVNQTlFUUlVZWFlcXWFlZ2hsbW9zcnV8fH9+fX+AgYGBg4SEg4KD
-hYOCh4eHh4eJi4qKi4uOjI2KjpGSlZWUmZaSkpeWkI6TmJWYmpiXmJiampuXnJmY
-lpaZmJqYmZmbnJ2jrLGwsbSwopSDfIeXrLu/vr28urq4s7OzsrK0trW3ubu8v8G/
-v7++vr27ur27vLy7ubW2uby7t7e1ube1tbe2tLW0sa+mmIFmWldXUlBRT0xOS0xO
-TVJPTUtKR0lKTEpKTUpJSUlLTUxJTE1PUExJS0tNS0xOTU5MS0xLS09PUVFTUE9R
-UFNSU1FRVFJNT09OTlBQUlNSUVBRT1JUVFFPUVRUUVNRUE5QUVFSUFFPSk5RUE9P
-UVNTVlFRTFBPUVJSUlJRT09SUlBQUE9RUlJTVVVTVFVWV1ZYWFZYV1hVVVRTV1ta
-W11bWVxaXV1aWlpcWllZWllaXV5bV1dXWFpZWldXWVpZVlVXWVlbXFxaXF9cX11f
-XFxcX15iYWBdXFhaW1pfXFtZV1RWVllXU1NXW2Z9usjT2d/h4+bn6OnpeXp7enl7
-d3t7enl6eHp4eHl1dG9rZFtRSkdDQkBAQUY/PEVESUREREM9QUFEQUVBPT9BPT8/
-Pz8+QT5BRENHQkNFRkhJS0xNTkxQTE1SU1NQUlNRUlVWVVVZWlZVU1VWU1ZWVFJQ
-VlJPUFNXWFZVU1RUUFFPTkxISEdJTEpGREJCQUBAQENHQj07PTo8OzU5Nzc4ODg3
-Nzg1ODk4ODc5Ozg6ODk3OTk4PD5FQj5AQEBFREU/QlFPSklIR0dHSkxUU1xZUVVZ
-YFhPU2BaV1RYUU9UWFZKQDo3NzI0NTAyNjQ0Ozg4Njc2Nzk5ODw6Ozo+S0ZLRUdF
-Q1RMWlVMUU9KT0pPTFVWSlVTVFtTWlNUVlJSTFNOUlRQSlRUWVdaWVVSUldZWlVT
-U1dUU1pdXFZXW15VVVVOTldSTUdHRzo2NzU2OTk+TV1eYmJYX11aYGleX2lYTVJR
-Vl5gXFhRUVNYXFJSWl1ZVlFWWlJSU1JcVVhTTlhYWFNTU1lYVVhSTU9RUVRTT05F
-SEhJRk9NS01OTU9KS1FJTFFRUlNUV1FTV1xdX1lRSERBQkJHRkpJRVJRU1dVYnqW
-pKekno+Jg3pybWhkZW1yZ1BEPDw+PTs3PDpEQ0E9Ozs7PDs/PEdCOjk9Oz1AQkA9
-QEFEQj06Ojw7PT9ASUhAPEFCPDg4OTczNDM2NDQ1MzU2ODk6PD5FRk1JUE5WXlJU
-UkxMTFVVT1BSUFRVVk5ISlNVVVRWWFRUVltYWE9LTUtKSkxJSUhLTU9NSUlMTkxI
-TU9NR0NHSElLS0xISkhKRkNBQ0RCREJCQz49PDtAPjw6ODg5PDs5Ozg2OkBLWGNo
-am1tbWtubXB1enp7e36Bg4aGiZKRjo2NjY2LiIiFhoKCd3Vua25vdH2DjI6NjIyO
-kpGQkpOVko+NiYN4aV5XU01LSUI+PUNHTFFSU05RUlVUWVtcXFZUUlFUU1FPTkxK
-R0ZCQUBAQUFEQz5CR0pIR0hESUhHRElKSklLS05MUFJVU1JRU1VaWFxdX19iY2lr
-bW9ycnR3enp5e318fX9/gH6Cg4SFgIOFhoeGh4mMiouOjo6PjY+MjY2RlJKSlJuX
-lZSUlZaQkpKWl5qTk5aYmpqbm5yampyam5mWl5uenZ+lqK2ytby9tqmcmJiRjJit
-uL6/vru6uLKuq62vs7a2t7e6u73BwcHAwcG/vcC9vLm4u7e4t7i4ur29urq4tre6
-uLa5trWzs62onohtXFVUVFJRT01NS0tKS05RS0tKS0xPTklJS0pJSUdLTEtJTEpO
-S0tHSUpNT05QTk5LS0xQTlBPUFJRTVBRTk9OT05PUlJPUlBQTk9QUFFRT1FRVVNU
-VFRTUlFWU1JQU1JRUFNVV1RPT1BRT1*SUVNUU1JOTlBPUFBQU1RSUVBSUVNSUVNR
-UVNVVFRTVVRWVFJTVlhXWVdXU1NTWFtbWl5dXFlZWFhZWFxYVlpYWFlbWVpZWVZX
-V1dYWVhYWFdXVVRZW1pZWlxdW1peXl1cXGBgX19gYV9fW1pZW1lcW1hWVFVXVVlW
-U1NVaoS8ytPa3uLk5+jo6el4eXh1eXh9eHt7e3t4eHZ1eHh0cm5jWEtLTEY/PTs8
-P0NGSklGREVBOz0+RD9BRENAPkE/Oj0+P0A9Pz0+Q0NCQkRJSUlKSUtMTEpLTFBU
-VlVUVlZSWVteXFpYU1RXWFlVWFlXV1VTVVJPUlZaVldTUVJRUVFOUE1KTE1LSElF
-QUVEQkVCRUNERD8/Pjs8QDw6Ojo6Ozw5Nzk5PDc2NDQ4Ojg6OTk4Ojc4PEQ7Oj0/
-PUNCRkNBSUtHSERJTEdKR1BLUVlVV1ZiV1BLU1JVVVZWU1RbWVBJQzo9NzY3NzYy
-Mjc3ODg0MzU3OTk/Pjo8Oz1ERk9FRkVJSklXUkxWUVBRSlFPV1ZHTU9SXlVdVVdc
-VlZMUk1QWVJRUFNZWF5eWU9PU1VWVFRSWFBPVllbWlZYWVdUVFFYXFhTUkxKQTo0
-MjQ1OT5DVFhPV1paYFtgX15gX1FPUlRUWltbVlNPUVdWUlhXXVZVVldaVlNWVllZ
-VFNPTVNRTVFPUlNUUVVUUlhVU1ZRS0lLRk1LSFFPTVFNTUpLT01LUlRMTVNVU1BU
-V1tiWFVQSUNDQEVDSEhHVVJYWlBYXn6Ypqiil4qBfHp4cGttcHNtYFBDOz47Ojo3
-OT07ODo7Oz88OjxAREA8Pj07OjxBQTw4PUFEQTxAPTtBRUVGR0U+QEI+ODUzMzc2
-Njc4ODQxNDY3OTs8P0RKR0hSTlVfVVtRT09RU1VSUVJTU1JQS05OTVJXVVZUUVNY
-VVRSTUxOSUtMSktKTEpJTEpKRkdLTE1NT01NSklJSktOTkpGREdGQUFERUREREI9
-Pj4/Ozc5ODo5Njc6OTs5NzpBSlhja21vb3RxcHBucnp9eXl6fH+EhIWGjo2Njo+N
-j4+MiYuLhH54cm5tb3BzfYaLjo6MjpKSkpORlJeWkpCHfnNrYVdWVVRMQj0/RE9V
-UVJQUVFRVFtcXF1dXFlWVlZXV1FRTk9NS0VDQ0FBQUFAP0FER0dHR0dHSUhKSkxL
-TEpJTlJOUFFUVlNWVltcYGBfZGhnaW5wcnR2dXh6eXt8f4GBgoKAg4GBgoODgoSE
-iIiHio6Ni4+Sk46KjY6Qk5WVkpGUlJOSlpKSlJWTkpOYmJiYmZial5iZm5mZmpye
-nZ+foKGip7Cyt7e5vLyznpKcn5aVorO5wcC/wLm2sa2rrK+zuLa4uL2/v768vMDA
-wsG+wL68vra1tLK1tri8vbi6ubi2tre3tra1tbW4trGroY1tW1hUVFNNTk5KSktP
-T0tLSkpMSktNTExLS0tLSkxKSUlMTUtLSk5JSE1OTk9MTE9RT0xNUFBRU1BPTU1P
-UFBTUlFPUlFOUVNRUFFRUU9OUFFTU1NTUlNUVFJTUU5QUlJUVVRUU1ZST1BPUFFS
-UVBOUVJPUFJSUFJRU1NRUFFSVFNSVFNUVFFRU1RXVlVXVFlYWFhZWFhVWFZUWFdX
-XV1cWFdYWVdaWFdaVVVaWVlaWVdaWVlYVVdaWVlXWFlbWFdYWVpdW1xcXlpbX2Fj
-YmBhYV9fX15cXl9aWFlVUFZWV1lYWVlXU1V#orzJ0tre4eTm5+jq6nl3d3l4d3h6
-ent8fHh2dXN1d3NxbGFUSkdGSEM7Pj89QUhJRkZFQ0RFQ0JCQj09PTg8PUE7Oj4+
-Qz0/QEFDQkVHR0pJSUxLTU1MTExLT1FSVFNTVVRVWFdXV1ZRUlNWVlVXWFlXWlta
-VFZXWlxZV1VTVVBTUU1NTEtNS05MR0pMSEVER0ZISEVERD4+Ozs6Ozs6OTk6Ojc1
-NTY5ODY2Njg2NzY4ODk3PDo4Pjw8PD9CRUFBPT5GRkVLTEZLTVFKSkxSW1daV11Z
-SlJWVlNSV1pUU1dXVlZQQjo7OTU1NDMyNjc0NTZINzc3OTo8PT07QUJDUkpOS0hN
-R1RRTlRQTlFIUFJVVEhSUVJgV1ZSWmFeYFJVTk5VUlJQV1lUVVZQSk5OUVZYVFBP
-UFNUWV1XUlRZWFlXVFVcXlpWWFBCOTIyMzc2PUNNV01TWlpbW1paXl1VVFZXVVZZ
-V1lcUk1OU1BVXVlaVlNTUVNYVVVbUk9VT05JTE9SUU9QUVZVUlNVWFlUS05LTFFU
-V09SUFNSUVRNSkpNUk5OV1FNVFpSTlRcYVxYW1FKQ0JER0NFR0hTTFFUTlVWbYma
-paOelImBfXx7dnRxcXBpWkY9Oj1COTQ5Ozs7QTk6Pzw7Ozs/QEJDPDo6PkBFPjtA
-QkRFQjk6PUJGREZEQkFCRj04MzQ1ODk1NTM5NzQ0NTk6Oj49P0JFSFRRWFlSXFNS
-VlZXVVRTU1ZTUFNNTU5PUFRUV1lRTk5SU09MTU5PUVBKS01MSkhIS0pJR0hDR0tL
-TUtFSk1OS0tIR0VHRkhEQ0NGRENBPj89PT87PDs4Ojw4OTo4Njk9QUxZZG1vb3By
-cnFzcnR2fH99eHh7gISHjJKJjpGRk5WSkpKOiomIgHx0cm5vcXV+hIeLjo+SlZiY
-mJWUlZmSjIJ4cG1jVE5NTkxIQ0ZMVFhXVldUVldYXWBiX19gXFxbWlhXU1FPTk1L
diff --git a/third_party/boostorg/algorithm/test/sort_subrange_test.cpp b/third_party/boostorg/algorithm/test/sort_subrange_test.cpp
deleted file mode 100644
index 0694af4..0000000
--- a/third_party/boostorg/algorithm/test/sort_subrange_test.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-#include <boost/config.hpp>
-#include <boost/algorithm/sort_subrange.hpp>
-#include <boost/algorithm/cxx11/is_sorted.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#include <vector>
-#include <iostream>
-
-#if (__cplusplus >= 201103L) || defined(BOOST_NO_CXX98_RANDOM_SHUFFLE)
-#include <random>
-
-std::default_random_engine gen;
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::shuffle(first, last, gen); }
-#else
-template<typename RandomIt>
-void do_shuffle(RandomIt first, RandomIt last)
-{ std::random_shuffle(first, last); }
-#endif
-
-namespace ba = boost::algorithm;
-
-template <typename Iter>
-void check_sequence ( Iter first, Iter last, Iter sf, Iter sl )
-{
-	if (sf == sl) return;
-	for (Iter i = first; i < sf; ++i)
-		BOOST_CHECK(*i < *sf);
-	BOOST_CHECK(ba::is_sorted(sf, sl));
-	for (Iter i = sl; i < last; ++i)
-		BOOST_CHECK(*(sl-1) < *i);
-}
-
-template <typename Iter, typename Pred>
-void check_sequence ( Iter first, Iter last, Iter sf, Iter sl, Pred p )
-{
-	if (sf == sl) return;
-	for (Iter i = first; i < sf; ++i)
-		BOOST_CHECK(p(*i, *sf));
-	BOOST_CHECK(ba::is_sorted(sf, sl, p));
-	for (Iter i = sl; i < last; ++i)
-		BOOST_CHECK(p(*(sl-1), *i));
-
-}
-
-// 	for ( int i = 0; i < v.size(); ++i )
-// 		std::cout << v[i] << ' ';
-// 	std::cout << std::endl;
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-	{
-	std::vector<int> v;
-	for ( int i = 0; i < 10; ++i )
-		v.push_back(i);
-
-	const std::vector<int>::iterator b = v.begin();
-	ba::sort_subrange(b, v.end(), b + 3, b + 6);
-	check_sequence   (b, v.end(), b + 3, b + 6);
-
-	BOOST_CHECK_EQUAL(v[3], 3);
-	BOOST_CHECK_EQUAL(v[4], 4);
-	BOOST_CHECK_EQUAL(v[5], 5);
-
-//	Mix them up and try again - single element
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b + 7, b + 8);
-	check_sequence   (b, v.end(), b + 7, b + 8);
-
-	BOOST_CHECK_EQUAL(v[7], 7);
-
-//	Mix them up and try again - at the end
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b + 7, v.end());
-	check_sequence   (b, v.end(), b + 7, v.end());
-
-	BOOST_CHECK_EQUAL(v[7], 7);
-	BOOST_CHECK_EQUAL(v[8], 8);
-	BOOST_CHECK_EQUAL(v[9], 9);
-
-//	Mix them up and try again - at the beginning
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, b + 2);
-	check_sequence   (b, v.end(), b, b + 2);
-
-	BOOST_CHECK_EQUAL(v[0], 0);
-	BOOST_CHECK_EQUAL(v[1], 1);
-
-//	Mix them up and try again - empty subrange
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, b);
-	check_sequence   (b, v.end(), b, b);
-
-//	Mix them up and try again - entire subrange
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, v.end());
-	check_sequence   (b, v.end(), b, v.end());
-	}
-
-	{
-	std::vector<int> v;
-	for ( int i = 0; i < 10; ++i )
-		v.push_back(i);
-
-	const std::vector<int>::iterator b = v.begin();
-	ba::sort_subrange(b, v.end(), b + 3, b + 6, std::greater<int>());
-	check_sequence   (b, v.end(), b + 3, b + 6, std::greater<int>());
-
-	BOOST_CHECK_EQUAL(v[3], 6);
-	BOOST_CHECK_EQUAL(v[4], 5);
-	BOOST_CHECK_EQUAL(v[5], 4);
-
-//	Mix them up and try again - single element
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b + 7, b + 8, std::greater<int>());
-	check_sequence   (b, v.end(), b + 7, b + 8, std::greater<int>());
-
-	BOOST_CHECK_EQUAL(v[7], 2);
-
-//	Mix them up and try again - at the end
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b + 7, v.end(), std::greater<int>());
-	check_sequence   (b, v.end(), b + 7, v.end(), std::greater<int>());
-
-	BOOST_CHECK_EQUAL(v[7], 2);
-	BOOST_CHECK_EQUAL(v[8], 1);
-	BOOST_CHECK_EQUAL(v[9], 0);
-
-//	Mix them up and try again - at the beginning
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, b + 2, std::greater<int>());
-	check_sequence   (b, v.end(), b, b + 2, std::greater<int>());
-
-	BOOST_CHECK_EQUAL(v[0], 9);
-	BOOST_CHECK_EQUAL(v[1], 8);
-
-//	Mix them up and try again - empty subrange
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, b, std::greater<int>());
-	check_sequence   (b, v.end(), b, b, std::greater<int>());
-
-//	Mix them up and try again - entire subrange
-	do_shuffle(v.begin(), v.end());
-	ba::sort_subrange(b, v.end(), b, v.end(), std::greater<int>());
-	check_sequence   (b, v.end(), b, v.end(), std::greater<int>());
-	}
-}
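For reference: the deleted sort_subrange_test.cpp above exercises boost::algorithm::sort_subrange, which sorts only a chosen subrange [sub_first, sub_last) so that it ends up holding, in order, exactly the elements a full sort of [first, last) would place there, with everything before the subrange smaller and everything after it larger. A minimal standalone sketch, not part of this patch, assuming the same Boost.Algorithm header used by the test and a C++11 compiler:

    #include <boost/algorithm/sort_subrange.hpp>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> v = {7, 2, 9, 4, 1, 8, 0, 5, 3, 6};
        const std::vector<int>::iterator b = v.begin();
        // Sort only positions [3, 6); afterwards v[3..5] hold the values a
        // full sort would put there (3, 4, 5), while the rest of the range
        // is merely partitioned around the subrange.
        boost::algorithm::sort_subrange(b, v.end(), b + 3, b + 6);
        for (int x : v) std::cout << x << ' ';  // values 3 4 5 appear at indices 3..5
        std::cout << '\n';
    }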
diff --git a/third_party/boostorg/algorithm/test/transform_exclusive_scan_test.cpp b/third_party/boostorg/algorithm/test/transform_exclusive_scan_test.cpp
deleted file mode 100644
index 6259f2b..0000000
--- a/third_party/boostorg/algorithm/test/transform_exclusive_scan_test.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-#include <numeric>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/iota.hpp>
-#include <boost/algorithm/cxx17/transform_exclusive_scan.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-int triangle(int n) { return n*(n+1)/2; }
-
-template <class _Tp>
-struct identity
-{
-    const _Tp& operator()(const _Tp& __x) const { return __x;}
-};
-
-
-template <class Iter1, class BOp, class UOp, class T, class Iter2>
-void
-test(Iter1 first, Iter1 last, BOp bop, UOp uop, T init, Iter2 rFirst, Iter2 rLast)
-{
-    std::vector<typename std::iterator_traits<Iter1>::value_type> v;
-//  Test not in-place
-    ba::transform_exclusive_scan(first, last, std::back_inserter(v), init, bop, uop);
-    BOOST_CHECK(std::distance(rFirst, rLast) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-
-//  Test in-place
-    v.clear();
-    v.assign(first, last);
-    ba::transform_exclusive_scan(v.begin(), v.end(), v.begin(), init, bop, uop);
-    BOOST_CHECK(std::distance(rFirst, rLast) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-}
-
-
-template <class Iter>
-void
-test()
-{
-          int ia[]     = { 1,  3,  5,   7,   9};
-    const int pResI0[] = { 0,  1,  4,   9,  16};        // with identity
-    const int mResI0[] = { 0,  0,  0,   0,   0};
-    const int pResN0[] = { 0, -1, -4,  -9, -16};        // with negate
-    const int mResN0[] = { 0,  0,  0,   0,   0};
-    const int pResI2[] = { 2,  3,  6,  11,  18};        // with identity
-    const int mResI2[] = { 2,  2,  6,  30, 210};
-    const int pResN2[] = { 2,  1, -2,  -7, -14};        // with negate
-    const int mResN2[] = { 2, -2,  6, -30, 210};
-    const unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    BOOST_CHECK(sa == sizeof(pResI0) / sizeof(pResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResI0) / sizeof(mResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResN0) / sizeof(pResN0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResN0) / sizeof(mResN0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResI2) / sizeof(pResI2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResI2) / sizeof(mResI2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResN2) / sizeof(pResN2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResN2) / sizeof(mResN2[0]));       // just to be sure
-
-    for (unsigned int i = 0; i < sa; ++i ) {
-        test(Iter(ia), Iter(ia + i), std::plus<int>(),       identity<int>(),    0, pResI0, pResI0 + i);
-        test(Iter(ia), Iter(ia + i), std::multiplies<int>(), identity<int>(),    0, mResI0, mResI0 + i);
-        test(Iter(ia), Iter(ia + i), std::plus<int>(),       std::negate<int>(), 0, pResN0, pResN0 + i);
-        test(Iter(ia), Iter(ia + i), std::multiplies<int>(), std::negate<int>(), 0, mResN0, mResN0 + i);
-        test(Iter(ia), Iter(ia + i), std::plus<int>(),       identity<int>(),    2, pResI2, pResI2 + i);
-        test(Iter(ia), Iter(ia + i), std::multiplies<int>(), identity<int>(),    2, mResI2, mResI2 + i);
-        test(Iter(ia), Iter(ia + i), std::plus<int>(),       std::negate<int>(), 2, pResN2, pResN2 + i);
-        test(Iter(ia), Iter(ia + i), std::multiplies<int>(), std::negate<int>(), 2, mResN2, mResN2 + i);
-        }
-}
-
-void basic_tests()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::transform_exclusive_scan(v.begin(), v.end(), v.begin(), 50, std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 50 + (int) i * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::transform_exclusive_scan(v.begin(), v.end(), v.begin(), 30, std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 30 + triangle(i-1));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::transform_exclusive_scan(v.begin(), v.end(), v.begin(), 40, std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 40 + triangle(i));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::transform_exclusive_scan(v.begin(), v.end(), std::back_inserter(res), 40, std::plus<int>(), identity<int>());
-    BOOST_CHECK(res.empty());
-    }
-}
-
-
-void test_transform_exclusive_scan_init()
-{
-	basic_tests();
-//  All the iterator categories
-    test<input_iterator        <const int*> >();
-    test<forward_iterator      <const int*> >();
-    test<bidirectional_iterator<const int*> >();
-    test<random_access_iterator<const int*> >();
-    test<const int*>();
-    test<      int*>();
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_transform_exclusive_scan_init();
-}
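For reference: the deleted test above covers boost::algorithm::transform_exclusive_scan(first, last, out, init, bop, uop), an exclusive scan in which output element i combines init with the transformed inputs strictly before position i, so the first output is always init. A minimal sketch matching the pResI0 expectations in the test, not part of this patch, assuming the same header and a C++11 compiler:

    #include <boost/algorithm/cxx17/transform_exclusive_scan.hpp>
    #include <functional>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main() {
        const std::vector<int> in = {1, 3, 5, 7, 9};
        std::vector<int> out;
        // Output i = 0 + in[0] + ... + in[i-1]; the element at i itself is excluded.
        boost::algorithm::transform_exclusive_scan(
            in.begin(), in.end(), std::back_inserter(out), 0,
            std::plus<int>(), [](int x) { return x; });
        for (int x : out) std::cout << x << ' ';  // prints: 0 1 4 9 16
        std::cout << '\n';
    }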
diff --git a/third_party/boostorg/algorithm/test/transform_inclusive_scan_test.cpp b/third_party/boostorg/algorithm/test/transform_inclusive_scan_test.cpp
deleted file mode 100644
index 1ce01c6..0000000
--- a/third_party/boostorg/algorithm/test/transform_inclusive_scan_test.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2017.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <vector>
-#include <functional>
-#include <numeric>
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx11/iota.hpp>
-#include <boost/algorithm/cxx17/transform_inclusive_scan.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-int triangle(int n) { return n*(n+1)/2; }
-
-template <class _Tp>
-struct identity
-{
-    const _Tp& operator()(const _Tp& __x) const { return __x;}
-};
-
-
-template <class Iter1, class BOp, class UOp, class Iter2>
-void
-transform_inclusive_scan_test(Iter1 first, Iter1 last, BOp bop, UOp uop, Iter2 rFirst, Iter2 rLast)
-{
-    std::vector<typename std::iterator_traits<Iter1>::value_type> v;
-//  Test not in-place
-    ba::transform_inclusive_scan(first, last, std::back_inserter(v), bop, uop);
-    BOOST_CHECK(std::distance(first, last) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-
-//  Test in-place
-    v.clear();
-    v.assign(first, last);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), bop, uop);
-    BOOST_CHECK(std::distance(first, last) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-}
-
-
-template <class Iter>
-void
-transform_inclusive_scan_test()
-{
-          int ia[]     = {  1,  3,   5,   7,    9};
-    const int pResI0[] = {  1,  4,   9,  16,   25};        // with identity
-    const int mResI0[] = {  1,  3,  15, 105,  945};
-    const int pResN0[] = { -1, -4,  -9, -16,  -25};        // with negate
-    const int mResN0[] = { -1,  3, -15, 105, -945};
-    const unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    BOOST_CHECK(sa == sizeof(pResI0) / sizeof(pResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResI0) / sizeof(mResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResN0) / sizeof(pResN0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResN0) / sizeof(mResN0[0]));       // just to be sure
-
-    for (unsigned int i = 0; i < sa; ++i ) {
-        transform_inclusive_scan_test(Iter(ia), Iter(ia + i), std::plus<int>(),       identity<int>(),    pResI0, pResI0 + i);
-        transform_inclusive_scan_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), identity<int>(),    mResI0, mResI0 + i);
-        transform_inclusive_scan_test(Iter(ia), Iter(ia + i), std::plus<int>(),       std::negate<int>(), pResN0, pResN0 + i);
-        transform_inclusive_scan_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), std::negate<int>(), mResN0, mResN0 + i);
-        }
-}
-
-
-//  Basic sanity
-void basic_transform_inclusive_scan_tests()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == (int)(i+1) * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == triangle(i));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>());
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == triangle(i + 1));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::transform_inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<int>(), identity<int>());
-    BOOST_CHECK(res.empty());
-    }
-}
-
-void test_transform_inclusive_scan()
-{
-    basic_transform_inclusive_scan_tests();
-
-//  All the iterator categories
-    transform_inclusive_scan_test<input_iterator        <const int*> >();
-    transform_inclusive_scan_test<forward_iterator      <const int*> >();
-    transform_inclusive_scan_test<bidirectional_iterator<const int*> >();
-    transform_inclusive_scan_test<random_access_iterator<const int*> >();
-    transform_inclusive_scan_test<const int*>();
-    transform_inclusive_scan_test<      int*>();
-}
-
-
-template <class Iter1, class BOp, class UOp, class T, class Iter2>
-void
-transform_inclusive_scan_init_test(Iter1 first, Iter1 last, BOp bop, UOp uop, T init, Iter2 rFirst, Iter2 rLast)
-{
-    std::vector<typename std::iterator_traits<Iter1>::value_type> v;
-//  Test not in-place
-    ba::transform_inclusive_scan(first, last, std::back_inserter(v), bop, uop, init);
-    BOOST_CHECK(std::distance(rFirst, rLast) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-
-//  Test in-place
-    v.clear();
-    v.assign(first, last);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), bop, uop, init);
-    BOOST_CHECK(std::distance(rFirst, rLast) == v.size());
-    BOOST_CHECK(std::equal(v.begin(), v.end(), rFirst));
-}
-
-
-template <class Iter>
-void
-transform_inclusive_scan_init_test()
-{
-          int ia[]     = {  1,  3,   5,    7,     9};
-    const int pResI0[] = {  1,  4,   9,   16,    25};        // with identity
-    const int mResI0[] = {  0,  0,   0,    0,     0};
-    const int pResN0[] = { -1, -4,  -9,  -16,   -25};        // with negate
-    const int mResN0[] = {  0,  0,   0,    0,     0};
-    const int pResI2[] = {  3,  6,  11,   18,    27};        // with identity
-    const int mResI2[] = {  2,  6,  30,  210,  1890};
-    const int pResN2[] = {  1, -2,  -7,  -14,   -23};        // with negate
-    const int mResN2[] = { -2,  6, -30,  210, -1890};
-    const unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    BOOST_CHECK(sa == sizeof(pResI0) / sizeof(pResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResI0) / sizeof(mResI0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResN0) / sizeof(pResN0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResN0) / sizeof(mResN0[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResI2) / sizeof(pResI2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResI2) / sizeof(mResI2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(pResN2) / sizeof(pResN2[0]));       // just to be sure
-    BOOST_CHECK(sa == sizeof(mResN2) / sizeof(mResN2[0]));       // just to be sure
-
-    for (unsigned int i = 0; i < sa; ++i ) {
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::plus<int>(),       identity<int>(),    0, pResI0, pResI0 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), identity<int>(),    0, mResI0, mResI0 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::plus<int>(),       std::negate<int>(), 0, pResN0, pResN0 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), std::negate<int>(), 0, mResN0, mResN0 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::plus<int>(),       identity<int>(),    2, pResI2, pResI2 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), identity<int>(),    2, mResI2, mResI2 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::plus<int>(),       std::negate<int>(), 2, pResN2, pResN2 + i);
-        transform_inclusive_scan_init_test(Iter(ia), Iter(ia + i), std::multiplies<int>(), std::negate<int>(), 2, mResN2, mResN2 + i);
-        }
-}
-
-
-//  Basic sanity
-void basic_transform_inclusive_scan_init_tests()
-{
-    {
-    std::vector<int> v(10);
-    std::fill(v.begin(), v.end(), 3);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>(), 50);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 50 + (int) (i + 1) * 3);
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 0);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>(), 30);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 30 + triangle(i));
-    }
-
-    {
-    std::vector<int> v(10);
-    ba::iota(v.begin(), v.end(), 1);
-    ba::transform_inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>(), identity<int>(), 40);
-    for (size_t i = 0; i < v.size(); ++i)
-        BOOST_CHECK(v[i] == 40 + triangle(i + 1));
-    }
-
-    {
-    std::vector<int> v, res;
-    ba::transform_inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<int>(), identity<int>(), 1);
-    BOOST_CHECK(res.empty());
-    }
-}
-
-void test_transform_inclusive_scan_init()
-{
-	basic_transform_inclusive_scan_init_tests();
-
-//  All the iterator categories
-    transform_inclusive_scan_init_test<input_iterator        <const int*> >();
-    transform_inclusive_scan_init_test<forward_iterator      <const int*> >();
-    transform_inclusive_scan_init_test<bidirectional_iterator<const int*> >();
-    transform_inclusive_scan_init_test<random_access_iterator<const int*> >();
-    transform_inclusive_scan_init_test<const int*>();
-    transform_inclusive_scan_init_test<      int*>();
-
-}
-
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_transform_inclusive_scan();
-  test_transform_inclusive_scan_init();
-}
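For reference: the deleted test above covers the two overloads of boost::algorithm::transform_inclusive_scan, (first, last, out, bop, uop) and (first, last, out, bop, uop, init), where output element i includes the transformed element at position i; note that, unlike the exclusive variant, the optional init comes last. A minimal sketch matching the pResI0 and pResI2 tables in the test, not part of this patch, assuming the same header and a C++11 compiler:

    #include <boost/algorithm/cxx17/transform_inclusive_scan.hpp>
    #include <functional>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main() {
        const std::vector<int> in = {1, 3, 5, 7, 9};

        std::vector<int> plain;  // inclusive scan: element i includes in[i]
        boost::algorithm::transform_inclusive_scan(
            in.begin(), in.end(), std::back_inserter(plain),
            std::plus<int>(), [](int x) { return x; });
        // plain == {1, 4, 9, 16, 25}

        std::vector<int> seeded;  // same scan, seeded with an initial value of 2
        boost::algorithm::transform_inclusive_scan(
            in.begin(), in.end(), std::back_inserter(seeded),
            std::plus<int>(), [](int x) { return x; }, 2);
        // seeded == {3, 6, 11, 18, 27}

        for (int x : seeded) std::cout << x << ' ';
        std::cout << '\n';
    }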
diff --git a/third_party/boostorg/algorithm/test/transform_reduce_test.cpp b/third_party/boostorg/algorithm/test/transform_reduce_test.cpp
deleted file mode 100644
index 796dd8c..0000000
--- a/third_party/boostorg/algorithm/test/transform_reduce_test.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
-   Copyright (c) Marshall Clow 2013.
-
-   Distributed under the Boost Software License, Version 1.0. (See accompanying
-   file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-    For more information, see http://www.boost.org
-*/
-
-#include <boost/config.hpp>
-#include <boost/algorithm/cxx17/transform_reduce.hpp>
-
-#include "iterator_test.hpp"
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace ba = boost::algorithm;
-
-template <class _Tp>
-struct identity
-{
-    const _Tp& operator()(const _Tp& __x) const { return __x;}
-};
-
-template <class _Tp>
-struct twice
-{
-  	const _Tp operator()(const _Tp& __x) const { return 2 * __x; }
-};
-
-
-template <class Iter1, class T, class BOp, class UOp>
-void
-test_init_bop_uop(Iter1 first1, Iter1 last1, T init, BOp bOp, UOp uOp, T x)
-{
-    BOOST_CHECK(ba::transform_reduce(first1, last1, init, bOp, uOp) == x);
-}
-
-template <class Iter>
-void
-test_init_bop_uop()
-{
-    int ia[]          = {1, 2, 3, 4, 5, 6};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-
-    test_init_bop_uop(Iter(ia), Iter(ia),    0, std::plus<int>(),       identity<int>(),       0);
-    test_init_bop_uop(Iter(ia), Iter(ia),    1, std::multiplies<int>(), identity<int>(),       1);
-    test_init_bop_uop(Iter(ia), Iter(ia+1),  0, std::multiplies<int>(), identity<int>(),       0);
-    test_init_bop_uop(Iter(ia), Iter(ia+1),  2, std::plus<int>(),       identity<int>(),       3);
-    test_init_bop_uop(Iter(ia), Iter(ia+2),  0, std::plus<int>(),       identity<int>(),       3);
-    test_init_bop_uop(Iter(ia), Iter(ia+2),  3, std::multiplies<int>(), identity<int>(),       6);
-    test_init_bop_uop(Iter(ia), Iter(ia+sa), 4, std::multiplies<int>(), identity<int>(),    2880);
-    test_init_bop_uop(Iter(ia), Iter(ia+sa), 4, std::plus<int>(),       identity<int>(),      25);
-
-    test_init_bop_uop(Iter(ia), Iter(ia),    0, std::plus<int>(),       twice<int>(),       0);
-    test_init_bop_uop(Iter(ia), Iter(ia),    1, std::multiplies<int>(), twice<int>(),       1);
-    test_init_bop_uop(Iter(ia), Iter(ia+1),  0, std::multiplies<int>(), twice<int>(),       0);
-    test_init_bop_uop(Iter(ia), Iter(ia+1),  2, std::plus<int>(),       twice<int>(),       4);
-    test_init_bop_uop(Iter(ia), Iter(ia+2),  0, std::plus<int>(),       twice<int>(),       6);
-    test_init_bop_uop(Iter(ia), Iter(ia+2),  3, std::multiplies<int>(), twice<int>(),      24);
-    test_init_bop_uop(Iter(ia), Iter(ia+sa), 4, std::multiplies<int>(), twice<int>(),  184320); // 64 * 2880
-    test_init_bop_uop(Iter(ia), Iter(ia+sa), 4, std::plus<int>(),       twice<int>(),      46);
-}
-
-void test_transform_reduce_init_bop_uop()
-{
-	BOOST_CHECK ( true );
-}
-
-template <class Iter1, class Iter2, class T, class Op1, class Op2>
-void
-test_init_bop_bop(Iter1 first1, Iter1 last1, Iter2 first2, T init, Op1 op1, Op2 op2, T x)
-{
-    BOOST_CHECK(ba::transform_reduce(first1, last1, first2, init, op1, op2) == x);
-}
-
-template <class SIter, class UIter>
-void
-test_init_bop_bop()
-{
-    int ia[]          = {1, 2, 3, 4, 5, 6};
-    unsigned int ua[] = {2, 4, 6, 8, 10,12};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    BOOST_CHECK(sa == sizeof(ua) / sizeof(ua[0]));       // just to be sure
-
-    test_init_bop_bop(SIter(ia), SIter(ia),    UIter(ua), 0, std::plus<int>(), std::multiplies<int>(),       0);
-    test_init_bop_bop(UIter(ua), UIter(ua),    SIter(ia), 1, std::multiplies<int>(), std::plus<int>(),       1);
-    test_init_bop_bop(SIter(ia), SIter(ia+1),  UIter(ua), 0, std::multiplies<int>(), std::plus<int>(),       0);
-    test_init_bop_bop(UIter(ua), UIter(ua+1),  SIter(ia), 2, std::plus<int>(), std::multiplies<int>(),       4);
-    test_init_bop_bop(SIter(ia), SIter(ia+2),  UIter(ua), 0, std::plus<int>(), std::multiplies<int>(),      10);
-    test_init_bop_bop(UIter(ua), UIter(ua+2),  SIter(ia), 3, std::multiplies<int>(), std::plus<int>(),      54);
-    test_init_bop_bop(SIter(ia), SIter(ia+sa), UIter(ua), 4, std::multiplies<int>(), std::plus<int>(), 2099520);
-    test_init_bop_bop(UIter(ua), UIter(ua+sa), SIter(ia), 4, std::plus<int>(), std::multiplies<int>(),     186);
-}
-
-void test_transform_reduce_init_bop_bop()
-{
-//  All the iterator categories
-    test_init_bop_bop<input_iterator        <const int*>, input_iterator        <const unsigned int*> >();
-    test_init_bop_bop<input_iterator        <const int*>, forward_iterator      <const unsigned int*> >();
-    test_init_bop_bop<input_iterator        <const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init_bop_bop<input_iterator        <const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init_bop_bop<forward_iterator      <const int*>, input_iterator        <const unsigned int*> >();
-    test_init_bop_bop<forward_iterator      <const int*>, forward_iterator      <const unsigned int*> >();
-    test_init_bop_bop<forward_iterator      <const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init_bop_bop<forward_iterator      <const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init_bop_bop<bidirectional_iterator<const int*>, input_iterator        <const unsigned int*> >();
-    test_init_bop_bop<bidirectional_iterator<const int*>, forward_iterator      <const unsigned int*> >();
-    test_init_bop_bop<bidirectional_iterator<const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init_bop_bop<bidirectional_iterator<const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init_bop_bop<random_access_iterator<const int*>, input_iterator        <const unsigned int*> >();
-    test_init_bop_bop<random_access_iterator<const int*>, forward_iterator      <const unsigned int*> >();
-    test_init_bop_bop<random_access_iterator<const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init_bop_bop<random_access_iterator<const int*>, random_access_iterator<const unsigned int*> >();
-
-//  just plain pointers (const vs. non-const, too)
-    test_init_bop_bop<const int*, const unsigned int *>();
-    test_init_bop_bop<const int*,       unsigned int *>();
-    test_init_bop_bop<      int*, const unsigned int *>();
-    test_init_bop_bop<      int*,       unsigned int *>();
-}
-
-template <class Iter1, class Iter2, class T>
-void
-test_init(Iter1 first1, Iter1 last1, Iter2 first2, T init, T x)
-{
-    BOOST_CHECK(ba::transform_reduce(first1, last1, first2, init) == x);
-}
-
-template <class SIter, class UIter>
-void
-test_init()
-{
-    int ia[]          = {1, 2, 3, 4, 5, 6};
-    unsigned int ua[] = {2, 4, 6, 8, 10,12};
-    unsigned sa = sizeof(ia) / sizeof(ia[0]);
-    BOOST_CHECK(sa == sizeof(ua) / sizeof(ua[0]));       // just to be sure
-
-    test_init(SIter(ia), SIter(ia),    UIter(ua), 0,   0);
-    test_init(UIter(ua), UIter(ua),    SIter(ia), 1,   1);
-    test_init(SIter(ia), SIter(ia+1),  UIter(ua), 0,   2);
-    test_init(UIter(ua), UIter(ua+1),  SIter(ia), 2,   4);
-    test_init(SIter(ia), SIter(ia+2),  UIter(ua), 0,  10);
-    test_init(UIter(ua), UIter(ua+2),  SIter(ia), 3,  13);
-    test_init(SIter(ia), SIter(ia+sa), UIter(ua), 0, 182);
-    test_init(UIter(ua), UIter(ua+sa), SIter(ia), 4, 186);
-}
-
-void test_transform_reduce_init()
-{
-//  All the iterator categories
-    test_init<input_iterator        <const int*>, input_iterator        <const unsigned int*> >();
-    test_init<input_iterator        <const int*>, forward_iterator      <const unsigned int*> >();
-    test_init<input_iterator        <const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init<input_iterator        <const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init<forward_iterator      <const int*>, input_iterator        <const unsigned int*> >();
-    test_init<forward_iterator      <const int*>, forward_iterator      <const unsigned int*> >();
-    test_init<forward_iterator      <const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init<forward_iterator      <const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init<bidirectional_iterator<const int*>, input_iterator        <const unsigned int*> >();
-    test_init<bidirectional_iterator<const int*>, forward_iterator      <const unsigned int*> >();
-    test_init<bidirectional_iterator<const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init<bidirectional_iterator<const int*>, random_access_iterator<const unsigned int*> >();
-
-    test_init<random_access_iterator<const int*>, input_iterator        <const unsigned int*> >();
-    test_init<random_access_iterator<const int*>, forward_iterator      <const unsigned int*> >();
-    test_init<random_access_iterator<const int*>, bidirectional_iterator<const unsigned int*> >();
-    test_init<random_access_iterator<const int*>, random_access_iterator<const unsigned int*> >();
-
-//  just plain pointers (const vs. non-const, too)
-    test_init<const int*, const unsigned int *>();
-    test_init<const int*,       unsigned int *>();
-    test_init<      int*, const unsigned int *>();
-    test_init<      int*,       unsigned int *>();
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-  test_transform_reduce_init();
-  test_transform_reduce_init_bop_uop();
-  test_transform_reduce_init_bop_bop();
-}
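For reference: the deleted test above covers the transform_reduce overloads from <boost/algorithm/cxx17/transform_reduce.hpp>: a two-range form (an inner product), a two-range form with user-supplied reduce/combine operations, and a single-range form taking a binary reduction plus a unary transform. A minimal sketch of the first and last forms, reproducing two of the expected values from the test (182 and 46), not part of this patch, assuming the same header and a C++11 compiler:

    #include <boost/algorithm/cxx17/transform_reduce.hpp>
    #include <functional>
    #include <iostream>
    #include <vector>

    int main() {
        const std::vector<int> a = {1, 2, 3, 4, 5, 6};
        const std::vector<int> b = {2, 4, 6, 8, 10, 12};

        // Two-range form: 0 + a[0]*b[0] + ... + a[5]*b[5] == 182.
        const int dot = boost::algorithm::transform_reduce(a.begin(), a.end(), b.begin(), 0);

        // Single-range form: double every element, then sum, starting from 4 -> 46.
        const int doubled_sum = boost::algorithm::transform_reduce(
            a.begin(), a.end(), 4, std::plus<int>(), [](int x) { return 2 * x; });

        std::cout << dot << ' ' << doubled_sum << '\n';  // prints: 182 46
    }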
diff --git a/third_party/boostorg/any/.gitattributes b/third_party/boostorg/any/.gitattributes
deleted file mode 100644
index 3e84d7c..0000000
--- a/third_party/boostorg/any/.gitattributes
+++ /dev/null
@@ -1,96 +0,0 @@
-* text=auto !eol svneol=native#text/plain
-*.gitattributes text svneol=native#text/plain
-
-# Scriptish formats
-*.bat        text svneol=native#text/plain
-*.bsh        text svneol=native#text/x-beanshell
-*.cgi        text svneol=native#text/plain
-*.cmd        text svneol=native#text/plain
-*.js         text svneol=native#text/javascript
-*.php        text svneol=native#text/x-php
-*.pl         text svneol=native#text/x-perl
-*.pm         text svneol=native#text/x-perl
-*.py         text svneol=native#text/x-python
-*.sh         eol=lf svneol=LF#text/x-sh
-configure    eol=lf svneol=LF#text/x-sh
-
-# Image formats
-*.bmp        binary svneol=unset#image/bmp
-*.gif        binary svneol=unset#image/gif
-*.ico        binary svneol=unset#image/ico
-*.jpeg       binary svneol=unset#image/jpeg
-*.jpg        binary svneol=unset#image/jpeg
-*.png        binary svneol=unset#image/png
-*.tif        binary svneol=unset#image/tiff
-*.tiff       binary svneol=unset#image/tiff
-*.svg        text svneol=native#image/svg%2Bxml
-
-# Data formats
-*.pdf        binary svneol=unset#application/pdf
-*.avi        binary svneol=unset#video/avi
-*.doc        binary svneol=unset#application/msword
-*.dsp        text svneol=crlf#text/plain
-*.dsw        text svneol=crlf#text/plain
-*.eps        binary svneol=unset#application/postscript
-*.gz         binary svneol=unset#application/gzip
-*.mov        binary svneol=unset#video/quicktime
-*.mp3        binary svneol=unset#audio/mpeg
-*.ppt        binary svneol=unset#application/vnd.ms-powerpoint
-*.ps         binary svneol=unset#application/postscript
-*.psd        binary svneol=unset#application/photoshop
-*.rdf        binary svneol=unset#text/rdf
-*.rss        text svneol=unset#text/xml
-*.rtf        binary svneol=unset#text/rtf
-*.sln        text svneol=native#text/plain
-*.swf        binary svneol=unset#application/x-shockwave-flash
-*.tgz        binary svneol=unset#application/gzip
-*.vcproj     text svneol=native#text/xml
-*.vcxproj    text svneol=native#text/xml
-*.vsprops    text svneol=native#text/xml
-*.wav        binary svneol=unset#audio/wav
-*.xls        binary svneol=unset#application/vnd.ms-excel
-*.zip        binary svneol=unset#application/zip
-
-# Text formats
-.htaccess    text svneol=native#text/plain
-*.bbk        text svneol=native#text/xml
-*.cmake      text svneol=native#text/plain
-*.css        text svneol=native#text/css
-*.dtd        text svneol=native#text/xml
-*.htm        text svneol=native#text/html
-*.html       text svneol=native#text/html
-*.ini        text svneol=native#text/plain
-*.log        text svneol=native#text/plain
-*.mak        text svneol=native#text/plain
-*.qbk        text svneol=native#text/plain
-*.rst        text svneol=native#text/plain
-*.sql        text svneol=native#text/x-sql
-*.txt        text svneol=native#text/plain
-*.xhtml      text svneol=native#text/xhtml%2Bxml
-*.xml        text svneol=native#text/xml
-*.xsd        text svneol=native#text/xml
-*.xsl        text svneol=native#text/xml
-*.xslt       text svneol=native#text/xml
-*.xul        text svneol=native#text/xul
-*.yml        text svneol=native#text/plain
-boost-no-inspect text svneol=native#text/plain
-CHANGES      text svneol=native#text/plain
-COPYING      text svneol=native#text/plain
-INSTALL      text svneol=native#text/plain
-Jamfile      text svneol=native#text/plain
-Jamroot      text svneol=native#text/plain
-Jamfile.v2   text svneol=native#text/plain
-Jamrules     text svneol=native#text/plain
-Makefile*    text svneol=native#text/plain
-README       text svneol=native#text/plain
-TODO         text svneol=native#text/plain
-
-# Code formats
-*.c          text svneol=native#text/plain
-*.cpp        text svneol=native#text/plain
-*.h          text svneol=native#text/plain
-*.hpp        text svneol=native#text/plain
-*.ipp        text svneol=native#text/plain
-*.tpp        text svneol=native#text/plain
-*.jam        text svneol=native#text/plain
-*.java       text svneol=native#text/plain
diff --git a/third_party/boostorg/any/.travis.yml b/third_party/boostorg/any/.travis.yml
deleted file mode 100644
index 1c60324..0000000
--- a/third_party/boostorg/any/.travis.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-# Use, modification, and distribution are
-# subject to the Boost Software License, Version 1.0. (See accompanying
-# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-#
-# Copyright Antony Polukhin 2014-2016.
-
-#
-# See https://svn.boost.org/trac/boost/wiki/TravisCoverals for description of this file
-# and how it can be used with Boost libraries.
-#
-# File revision #7
-
-sudo: false
-language: cpp
-compiler:
-    - gcc
-#    - clang
-
-os:
-    - linux
-
-env:
-    global:
-        # Autodetect Boost branch by using the following code: - BRANCH_TO_TEST=$TRAVIS_BRANCH
-        # or just directly specify it
-        - BRANCH_TO_TEST=$TRAVIS_BRANCH
-
-        # Files whose coverage results must be ignored (files from other projects).
-        # Example: - IGNORE_COVERAGE='*/boost/progress.hpp */filesystem/src/*'
-        - IGNORE_COVERAGE=''
-
-        # Explicitly remove the following library from Boost. This may be useful if, for example, you're running Travis
-        # from the `Boost.DLL` repo while Boost already has `dll`.
-        #
-        # By default it is equal to - BOOST_REMOVE=$(basename $TRAVIS_BUILD_DIR)
-        # This will force use of the local repo content instead of Boost's default.
-        - BOOST_REMOVE=$(basename $TRAVIS_BUILD_DIR)
-
-    matrix:
-        # Note that "--coverage -fsanitize=address,leak,undefined -DBOOST_TRAVISCI_BUILD" are added automatically later in this file
-        - CXX_FLAGS="-std=c++98" LINK_FLAGS="" TOOLSET=gcc-6
-        - CXX_FLAGS="-std=c++11" LINK_FLAGS="" TOOLSET=gcc-6
-        - CXX_FLAGS="-std=c++1y" LINK_FLAGS="" TOOLSET=gcc-6
-        #- CXX_FLAGS="-std=c++11 -stdlib=libc++" LINK_FLAGS="-stdlib=libc++" TOOLSET=clang
-        #- CXX_FLAGS="-std=c++1y -stdlib=libc++" LINK_FLAGS="-stdlib=libc++" TOOLSET=clang
-
-###############################################################################################################
-# From this point and below code is same for all the Boost libs
-###############################################################################################################
-
-
-# Installing additional tools
-addons:
-  apt:
-    sources:
-    - ubuntu-toolchain-r-test
-    - git-core
-    packages:
-    - git
-    - python-yaml
-    - gcc-6
-    - g++-6
-    - clang
-    - libc++-dev
-
-before_install:
-    # Set this to the name of the library
-    - PROJECT_TO_TEST=`basename $TRAVIS_BUILD_DIR`
-    # Cloning Boost libraries (fast nondeep cloning)
-    - BOOST=$HOME/boost-local
-    - echo "Testing $PROJECT_TO_TEST, to remove $BOOST/libs/$BOOST_REMOVE, testing branch $BRANCH_TO_TEST"
-    - git init $BOOST
-    - cd $BOOST
-    - git remote add --no-tags -t $BRANCH_TO_TEST origin https://github.com/boostorg/boost.git
-    - git fetch --depth=1
-    - git checkout $BRANCH_TO_TEST
-    - git submodule update --jobs=3 --init --merge
-    - git remote set-branches --add origin $BRANCH_TO_TEST
-    - git pull --recurse-submodules
-    - git status
-    - rm -rf $BOOST/libs/$BOOST_REMOVE
-    - mv $TRAVIS_BUILD_DIR $BOOST/libs/$PROJECT_TO_TEST
-    - TRAVIS_BUILD_DIR=$BOOST/libs/$PROJECT_TO_TEST
-    - ./bootstrap.sh
-    - ./b2 headers
-    - cd $BOOST/libs/$PROJECT_TO_TEST/test/
-
-script:
-    # `--coverage` flags required to generate coverage info for Coveralls
-    - ../../../b2 "testing.launcher=LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libasan.so.3 " address-model=64 architecture=x86 toolset=$TOOLSET cxxflags="--coverage -fsanitize=address,leak,undefined -DBOOST_TRAVISCI_BUILD $CXX_FLAGS" linkflags="$LINK_FLAGS --coverage -lasan -lubsan"
-
-after_success:
-    # Copying Coveralls data to a separate folder
-    - mkdir -p $TRAVIS_BUILD_DIR/coverals
-    - find ../../../bin.v2/ -name "*.gcda" -exec cp "{}" $TRAVIS_BUILD_DIR/coverals/ \;
-    - find ../../../bin.v2/ -name "*.gcno" -exec cp "{}" $TRAVIS_BUILD_DIR/coverals/ \;
-    - find ../../../bin.v2/ -name "*.da" -exec cp "{}" $TRAVIS_BUILD_DIR/coverals/ \;
-    - find ../../../bin.v2/ -name "*.no" -exec cp "{}" $TRAVIS_BUILD_DIR/coverals/ \;
-    - wget https://github.com/linux-test-project/lcov/archive/v1.12.zip
-    - unzip v1.12.zip
-    - LCOV="`pwd`/lcov-1.12/bin/lcov --gcov-tool gcov-6"
-
-    # Preparing Coveralls data by changing the data format to a readable one
-    - echo "$LCOV --directory $TRAVIS_BUILD_DIR/coverals --base-directory `pwd` --capture --output-file $TRAVIS_BUILD_DIR/coverals/coverage.info"
-    - $LCOV --directory $TRAVIS_BUILD_DIR/coverals --base-directory `pwd` --capture --output-file $TRAVIS_BUILD_DIR/coverals/coverage.info
-
-    # ... erasing /test/ /example/ folder data
-    - cd $BOOST
-    - $LCOV --remove $TRAVIS_BUILD_DIR/coverals/coverage.info "/usr*" "*/$PROJECT_TO_TEST/test/*" $IGNORE_COVERAGE "*/$PROJECT_TO_TEST/tests/*" "*/$PROJECT_TO_TEST/examples/*" "*/$PROJECT_TO_TEST/example/*" -o $TRAVIS_BUILD_DIR/coverals/coverage.info
-
-    # ... erasing data that is not related to this project directly
-    - OTHER_LIBS=`grep "submodule .*" .gitmodules | sed 's/\[submodule\ "\(.*\)"\]/"\*\/boost\/\1\.hpp" "\*\/boost\/\1\/\*"/g'| sed "/\"\*\/boost\/$PROJECT_TO_TEST\/\*\"/d" | sed ':a;N;$!ba;s/\n/ /g'`
-    - echo $OTHER_LIBS
-    - eval "$LCOV --remove $TRAVIS_BUILD_DIR/coverals/coverage.info $OTHER_LIBS -o $TRAVIS_BUILD_DIR/coverals/coverage.info"
-
-    # Sending data to Coveralls
-    - cd $TRAVIS_BUILD_DIR
-    - gem install coveralls-lcov
-    - coveralls-lcov coverals/coverage.info
diff --git a/third_party/boostorg/any/BUILD b/third_party/boostorg/any/BUILD
deleted file mode 100644
index 892d710..0000000
--- a/third_party/boostorg/any/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-licenses(["notice"])  # boost
-
-cc_library(
-    name = "any",
-    hdrs = glob(["include/**"]),
-    includes = ["include"],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/boostorg/any/README.md b/third_party/boostorg/any/README.md
deleted file mode 100644
index 2da4bdc..0000000
--- a/third_party/boostorg/any/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# [Boost.Any](http://boost.org/libs/any)
-Boost.Any is a part of the [Boost C++ Libraries](http://github.com/boostorg). It is a safe, generic container for single values of different value types.
-
-### Test results
-
-@               | Build         | Tests coverage | More info
-----------------|-------------- | -------------- |-----------
-Develop branch: | [![Build Status](https://travis-ci.org/apolukhin/any.svg?branch=develop)](https://travis-ci.org/apolukhin/any) [![Build status](https://ci.appveyor.com/api/projects/status/dmugl75nfhjnx7ot/branch/develop?svg=true)](https://ci.appveyor.com/project/apolukhin/any/branch/develop) | [![Coverage Status](https://coveralls.io/repos/apolukhin/any/badge.png?branch=develop)](https://coveralls.io/r/apolukhin/any?branch=develop) | [details...](http://www.boost.org/development/tests/develop/developer/any.html)
-Master branch:  | [![Build Status](https://travis-ci.org/apolukhin/any.svg?branch=master)](https://travis-ci.org/apolukhin/any) [![Build status](https://ci.appveyor.com/api/projects/status/dmugl75nfhjnx7ot/branch/master?svg=true)](https://ci.appveyor.com/project/apolukhin/any/branch/master) | [![Coverage Status](https://coveralls.io/repos/apolukhin/any/badge.png?branch=master)](https://coveralls.io/r/apolukhin/any?branch=master) | [details...](http://www.boost.org/development/tests/master/developer/any.html)
-
-
-[Open Issues](https://svn.boost.org/trac/boost/query?status=!closed&component=any)
-
-### License
-
-Distributed under the [Boost Software License, Version 1.0](http://boost.org/LICENSE_1_0.txt).
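An illustrative usage sketch of the interface the README describes (not part of the removed sources), assuming only the public boost::any API:

    // Store a value of one type, then query and extract it safely.
    #include <boost/any.hpp>
    #include <iostream>
    #include <string>

    int main() {
        boost::any value = std::string("hello");

        // The pointer form of any_cast returns null on a type mismatch.
        if (const std::string* s = boost::any_cast<std::string>(&value)) {
            std::cout << *s << '\n';
        }

        value = 42;  // the same object can later hold a different type
        std::cout << boost::any_cast<int>(value) << '\n';
        return 0;
    }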
diff --git a/third_party/boostorg/any/doc/Jamfile.v2 b/third_party/boostorg/any/doc/Jamfile.v2
deleted file mode 100644
index 8e2dca9..0000000
--- a/third_party/boostorg/any/doc/Jamfile.v2
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright Antony Polukhin 2013. Use, modification, and distribution are
-# subject to the Boost Software License, Version 1.0. (See accompanying
-# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-using boostbook ;
-
-boostbook standalone
-	: any.xml 
-	: <xsl:param>boost.root=../../../.. ;
-
-###############################################################################
-alias boostdoc
-    : any.xml
-    :
-    :
-    : ;
-explicit boostdoc ;
-alias boostrelease ;
-explicit boostrelease ;
diff --git a/third_party/boostorg/any/doc/any.xml b/third_party/boostorg/any/doc/any.xml
deleted file mode 100644
index 92944cc..0000000
--- a/third_party/boostorg/any/doc/any.xml
+++ /dev/null
@@ -1,579 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-  "http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-<library name="Any" dirname="any" xmlns:xi="http://www.w3.org/2001/XInclude" 
-         id="any" last-revision="$Date$">
-  <libraryinfo>
-    <author>
-      <firstname>Kevlin</firstname>
-      <surname>Henney</surname>
-    </author>
-
-    <copyright>
-      <year>2001</year>
-      <holder>Kevlin Henney</holder>
-    </copyright>
-
-    <librarypurpose>
-      Safe, generic container for single values of different value types
-    </librarypurpose> 
-    <librarycategory name="category:data-structures"/>
-
-    <legalnotice>
-      <para>Distributed under the Boost Software License, Version 1.0.
-      (See accompanying file <filename>LICENSE_1_0.txt</filename> or copy at 
-      <ulink
-      url="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</ulink>)
-      </para>
-    </legalnotice>
-  </libraryinfo>
-
-  <title>Boost.Any</title>
-
-  <section>
-    <title>Introduction</title>
-
-    <para>There are times when a generic (in the sense of
-    <emphasis>general</emphasis> as opposed to
-    <emphasis>template-based programming</emphasis>) type is needed:
-    variables that are truly variable, accommodating values of many
-    other more specific types rather than C++'s normal strict and
-    static types. We can distinguish three basic kinds of generic
-    type:</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>Converting types that can hold one of a number of
-        possible value types, e.g. <code>int</code> and
-        <code>string</code>, and freely convert between them, for
-        instance interpreting <code>5</code> as <code>"5"</code> or
-        vice-versa.  Such types are common in scripting and other
-        interpreted
-        languages. 
-        <code><functionname>boost::lexical_cast</functionname></code>
-        supports such conversion functionality.</para>
-      </listitem>
-      <listitem>
-        <para>
-        Discriminated types that contain values of different types but
-        do not attempt conversion between them, i.e. <code>5</code> is
-        held strictly as an <code>int</code> and is not implicitly
-        convertible either to <code>"5"</code> or to
-        <code>5.0</code>. Their indifference to interpretation but
-        awareness of type effectively makes them safe, generic
-        containers of single values, with no scope for surprises from
-        ambiguous conversions.</para>
-      </listitem>
-      <listitem>
-        <para>
-        Indiscriminate types that can refer to anything but are
-        oblivious to the actual underlying type, entrusting all forms
-        of access and interpretation to the programmer. This niche is
-        dominated by <code>void *</code>, which offers plenty of scope
-        for surprising, undefined behavior.</para>
-      </listitem>
-    </itemizedlist>
-
-    <para>The <code><classname>boost::any</classname></code> class
-    (based on the class of the same name described in <ulink
-    url="http://www.two-sdg.demon.co.uk/curbralan/papers/ValuedConversions.pdf">"Valued
-    Conversions"</ulink> by Kevlin Henney, <emphasis>C++
-    Report</emphasis> 12(7), July/August 2000) is a variant value type
-    based on the second category. It supports copying of any value
-    type and safe checked extraction of that value strictly against
-    its type. A similar design, offering more appropriate operators,
-    can be used for a generalized function adaptor,
-    <code>any_function</code>, a generalized iterator adaptor,
-    <code>any_iterator</code>, and other object types that need
-    uniform runtime treatment but support only compile-time template
-    parameter conformance.</para>
-  </section>
-
-  <section>
-    <title>Examples</title>
-
-    <using-namespace name="boost"/>
-    <using-class name="boost::any"/>
-
-    <para>The following code demonstrates the syntax for using
-    implicit conversions to and copying of any objects:</para>
-
-<programlisting name="any.example.first">
-#include &lt;list&gt;
-#include &lt;boost/any.hpp&gt;
-
-using <functionname>boost::any_cast</functionname>;
-typedef std::list&lt;<classname>boost::any</classname>&gt; many;
-
-void append_int(many &amp; values, int value)
-{
-    <classname>boost::any</classname> to_append = value;
-    values.push_back(to_append);
-}
-
-void append_string(many &amp; values, const std::string &amp; value)
-{
-    values.push_back(value);
-}
-
-void append_char_ptr(many &amp; values, const char * value)
-{
-    values.push_back(value);
-}
-
-void append_any(many &amp; values, const <classname>boost::any</classname> &amp; value)
-{
-    values.push_back(value);
-}
-
-void append_nothing(many &amp; values)
-{
-    values.push_back(<classname>boost::any</classname>());
-}
-</programlisting>
-
-    <para>The following predicates follow on from the previous
-    definitions and demonstrate the use of queries on any
-    objects:</para>
-
-<programlisting name="any.example.second">
-bool is_empty(const <classname>boost::any</classname> &amp; operand)
-{
-    return operand.<methodname>empty</methodname>();
-}
-
-bool is_int(const <classname>boost::any</classname> &amp; operand)
-{
-    return operand.<methodname>type</methodname>() == typeid(int);
-}
-
-bool is_char_ptr(const <classname>boost::any</classname> &amp; operand)
-{
-    try
-    {
-        <functionname>any_cast</functionname>&lt;const char *&gt;(operand);
-        return true;
-    }
-    catch(const <classname>boost::bad_any_cast</classname> &amp;)
-    {
-        return false;
-    }
-}
-
-bool is_string(const <classname>boost::any</classname> &amp; operand)
-{
-    return <functionname>any_cast</functionname>&lt;std::string&gt;(&amp;operand);
-}
-
-void count_all(many &amp; values, std::ostream &amp; out)
-{
-    out &lt;&lt; "#empty == "
-        &lt;&lt; std::count_if(values.begin(), values.end(), is_empty) &lt;&lt; std::endl;
-    out &lt;&lt; "#int == "
-        &lt;&lt; std::count_if(values.begin(), values.end(), is_int) &lt;&lt; std::endl;
-    out &lt;&lt; "#const char * == "
-        &lt;&lt; std::count_if(values.begin(), values.end(), is_char_ptr) &lt;&lt; std::endl;
-    out &lt;&lt; "#string == "
-        &lt;&lt; std::count_if(values.begin(), values.end(), is_string) &lt;&lt; std::endl;
-}
-</programlisting>
-
-    <para>The following type, patterned after the OMG's Property Service, defines name-value pairs for arbitrary value types:</para>
-
-<programlisting>
-struct property
-{
-    property();
-    property(const std::string &amp;, const <classname>boost::any</classname> &amp;);
-
-    std::string name;
-    <classname>boost::any</classname> value;
-};
-
-typedef std::list&lt;property&gt; properties;
-</programlisting>
-
-    <para>The following base class demonstrates one approach to
-    runtime polymorphism based callbacks that also require arbitrary
-    argument types. The absence of virtual member templates requires
-    that different solutions have different trade-offs in terms of
-    efficiency, safety, and generality. Using a checked variant type
-    offers one approach:</para>
-
-<programlisting>
-class consumer
-{
-public:
-    virtual void notify(const <classname>any</classname> &amp;) = 0;
-    ...
-};
-</programlisting>
-  </section>
-
-  <library-reference>
-    <section id="any.ValueType">
-      <title><emphasis>ValueType</emphasis> requirements</title>
-
-      <para>Values are strongly informational objects for which
-      identity is not significant, i.e. the focus is principally on
-      their state content and any behavior organized around
-      that. Another distinguishing feature of values is their
-      granularity: normally fine-grained objects representing simple
-      concepts in the system such as quantities.</para>
-
-      <para>As the emphasis of a value lies in its state not its
-      identity, values can be copied and typically assigned one to
-      another, requiring the explicit or implicit definition of a
-      public copy constructor and public assignment operator. Values
-      typically live within other scopes, i.e. within objects or
-      blocks, rather than on the heap. Values are therefore normally
-      passed around and manipulated directly as variables or through
-      references, but not as pointers that emphasize identity and
-      indirection.</para>
-
-      <para>The specific requirements on value types to be used in an
-      <code><classname alt="boost::any">any</classname></code>
-      are:</para>
-
-      <itemizedlist spacing="compact">
-        <listitem><simpara>A <emphasis>ValueType</emphasis> is
-          <emphasis>CopyConstructible</emphasis> [20.1.3].</simpara>
-        </listitem>
-        
-        <listitem><simpara>The destructor for a
-        <emphasis>ValueType</emphasis> upholds the no-throw
-        exception-safety guarantee.</simpara>
-        </listitem>
-      </itemizedlist>
-    </section>
-
-    <header name="boost/any.hpp">
-      <namespace name="boost">
-        <class name="bad_any_cast">
-          <inherit access="public">
-            <classname>std::bad_cast</classname>
-          </inherit>
-          <purpose>The exception thrown in the event of a failed
-          <code><functionname>any_cast</functionname></code> of an
-          <code><classname>any</classname></code> value.</purpose>
-
-          <method name="what" specifiers="virtual" cv="const">
-            <type>const char *</type>
-          </method>
-        </class>
-
-        <class name="any">
-          <purpose>A class whose instances can hold instances of any
-          type that satisfies <link
-          linkend="any.ValueType">ValueType</link>
-          requirements.</purpose>
-
-          <constructor>
-            <postconditions><simpara><code>this-&gt;<methodname>empty</methodname>()</code></simpara></postconditions>
-          </constructor>
-
-          <constructor>
-            <parameter name="other">
-              <paramtype>const <classname>any</classname> &amp;</paramtype>
-            </parameter>
-
-            <effects><simpara> Copy constructor that copies content of
-            <code>other</code> into new instance, so that any content
-            is equivalent in both type and value to the content of
-            <code>other</code>, or empty if <code>other</code> is
-            empty. </simpara></effects>
-
-            <throws><simpara>May fail with a
-            <code><classname>std::bad_alloc</classname></code>
-            exception or any exceptions arising from the copy
-            constructor of the contained type.</simpara></throws>
-          </constructor>
-
-          <constructor>
-            <parameter name="other">
-              <paramtype><classname>any</classname> &amp;&amp;</paramtype>
-            </parameter>
-
-            <effects><simpara> Move constructor that moves content of
-            <code>other</code> into new instance and leaves <code>other</code>
-            empty. </simpara></effects>
-            <precondition>C++11 compatible compiler.</precondition>
-            <postconditions><simpara><code>other-&gt;<methodname>empty</methodname>()</code></simpara></postconditions>
-            <throws><simpara>Nothing.</simpara></throws>
-          </constructor>
-          
-          <constructor>
-            <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-
-            <parameter name="value">
-              <paramtype>const ValueType &amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Makes a copy of <code>value</code>, so
-            that the initial content of the new instance is equivalent
-            in both type and value to
-            <code>value</code>.</simpara></effects>
-
-            <throws><simpara><code><classname>std::bad_alloc</classname></code>
-            or any exceptions arising from the copy constructor of the
-            contained type.</simpara></throws>
-          </constructor>
-          
-          <constructor>
-            <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-
-            <parameter name="value">
-              <paramtype>ValueType &amp;&amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Forwards <code>value</code>, so
-            that the initial content of the new instance is equivalent
-            in both type and value to
-            <code>value</code> before the forward.</simpara></effects>
-
-            <precondition>C++11 compatible compiler.</precondition>
-            <throws><simpara><code><classname>std::bad_alloc</classname></code>
-            or any exceptions arising from the copy constructor of the
-            contained type.</simpara></throws>
-          </constructor>
-
-          <destructor>
-            <effects><simpara>Releases any and all resources used in
-            management of instance.</simpara></effects>
-            <throws><simpara>Nothing.</simpara></throws>
-          </destructor>
-
-          <copy-assignment>
-            <type><classname>any</classname> &amp;</type>
-
-            <parameter name="rhs">
-              <paramtype>const <classname>any</classname> &amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Copies content of <code>rhs</code> into
-            current instance, discarding previous content, so that the
-            new content is equivalent in both type and value to the
-            content of <code>rhs</code>, or empty if
-            <code>rhs.<methodname>empty</methodname>()</code>.</simpara></effects>
-
-            <throws><simpara><code><classname>std::bad_alloc</classname></code>
-            or any exceptions arising from the copy constructor of the
-            contained type. Assignment satisfies the strong guarantee
-            of exception safety.</simpara></throws>
-          </copy-assignment>
-          
-          <copy-assignment>
-            <type><classname>any</classname> &amp;</type>
-
-            <parameter name="rhs">
-              <paramtype><classname>any</classname> &amp;&amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Moves content of <code>rhs</code> into
-            current instance, discarding previous content, so that the
-            new content is equivalent in both type and value to the
-            content of <code>rhs</code> before move, or empty if
-            <code>rhs.<methodname>empty</methodname>()</code>.</simpara></effects>
-
-            <precondition>C++11 compatible compiler.</precondition>
-            <postconditions><simpara><code>rhs-&gt;<methodname>empty</methodname>()</code></simpara></postconditions>
-            <throws><simpara>Nothing.</simpara></throws>
-          </copy-assignment>
-
-          <copy-assignment>
-             <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-
-            <type><classname>any</classname> &amp;</type>
-
-            <parameter name="rhs">
-              <paramtype>const ValueType &amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Makes a copy of <code>rhs</code>,
-            discarding previous content, so that the new content is
-            equivalent in both type and value to
-            <code>rhs</code>.</simpara></effects>
-
-            <throws><simpara><code><classname>std::bad_alloc</classname></code>
-            or any exceptions arising from the copy constructor of the
-            contained type. Assignment satisfies the strong guarantee
-            of exception safety.</simpara></throws>
-          </copy-assignment>
-
-          <copy-assignment>
-             <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-
-            <type><classname>any</classname> &amp;</type>
-
-            <parameter name="rhs">
-              <paramtype>ValueType &amp;&amp;</paramtype>
-            </parameter>
-
-            <effects><simpara>Forwards <code>rhs</code>,
-            discarding previous content, so that the new content is
-            equivalent in both type and value to
-            <code>rhs</code> before forward.</simpara></effects>
-
-            <precondition>C++11 compatible compiler.</precondition>
-            <throws><simpara><code><classname>std::bad_alloc</classname></code>
-            or any exceptions arising from the move or copy constructor of the
-            contained type. Assignment satisfies the strong guarantee
-            of exception safety.</simpara></throws>
-          </copy-assignment>
-          
-          <method-group name="modifiers">
-            <method name="swap">
-              <type><classname>any</classname> &amp;</type>
-
-              <parameter name="rhs">
-                <paramtype><classname>any</classname> &amp;</paramtype>
-              </parameter>
-
-              <effects><simpara>Exchange of the contents of
-              <code>*this</code> and
-              <code>rhs</code>.</simpara></effects>
-
-              <returns><simpara><code>*this</code></simpara></returns>
-
-              <throws><simpara>Nothing.</simpara></throws>
-            </method>
-          </method-group>
-
-          <method-group name="queries">
-            <method name="empty" cv="const">
-              <type>bool</type>
-
-              <returns><simpara><code>true</code> if instance is
-              empty, otherwise <code>false</code>.</simpara></returns>
-              
-              <throws><simpara>Nothing.</simpara></throws>
-            </method>
-
-            <method name="type" cv="const">
-              <type>const <classname>std::type_info</classname> &amp;</type>
-              
-              <returns><simpara>the <code>typeid</code> of the
-              contained value if instance is non-empty, otherwise
-              <code>typeid(void)</code>.</simpara></returns>
-
-              <notes><simpara>Useful for querying against types known
-              either at compile time or only at
-              runtime.</simpara></notes>
-            </method>
-          </method-group>
-        </class>
-        
-        <function name="swap">
-          <type>void</type>
-          <parameter name="lhs">
-            <paramtype><classname>any</classname> &amp;</paramtype>
-          </parameter>
-          <parameter name="rhs">
-              <paramtype><classname>any</classname> &amp;</paramtype>
-          </parameter>
-
-            <effects><simpara>Exchange of the contents of
-            <code>lhs</code> and
-            <code>rhs</code>.</simpara></effects>
-
-            <throws><simpara>Nothing.</simpara></throws>
-        </function>
-
-        <overloaded-function name="any_cast">
-          <signature>
-            <template>
-              <template-type-parameter name="T"/>
-            </template>
-            
-            <type>T</type>
-            
-            <parameter name="operand">
-              <paramtype><classname>any</classname> &amp;</paramtype>
-            </parameter>
-          </signature>
-
-          <signature>
-            <template>
-              <template-type-parameter name="T"/>
-            </template>
-
-            <type>T</type>
-
-            <parameter name="operand">
-              <paramtype><classname>any</classname> &amp;&amp;</paramtype>
-            </parameter>
-          </signature>
-
-          <signature>
-            <template>
-              <template-type-parameter name="T"/>
-            </template>
-            
-            <type>T</type>
-            
-            <parameter name="operand">
-              <paramtype>const <classname>any</classname> &amp;</paramtype>
-            </parameter>
-          </signature>
-          
-          <signature>
-            <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-            
-            <type>const ValueType *</type>
-            
-            <parameter name="operand">
-              <paramtype>const <classname>any</classname> *</paramtype>
-            </parameter>
-          </signature>
-          
-          <signature>
-            <template>
-              <template-type-parameter name="ValueType"/>
-            </template>
-            
-            <type>ValueType *</type>
-            
-            <parameter name="operand">
-              <paramtype><classname>any</classname> *</paramtype>
-            </parameter>
-          </signature>
-          
-          <purpose><simpara>Custom keyword cast for extracting a value
-          of a given type from an
-          <code><classname>any</classname></code>.</simpara></purpose>
-
-          <returns><simpara> If passed a pointer, it returns a
-          similarly qualified pointer to the value content if
-	  successful, otherwise null is returned.
-	  If T is ValueType, it returns a copy of the held value, otherwise, if T is a reference 
-	  to (possibly const qualified) ValueType, it returns a reference to the held 
-	  value.</simpara></returns>
-
-          <throws><simpara>Overloads taking an
-          <code><classname>any</classname></code> pointer do not
-          throw; overloads taking an
-          <code><classname>any</classname></code> value or reference
-          throw <code><classname>bad_any_cast</classname></code> if
-          unsuccessful.</simpara></throws>
-
-        </overloaded-function>
-      </namespace>
-    </header>
-  </library-reference>
-
-  <section>
-    <title>Acknowledgements</title>
-
-    <para>Doug Gregor ported the documentation to the BoostBook format.</para>
-  </section>
-</library>
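The reference above distinguishes the pointer overloads of any_cast, which return null on failure, from the value and reference overloads, which throw bad_any_cast. A short sketch of both behaviours, assuming only the documented interface:

    #include <boost/any.hpp>
    #include <iostream>

    int main() {
        boost::any a = 5;  // holds an int

        // Pointer overload: never throws, returns null when the types differ.
        if (boost::any_cast<double>(&a) == 0) {
            std::cout << "not holding a double\n";
        }

        // Value/reference overload: throws bad_any_cast when the types differ.
        try {
            double d = boost::any_cast<double>(a);
            std::cout << d << '\n';
        } catch (const boost::bad_any_cast& e) {
            std::cout << e.what() << '\n';
        }
        return 0;
    }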
diff --git a/third_party/boostorg/any/include/boost/any.hpp b/third_party/boostorg/any/include/boost/any.hpp
deleted file mode 100644
index 9f6b313..0000000
--- a/third_party/boostorg/any/include/boost/any.hpp
+++ /dev/null
@@ -1,337 +0,0 @@
-// See http://www.boost.org/libs/any for Documentation.
-
-#ifndef BOOST_ANY_INCLUDED
-#define BOOST_ANY_INCLUDED
-
-#if defined(_MSC_VER)
-# pragma once
-#endif
-
-// what:  variant type boost::any
-// who:   contributed by Kevlin Henney,
-//        with features contributed and bugs found by
-//        Antony Polukhin, Ed Brey, Mark Rodgers, 
-//        Peter Dimov, and James Curran
-// when:  July 2001, April 2013 - May 2013
-
-#include <algorithm>
-
-#include <boost/config.hpp>
-#include <boost/type_index.hpp>
-#include <boost/type_traits/remove_reference.hpp>
-#include <boost/type_traits/decay.hpp>
-#include <boost/type_traits/remove_cv.hpp>
-#include <boost/type_traits/add_reference.hpp>
-#include <boost/type_traits/is_reference.hpp>
-#include <boost/type_traits/is_const.hpp>
-#include <boost/throw_exception.hpp>
-#include <boost/static_assert.hpp>
-#include <boost/utility/enable_if.hpp>
-#include <boost/core/addressof.hpp>
-#include <boost/type_traits/is_same.hpp>
-#include <boost/type_traits/is_const.hpp>
-#include <boost/mpl/if.hpp>
-
-namespace boost
-{
-    class any
-    {
-    public: // structors
-
-        any() BOOST_NOEXCEPT
-          : content(0)
-        {
-        }
-
-        template<typename ValueType>
-        any(const ValueType & value)
-          : content(new holder<
-                BOOST_DEDUCED_TYPENAME remove_cv<BOOST_DEDUCED_TYPENAME decay<const ValueType>::type>::type
-            >(value))
-        {
-        }
-
-        any(const any & other)
-          : content(other.content ? other.content->clone() : 0)
-        {
-        }
-
-#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
-        // Move constructor
-        any(any&& other) BOOST_NOEXCEPT
-          : content(other.content)
-        {
-            other.content = 0;
-        }
-
-        // Perfect forwarding of ValueType
-        template<typename ValueType>
-        any(ValueType&& value
-            , typename boost::disable_if<boost::is_same<any&, ValueType> >::type* = 0 // disable if value has type `any&`
-            , typename boost::disable_if<boost::is_const<ValueType> >::type* = 0) // disable if value has type `const ValueType&&`
-          : content(new holder< typename decay<ValueType>::type >(static_cast<ValueType&&>(value)))
-        {
-        }
-#endif
-
-        ~any() BOOST_NOEXCEPT
-        {
-            delete content;
-        }
-
-    public: // modifiers
-
-        any & swap(any & rhs) BOOST_NOEXCEPT
-        {
-            std::swap(content, rhs.content);
-            return *this;
-        }
-
-
-#ifdef BOOST_NO_CXX11_RVALUE_REFERENCES
-        template<typename ValueType>
-        any & operator=(const ValueType & rhs)
-        {
-            any(rhs).swap(*this);
-            return *this;
-        }
-
-        any & operator=(any rhs)
-        {
-            any(rhs).swap(*this);
-            return *this;
-        }
-
-#else 
-        any & operator=(const any& rhs)
-        {
-            any(rhs).swap(*this);
-            return *this;
-        }
-
-        // move assignment
-        any & operator=(any&& rhs) BOOST_NOEXCEPT
-        {
-            rhs.swap(*this);
-            any().swap(rhs);
-            return *this;
-        }
-
-        // Perfect forwarding of ValueType
-        template <class ValueType>
-        any & operator=(ValueType&& rhs)
-        {
-            any(static_cast<ValueType&&>(rhs)).swap(*this);
-            return *this;
-        }
-#endif
-
-    public: // queries
-
-        bool empty() const BOOST_NOEXCEPT
-        {
-            return !content;
-        }
-
-        void clear() BOOST_NOEXCEPT
-        {
-            any().swap(*this);
-        }
-
-        const boost::typeindex::type_info& type() const BOOST_NOEXCEPT
-        {
-            return content ? content->type() : boost::typeindex::type_id<void>().type_info();
-        }
-
-#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
-    private: // types
-#else
-    public: // types (public so any_cast can be non-friend)
-#endif
-
-        class placeholder
-        {
-        public: // structors
-
-            virtual ~placeholder()
-            {
-            }
-
-        public: // queries
-
-            virtual const boost::typeindex::type_info& type() const BOOST_NOEXCEPT = 0;
-
-            virtual placeholder * clone() const = 0;
-
-        };
-
-        template<typename ValueType>
-        class holder : public placeholder
-        {
-        public: // structors
-
-            holder(const ValueType & value)
-              : held(value)
-            {
-            }
-
-#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
-            holder(ValueType&& value)
-              : held(static_cast< ValueType&& >(value))
-            {
-            }
-#endif
-        public: // queries
-
-            virtual const boost::typeindex::type_info& type() const BOOST_NOEXCEPT
-            {
-                return boost::typeindex::type_id<ValueType>().type_info();
-            }
-
-            virtual placeholder * clone() const
-            {
-                return new holder(held);
-            }
-
-        public: // representation
-
-            ValueType held;
-
-        private: // intentionally left unimplemented
-            holder & operator=(const holder &);
-        };
-
-#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
-
-    private: // representation
-
-        template<typename ValueType>
-        friend ValueType * any_cast(any *) BOOST_NOEXCEPT;
-
-        template<typename ValueType>
-        friend ValueType * unsafe_any_cast(any *) BOOST_NOEXCEPT;
-
-#else
-
-    public: // representation (public so any_cast can be non-friend)
-
-#endif
-
-        placeholder * content;
-
-    };
- 
-    inline void swap(any & lhs, any & rhs) BOOST_NOEXCEPT
-    {
-        lhs.swap(rhs);
-    }
-
-    class BOOST_SYMBOL_VISIBLE bad_any_cast :
-#ifndef BOOST_NO_RTTI
-        public std::bad_cast
-#else
-        public std::exception
-#endif
-    {
-    public:
-        virtual const char * what() const BOOST_NOEXCEPT_OR_NOTHROW
-        {
-            return "boost::bad_any_cast: "
-                   "failed conversion using boost::any_cast";
-        }
-    };
-
-    template<typename ValueType>
-    ValueType * any_cast(any * operand) BOOST_NOEXCEPT
-    {
-        return operand && operand->type() == boost::typeindex::type_id<ValueType>()
-            ? boost::addressof(
-                static_cast<any::holder<BOOST_DEDUCED_TYPENAME remove_cv<ValueType>::type> *>(operand->content)->held
-              )
-            : 0;
-    }
-
-    template<typename ValueType>
-    inline const ValueType * any_cast(const any * operand) BOOST_NOEXCEPT
-    {
-        return any_cast<ValueType>(const_cast<any *>(operand));
-    }
-
-    template<typename ValueType>
-    ValueType any_cast(any & operand)
-    {
-        typedef BOOST_DEDUCED_TYPENAME remove_reference<ValueType>::type nonref;
-
-
-        nonref * result = any_cast<nonref>(boost::addressof(operand));
-        if(!result)
-            boost::throw_exception(bad_any_cast());
-
-        // Attempt to avoid construction of a temporary object in cases when 
-        // `ValueType` is not a reference. Example:
-        // `static_cast<std::string>(*result);` 
-        // which is equal to `std::string(*result);`
-        typedef BOOST_DEDUCED_TYPENAME boost::mpl::if_<
-            boost::is_reference<ValueType>,
-            ValueType,
-            BOOST_DEDUCED_TYPENAME boost::add_reference<ValueType>::type
-        >::type ref_type;
-
-#ifdef BOOST_MSVC
-#   pragma warning(push)
-#   pragma warning(disable: 4172) // "returning address of local variable or temporary" but *result is not local!
-#endif
-        return static_cast<ref_type>(*result);
-#ifdef BOOST_MSVC
-#   pragma warning(pop)
-#endif
-    }
-
-    template<typename ValueType>
-    inline ValueType any_cast(const any & operand)
-    {
-        typedef BOOST_DEDUCED_TYPENAME remove_reference<ValueType>::type nonref;
-        return any_cast<const nonref &>(const_cast<any &>(operand));
-    }
-
-#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
-    template<typename ValueType>
-    inline ValueType any_cast(any&& operand)
-    {
-        BOOST_STATIC_ASSERT_MSG(
-            boost::is_rvalue_reference<ValueType&&>::value /*true if ValueType is rvalue or just a value*/
-            || boost::is_const< typename boost::remove_reference<ValueType>::type >::value,
-            "boost::any_cast shall not be used for getting nonconst references to temporary objects" 
-        );
-        return any_cast<ValueType>(operand);
-    }
-#endif
-
-
-    // Note: The "unsafe" versions of any_cast are not part of the
-    // public interface and may be removed at any time. They are
-    // required where we know what type is stored in the any and can't
-    // use typeid() comparison, e.g., when our types may travel across
-    // different shared libraries.
-    template<typename ValueType>
-    inline ValueType * unsafe_any_cast(any * operand) BOOST_NOEXCEPT
-    {
-        return boost::addressof(
-            static_cast<any::holder<ValueType> *>(operand->content)->held
-        );
-    }
-
-    template<typename ValueType>
-    inline const ValueType * unsafe_any_cast(const any * operand) BOOST_NOEXCEPT
-    {
-        return unsafe_any_cast<ValueType>(const_cast<any *>(operand));
-    }
-}
-
-// Copyright Kevlin Henney, 2000, 2001, 2002. All rights reserved.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#endif
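The header above erases the stored type behind a non-template placeholder base and a holder<ValueType> derived class with virtual clone() and type(). A stripped-down sketch of the same idiom, using hypothetical names (erased_value, concept_t, model) that are not part of Boost:

    #include <memory>
    #include <typeinfo>

    class erased_value {
        struct concept_t {                       // non-template base, like any::placeholder
            virtual ~concept_t() {}
            virtual const std::type_info& type() const = 0;
            virtual concept_t* clone() const = 0;
        };
        template <typename T>
        struct model : concept_t {               // templated holder, like any::holder<T>
            explicit model(const T& v) : held(v) {}
            const std::type_info& type() const { return typeid(T); }
            concept_t* clone() const { return new model(held); }
            T held;
        };
        std::unique_ptr<concept_t> content;

    public:
        erased_value() {}
        template <typename T>
        erased_value(const T& v) : content(new model<T>(v)) {}
        erased_value(const erased_value& other)
            : content(other.content ? other.content->clone() : 0) {}

        bool empty() const { return !content; }
        const std::type_info& type() const {
            return content ? content->type() : typeid(void);
        }
        template <typename T>
        T* cast() {                              // null on mismatch, like the pointer any_cast
            return content && content->type() == typeid(T)
                       ? &static_cast<model<T>*>(content.get())->held
                       : 0;
        }
    };

Construction from any copyable T stores a model<T>; cast<T>() recovers a pointer only when the typeid matches, mirroring the pointer form of any_cast.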
diff --git a/third_party/boostorg/any/index.html b/third_party/boostorg/any/index.html
deleted file mode 100644
index 18d07f1..0000000
--- a/third_party/boostorg/any/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<html>
-<head>
-<meta http-equiv="refresh" content="0; URL=../../doc/html/any.html">
-</head>
-<body>
-Automatic redirection failed, please go to
-<a href="../../doc/html/any.html">../../doc/html/any.html</a>
-<hr>
-<p>© Copyright Beman Dawes, 2001</p>
-<p> Distributed under the Boost Software 
-License, Version 1.0. (See accompanying file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy at <a href="http://www.boost.org/LICENSE_1_0.txt">
-www.boost.org/LICENSE_1_0.txt</a>)</p>
-</body>
-</html>
\ No newline at end of file
diff --git a/third_party/boostorg/any/meta/libraries.json b/third_party/boostorg/any/meta/libraries.json
deleted file mode 100644
index 3e6dd90..0000000
--- a/third_party/boostorg/any/meta/libraries.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "key": "any",
-    "name": "Any",
-    "authors": [
-        "Kevlin Henney"
-    ],
-    "description": "Safe, generic container for single values of different value types.",
-    "category": [
-        "Data"
-    ],
-    "maintainers": [
-        "Antony Polukhin <antoshkka -at- gmail.com>"
-    ]
-}
diff --git a/third_party/boostorg/any/test/Jamfile.v2 b/third_party/boostorg/any/test/Jamfile.v2
deleted file mode 100644
index ce2ba55..0000000
--- a/third_party/boostorg/any/test/Jamfile.v2
+++ /dev/null
@@ -1,20 +0,0 @@
-#  Copyright Vladimur Prus 2005. Use, modification and
-#  distribution is subject to the Boost Software License, Version
-#  1.0. (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-#
-# For more information, see http://www.boost.org/libs/any
-#
-
-test-suite any :
-    [ run any_test.cpp ]
-    [ run any_test.cpp : : : <rtti>off <define>BOOST_NO_RTTI <define>BOOST_NO_TYPEID : any_test_no_rtti  ]
-    [ run any_test_rv.cpp ]
-    [ run any_test_rv.cpp : : : <rtti>off <define>BOOST_NO_RTTI <define>BOOST_NO_TYPEID : any_test_rv_no_rtti  ]
-    [ run any_test_mplif.cpp ]
-    [ compile-fail any_cast_cv_failed.cpp ]
-    [ compile-fail any_test_temporary_to_ref_failed.cpp ]
-    [ compile-fail any_test_cv_to_rv_failed.cpp ]
-    ;
-
-
diff --git a/third_party/boostorg/any/test/any_cast_cv_failed.cpp b/third_party/boostorg/any/test/any_cast_cv_failed.cpp
deleted file mode 100644
index 860240f..0000000
--- a/third_party/boostorg/any/test/any_cast_cv_failed.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2006 Alexander Nasonov.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/any.hpp>
-
-int main()
-{
-    boost::any const a;
-    boost::any_cast<int&>(a);
-}
-
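The compile-fail test above confirms that a non-const reference cannot be extracted from a const any. For contrast, a small sketch of the form that is expected to compile, extracting a const reference instead:

    #include <boost/any.hpp>

    int main()
    {
        boost::any const a(10);
        // A const reference (or a plain value) can be extracted from a const any.
        const int& i = boost::any_cast<const int&>(a);
        return i == 10 ? 0 : 1;
    }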
diff --git a/third_party/boostorg/any/test/any_test.cpp b/third_party/boostorg/any/test/any_test.cpp
deleted file mode 100644
index 4c65b0e..0000000
--- a/third_party/boostorg/any/test/any_test.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-// what:  unit tests for variant type boost::any
-// who:   contributed by Kevlin Henney
-// when:  July 2001, 2013, 2014
-// where: tested with BCC 5.5, MSVC 6.0, and g++ 2.95
-
-#include <cstdlib>
-#include <string>
-#include <vector>
-#include <utility>
-
-#include <boost/any.hpp>
-#include "test.hpp"
-
-namespace any_tests
-{
-    typedef test<const char *, void (*)()> test_case;
-    typedef const test_case * test_case_iterator;
-
-    extern const test_case_iterator begin, end;
-}
-
-int main()
-{
-    using namespace any_tests;
-    tester<test_case_iterator> test_suite(begin, end);
-    return test_suite() ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-namespace any_tests // test suite
-{
-    void test_default_ctor();
-    void test_converting_ctor();
-    void test_copy_ctor();
-    void test_copy_assign();
-    void test_converting_assign();
-    void test_bad_cast();
-    void test_swap();
-    void test_null_copying();
-    void test_cast_to_reference();
-    void test_with_array();
-    void test_with_func();
-    void test_clear();
-    void test_vectors();
-    void test_addressof();
-
-    const test_case test_cases[] =
-    {
-        { "default construction",           test_default_ctor      },
-        { "single argument construction",   test_converting_ctor   },
-        { "copy construction",              test_copy_ctor         },
-        { "copy assignment operator",       test_copy_assign       },
-        { "converting assignment operator", test_converting_assign },
-        { "failed custom keyword cast",     test_bad_cast          },
-        { "swap member function",           test_swap              },
-        { "copying operations on a null",   test_null_copying      },
-        { "cast to reference types",        test_cast_to_reference },
-        { "storing an array inside",        test_with_array        },
-        { "implicit cast of returned value",test_with_func         },
-        { "clear() methods",                test_clear             },
-        { "testing with vectors",           test_vectors           },
-        { "class with operator&()",         test_addressof         }
-    };
-
-    const test_case_iterator begin = test_cases;
-    const test_case_iterator end =
-        test_cases + (sizeof test_cases / sizeof *test_cases);
-
-    
-
-    struct copy_counter
-    {
-
-    public:
-
-        copy_counter() {}
-        copy_counter(const copy_counter&) { ++count; }
-        copy_counter& operator=(const copy_counter&) { ++count; return *this; }
-        static int get_count() { return count; }
-
-    private:
-
-        static int count;
-
-    };
-
-    int copy_counter::count = 0;
-}
-
-namespace any_tests // test definitions
-{
-    using namespace boost;
-
-    void test_default_ctor()
-    {
-        const any value;
-
-        check_true(value.empty(), "empty");
-        check_null(any_cast<int>(&value), "any_cast<int>");
-        check_equal(value.type(), boost::typeindex::type_id<void>(), "type");
-    }
-
-    void test_converting_ctor()
-    {
-        std::string text = "test message";
-        any value = text;
-
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), boost::typeindex::type_id<std::string>(), "type");
-        check_null(any_cast<int>(&value), "any_cast<int>");
-        check_non_null(any_cast<std::string>(&value), "any_cast<std::string>");
-        check_equal(
-            any_cast<std::string>(value), text,
-            "comparing cast copy against original text");
-        check_unequal(
-            any_cast<std::string>(&value), &text,
-            "comparing address in copy against original text");
-    }
-
-    void test_copy_ctor()
-    {
-        std::string text = "test message";
-        any original = text, copy = original;
-
-        check_false(copy.empty(), "empty");
-        check_equal(boost::typeindex::type_index(original.type()), copy.type(), "type");
-        check_equal(
-            any_cast<std::string>(original), any_cast<std::string>(copy),
-            "comparing cast copy against original");
-        check_equal(
-            text, any_cast<std::string>(copy),
-            "comparing cast copy against original text");
-        check_unequal(
-            any_cast<std::string>(&original),
-            any_cast<std::string>(&copy),
-            "comparing address in copy against original");
-    }
-
-    void test_copy_assign()
-    {
-        std::string text = "test message";
-        any original = text, copy;
-        any * assign_result = &(copy = original);
-
-        check_false(copy.empty(), "empty");
-        check_equal(boost::typeindex::type_index(original.type()), copy.type(), "type");
-        check_equal(
-            any_cast<std::string>(original), any_cast<std::string>(copy),
-            "comparing cast copy against cast original");
-        check_equal(
-            text, any_cast<std::string>(copy),
-            "comparing cast copy against original text");
-        check_unequal(
-            any_cast<std::string>(&original),
-            any_cast<std::string>(&copy),
-            "comparing address in copy against original");
-        check_equal(assign_result, &copy, "address of assignment result");
-    }
-
-    void test_converting_assign()
-    {
-        std::string text = "test message";
-        any value;
-        any * assign_result = &(value = text);
-
-        check_false(value.empty(), "type");
-        check_equal(value.type(), boost::typeindex::type_id<std::string>(), "type");
-        check_null(any_cast<int>(&value), "any_cast<int>");
-        check_non_null(any_cast<std::string>(&value), "any_cast<std::string>");
-        check_equal(
-            any_cast<std::string>(value), text,
-            "comparing cast copy against original text");
-        check_unequal(
-            any_cast<std::string>(&value),
-            &text,
-            "comparing address in copy against original text");
-        check_equal(assign_result, &value, "address of assignment result");
-    }
-
-    void test_bad_cast()
-    {
-        std::string text = "test message";
-        any value = text;
-
-        TEST_CHECK_THROW(
-            any_cast<const char *>(value),
-            bad_any_cast,
-            "any_cast to incorrect type");
-    }
-
-    void test_swap()
-    {
-        std::string text = "test message";
-        any original = text, swapped;
-        std::string * original_ptr = any_cast<std::string>(&original);
-        any * swap_result = &original.swap(swapped);
-
-        check_true(original.empty(), "empty on original");
-        check_false(swapped.empty(), "empty on swapped");
-        check_equal(swapped.type(), boost::typeindex::type_id<std::string>(), "type");
-        check_equal(
-            text, any_cast<std::string>(swapped),
-            "comparing swapped copy against original text");
-        check_non_null(original_ptr, "address in pre-swapped original");
-        check_equal(
-            original_ptr,
-            any_cast<std::string>(&swapped),
-            "comparing address in swapped against original");
-        check_equal(swap_result, &original, "address of swap result");
-
-        any copy1 = copy_counter();
-        any copy2 = copy_counter();
-        int count = copy_counter::get_count();
-        swap(copy1, copy2);
-        check_equal(count, copy_counter::get_count(), "checking that free swap doesn't make any copies.");
-    }
-
-    void test_null_copying()
-    {
-        const any null;
-        any copied = null, assigned;
-        assigned = null;
-
-        check_true(null.empty(), "empty on null");
-        check_true(copied.empty(), "empty on copied");
-        check_true(assigned.empty(), "empty on assigned");
-    }
-
-    void test_cast_to_reference()
-    {
-        any a(137);
-        const any b(a);
-
-        int &                ra    = any_cast<int &>(a);
-        int const &          ra_c  = any_cast<int const &>(a);
-        int volatile &       ra_v  = any_cast<int volatile &>(a);
-        int const volatile & ra_cv = any_cast<int const volatile&>(a);
-
-        check_true(
-            &ra == &ra_c && &ra == &ra_v && &ra == &ra_cv,
-            "cv references to same obj");
-
-        int const &          rb_c  = any_cast<int const &>(b);
-        int const volatile & rb_cv = any_cast<int const volatile &>(b);
-
-        check_true(&rb_c == &rb_cv, "cv references to copied const obj");
-        check_true(&ra != &rb_c, "copies hold different objects");
-
-        ++ra;
-        int incremented = any_cast<int>(a);
-        check_true(incremented == 138, "increment by reference changes value");
-
-        TEST_CHECK_THROW(
-            any_cast<char &>(a),
-            bad_any_cast,
-            "any_cast to incorrect reference type");
-
-        TEST_CHECK_THROW(
-            any_cast<const char &>(b),
-            bad_any_cast,
-            "any_cast to incorrect const reference type");
-    }
-
-    void test_with_array()
-    {
-        any value1("Char array");
-        any value2;
-        value2 = "Char array";
-
-        check_false(value1.empty(), "type");
-        check_false(value2.empty(), "type");
-
-        check_equal(value1.type(), boost::typeindex::type_id<const char*>(), "type");
-        check_equal(value2.type(), boost::typeindex::type_id<const char*>(), "type");
-        
-        check_non_null(any_cast<const char*>(&value1), "any_cast<const char*>");
-        check_non_null(any_cast<const char*>(&value2), "any_cast<const char*>");
-    }
-
-    const std::string& returning_string1() 
-    {
-        static const std::string ret("foo"); 
-        return ret;
-    }
-
-    std::string returning_string2() 
-    {
-        static const std::string ret("foo"); 
-        return ret;
-    }
-
-    void test_with_func()
-    {
-        std::string s;
-        s = any_cast<std::string>(returning_string1());
-        s = any_cast<const std::string&>(returning_string1());
-
-        s = any_cast<std::string>(returning_string2());
-        s = any_cast<const std::string&>(returning_string2());
-
-#if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES) 
-#if !defined(__INTEL_COMPILER) && !defined(__ICL) && (!defined(_MSC_VER) || _MSC_VER != 1600)
-        // Intel compiler thinks that it must choose the `any_cast(const any&)` function 
-        // instead of the `any_cast(const any&&)`.
-        // Bug was not reported because of a missing premier support account + annoying
-        // registration requirements.
-
-        // MSVC-10 had a bug:
-        //
-        // any.hpp(291) : error C2440: 'return' : cannot convert.
-        // Conversion loses qualifiers
-        // any_test.cpp(304) : see reference to function template instantiation
-        //
-        // This issue was fixed in MSVC-11.
-
-        s = any_cast<std::string&&>(returning_string1());
-#endif
-
-        s = any_cast<std::string&&>(returning_string2());
-#endif
-    }
-
-    
-    void test_clear()
-    {
-        std::string text = "test message";
-        any value = text;
-
-        check_false(value.empty(), "empty");
-        
-        value.clear();
-        check_true(value.empty(), "non-empty after clear");
-
-        value.clear();
-        check_true(value.empty(), "non-empty after second clear");
-
-        value = text;
-        check_false(value.empty(), "empty");
-        
-        value.clear();
-        check_true(value.empty(), "non-empty after clear");
-    }
-
-    // Following tests cover the case from #9462
-    // https://svn.boost.org/trac/boost/ticket/9462
-    boost::any makeVec() 
-    {
-        return std::vector<int>(100 /*size*/, 7 /*value*/);
-    }
-
-    void test_vectors() 
-    {
-        const std::vector<int>& vec = boost::any_cast<std::vector<int> >(makeVec()); 
-        check_equal(vec.size(), 100u, "size of vector extracted from boost::any"); 
-        check_equal(vec.back(), 7, "back value of vector extracted from boost::any");
-        check_equal(vec.front(), 7, "front value of vector extracted from boost::any");
-
-        std::vector<int> vec1 = boost::any_cast<std::vector<int> >(makeVec()); 
-        check_equal(vec1.size(), 100u, "size of second vector extracted from boost::any"); 
-        check_equal(vec1.back(), 7, "back value of second vector extracted from boost::any");
-        check_equal(vec1.front(), 7, "front value of second vector extracted from boost::any");
-
-    }
-
-    template<typename T>
-    class class_with_address_op {
-    public:
-        class_with_address_op(const T* p)
-            : ptr(p)
-        {}
-
-	    const T** operator &() {
-            return &ptr;
-        }
-
-        const T* get() const {
-            return ptr;
-        }
-
-    private:
-        const T* ptr;
-    };
-
-    void test_addressof()
-    {
-        int val = 10;
-        const int* ptr = &val;
-        class_with_address_op<int> obj(ptr);
-        boost::any test_val(obj);
-
-        class_with_address_op<int> returned_obj = boost::any_cast<class_with_address_op<int> >(test_val);
-        check_equal(&val, returned_obj.get(), "any_cast incorrectly works with type that has operator&(): addresses differ");
-
-        check_true(!!boost::any_cast<class_with_address_op<int> >(&test_val), "any_cast incorrectly works with type that has operator&()");
-        check_equal(boost::unsafe_any_cast<class_with_address_op<int> >(&test_val)->get(), ptr, "unsafe_any_cast incorrectly works with type that has operator&()");
-    }
-
-}
-
-// Copyright Kevlin Henney, 2000, 2001. All rights reserved.
-// Copyright Antony Polukhin, 2013-2017.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-//
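The deleted any_test.cpp above exercises boost::any's reference-preserving casts: any_cast<T&> aliases the stored object (so increments through the reference are visible afterwards), a mismatched reference cast throws bad_any_cast, and on C++11 compilers any_cast<T&&> can move the held value out. A minimal standalone sketch of those idioms, assuming only <boost/any.hpp> and a C++11 toolchain (not part of the removed sources):

    #include <cassert>
    #include <string>
    #include <utility>
    #include <boost/any.hpp>

    int main() {
        boost::any a = 137;
        ++boost::any_cast<int&>(a);                  // aliases the stored int
        assert(boost::any_cast<int>(a) == 138);

        boost::any s = std::string("foo");
        // Casting to an rvalue reference lets the caller move the held string out.
        std::string moved = boost::any_cast<std::string&&>(std::move(s));
        assert(moved == "foo");
        return 0;
    }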
diff --git a/third_party/boostorg/any/test/any_test_cv_to_rv_failed.cpp b/third_party/boostorg/any/test/any_test_cv_to_rv_failed.cpp
deleted file mode 100644
index a4a34ed..0000000
--- a/third_party/boostorg/any/test/any_test_cv_to_rv_failed.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//  Unit test for boost::any.
-//
-//  See http://www.boost.org for most recent version, including documentation.
-//
-//  Copyright Antony Polukhin, 2013-2014.
-//
-//  Distributed under the Boost
-//  Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-
-#include <cstdlib>
-#include <string>
-#include <utility>
-
-#include <boost/any.hpp>
-#include "test.hpp"
-#include <boost/move/move.hpp>
-
-#ifdef BOOST_NO_CXX11_RVALUE_REFERENCES
-
-int main() 
-{
-    BOOST_STATIC_ASSERT(false);
-    return EXIT_SUCCESS;
-}
-
-#else 
-
-
-int main()
-{
-    boost::any const cvalue(10);
-    int i = boost::any_cast<int&&>(cvalue);
-    (void)i;
-    return EXIT_SUCCESS;
-}
-
-#endif
-
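any_test_cv_to_rv_failed.cpp above is a deliberate compile-failure test: any_cast<int&&> applied to a const boost::any must be rejected at compile time, since it would allow moving from (and thereby mutating) a const object. The well-formed alternatives are casting by value or to a const reference; a minimal sketch, assuming <boost/any.hpp> (not part of the removed sources):

    #include <cassert>
    #include <boost/any.hpp>

    int main() {
        const boost::any cvalue(10);
        int by_value = boost::any_cast<int>(cvalue);            // copies the held int
        const int & by_cref = boost::any_cast<const int &>(cvalue);
        assert(by_value == 10 && by_cref == 10);
        // boost::any_cast<int&&>(cvalue);  // intentionally ill-formed, as the test checks
        return 0;
    }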
diff --git a/third_party/boostorg/any/test/any_test_mplif.cpp b/third_party/boostorg/any/test/any_test_mplif.cpp
deleted file mode 100644
index b0da7c1..0000000
--- a/third_party/boostorg/any/test/any_test_mplif.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright Antony Polukhin, 2017.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// This tests the issue from https://svn.boost.org/trac/boost/ticket/12052
-
-#include <iostream>
-#include <boost/any.hpp>
-
-int main() {
-    boost::any a = 1;
-    std::cout << boost::any_cast<int>(a) << '\n';
-    a = 3.14;
-    std::cout << boost::any_cast<double>(a) << '\n';
-    a = true;
-    std::cout << std::boolalpha << boost::any_cast<bool>(a) << '\n';
-}
diff --git a/third_party/boostorg/any/test/any_test_rv.cpp b/third_party/boostorg/any/test/any_test_rv.cpp
deleted file mode 100644
index 0571c70..0000000
--- a/third_party/boostorg/any/test/any_test_rv.cpp
+++ /dev/null
@@ -1,331 +0,0 @@
-//  Unit test for boost::any.
-//
-//  See http://www.boost.org for most recent version, including documentation.
-//
-//  Copyright Antony Polukhin, 2013-2014.
-//
-//  Distributed under the Boost
-//  Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-
-#include <cstdlib>
-#include <string>
-#include <utility>
-
-#include <boost/any.hpp>
-#include "test.hpp"
-#include <boost/move/move.hpp>
-
-#ifdef BOOST_NO_CXX11_RVALUE_REFERENCES
-
-int main() 
-{
-    return EXIT_SUCCESS;
-}
-
-#else 
-
-namespace any_tests
-{
-    typedef test<const char *, void (*)()> test_case;
-    typedef const test_case * test_case_iterator;
-
-    extern const test_case_iterator begin, end;
-}
-
-int main()
-{
-    using namespace any_tests;
-    tester<test_case_iterator> test_suite(begin, end);
-    return test_suite() ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-namespace any_tests // test suite
-{
-    void test_move_construction();
-    void test_move_assignment();
-    void test_copy_construction();
-    void test_copy_assignment();
-    
-    void test_move_construction_from_value();
-    void test_move_assignment_from_value();
-    void test_copy_construction_from_value();
-    void test_copy_assignment_from_value();
-    void test_construction_from_const_any_rv();
-    void test_cast_to_rv();
-    
-
-    const test_case test_cases[] =
-    {
-        { "move construction of any",             test_move_construction      },
-        { "move assignment of any",               test_move_assignment        },
-        { "copy construction of any",             test_copy_construction      },
-        { "copy assignment of any",               test_copy_assignment        },
-
-        { "move construction from value",         test_move_construction_from_value },
-        { "move assignment from value",           test_move_assignment_from_value  },
-        { "copy construction from value",         test_copy_construction_from_value },
-        { "copy assignment from value",           test_copy_assignment_from_value },
-        { "constructing from const any&&",        test_construction_from_const_any_rv },
-        { "casting to rvalue reference",          test_cast_to_rv }
-    };
-
-    const test_case_iterator begin = test_cases;
-    const test_case_iterator end =
-        test_cases + (sizeof test_cases / sizeof *test_cases);
-
-    
-    class move_copy_conting_class {
-    public:
-        static unsigned int moves_count;
-        static unsigned int copy_count;
-
-        move_copy_conting_class(){}
-        move_copy_conting_class(move_copy_conting_class&& /*param*/) {
-            ++ moves_count;
-        }
-
-        move_copy_conting_class& operator=(move_copy_conting_class&& /*param*/) {
-            ++ moves_count;
-            return *this;
-        }
-
-        move_copy_conting_class(const move_copy_conting_class&) {
-            ++ copy_count;
-        }
-        move_copy_conting_class& operator=(const move_copy_conting_class& /*param*/) {
-            ++ copy_count;
-            return *this;
-        }
-    };
-
-    unsigned int move_copy_conting_class::moves_count = 0;
-    unsigned int move_copy_conting_class::copy_count = 0;
-}
-
-namespace any_tests // test definitions
-{
-    using namespace boost;
-
-    void test_move_construction()
-    {
-        any value0 = move_copy_conting_class();
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        any value(boost::move(value0)); 
-
-        check(value0.empty(), "moved away value is empty");
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-        check_equal(
-            move_copy_conting_class::copy_count, 0u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-    }
-
-    void test_move_assignment()
-    {
-        any value0 = move_copy_conting_class();
-        any value = move_copy_conting_class();
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        value = boost::move(value0); 
-
-        check(value0.empty(), "moved away is empty");
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-        check_equal(
-            move_copy_conting_class::copy_count, 0u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-    }
-
-    void test_copy_construction()
-    {
-        any value0 = move_copy_conting_class();
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        any value(value0); 
-
-        check_false(value0.empty(), "copyed value is not empty");
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-        check_equal(
-            move_copy_conting_class::copy_count, 1u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-    }
-
-    void test_copy_assignment()
-    {
-        any value0 = move_copy_conting_class();
-        any value = move_copy_conting_class();
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        value = value0; 
-
-        check_false(value0.empty(), "copyied value is not empty");
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-        check_equal(
-            move_copy_conting_class::copy_count, 1u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-    }
-
-     void test_move_construction_from_value()
-    {
-        move_copy_conting_class value0;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-#ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
-        any value(boost::move(value0)); 
-#else
-        any value(value0); 
-#endif
-
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-        
-#ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
-        check_equal(
-            move_copy_conting_class::copy_count, 0u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 1u, 
-            "checking move counts");
-#endif
-
-     }
-
-    void test_move_assignment_from_value()
-    {
-        move_copy_conting_class value0;
-        any value;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-#ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
-        value = boost::move(value0); 
-#else
-        value = value0;
-#endif 
-
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-
-#ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
-        check_equal(
-            move_copy_conting_class::copy_count, 0u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 1u, 
-            "checking move counts");
-#endif
-
-    }
-
-    void test_copy_construction_from_value()
-    {
-        move_copy_conting_class value0;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        any value(value0); 
-
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-
-        check_equal(
-            move_copy_conting_class::copy_count, 1u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-     }
-
-    void test_copy_assignment_from_value()
-    {
-        move_copy_conting_class value0;
-        any value;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-        value = value0;
-
-        check_false(value.empty(), "empty");
-        check_equal(value.type(), typeindex::type_id<move_copy_conting_class>(), "type");
-        check_non_null(any_cast<move_copy_conting_class>(&value), "any_cast<move_copy_conting_class>");
-
-        check_equal(
-            move_copy_conting_class::copy_count, 1u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-    }
-
-    const any helper_method() {
-        return true;
-    }
-
-    const bool helper_method1() {
-        return true;
-    }
-
-    void test_construction_from_const_any_rv()
-    {
-        any values[] = {helper_method(), helper_method1() };
-        (void)values;
-    }
-
-    void test_cast_to_rv()
-    {
-        move_copy_conting_class value0;
-        any value;
-        value = value0;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-
-        move_copy_conting_class value1 = any_cast<move_copy_conting_class&&>(value);
-
-        check_equal(
-            move_copy_conting_class::copy_count, 0u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 1u, 
-            "checking move counts");
-        (void)value1;
-/* Following code shall fail to compile
-        const any cvalue = value0;
-        move_copy_conting_class::copy_count = 0; 
-        move_copy_conting_class::moves_count = 0;
-
-        move_copy_conting_class value2 = any_cast<move_copy_conting_class&&>(cvalue);
-
-        check_equal(
-            move_copy_conting_class::copy_count, 1u, 
-            "checking copy counts");
-        check_equal(
-            move_copy_conting_class::moves_count, 0u, 
-            "checking move counts");
-        (void)value2;
-*/
-    }
-    
-}
-
-#endif
-
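The any_test_rv.cpp suite above pins down boost::any's move semantics: moving an any transfers ownership of the held object and leaves the source empty, without copying or moving the contained value, while constructing an any directly from an rvalue of the contained type costs exactly one move. A minimal sketch of that behaviour, assuming C++11 and <boost/any.hpp> (not part of the removed sources):

    #include <cassert>
    #include <utility>
    #include <vector>
    #include <boost/any.hpp>

    int main() {
        boost::any source = std::vector<int>(1000, 7);   // one move of the vector into the any
        boost::any target(std::move(source));            // transfers the held object; the vector
                                                         // itself is neither copied nor moved
        assert(source.empty());                          // moved-from any is empty
        assert(boost::any_cast<std::vector<int> >(&target)->size() == 1000u);
        return 0;
    }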
diff --git a/third_party/boostorg/any/test/any_test_temporary_to_ref_failed.cpp b/third_party/boostorg/any/test/any_test_temporary_to_ref_failed.cpp
deleted file mode 100644
index daa42ca..0000000
--- a/third_party/boostorg/any/test/any_test_temporary_to_ref_failed.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//  Unit test for boost::any.
-//
-//  See http://www.boost.org for most recent version, including documentation.
-//
-//  Copyright Antony Polukhin, 2013-2014.
-//
-//  Distributed under the Boost
-//  Software License, Version 1.0. (See accompanying file
-//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
-
-#include <cstdlib>
-#include <string>
-#include <utility>
-
-#include <boost/any.hpp>
-#include "test.hpp"
-#include <boost/move/move.hpp>
-
-#ifdef BOOST_NO_CXX11_RVALUE_REFERENCES
-
-int main() 
-{
-    BOOST_STATIC_ASSERT(false);
-    return EXIT_SUCCESS;
-}
-
-#else 
-
-
-int main()
-{
-    int i = boost::any_cast<int&>(10);
-    (void)i;
-    return EXIT_SUCCESS;
-}
-
-#endif
-
diff --git a/third_party/boostorg/any/test/appveyor.yml b/third_party/boostorg/any/test/appveyor.yml
deleted file mode 100644
index 7da83ce..0000000
--- a/third_party/boostorg/any/test/appveyor.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Use, modification, and distribution are
-# subject to the Boost Software License, Version 1.0. (See accompanying
-# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-#
-# Copyright Antony Polukhin 2016-2017.
-
-#
-# See https://svn.boost.org/trac/boost/wiki/TravisCoverals for description of this file
-# and how it can be used with Boost libraries.
-#
-# File revision #5
-
-init:
-    - set BRANCH_TO_TEST=%APPVEYOR_REPO_BRANCH%  # Change to branch you wish to test. Use %APPVEYOR_REPO_BRANCH% for current branch.
-    - set BOOST_REMOVE=any                       # Remove this folder from libs in the full Boost clone. If you are testing the `any` repo, write `any` here.
-
-###############################################################################################################
-# From this point and below code is same for all the Boost libs
-###############################################################################################################
-
-version: 1.64.{build}-{branch}
- 
-# branches to build
-branches:
-  except:
-    - gh-pages
-
-skip_tags: true
-
-before_build:
-    - set PATH=%PATH%;C:\\MinGW\\bin
-    - echo "Testing %APPVEYOR_PROJECT_NAME%"
-    # Cloning Boost libraries (fast nondeep cloning)
-    - set BOOST=C:/boost-local
-    - git init %BOOST%
-    - cd %BOOST%
-    - git remote add --no-tags -t %BRANCH_TO_TEST% origin https://github.com/boostorg/boost.git
-    - git fetch --depth=1
-    - git checkout %BRANCH_TO_TEST%
-    - git submodule update --init --merge --jobs 16
-    - git remote set-branches --add origin %BRANCH_TO_TEST%
-    #- git pull --recurse-submodules        # Updates submodules to the most recent version. Not required
-    - rm -rf %BOOST%/libs/%BOOST_REMOVE%
-    - mv %APPVEYOR_BUILD_FOLDER% %BOOST%/libs/%APPVEYOR_PROJECT_NAME%
-
-build_script:
-    - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64
-    - bootstrap.bat
-    - b2.exe headers
-    - cd %BOOST%/libs/%APPVEYOR_PROJECT_NAME%/test
-
-after_build:
-before_test:
-test_script:
-    - ..\..\..\b2.exe address-model=32 architecture=x86 toolset=msvc,gcc cxxflags="-DBOOST_TRAVISCI_BUILD" -sBOOST_BUILD_PATH=.
-
-after_test:
-on_success:
-on_failure:
-on_finish:
diff --git a/third_party/boostorg/any/test/test.hpp b/third_party/boostorg/any/test/test.hpp
deleted file mode 100644
index 65a0c81..0000000
--- a/third_party/boostorg/any/test/test.hpp
+++ /dev/null
@@ -1,321 +0,0 @@
-// what:  simple unit test framework
-// who:   developed by Kevlin Henney
-// when:  November 2000
-// where: tested with BCC 5.5, MSVC 6.0, and g++ 2.91
-
-#ifndef TEST_INCLUDED
-#define TEST_INCLUDED
-
-#include <boost/config.hpp>
-#include <exception>
-#include <iostream>
-#ifdef BOOST_NO_STRINGSTREAM
-#include <strstream> // for out-of-the-box g++ pre-2.95.3
-#else
-#include <sstream>
-#endif
-#include <string>
-
-namespace any_tests // test tuple comprises name and nullary function (object)
-{
-    template<typename string_type, typename function_type>
-    struct test
-    {
-        string_type   name;
-        function_type action;
-
-        static test make(string_type name, function_type action)
-        {
-            test result; // MSVC aggregate initializer bugs
-            result.name   = name;
-            result.action = action;
-            return result;
-        }
-    };
-}
-
-namespace any_tests // failure exception used to indicate checked test failures
-{
-    class failure : public std::exception
-    {
-    public: // struction (default cases are OK)
-
-        failure(const std::string & why) throw()
-          : reason(why)
-        {
-        }
-
-              ~failure() throw() {}
-
-    public: // usage
-
-        virtual const char * what() const throw()
-        {
-            return reason.c_str();
-        }
-
-    private: // representation
-
-        std::string reason;
-
-    };
-}
-
-namespace any_tests // not_implemented exception used to mark unimplemented tests
-{
-    class not_implemented : public std::exception
-    {
-    public: // usage (default ctor and dtor are OK)
-
-        virtual const char * what() const throw()
-        {
-            return "not implemented";
-        }
-
-    };
-}
-
-namespace any_tests // test utilities
-{
-    inline void check(bool condition, const std::string & description)
-    {
-        if(!condition)
-        {
-            throw failure(description);
-        }
-    }
-
-    inline void check_true(bool value, const std::string & description)
-    {
-        check(value, "expected true: " + description);
-    }
-
-    inline void check_false(bool value, const std::string & description)
-    {
-        check(!value, "expected false: " + description);
-    }
-
-    template<typename lhs_type, typename rhs_type>
-    void check_equal(
-        const lhs_type & lhs, const rhs_type & rhs,
-        const std::string & description)
-    {
-        check(lhs == rhs, "expected equal values: " + description);
-    }
-
-    template<typename lhs_type, typename rhs_type>
-    void check_unequal(
-        const lhs_type & lhs, const rhs_type & rhs,
-        const std::string & description)
-    {
-        check(lhs != rhs, "expected unequal values: " + description);
-    }
-
-    inline void check_null(const void * ptr, const std::string & description)
-    {
-        check(!ptr, "expected null pointer: " + description);
-    }
-
-    inline void check_non_null(const void * ptr, const std::string & description)
-    {
-        check(ptr != 0, "expected non-null pointer: " + description);
-    }
-}
-
-#define TEST_CHECK_THROW(expression, exception, description) \
-    try \
-    { \
-        expression; \
-        throw ::any_tests::failure(description); \
-    } \
-    catch(exception &) \
-    { \
-    }
-
-namespace any_tests // memory tracking (enabled if test new and delete linked in)
-{
-    class allocations
-    {
-    public: // singleton access
-
-        static allocations & instance()
-        {
-            static allocations singleton;
-            return singleton;
-        }
-
-    public: // logging
-
-        void clear()
-        {
-            alloc_count = dealloc_count = 0;
-        }
-
-        void allocation()
-        {
-            ++alloc_count;
-        }
-
-        void deallocation()
-        {
-            ++dealloc_count;
-        }
-
-    public: // reporting
-
-        unsigned long allocated() const
-        {
-            return alloc_count;
-        }
-
-        unsigned long deallocated() const
-        {
-            return dealloc_count;
-        }
-
-        bool balanced() const
-        {
-            return alloc_count == dealloc_count;
-        }
-
-    private: // structors (default dtor is fine)
-    
-        allocations()
-          : alloc_count(0), dealloc_count(0)
-        {
-        }
-
-    private: // prevention
-
-        allocations(const allocations &);
-        allocations & operator=(const allocations &);
-
-    private: // state
-
-        unsigned long alloc_count, dealloc_count;
-
-    };
-}
-
-namespace any_tests // tester is the driver class for a sequence of tests
-{
-    template<typename test_iterator>
-    class tester
-    {
-    public: // structors (default destructor is OK)
-
-        tester(test_iterator first_test, test_iterator after_last_test)
-          : begin(first_test), end(after_last_test)
-        {
-        }
-
-    public: // usage
-
-        bool operator()(); // returns true if all tests passed
-
-    private: // representation
-
-        test_iterator begin, end;
-
-    private: // prevention
-
-        tester(const tester &);
-        tester &operator=(const tester &);
-
-    };
-    
-#if defined(__GNUC__) && defined(__SGI_STL_PORT) && (__GNUC__ < 3)
-    // function scope using declarations don't work:
-    using namespace std;
-#endif
-
-    template<typename test_iterator>
-    bool tester<test_iterator>::operator()()
-    {
-        using std::cerr;
-        using std::endl;
-        using std::ends;
-        using std::exception;
-        using std::flush;
-        using std::string;
-
-        unsigned long passed = 0, failed = 0, unimplemented = 0;
-
-        for(test_iterator current = begin; current != end; ++current)
-        {
-            cerr << "[" << current->name << "] " << flush;
-            string result = "passed"; // optimistic
-
-            try
-            {
-                allocations::instance().clear();
-                current->action();
-
-                if(!allocations::instance().balanced())
-                {
-                    unsigned long allocated   = allocations::instance().allocated();
-                    unsigned long deallocated = allocations::instance().deallocated();
-#ifdef BOOST_NO_STRINGSTREAM
-                    std::ostrstream report;
-#else
-                    std::ostringstream report;
-#endif
-                    report << "new/delete ("
-                           << allocated << " allocated, "
-                           << deallocated << " deallocated)"
-                           << ends;
-                    const string text = report.str();
-#ifdef BOOST_NO_STRINGSTREAM
-                    report.freeze(false);
-#endif
-                    throw failure(text);
-                }
-
-                ++passed;
-            }
-            catch(const failure & caught)
-            {
-                (result = "failed: ") += caught.what();
-                ++failed;
-            }
-            catch(const not_implemented &)
-            {
-                result = "not implemented";
-                ++unimplemented;
-            }
-            catch(const exception & caught)
-            {
-                (result = "exception: ") += caught.what();
-                ++failed;
-            }
-            catch(...)
-            {
-                result = "failed with unknown exception";
-                ++failed;
-            }
-
-            cerr << result << endl;
-        }
-
-        cerr << (passed + failed) << " tests: "
-             << passed << " passed, "
-             << failed << " failed";
-
-        if(unimplemented)
-        {
-            cerr << " (" << unimplemented << " not implemented)";
-        }
-
-        cerr << endl;
-
-        return failed == 0;
-    }
-}
-
-#endif
-
-// Copyright Kevlin Henney, 2000. All rights reserved.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
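test.hpp above is a small self-contained harness: each test is a plain function paired with a name in a test_case table, and tester iterates the table, converting thrown failure / not_implemented / std::exception objects into per-test results and checking that new/delete calls balance when the tracking operators are linked in. A minimal, hypothetical suite written against that harness (the any_tests names come from the deleted header; test_arithmetic is illustrative only):

    #include <cstdlib>
    #include "test.hpp"

    namespace {
        void test_arithmetic()
        {
            any_tests::check_equal(2 + 2, 4, "2 + 2 == 4");
        }
    }

    int main()
    {
        typedef any_tests::test<const char *, void (*)()> test_case;
        static const test_case cases[] = { { "arithmetic", &test_arithmetic } };
        any_tests::tester<const test_case *> run(cases, cases + 1);
        return run() ? EXIT_SUCCESS : EXIT_FAILURE;
    }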
diff --git a/third_party/boostorg/array/.gitattributes b/third_party/boostorg/array/.gitattributes
deleted file mode 100644
index 3e84d7c..0000000
--- a/third_party/boostorg/array/.gitattributes
+++ /dev/null
@@ -1,96 +0,0 @@
-* text=auto !eol svneol=native#text/plain
-*.gitattributes text svneol=native#text/plain
-
-# Scriptish formats
-*.bat        text svneol=native#text/plain
-*.bsh        text svneol=native#text/x-beanshell
-*.cgi        text svneol=native#text/plain
-*.cmd        text svneol=native#text/plain
-*.js         text svneol=native#text/javascript
-*.php        text svneol=native#text/x-php
-*.pl         text svneol=native#text/x-perl
-*.pm         text svneol=native#text/x-perl
-*.py         text svneol=native#text/x-python
-*.sh         eol=lf svneol=LF#text/x-sh
-configure    eol=lf svneol=LF#text/x-sh
-
-# Image formats
-*.bmp        binary svneol=unset#image/bmp
-*.gif        binary svneol=unset#image/gif
-*.ico        binary svneol=unset#image/ico
-*.jpeg       binary svneol=unset#image/jpeg
-*.jpg        binary svneol=unset#image/jpeg
-*.png        binary svneol=unset#image/png
-*.tif        binary svneol=unset#image/tiff
-*.tiff       binary svneol=unset#image/tiff
-*.svg        text svneol=native#image/svg%2Bxml
-
-# Data formats
-*.pdf        binary svneol=unset#application/pdf
-*.avi        binary svneol=unset#video/avi
-*.doc        binary svneol=unset#application/msword
-*.dsp        text svneol=crlf#text/plain
-*.dsw        text svneol=crlf#text/plain
-*.eps        binary svneol=unset#application/postscript
-*.gz         binary svneol=unset#application/gzip
-*.mov        binary svneol=unset#video/quicktime
-*.mp3        binary svneol=unset#audio/mpeg
-*.ppt        binary svneol=unset#application/vnd.ms-powerpoint
-*.ps         binary svneol=unset#application/postscript
-*.psd        binary svneol=unset#application/photoshop
-*.rdf        binary svneol=unset#text/rdf
-*.rss        text svneol=unset#text/xml
-*.rtf        binary svneol=unset#text/rtf
-*.sln        text svneol=native#text/plain
-*.swf        binary svneol=unset#application/x-shockwave-flash
-*.tgz        binary svneol=unset#application/gzip
-*.vcproj     text svneol=native#text/xml
-*.vcxproj    text svneol=native#text/xml
-*.vsprops    text svneol=native#text/xml
-*.wav        binary svneol=unset#audio/wav
-*.xls        binary svneol=unset#application/vnd.ms-excel
-*.zip        binary svneol=unset#application/zip
-
-# Text formats
-.htaccess    text svneol=native#text/plain
-*.bbk        text svneol=native#text/xml
-*.cmake      text svneol=native#text/plain
-*.css        text svneol=native#text/css
-*.dtd        text svneol=native#text/xml
-*.htm        text svneol=native#text/html
-*.html       text svneol=native#text/html
-*.ini        text svneol=native#text/plain
-*.log        text svneol=native#text/plain
-*.mak        text svneol=native#text/plain
-*.qbk        text svneol=native#text/plain
-*.rst        text svneol=native#text/plain
-*.sql        text svneol=native#text/x-sql
-*.txt        text svneol=native#text/plain
-*.xhtml      text svneol=native#text/xhtml%2Bxml
-*.xml        text svneol=native#text/xml
-*.xsd        text svneol=native#text/xml
-*.xsl        text svneol=native#text/xml
-*.xslt       text svneol=native#text/xml
-*.xul        text svneol=native#text/xul
-*.yml        text svneol=native#text/plain
-boost-no-inspect text svneol=native#text/plain
-CHANGES      text svneol=native#text/plain
-COPYING      text svneol=native#text/plain
-INSTALL      text svneol=native#text/plain
-Jamfile      text svneol=native#text/plain
-Jamroot      text svneol=native#text/plain
-Jamfile.v2   text svneol=native#text/plain
-Jamrules     text svneol=native#text/plain
-Makefile*    text svneol=native#text/plain
-README       text svneol=native#text/plain
-TODO         text svneol=native#text/plain
-
-# Code formats
-*.c          text svneol=native#text/plain
-*.cpp        text svneol=native#text/plain
-*.h          text svneol=native#text/plain
-*.hpp        text svneol=native#text/plain
-*.ipp        text svneol=native#text/plain
-*.tpp        text svneol=native#text/plain
-*.jam        text svneol=native#text/plain
-*.java       text svneol=native#text/plain
diff --git a/third_party/boostorg/array/BUILD b/third_party/boostorg/array/BUILD
deleted file mode 100644
index 2dbb0e9..0000000
--- a/third_party/boostorg/array/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-licenses(["notice"])  # boost
-
-cc_library(
-    name = "array",
-    hdrs = glob(["include/**"]),
-    includes = ["include"],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/boostorg/array/doc/Jamfile.v2 b/third_party/boostorg/array/doc/Jamfile.v2
deleted file mode 100644
index b7f51b7..0000000
--- a/third_party/boostorg/array/doc/Jamfile.v2
+++ /dev/null
@@ -1,19 +0,0 @@
-#~ Copyright Marshall Clow 2013
-#~ Distributed under the Boost Software License, Version 1.0.
-#~ (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-using boostbook ;
-
-boostbook standalone
-	: array.xml 
-	: <xsl:param>boost.root=../../../.. ;
-
-###############################################################################
-alias boostdoc
-    : array.xml
-    :
-    :
-    : ;
-explicit boostdoc ;
-alias boostrelease ;
-explicit boostrelease ;
diff --git a/third_party/boostorg/array/doc/array.xml b/third_party/boostorg/array/doc/array.xml
deleted file mode 100644
index 68ee5a3..0000000
--- a/third_party/boostorg/array/doc/array.xml
+++ /dev/null
@@ -1,639 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE library PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
-  "http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
-<library name="Array" dirname="array" id="array" last-revision="$Date$">
-  <libraryinfo>
-    <author>
-      <firstname>Nicolai</firstname>
-      <surname>Josuttis</surname>
-    </author>
-    <maintainer>
-      <firstname>Marshall</firstname>
-      <surname>Clow</surname>
-    </maintainer>
-
-    <copyright>
-      <year>2001</year>
-      <year>2002</year>
-      <year>2003</year>
-      <year>2004</year>
-      <holder>Nicolai M. Josuttis</holder>
-    </copyright>
-   
-    <copyright>
-      <year>2012</year>
-      <holder>Marshall Clow</holder>
-    </copyright>
-
-    <legalnotice>
-      <para>Distributed under the Boost Software License, Version 1.0.
-      (See accompanying file <filename>LICENSE_1_0.txt</filename> or copy at 
-      <ulink
-      url="http://www.boost.org/LICENSE_1_0.txt">http://www.boost.org/LICENSE_1_0.txt</ulink>)
-      </para>
-    </legalnotice>
-
-    <librarypurpose>STL compliant container wrapper for arrays of constant size</librarypurpose>
-    <librarycategory name="category:containers"/>
-  </libraryinfo>
-
-  <title>Boost.Array</title>
- 
-  <section id="array.intro">
-    <title>Introduction</title> 
-
-    <using-namespace name="boost"/>
-    <using-class name="array"/>
-
-    <para>The C++ Standard Template Library STL as part of the C++
-    Standard Library provides a framework for processing algorithms on
-    different kinds of containers. However, ordinary arrays don't
-    provide the interface of STL containers (although they provide
-    the iterator interface of STL containers).</para>
-
-    <para>As replacement for ordinary arrays, the STL provides class
-    <code><classname>std::vector</classname></code>.  However,
-    <code><classname>std::vector&lt;&gt;</classname></code> provides
-    the semantics of dynamic arrays. Thus, it manages data to be able
-    to change the number of elements. This results in some overhead in
-    case only arrays with static size are needed.</para>
-
-    <para>In his book, <emphasis>Generic Programming and the
-    STL</emphasis>, Matthew H. Austern introduces a useful wrapper
-    class for ordinary arrays with static size, called
-    <code>block</code>.  It is safer and has no worse performance than
-    ordinary arrays. In <emphasis>The C++ Programming
-    Language</emphasis>, 3rd edition, Bjarne Stroustrup introduces a
-    similar class, called <code>c_array</code>, which I (<ulink
-    url="http://www.josuttis.com">Nicolai Josuttis</ulink>) present
-    slightly modified in my book <emphasis>The C++ Standard Library -
-    A Tutorial and Reference</emphasis>, called
-    <code>carray</code>. This is the essence of these approaches
-    spiced with much feedback from <ulink
-    url="http://www.boost.org">boost</ulink>.</para>
-
-    <para>After considering different names, we decided to name this
-    class simply <code><classname>array</classname></code>.</para>
-
-    <para>Note that this class is suggested to be part of the next
-    Technical Report, which will extend the C++ Standard (see
-    <ulink url="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2003/n1548.htm">http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2003/n1548.htm</ulink>).</para>
-
-    <para>Update: <code>std::array</code> is (as of C++11) part of the C++ standard.
-    The differences between <code>boost::array</code> and <code>std::array</code> are minimal.
-    If you are using C++11, you should consider using <code>std::array</code> instead of <code>boost::array</code>.
-    </para>
-
-    <para>Class <code><classname>array</classname></code> fulfills most
-    but not all of the requirements of "reversible containers" (see
-    Section 23.1, [lib.container.requirements] of the C++
-    Standard). The reasons array is not a reversible STL container
-    are:
-      <itemizedlist spacing="compact">
-        <listitem><simpara>No constructors are provided.</simpara></listitem>
-        <listitem><simpara>Elements may have an undetermined initial value (see <xref linkend="array.rationale"/>).</simpara></listitem>
-        <listitem><simpara><functionname>swap</functionname>() has no constant complexity.</simpara></listitem>
-        <listitem><simpara><methodname>size</methodname>() is always constant, based on the second template argument of the type.</simpara></listitem>
-        <listitem><simpara>The container provides no allocator support.</simpara></listitem>
-      </itemizedlist>
-    </para>
-
-    <para>It doesn't fulfill the requirements of a "sequence" (see Section 23.1.1, [lib.sequence.reqmts] of the C++ Standard), except that:
-      <itemizedlist spacing="compact">
-        <listitem><simpara><methodname>front</methodname>() and <methodname>back</methodname>() are provided.</simpara></listitem>
-        <listitem><simpara><methodname>operator[]</methodname> and <methodname>at</methodname>() are provided.</simpara></listitem>
-      </itemizedlist>
-    </para>
-  </section>
-  
-  <library-reference>
-    <header name="boost/array.hpp">
-      <namespace name="boost">
-        <class name="array">
-          <template>
-            <template-type-parameter name="T"/>
-            <template-nontype-parameter name="N">
-              <type>std::size_t</type>
-            </template-nontype-parameter>
-          </template>
-
-          <purpose><para>STL compliant container wrapper for arrays of constant size</para></purpose>
-          <typedef name="value_type">
-            <type>T</type>
-          </typedef>
-          <typedef name="iterator">
-             <type>T*</type>
-          </typedef>
-          <typedef name="const_iterator">
-             <type>const T*</type>
-          </typedef>
-          <typedef name="reverse_iterator">
-             <type><classname>std::reverse_iterator</classname>&lt;iterator&gt;</type>
-          </typedef>
-          <typedef name="const_reverse_iterator">
-             <type><classname>std::reverse_iterator</classname>&lt;const_iterator&gt;</type>
-          </typedef>
-          <typedef name="reference">
-             <type>T&amp;</type>
-          </typedef>
-          <typedef name="const_reference">
-             <type>const T&amp;</type>
-          </typedef>
-          <typedef name="size_type">
-             <type>std::size_t</type>
-          </typedef>
-          <typedef name="difference_type">
-             <type>std::ptrdiff_t</type>
-          </typedef>
-
-          <static-constant name="static_size">
-            <type>size_type</type>
-            <default>N</default>
-          </static-constant>
-
-          <copy-assignment>
-            <template>
-              <template-type-parameter name="U"/>
-            </template>
-            <parameter name="other">
-              <paramtype>const <classname>array</classname>&lt;U, N&gt;&amp;</paramtype>
-            </parameter>
-            <effects><simpara><code>std::copy(rhs.<methodname>begin</methodname>(),rhs.<methodname>end</methodname>(), <methodname>begin</methodname>())</code></simpara></effects>
-          </copy-assignment>
-
-          <method-group name="iterator support">
-            <overloaded-method name="begin">
-              <signature>
-                <type>iterator</type>
-              </signature>
-              <signature cv="const">
-                <type>const_iterator</type>
-              </signature>
-
-              <returns><simpara>iterator for the first element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="end">
-              <signature>
-                <type>iterator</type>
-              </signature>
-              <signature cv="const">
-                <type>const_iterator</type>
-              </signature>
-
-              <returns><simpara>iterator for position after the last element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="cbegin" cv="const">
-              <signature>
-                <type>const_iterator</type>
-              </signature>
-
-              <returns><simpara>constant iterator for the first element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="cend" cv="const">
-              <signature>
-                <type>const_iterator</type>
-              </signature>
-
-              <returns><simpara>constant iterator for position after the last element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-          </method-group>
-
-          <method-group name="reverse iterator support">
-            <overloaded-method name="rbegin">
-              <signature>
-                <type>reverse_iterator</type>
-              </signature>
-              <signature cv="const">
-                <type>const_reverse_iterator</type>
-              </signature>
-
-              <returns><simpara>reverse iterator for the first element of reverse iteration</simpara></returns>
-            </overloaded-method>
-
-            <overloaded-method name="rend">
-              <signature>
-                <type>reverse_iterator</type>
-              </signature>
-              <signature cv="const">
-                <type>const_reverse_iterator</type>
-              </signature>
-
-              <returns><simpara>reverse iterator for position after the last element in reverse iteration</simpara></returns>
-            </overloaded-method>
-  
-            <overloaded-method name="crbegin" cv="const">
-              <signature>
-                <type>const_reverse_iterator</type>
-              </signature>
-
-              <returns><simpara>constant reverse iterator for the first element of reverse iteration</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="crend" cv="const">
-              <signature>
-                <type>const_reverse_iterator</type>
-              </signature>
-
-              <returns><simpara>constant reverse iterator for position after the last element in reverse iteration</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-          </method-group>
-
-          <method-group name="capacity">
-            <method name="size">
-              <type>size_type</type>
-              <returns><simpara><code>N</code></simpara></returns>
-            </method>
-            <method name="empty">
-              <type>bool</type>
-              <returns><simpara><code>N==0</code></simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </method>
-            <method name="max_size">
-              <type>size_type</type>
-              <returns><simpara><code>N</code></simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </method>
-          </method-group>
-
-          <method-group name="element access">
-            <overloaded-method name="operator[]">
-              <signature>
-                <type>reference</type>
-                <parameter name="i">
-                  <paramtype>size_type</paramtype>
-                </parameter>
-              </signature>
-
-              <signature cv="const">
-                <type>const_reference</type>
-                <parameter name="i">
-                  <paramtype>size_type</paramtype>
-                </parameter>
-              </signature>
-
-              <requires><simpara><code>i &lt; N</code></simpara></requires>
-              <returns><simpara>element with index <code>i</code></simpara></returns>
-              <throws><simpara>will not throw.</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="at">
-              <signature>
-                <type>reference</type>
-                <parameter name="i">
-                  <paramtype>size_type</paramtype>
-                </parameter>
-              </signature>
-
-              <signature cv="const">
-                <type>const_reference</type>
-                <parameter name="i">
-                  <paramtype>size_type</paramtype>
-                </parameter>
-              </signature>
-
-              <returns><simpara>element with index <code>i</code></simpara></returns>
-              <throws><simpara><code><classname>std::range_error</classname></code> if <code>i &gt;= N</code></simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="front">
-              <signature>
-                <type>reference</type>
-              </signature>
-              <signature cv="const">
-                <type>const_reference</type>
-              </signature>
-              <requires><simpara><code>N &gt; 0</code></simpara></requires>
-              <returns><simpara>the first element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <overloaded-method name="back">
-              <signature>
-                <type>reference</type>
-              </signature>
-              <signature cv="const">
-                <type>const_reference</type>
-              </signature>
-              <requires><simpara><code>N &gt; 0</code></simpara></requires>
-              <returns><simpara>the last element</simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </overloaded-method>
-
-            <method name="data" cv="const">
-              <type>const T*</type>
-              <returns><simpara><code>elems</code></simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </method>
-
-           <method name="c_array">
-              <type>T*</type>
-              <returns><simpara><code>elems</code></simpara></returns>
-              <throws><simpara>will not throw</simpara></throws>
-            </method>
-          </method-group>
-
-          <method-group name="modifiers">
-            <method name="swap">
-              <type>void</type>
-              <parameter name="other">
-                <paramtype><classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <effects><simpara><code>std::swap_ranges(<methodname>begin</methodname>(), <methodname>end</methodname>(), other.<methodname>begin</methodname>())</code></simpara></effects>
-              <complexity><simpara>linear in <code>N</code></simpara></complexity>
-            </method>
-            <method name="assign">
-              <type>void</type>
-              <parameter name="value">
-                <paramtype>const T&amp;</paramtype>
-              </parameter>
-              <effects><simpara><code>std::fill_n(<methodname>begin</methodname>(), N, value)</code></simpara></effects>
-            </method>
-          </method-group>
-
-          <data-member name="elems[N]"> <!-- HACK -->
-            <type>T</type>
-          </data-member>
-
-          <free-function-group name="specialized algorithms">
-            <function name="swap"> 
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>void</type>
-              
-              <parameter name="x">
-                <paramtype><classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype><classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <effects><simpara><code>x.<methodname>swap</methodname>(y)</code></simpara></effects>
-              <throws><simpara>will not throw.</simpara></throws>
-            </function>
-          </free-function-group>
-
-          <free-function-group name="comparisons">
-            <function name="operator==">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>std::equal(x.<methodname>begin</methodname>(), x.<methodname>end</methodname>(), y.<methodname>begin</methodname>())</code></simpara>
-              </returns>
-            </function>
- 
-            <function name="operator!=">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>!(x == y)</code></simpara>
-              </returns>
-            </function>
-
-            <function name="operator&lt;">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>std::lexicographical_compare(x.<methodname>begin</methodname>(), x.<methodname>end</methodname>(), y.<methodname>begin</methodname>(), y.<methodname>end</methodname>())</code></simpara>
-              </returns>
-            </function>
-
-            <function name="operator&gt;">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>y &lt; x</code></simpara></returns>
-            </function>
-
-            <function name="operator&lt;=">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>!(y &lt; x)</code></simpara></returns>
-            </function>
-
-            <function name="operator&gt;=">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>bool</type>
-              
-              <parameter name="x">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <parameter name="y">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-
-              <returns><simpara><code>!(x &lt; y)</code></simpara></returns>
-            </function>
-          </free-function-group>
-            
-          <free-function-group name="specializations">
-            <function name="boost::get">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-                <template-nontype-parameter name="Idx">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>T</type>
-              
-              <parameter name="arr">
-                <paramtype><classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <returns><simpara>element of array with index <code>Idx</code></simpara></returns>
-              <effects><simpara>Will <code>static_assert</code> if <code>Idx >= N</code></simpara></effects>
-            </function>
-
-            <function name="boost::get">
-              <template>
-                <template-type-parameter name="T"/>
-                <template-nontype-parameter name="N">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-                <template-nontype-parameter name="Idx">
-                  <type>std::size_t</type>
-                </template-nontype-parameter>
-              </template>
-
-              <type>T</type>
-              
-              <parameter name="arr">
-                <paramtype>const <classname>array</classname>&lt;T, N&gt;&amp;</paramtype>
-              </parameter>
-              <returns><simpara>const element of array with index <code>Idx</code></simpara></returns>
-              <effects><simpara>Will <code>static_assert</code> if <code>Idx >= N</code></simpara></effects>
-            </function>
-          </free-function-group>
-
-        </class>
-      </namespace>
-    </header>
-  </library-reference>
-
-<section id="array.rationale">
-  <title>Design Rationale</title>
-
-  <para>There was an important design tradeoff regarding the
-  constructors: We could implement array as an "aggregate" (see
-  Section 8.5.1, [dcl.init.aggr], of the C++ Standard). This would
-  mean:
-    <itemizedlist>
-      <listitem><simpara>An array can be initialized with a
-      brace-enclosing, comma-separated list of initializers for the
-      elements of the container, written in increasing subscript
-      order:</simpara>
-
-      <programlisting><classname>boost::array</classname>&lt;int,4&gt; a = { { 1, 2, 3 } };</programlisting>
-
-      <simpara>Note that if there are fewer elements in the
-      initializer list, then each remaining element gets
-      default-initialized (thus, it has a defined value).</simpara>
-  </listitem></itemizedlist></para>
-
-  <para>However, this approach has its drawbacks: <emphasis
-  role="bold"> passing no initializer list means that the elements
-  have an indeterminate initial value</emphasis>, because the rule says
-  that aggregates may have:
-    <itemizedlist>
-      <listitem><simpara>No user-declared constructors.</simpara></listitem>
-      <listitem><simpara>No private or protected non-static data members.</simpara></listitem>
-      <listitem><simpara>No base classes.</simpara></listitem>
-      <listitem><simpara>No virtual functions.</simpara></listitem>
-    </itemizedlist>
-  </para>
-
-  <para>Nevertheless, the current implementation uses this approach.</para>
-
-  <para>Note that for standard-conforming compilers it is possible to
-  use fewer braces (according to 8.5.1 (11) of the Standard). That is,
-  you can initialize an array as follows:</para>
-
-<programlisting>
-<classname>boost::array</classname>&lt;int,4&gt; a = { 1, 2, 3 };
-</programlisting>
-
-  <para>I'd appreciate any constructive feedback. <emphasis
-  role="bold">Please note: I don't have time to read all boost
-  mails. Thus, to make sure that feedback reaches me, please send
-  me a copy of each mail regarding this class.</emphasis></para>
-
-  <para>The code is provided "as is" without expressed or implied
-  warranty.</para>
-
-</section>
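To make the initialization trade-off described in this rationale concrete, here is a small, purely illustrative sketch (not part of the original library sources) contrasting a full initializer list, a partial one, and a default-constructed array whose elements are left indeterminate:

```cpp
#include <boost/array.hpp>
#include <iostream>

int main()
{
    // All four elements explicitly initialized.
    boost::array<int, 4> full = {{ 1, 2, 3, 4 }};

    // Only three initializers: the remaining element is value-initialized,
    // so partial[3] is 0.
    boost::array<int, 4> partial = {{ 1, 2, 3 }};

    // No initializer list at all: because boost::array is an aggregate with
    // no user-declared constructor, the elements hold indeterminate values.
    boost::array<int, 4> uninitialized;
    (void)uninitialized;  // reading its elements would be undefined behaviour

    std::cout << full[3] << ' ' << partial[3] << '\n';  // prints "4 0"
    return 0;
}
```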
-
-<section id="array.more.info">
-  <title>For more information...</title>
-  <para>To find more details about using ordinary arrays in C++ and
-  the framework of the STL, see e.g.
-
-    <literallayout>The C++ Standard Library - A Tutorial and Reference
-by Nicolai M. Josuttis
-Addison Wesley Longman, 1999
-ISBN 0-201-37926-0</literallayout>
-   </para>
-
-  <para><ulink url="http://www.josuttis.com/">Home Page of Nicolai
-  Josuttis</ulink></para>
-</section>
-
-<section id="array.ack">
-  <title>Acknowledgements</title>
-  
-  <para>Doug Gregor ported the documentation to the BoostBook format.</para>
-</section>
-
-<!-- Notes:
-   empty() should return N == 0
-   size(), empty(), max_size() should be const
-   -->
-
-</library>
diff --git a/third_party/boostorg/array/include/boost/array.hpp b/third_party/boostorg/array/include/boost/array.hpp
deleted file mode 100644
index 210c072..0000000
--- a/third_party/boostorg/array/include/boost/array.hpp
+++ /dev/null
@@ -1,457 +0,0 @@
-/* The following code declares class array,
- * an STL container (as wrapper) for arrays of constant size.
- *
- * See
- *      http://www.boost.org/libs/array/
- * for documentation.
- *
- * The original author site is at: http://www.josuttis.com/
- *
- * (C) Copyright Nicolai M. Josuttis 2001.
- *
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- *  9 Jan 2013 - (mtc) Added constexpr
- * 14 Apr 2012 - (mtc) Added support for boost::hash
- * 28 Dec 2010 - (mtc) Added cbegin and cend (and crbegin and crend) for C++0x compatibility.
- * 10 Mar 2010 - (mtc) fill method added, matching resolution of the standard library working group.
- *      See <http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#776> or Trac issue #3168
- *      Eventually, we should remove "assign" which is now a synonym for "fill" (Marshall Clow)
- * 10 Mar 2010 - added workaround for SUNCC and !STLPort [trac #3893] (Marshall Clow)
- * 29 Jan 2004 - c_array() added, BOOST_NO_PRIVATE_IN_AGGREGATE removed (Nico Josuttis)
- * 23 Aug 2002 - fix for Non-MSVC compilers combined with MSVC libraries.
- * 05 Aug 2001 - minor update (Nico Josuttis)
- * 20 Jan 2001 - STLport fix (Beman Dawes)
- * 29 Sep 2000 - Initial Revision (Nico Josuttis)
- *
- * Jan 29, 2004
- */
-#ifndef BOOST_ARRAY_HPP
-#define BOOST_ARRAY_HPP
-
-#include <boost/detail/workaround.hpp>
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)  
-# pragma warning(push)  
-# pragma warning(disable:4996) // 'std::equal': Function call with parameters that may be unsafe
-# pragma warning(disable:4510) // boost::array<T,N>' : default constructor could not be generated 
-# pragma warning(disable:4610) // warning C4610: class 'boost::array<T,N>' can never be instantiated - user defined constructor required 
-#endif
-
-#include <cstddef>
-#include <stdexcept>
-#include <boost/assert.hpp>
-#include <boost/static_assert.hpp>
-#include <boost/swap.hpp>
-
-// Handles broken standard libraries better than <iterator>
-#include <boost/detail/iterator.hpp>
-#include <boost/throw_exception.hpp>
-#include <algorithm>
-
-// FIXES for broken compilers
-#include <boost/config.hpp>
-
-
-namespace boost {
-
-    template<class T, std::size_t N>
-    class array {
-      public:
-        T elems[N];    // fixed-size array of elements of type T
-
-      public:
-        // type definitions
-        typedef T              value_type;
-        typedef T*             iterator;
-        typedef const T*       const_iterator;
-        typedef T&             reference;
-        typedef const T&       const_reference;
-        typedef std::size_t    size_type;
-        typedef std::ptrdiff_t difference_type;
-
-        // iterator support
-        iterator        begin()       { return elems; }
-        const_iterator  begin() const { return elems; }
-        const_iterator cbegin() const { return elems; }
-        
-        iterator        end()       { return elems+N; }
-        const_iterator  end() const { return elems+N; }
-        const_iterator cend() const { return elems+N; }
-
-        // reverse iterator support
-#if !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS)
-        typedef std::reverse_iterator<iterator> reverse_iterator;
-        typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
-#elif defined(_RWSTD_NO_CLASS_PARTIAL_SPEC) 
-        typedef std::reverse_iterator<iterator, std::random_access_iterator_tag, 
-              value_type, reference, iterator, difference_type> reverse_iterator; 
-        typedef std::reverse_iterator<const_iterator, std::random_access_iterator_tag,
-              value_type, const_reference, const_iterator, difference_type> const_reverse_iterator;
-#else
-        // workaround for broken reverse_iterator implementations
-        typedef std::reverse_iterator<iterator,T> reverse_iterator;
-        typedef std::reverse_iterator<const_iterator,T> const_reverse_iterator;
-#endif
-
-        reverse_iterator rbegin() { return reverse_iterator(end()); }
-        const_reverse_iterator rbegin() const {
-            return const_reverse_iterator(end());
-        }
-        const_reverse_iterator crbegin() const {
-            return const_reverse_iterator(end());
-        }
-
-        reverse_iterator rend() { return reverse_iterator(begin()); }
-        const_reverse_iterator rend() const {
-            return const_reverse_iterator(begin());
-        }
-        const_reverse_iterator crend() const {
-            return const_reverse_iterator(begin());
-        }
-
-        // operator[]
-        reference operator[](size_type i) 
-        { 
-            return BOOST_ASSERT_MSG( i < N, "out of range" ), elems[i]; 
-        }
-        
-        /*BOOST_CONSTEXPR*/ const_reference operator[](size_type i) const 
-        {     
-            return BOOST_ASSERT_MSG( i < N, "out of range" ), elems[i]; 
-        }
-
-        // at() with range check
-        reference                           at(size_type i)       { return rangecheck(i), elems[i]; }
-        /*BOOST_CONSTEXPR*/ const_reference at(size_type i) const { return rangecheck(i), elems[i]; }
-    
-        // front() and back()
-        reference front() 
-        { 
-            return elems[0]; 
-        }
-        
-        BOOST_CONSTEXPR const_reference front() const 
-        {
-            return elems[0];
-        }
-        
-        reference back() 
-        { 
-            return elems[N-1]; 
-        }
-        
-        BOOST_CONSTEXPR const_reference back() const 
-        { 
-            return elems[N-1]; 
-        }
-
-        // size is constant
-        static BOOST_CONSTEXPR size_type size() { return N; }
-        static BOOST_CONSTEXPR bool empty() { return false; }
-        static BOOST_CONSTEXPR size_type max_size() { return N; }
-        enum { static_size = N };
-
-        // swap (note: linear complexity)
-        void swap (array<T,N>& y) {
-            for (size_type i = 0; i < N; ++i)
-                boost::swap(elems[i],y.elems[i]);
-        }
-
-        // direct access to data (read-only)
-        const T* data() const { return elems; }
-        T* data() { return elems; }
-
-        // use array as C array (direct read/write access to data)
-        T* c_array() { return elems; }
-
-        // assignment with type conversion
-        template <typename T2>
-        array<T,N>& operator= (const array<T2,N>& rhs) {
-            std::copy(rhs.begin(),rhs.end(), begin());
-            return *this;
-        }
-
-        // assign one value to all elements
-        void assign (const T& value) { fill ( value ); }    // A synonym for fill
-        void fill   (const T& value)
-        {
-            std::fill_n(begin(),size(),value);
-        }
-
-        // check range: i == size() is also out of range (may be private because it is static)
-        static BOOST_CONSTEXPR bool rangecheck (size_type i) {
-            return i >= size() ? boost::throw_exception(std::out_of_range ("array<>: index out of range")), true : true;
-        }
-
-    };
-
-    template< class T >
-    class array< T, 0 > {
-
-      public:
-        // type definitions
-        typedef T              value_type;
-        typedef T*             iterator;
-        typedef const T*       const_iterator;
-        typedef T&             reference;
-        typedef const T&       const_reference;
-        typedef std::size_t    size_type;
-        typedef std::ptrdiff_t difference_type;
-
-        // iterator support
-        iterator        begin()       { return       iterator( reinterpret_cast<       T * >( this ) ); }
-        const_iterator  begin() const { return const_iterator( reinterpret_cast< const T * >( this ) ); }
-        const_iterator cbegin() const { return const_iterator( reinterpret_cast< const T * >( this ) ); }
-
-        iterator        end()       { return  begin(); }
-        const_iterator  end() const { return  begin(); }
-        const_iterator cend() const { return cbegin(); }
-
-        // reverse iterator support
-#if !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS)
-        typedef std::reverse_iterator<iterator> reverse_iterator;
-        typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
-#elif defined(_RWSTD_NO_CLASS_PARTIAL_SPEC) 
-        typedef std::reverse_iterator<iterator, std::random_access_iterator_tag, 
-              value_type, reference, iterator, difference_type> reverse_iterator; 
-        typedef std::reverse_iterator<const_iterator, std::random_access_iterator_tag,
-              value_type, const_reference, const_iterator, difference_type> const_reverse_iterator;
-#else
-        // workaround for broken reverse_iterator implementations
-        typedef std::reverse_iterator<iterator,T> reverse_iterator;
-        typedef std::reverse_iterator<const_iterator,T> const_reverse_iterator;
-#endif
-
-        reverse_iterator rbegin() { return reverse_iterator(end()); }
-        const_reverse_iterator rbegin() const {
-            return const_reverse_iterator(end());
-        }
-        const_reverse_iterator crbegin() const {
-            return const_reverse_iterator(end());
-        }
-
-        reverse_iterator rend() { return reverse_iterator(begin()); }
-        const_reverse_iterator rend() const {
-            return const_reverse_iterator(begin());
-        }
-        const_reverse_iterator crend() const {
-            return const_reverse_iterator(begin());
-        }
-
-        // operator[]
-        reference operator[](size_type /*i*/)
-        {
-            return failed_rangecheck();
-        }
-
-        /*BOOST_CONSTEXPR*/ const_reference operator[](size_type /*i*/) const
-        {
-            return failed_rangecheck();
-        }
-
-        // at() with range check
-        reference at(size_type /*i*/)               {   return failed_rangecheck(); }
-        /*BOOST_CONSTEXPR*/ const_reference at(size_type /*i*/) const   { return failed_rangecheck(); }
-
-        // front() and back()
-        reference front()
-        {
-            return failed_rangecheck();
-        }
-
-        BOOST_CONSTEXPR const_reference front() const
-        {
-            return failed_rangecheck();
-        }
-
-        reference back()
-        {
-            return failed_rangecheck();
-        }
-
-        BOOST_CONSTEXPR const_reference back() const
-        {
-            return failed_rangecheck();
-        }
-
-        // size is constant
-        static BOOST_CONSTEXPR size_type size() { return 0; }
-        static BOOST_CONSTEXPR bool empty() { return true; }
-        static BOOST_CONSTEXPR size_type max_size() { return 0; }
-        enum { static_size = 0 };
-
-        void swap (array<T,0>& /*y*/) {
-        }
-
-        // direct access to data (read-only)
-        const T* data() const { return 0; }
-        T* data() { return 0; }
-
-        // use array as C array (direct read/write access to data)
-        T* c_array() { return 0; }
-
-        // assignment with type conversion
-        template <typename T2>
-        array<T,0>& operator= (const array<T2,0>& ) {
-            return *this;
-        }
-
-        // assign one value to all elements
-        void assign (const T& value) { fill ( value ); }
-        void fill   (const T& ) {}
-        
-        // check range (may be private because it is static)
-        static reference failed_rangecheck () {
-                std::out_of_range e("attempt to access element of an empty array");
-                boost::throw_exception(e);
-#if defined(BOOST_NO_EXCEPTIONS) || (!defined(BOOST_MSVC) && !defined(__PATHSCALE__))
-                //
-                // We need to return something here to keep
-                // some compilers happy: however we will never
-                // actually get here....
-                //
-                static T placeholder;
-                return placeholder;
-#endif
-            }
-    };
-
-    // comparisons
-    template<class T, std::size_t N>
-    bool operator== (const array<T,N>& x, const array<T,N>& y) {
-        return std::equal(x.begin(), x.end(), y.begin());
-    }
-    template<class T, std::size_t N>
-    bool operator< (const array<T,N>& x, const array<T,N>& y) {
-        return std::lexicographical_compare(x.begin(),x.end(),y.begin(),y.end());
-    }
-    template<class T, std::size_t N>
-    bool operator!= (const array<T,N>& x, const array<T,N>& y) {
-        return !(x==y);
-    }
-    template<class T, std::size_t N>
-    bool operator> (const array<T,N>& x, const array<T,N>& y) {
-        return y<x;
-    }
-    template<class T, std::size_t N>
-    bool operator<= (const array<T,N>& x, const array<T,N>& y) {
-        return !(y<x);
-    }
-    template<class T, std::size_t N>
-    bool operator>= (const array<T,N>& x, const array<T,N>& y) {
-        return !(x<y);
-    }
-
-    // global swap()
-    template<class T, std::size_t N>
-    inline void swap (array<T,N>& x, array<T,N>& y) {
-        x.swap(y);
-    }
-
-#if defined(__SUNPRO_CC)
-//  Trac ticket #4757; the Sun Solaris compiler can't handle
-//  syntax like 'T(&get_c_array(boost::array<T,N>& arg))[N]'
-//  
-//  We can't just use this for all compilers, because the 
-//      borland compilers can't handle this form. 
-    namespace detail {
-       template <typename T, std::size_t N> struct c_array
-       {
-           typedef T type[N];
-       };
-    }
-    
-   // Specific for boost::array: simply returns its elems data member.
-   template <typename T, std::size_t N>
-   typename detail::c_array<T,N>::type& get_c_array(boost::array<T,N>& arg)
-   {
-       return arg.elems;
-   }
-
-   // Specific for boost::array: simply returns its elems data member.
-   template <typename T, std::size_t N>
-   typename detail::c_array<T,N>::type const& get_c_array(const boost::array<T,N>& arg)
-   {
-       return arg.elems;
-   }
-#else
-// Specific for boost::array: simply returns its elems data member.
-    template <typename T, std::size_t N>
-    T(&get_c_array(boost::array<T,N>& arg))[N]
-    {
-        return arg.elems;
-    }
-    
-    // Const version.
-    template <typename T, std::size_t N>
-    const T(&get_c_array(const boost::array<T,N>& arg))[N]
-    {
-        return arg.elems;
-    }
-#endif
-    
-#if 0
-    // Overload for std::array, assuming that std::array will have
-    // explicit conversion functions as discussed at the WG21 meeting
-    // in Summit, March 2009.
-    template <typename T, std::size_t N>
-    T(&get_c_array(std::array<T,N>& arg))[N]
-    {
-        return static_cast<T(&)[N]>(arg);
-    }
-    
-    // Const version.
-    template <typename T, std::size_t N>
-    const T(&get_c_array(const std::array<T,N>& arg))[N]
-    {
-        return static_cast<T(&)[N]>(arg);
-    }
-#endif
-
-    template <class It> std::size_t hash_range(It, It);
-
-    template<class T, std::size_t N>
-    std::size_t hash_value(const array<T,N>& arr)
-    {
-        return boost::hash_range(arr.begin(), arr.end());
-    }
-
-   template <size_t Idx, typename T, size_t N>
-   T &get(boost::array<T,N> &arr) BOOST_NOEXCEPT {
-       BOOST_STATIC_ASSERT_MSG ( Idx < N, "boost::get<>(boost::array &) index out of range" );
-       return arr[Idx];
-       }
-    
-   template <size_t Idx, typename T, size_t N>
-   const T &get(const boost::array<T,N> &arr) BOOST_NOEXCEPT {
-       BOOST_STATIC_ASSERT_MSG ( Idx < N, "boost::get<>(const boost::array &) index out of range" );
-       return arr[Idx];
-       }
-
-} /* namespace boost */
-
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-//  If we don't have std::array, I'm assuming that we don't have std::get
-namespace std {
-   template <size_t Idx, typename T, size_t N>
-   T &get(boost::array<T,N> &arr) BOOST_NOEXCEPT {
-       BOOST_STATIC_ASSERT_MSG ( Idx < N, "std::get<>(boost::array &) index out of range" );
-       return arr[Idx];
-       }
-    
-   template <size_t Idx, typename T, size_t N>
-   const T &get(const boost::array<T,N> &arr) BOOST_NOEXCEPT {
-       BOOST_STATIC_ASSERT_MSG ( Idx < N, "std::get<>(const boost::array &) index out of range" );
-       return arr[Idx];
-       }
-}
-#endif
-
-#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)  
-# pragma warning(pop)  
-#endif 
-
-#endif /*BOOST_ARRAY_HPP*/
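For orientation, a short, self-contained usage sketch of the interface declared in the header above (illustrative only; it assumes the Boost headers are on the include path and exercises the aggregate initialization, comparison, get<>, get_c_array and swap facilities defined there):

```cpp
#include <boost/array.hpp>
#include <iostream>

int main()
{
    boost::array<int, 3> a = {{ 1, 2, 3 }};
    boost::array<int, 3> b = a;                        // aggregates are copyable

    // operator== compares element-wise, operator< is lexicographical.
    std::cout << (a == b) << ' ' << (a < b) << '\n';   // prints "1 0"

    // Compile-time checked element access; boost::get<5>(a) would trigger
    // the static assertion instead of compiling.
    std::cout << boost::get<0>(a) << '\n';

    // Direct access to the underlying built-in array.
    int (&raw)[3] = boost::get_c_array(a);
    std::cout << raw[2] << '\n';

    // The non-member swap forwards to the linear-time member swap.
    boost::array<int, 3> c = {{ 7, 8, 9 }};
    boost::swap(a, c);
    std::cout << a[0] << '\n';                         // prints "7"
    return 0;
}
```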
diff --git a/third_party/boostorg/array/index.html b/third_party/boostorg/array/index.html
deleted file mode 100644
index a9e3c34..0000000
--- a/third_party/boostorg/array/index.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<html>
-<head>
-<meta http-equiv="refresh" content="0; URL=../../doc/html/array.html">
-</head>
-<body>
-Automatic redirection failed, please go to
-<a href="../../doc/html/array.html">../../doc/html/array.html</a> &nbsp;<hr>
-<p>© Copyright Beman Dawes, 2001</p>
-<p>Distributed under the Boost Software License, Version 1.0. (See accompanying 
-file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy 
-at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
-</body>
-</html>
\ No newline at end of file
diff --git a/third_party/boostorg/array/meta/libraries.json b/third_party/boostorg/array/meta/libraries.json
deleted file mode 100644
index 7710ee2..0000000
--- a/third_party/boostorg/array/meta/libraries.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "key": "array",
-    "name": "Array",
-    "authors": [
-        "Nicolai Josuttis"
-    ],
-    "description": "STL compliant container wrapper for arrays of constant size.",
-    "std": [
-        "tr1"
-    ],
-    "category": [
-        "Containers"
-    ],
-    "maintainers": [
-        "Marshall Clow <marshall -at- idio.com>"
-    ]
-}
diff --git a/third_party/boostorg/array/test/Jamfile.v2 b/third_party/boostorg/array/test/Jamfile.v2
deleted file mode 100644
index 1a04d91..0000000
--- a/third_party/boostorg/array/test/Jamfile.v2
+++ /dev/null
@@ -1,25 +0,0 @@
-#~ Copyright Rene Rivera 2008
-#~ Distributed under the Boost Software License, Version 1.0.
-#~ (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-import testing ;
-
-alias unit_test_framework
-    : # sources
-        /boost//unit_test_framework
-    ;        
-
-test-suite array :
-    [ run array0.cpp unit_test_framework  : : : : array0 ]
-    [ run array1.cpp ]
-    [ run array2.cpp ]
-    [ run array3.cpp ]
-    [ run array4.cpp ]
-    [ run array5.cpp ]
-    [ run array6.cpp unit_test_framework  : : : : array6 ]
-    [ run array7.cpp unit_test_framework  : : : : array7 ]
-#    [ run array_constexpr.cpp unit_test_framework  : : : : array_constexpr ]
-    [ compile-fail array_getfail1.cpp ]
-    [ compile-fail array_getfail2.cpp ]
-    [ run array_hash.cpp unit_test_framework  : : : : array_hash ]
-    ;
diff --git a/third_party/boostorg/array/test/array0.cpp b/third_party/boostorg/array/test/array0.cpp
deleted file mode 100644
index c1c047e..0000000
--- a/third_party/boostorg/array/test/array0.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/* tests for using class array<> specialization for size 0
- * (C) Copyright Alisdair Meredith 2006.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-
-template< class T >
-void    BadValue( const T &  )
-{
-    BOOST_CHECK ( false );
-}
-
-template< class T >
-void    RunTests()
-{
-    typedef boost::array< T, 0 >    test_type;
-
-    //  Test value and aggregate initialization
-    test_type                   test_case   =   {};
-    const boost::array< T, 0 >  const_test_case = test_type();
-
-    test_case.fill ( T() );
-
-    //  front/back and operator[] must compile, but calling them is undefined
-    //  Likewise, all tests below should evaluate to false, avoiding undefined behaviour
-    BOOST_CHECK (       test_case.empty());
-    BOOST_CHECK ( const_test_case.empty());
-
-    BOOST_CHECK (       test_case.size() == 0 );
-    BOOST_CHECK ( const_test_case.size() == 0 );
-
-    //  Assert requirements of TR1 6.2.2.4
-    BOOST_CHECK ( test_case.begin()  == test_case.end());
-    BOOST_CHECK ( test_case.cbegin() == test_case.cend());
-    BOOST_CHECK ( const_test_case.begin() == const_test_case.end());
-    BOOST_CHECK ( const_test_case.cbegin() == const_test_case.cend());
-
-    BOOST_CHECK ( test_case.begin() != const_test_case.begin() );
-    if( test_case.data() == const_test_case.data() ) {
-    //  Value of data is unspecified in TR1, so no requirement this test pass or fail
-    //  However, it must compile!
-    }
-
-    //  Check can safely use all iterator types with std algorithms
-    std::for_each( test_case.begin(), test_case.end(), BadValue< T > );
-    std::for_each( test_case.rbegin(), test_case.rend(), BadValue< T > );
-    std::for_each( test_case.cbegin(), test_case.cend(), BadValue< T > );
-    std::for_each( const_test_case.begin(), const_test_case.end(), BadValue< T > );
-    std::for_each( const_test_case.rbegin(), const_test_case.rend(), BadValue< T > );
-    std::for_each( const_test_case.cbegin(), const_test_case.cend(), BadValue< T > );
-
-    //  Check swap is well formed
-    std::swap( test_case, test_case );
-
-    //  Check assignment operator and overloads are well formed
-    test_case   =   const_test_case;
-
-    //  Confirm at() throws the std lib defined exception
-    try {
-        BadValue( test_case.at( 0 ));
-    } catch ( const std::out_of_range & ) {
-    }
-
-    try {
-        BadValue( const_test_case.at( 0 ) );
-    } catch ( const std::out_of_range & ) {
-    }
-}
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    RunTests< bool >();
-    RunTests< void * >();
-    RunTests< long double >();
-    RunTests< std::string >();
-}
-
diff --git a/third_party/boostorg/array/test/array1.cpp b/third_party/boostorg/array/test/array1.cpp
deleted file mode 100644
index 740968f..0000000
--- a/third_party/boostorg/array/test/array1.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/* simple example for using class array<>
- *
- * (C) Copyright Nicolai M. Josuttis 2001.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- * 
- * Changelog:
- * 20 Jan 2001 - Removed boolalpha use since stock GCC doesn't support it
- *               (David Abrahams)
- */
-
-#include <iostream>
-#include <boost/array.hpp>
-
-int main()
-{
-    // define special type name
-    typedef boost::array<float,6> Array;
-
-    // create and initialize an array
-    Array a = { { 42 } };
-
-    // access elements
-    for (unsigned i=1; i<a.size(); ++i) {
-        a[i] = a[i-1]+1;
-    }
-
-    // use some common STL container operations
-    std::cout << "size:     " << a.size() << std::endl;
-    std::cout << "empty:    " << (a.empty() ? "true" : "false") << std::endl;
-    std::cout << "max_size: " << a.max_size() << std::endl;
-    std::cout << "front:    " << a.front() << std::endl;
-    std::cout << "back:     " << a.back() << std::endl;
-    std::cout << "elems:    ";
-
-    // iterate through all elements
-    for (Array::const_iterator pos=a.begin(); pos<a.end(); ++pos) {
-        std::cout << *pos << ' ';
-    }
-    std::cout << std::endl;
-
-    // check copy constructor and assignment operator
-    Array b(a);
-    Array c;
-    c = a;
-    if (a==b && a==c) {
-        std::cout << "copy construction and copy assignment are OK"
-                  << std::endl;
-    }
-    else {
-        std::cout << "copy construction and copy assignment FAILED"
-                  << std::endl;
-    }
-
-    return 0;  // makes Visual-C++ compiler happy
-}
-
diff --git a/third_party/boostorg/array/test/array2.cpp b/third_party/boostorg/array/test/array2.cpp
deleted file mode 100644
index b33e0b5..0000000
--- a/third_party/boostorg/array/test/array2.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/* example for using class array<>
- * (C) Copyright Nicolai M. Josuttis 2001.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#ifndef _SCL_SECURE_NO_WARNINGS
-// Suppress warnings from the std lib:
-#  define _SCL_SECURE_NO_WARNINGS
-#endif
-
-#include <algorithm>
-#include <functional>
-#include <boost/array.hpp>
-#include "print.hpp"
-using namespace std;
-
-int main()
-{
-    // create and initialize array
-    boost::array<int,10> a = { { 1, 2, 3, 4, 5 } };
-
-    print_elements(a);
-
-    // modify elements directly
-    for (unsigned i=0; i<a.size(); ++i) {
-        ++a[i];
-    }
-    print_elements(a);
-
-    // change order using an STL algorithm
-    reverse(a.begin(),a.end());
-    print_elements(a);
-
-    // negate elements using STL framework
-    transform(a.begin(),a.end(),    // source
-              a.begin(),            // destination
-              negate<int>());       // operation
-    print_elements(a);
-
-    return 0;  // makes Visual-C++ compiler happy
-}
-
diff --git a/third_party/boostorg/array/test/array3.cpp b/third_party/boostorg/array/test/array3.cpp
deleted file mode 100644
index 29aacb1..0000000
--- a/third_party/boostorg/array/test/array3.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/* example for using class array<>
- * (C) Copyright Nicolai M. Josuttis 2001.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-
-template <class T>
-void print_elements (const T& x);
-
-int main()
-{
-    // create array of four seasons
-    boost::array<std::string,4> seasons = {
-        { "spring", "summer", "autumn", "winter" }
-    };
-
-    // copy and change order
-    boost::array<std::string,4> seasons_orig = seasons;
-    for (std::size_t i=seasons.size()-1; i>0; --i) {
-        std::swap(seasons.at(i),seasons.at((i+1)%seasons.size()));
-    }
-
-    std::cout << "one way:   ";
-    print_elements(seasons);
-
-    // try swap()
-    std::cout << "other way: ";
-    std::swap(seasons,seasons_orig);
-    print_elements(seasons);
-
-    // try reverse iterators
-    std::cout << "reverse:   ";
-    for (boost::array<std::string,4>::reverse_iterator pos
-           =seasons.rbegin(); pos<seasons.rend(); ++pos) {
-        std::cout << " " << *pos;
-    }
-
-    // try constant reverse iterators
-    std::cout << "reverse:   ";
-    for (boost::array<std::string,4>::const_reverse_iterator pos
-           =seasons.crbegin(); pos<seasons.crend(); ++pos) {
-        std::cout << " " << *pos;
-    }
-    std::cout << std::endl;
-
-    return 0;  // makes Visual-C++ compiler happy
-}
-
-template <class T>
-void print_elements (const T& x)
-{
-    for (unsigned i=0; i<x.size(); ++i) {
-        std::cout << " " << x[i];
-    }
-    std::cout << std::endl;
-}
-
diff --git a/third_party/boostorg/array/test/array4.cpp b/third_party/boostorg/array/test/array4.cpp
deleted file mode 100644
index 33b9819..0000000
--- a/third_party/boostorg/array/test/array4.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* example for using class array<>
- * (C) Copyright Nicolai M. Josuttis 2001.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <algorithm>
-#include <functional>
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-
-int main()
-{
-    // array of arrays of seasons
-    boost::array<boost::array<std::string,4>,2> seasons_i18n = {
-        { { { "spring", "summer", "autumn", "winter", } },
-          { { "Fruehling", "Sommer", "Herbst", "Winter" } }
-        }
-    };
-
-    // for any array of seasons print seasons
-    for (unsigned i=0; i<seasons_i18n.size(); ++i) {
-        boost::array<std::string,4> seasons = seasons_i18n[i];
-        for (unsigned j=0; j<seasons.size(); ++j) {
-            std::cout << seasons[j] << " ";
-        }
-        std::cout << std::endl;
-    }
-
-    // print first element of first array
-    std::cout << "first element of first array: "
-              << seasons_i18n[0][0] << std::endl;
-
-    // print last element of last array
-    std::cout << "last element of last array: "
-              << seasons_i18n[seasons_i18n.size()-1][seasons_i18n[0].size()-1]
-              << std::endl;
-
-    return 0;  // makes Visual-C++ compiler happy
-}
-
diff --git a/third_party/boostorg/array/test/array5.cpp b/third_party/boostorg/array/test/array5.cpp
deleted file mode 100644
index 23156b9..0000000
--- a/third_party/boostorg/array/test/array5.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/* simple example for using class array<>
- * (C) Copyright Nicolai M. Josuttis 2001.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <iostream>
-#include <boost/array.hpp>
-
-template <typename T>
-void test_static_size (const T& cont)
-{
-    int tmp[T::static_size];
-    for (unsigned i=0; i<T::static_size; ++i) {
-        tmp[i] = int(cont[i]);
-    }
-    for (unsigned j=0; j<T::static_size; ++j) {
-        std::cout << tmp[j] << ' ';
-    }
-    std::cout << std::endl;
-}
-
-int main()
-{
-    // define special type name
-    typedef boost::array<float,6> Array;
-
-    // create and initialize an array
-    const Array a = { { 42.42f } };
-
-    // use some common STL container operations
-    std::cout << "static_size: " << a.size() << std::endl;
-    std::cout << "size:        " << a.size() << std::endl;
-    // Can't use std::boolalpha because it isn't portable
-    std::cout << "empty:       " << (a.empty()? "true" : "false") << std::endl;
-    std::cout << "max_size:    " << a.max_size() << std::endl;
-    std::cout << "front:       " << a.front() << std::endl;
-    std::cout << "back:        " << a.back() << std::endl;
-    std::cout << "[0]:         " << a[0] << std::endl;
-    std::cout << "elems:       ";
-
-    // iterate through all elements
-    for (Array::const_iterator pos=a.begin(); pos<a.end(); ++pos) {
-        std::cout << *pos << ' ';
-    }
-    std::cout << std::endl;
-    test_static_size(a);
-
-    // check copy constructor and assignment operator
-    Array b(a);
-    Array c;
-    c = a;
-    if (a==b && a==c) {
-        std::cout << "copy construction and copy assignment are OK"
-                  << std::endl;
-    }
-    else {
-        std::cout << "copy construction and copy assignment are BROKEN"
-                  << std::endl;
-    }
-
-    typedef boost::array<double,6> DArray;
-    typedef boost::array<int,6> IArray;
-    IArray ia = { { 1, 2, 3, 4, 5, 6 } } ; // extra braces silence GCC warning
-    DArray da;
-    da = ia;
-    da.assign(42);
-
-    return 0;  // makes Visual-C++ compiler happy
-}
-
diff --git a/third_party/boostorg/array/test/array6.cpp b/third_party/boostorg/array/test/array6.cpp
deleted file mode 100644
index 3d737fd..0000000
--- a/third_party/boostorg/array/test/array6.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/* tests for using class array<> specialization for size 0
- * (C) Copyright Alisdair Meredith 2006.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-#include <algorithm>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-    template< class T >
-    void    RunTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        arr &aRef = get_c_array ( test_case );
-        BOOST_CHECK ( &*test_case.begin () == &aRef[0] );
-        
-        const arr &caRef = get_c_array ( test_case );
-        typename test_type::const_iterator iter = test_case.begin ();
-        BOOST_CHECK ( &*iter == &caRef[0] );
-    }
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    RunTests< bool >();
-    RunTests< void * >();
-    RunTests< long double >();
-    RunTests< std::string >();
-}
-
diff --git a/third_party/boostorg/array/test/array7.cpp b/third_party/boostorg/array/test/array7.cpp
deleted file mode 100644
index de2ebe0..0000000
--- a/third_party/boostorg/array/test/array7.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/* tests using std::get on boost::array
- * (C) Copyright Marshall Clow 2012
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-#include <algorithm>
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-#include <array>
-#endif
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-
-    #ifndef BOOST_NO_CXX11_HDR_ARRAY
-    template< class T >
-    void    RunStdTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        T &aRef = std::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.begin () == &aRef );
-        
-        const T &caRef = std::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.cbegin () == &caRef );
-    }
-    #endif
-
-    template< class T >
-    void    RunBoostTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        T &aRef = boost::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.begin () == &aRef );
-        
-        const T &caRef = boost::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.cbegin () == &caRef );
-    }
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    RunBoostTests< bool >();
-    RunBoostTests< void * >();
-    RunBoostTests< long double >();
-    RunBoostTests< std::string >();
-
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-    RunStdTests< bool >();
-    RunStdTests< void * >();
-    RunStdTests< long double >();
-    RunStdTests< std::string >();
-#endif
-}
-
diff --git a/third_party/boostorg/array/test/array_constexpr.cpp b/third_party/boostorg/array/test/array_constexpr.cpp
deleted file mode 100644
index 927bdec..0000000
--- a/third_party/boostorg/array/test/array_constexpr.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/* tests using constexpr on boost::array
- * (C) Copyright Marshall Clow 2012
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-#include <algorithm>
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-#include <array>
-#endif
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-#ifndef BOOST_NO_CXX11_CONSTEXPR
-constexpr boost::array<int, 10> arr  {{ 0,1,2,3,4,5,6,7,8,9 }};
-constexpr std::array<int, 10> arr_std {{ 0,1,2,3,4,5,6,7,8,9 }};
-
-template <typename T>
-void sink ( T t ) {}
-
-template <typename T, size_t N>
-void sink ( boost::array<T,N> &arr ) {}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-//    constexpr int two = arr_std.at (2);
-    constexpr int three = arr.at (3);
-    int whatever [ arr.at(4) ];
-    (void)three;
-    (void) whatever;
-}
-
-#else   // no constexpr means no constexpr tests!
-BOOST_AUTO_TEST_CASE( test_main )
-{
-}
-#endif
diff --git a/third_party/boostorg/array/test/array_getfail1.cpp b/third_party/boostorg/array/test/array_getfail1.cpp
deleted file mode 100644
index 21ae62f..0000000
--- a/third_party/boostorg/array/test/array_getfail1.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/* tests using std::get on boost::array
- * (C) Copyright Marshall Clow 2012
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <boost/array.hpp>
-#include <boost/static_assert.hpp>
-
-
-#include <string>
-#include <iostream>
-#include <algorithm>
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-#include <array>
-#endif
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-
-    #ifndef BOOST_NO_CXX11_HDR_ARRAY
-    template< class T >
-    void    RunStdTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        T &aRef = std::get<5> ( test_case );    // should fail to compile
-        BOOST_CHECK ( &*test_case.begin () == &aRef );
-    }
-    #endif
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-    RunStdTests< bool >();
-    RunStdTests< void * >();
-    RunStdTests< long double >();
-    RunStdTests< std::string >();
-#else
-    BOOST_STATIC_ASSERT ( false );  // fail on C++03 systems.
-#endif
-}
diff --git a/third_party/boostorg/array/test/array_getfail2.cpp b/third_party/boostorg/array/test/array_getfail2.cpp
deleted file mode 100644
index e2277b0..0000000
--- a/third_party/boostorg/array/test/array_getfail2.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/* tests using std::get on boost::array
- * (C) Copyright Marshall Clow 2012
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-#include <algorithm>
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-#include <array>
-#endif
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-
-    #ifndef BOOST_NO_CXX11_HDR_ARRAY
-    template< class T >
-    void    RunStdTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        T &aRef = std::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.begin () == &aRef );
-        
-        const T &caRef = std::get<0> ( test_case );
-        BOOST_CHECK ( &*test_case.cbegin () == &caRef );
-    }
-    #endif
-
-    template< class T >
-    void    RunBoostTests()
-    {
-        typedef boost::array< T, 5 >    test_type;
-        typedef T arr[5];
-        test_type           test_case; //   =   { 1, 1, 2, 3, 5 };
-    
-        T &aRef = boost::get<5> ( test_case );
-        BOOST_CHECK ( &*test_case.begin () == &aRef );
-    }
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    RunBoostTests< bool >();
-    RunBoostTests< void * >();
-    RunBoostTests< long double >();
-    RunBoostTests< std::string >();
-
-#ifndef BOOST_NO_CXX11_HDR_ARRAY
-    RunStdTests< bool >();
-    RunStdTests< void * >();
-    RunStdTests< long double >();
-    RunStdTests< std::string >();
-#endif
-}
-
diff --git a/third_party/boostorg/array/test/array_hash.cpp b/third_party/boostorg/array/test/array_hash.cpp
deleted file mode 100644
index a83eead..0000000
--- a/third_party/boostorg/array/test/array_hash.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* tests for using boost::hash with boost::array
- * (C) Copyright Marshall Clow 2012
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-
-#include <string>
-#include <iostream>
-#include <boost/array.hpp>
-#include <algorithm>
-#include <boost/functional/hash.hpp>
-
-#define BOOST_TEST_MAIN
-#include <boost/test/unit_test.hpp>
-
-namespace {
-
-    template< class T >
-    void    RunTests()
-    {
-    //    std::size_t hash0 = boost::hash<boost::array<T,0> > () ( boost::array<T, 0> ());
-    //    std::size_t hash1 = boost::hash<boost::array<T,1> > () ( boost::array<T, 1> ());
-    
-        typedef boost::array< T, 5 >    barr;
-        typedef T arr[5];
-        barr           test_barr =   {{ 1, 1, 2, 3, 5 }};
-        arr            test_arr  =    { 1, 1, 2, 3, 5 };
-    
-        std::size_t bhash = boost::hash<barr> () ( test_barr );
-        std::size_t ahash = boost::hash<arr>  () ( test_arr );
-        BOOST_CHECK ( ahash == bhash );
-    }
-
-}
-
-BOOST_AUTO_TEST_CASE( test_main )
-{
-    RunTests< int >();
-    RunTests< long >();
-    RunTests< long double >();
-}
-
diff --git a/third_party/boostorg/array/test/print.hpp b/third_party/boostorg/array/test/print.hpp
deleted file mode 100644
index 6d68e8e..0000000
--- a/third_party/boostorg/array/test/print.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/* The following code example is taken from the book
- * "The C++ Standard Library - A Tutorial and Reference"
- * by Nicolai M. Josuttis, Addison-Wesley, 1999
- *
- * (C) Copyright Nicolai M. Josuttis 1999.
- * Distributed under the Boost Software License, Version 1.0. (See
- * accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-#include <iostream>
-
-/* print_elements()
- * - prints optional C-string optcstr followed by
- * - all elements of the collection coll
- * - separated by spaces
- */
-template <class T>
-inline void print_elements (const T& coll, const char* optcstr="")
-{
-    typename T::const_iterator pos;
-
-    std::cout << optcstr;
-    for (pos=coll.begin(); pos!=coll.end(); ++pos) {
-        std::cout << *pos << ' ';
-    }
-    std::cout << std::endl;
-}
diff --git a/third_party/boostorg/assert/.gitattributes b/third_party/boostorg/assert/.gitattributes
deleted file mode 100644
index 3e84d7c..0000000
--- a/third_party/boostorg/assert/.gitattributes
+++ /dev/null
@@ -1,96 +0,0 @@
-* text=auto !eol svneol=native#text/plain
-*.gitattributes text svneol=native#text/plain
-
-# Scriptish formats
-*.bat        text svneol=native#text/plain
-*.bsh        text svneol=native#text/x-beanshell
-*.cgi        text svneol=native#text/plain
-*.cmd        text svneol=native#text/plain
-*.js         text svneol=native#text/javascript
-*.php        text svneol=native#text/x-php
-*.pl         text svneol=native#text/x-perl
-*.pm         text svneol=native#text/x-perl
-*.py         text svneol=native#text/x-python
-*.sh         eol=lf svneol=LF#text/x-sh
-configure    eol=lf svneol=LF#text/x-sh
-
-# Image formats
-*.bmp        binary svneol=unset#image/bmp
-*.gif        binary svneol=unset#image/gif
-*.ico        binary svneol=unset#image/ico
-*.jpeg       binary svneol=unset#image/jpeg
-*.jpg        binary svneol=unset#image/jpeg
-*.png        binary svneol=unset#image/png
-*.tif        binary svneol=unset#image/tiff
-*.tiff       binary svneol=unset#image/tiff
-*.svg        text svneol=native#image/svg%2Bxml
-
-# Data formats
-*.pdf        binary svneol=unset#application/pdf
-*.avi        binary svneol=unset#video/avi
-*.doc        binary svneol=unset#application/msword
-*.dsp        text svneol=crlf#text/plain
-*.dsw        text svneol=crlf#text/plain
-*.eps        binary svneol=unset#application/postscript
-*.gz         binary svneol=unset#application/gzip
-*.mov        binary svneol=unset#video/quicktime
-*.mp3        binary svneol=unset#audio/mpeg
-*.ppt        binary svneol=unset#application/vnd.ms-powerpoint
-*.ps         binary svneol=unset#application/postscript
-*.psd        binary svneol=unset#application/photoshop
-*.rdf        binary svneol=unset#text/rdf
-*.rss        text svneol=unset#text/xml
-*.rtf        binary svneol=unset#text/rtf
-*.sln        text svneol=native#text/plain
-*.swf        binary svneol=unset#application/x-shockwave-flash
-*.tgz        binary svneol=unset#application/gzip
-*.vcproj     text svneol=native#text/xml
-*.vcxproj    text svneol=native#text/xml
-*.vsprops    text svneol=native#text/xml
-*.wav        binary svneol=unset#audio/wav
-*.xls        binary svneol=unset#application/vnd.ms-excel
-*.zip        binary svneol=unset#application/zip
-
-# Text formats
-.htaccess    text svneol=native#text/plain
-*.bbk        text svneol=native#text/xml
-*.cmake      text svneol=native#text/plain
-*.css        text svneol=native#text/css
-*.dtd        text svneol=native#text/xml
-*.htm        text svneol=native#text/html
-*.html       text svneol=native#text/html
-*.ini        text svneol=native#text/plain
-*.log        text svneol=native#text/plain
-*.mak        text svneol=native#text/plain
-*.qbk        text svneol=native#text/plain
-*.rst        text svneol=native#text/plain
-*.sql        text svneol=native#text/x-sql
-*.txt        text svneol=native#text/plain
-*.xhtml      text svneol=native#text/xhtml%2Bxml
-*.xml        text svneol=native#text/xml
-*.xsd        text svneol=native#text/xml
-*.xsl        text svneol=native#text/xml
-*.xslt       text svneol=native#text/xml
-*.xul        text svneol=native#text/xul
-*.yml        text svneol=native#text/plain
-boost-no-inspect text svneol=native#text/plain
-CHANGES      text svneol=native#text/plain
-COPYING      text svneol=native#text/plain
-INSTALL      text svneol=native#text/plain
-Jamfile      text svneol=native#text/plain
-Jamroot      text svneol=native#text/plain
-Jamfile.v2   text svneol=native#text/plain
-Jamrules     text svneol=native#text/plain
-Makefile*    text svneol=native#text/plain
-README       text svneol=native#text/plain
-TODO         text svneol=native#text/plain
-
-# Code formats
-*.c          text svneol=native#text/plain
-*.cpp        text svneol=native#text/plain
-*.h          text svneol=native#text/plain
-*.hpp        text svneol=native#text/plain
-*.ipp        text svneol=native#text/plain
-*.tpp        text svneol=native#text/plain
-*.jam        text svneol=native#text/plain
-*.java       text svneol=native#text/plain
diff --git a/third_party/boostorg/assert/.travis.yml b/third_party/boostorg/assert/.travis.yml
deleted file mode 100644
index 2b1e13f..0000000
--- a/third_party/boostorg/assert/.travis.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2016, 2017 Peter Dimov
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-language: cpp
-
-sudo: false
-
-branches:
-  only:
-    - master
-    - develop
-    - /feature\/.*/
-
-env:
-  matrix:
-    - BOGUS_JOB=true
-
-matrix:
-
-  exclude:
-    - env: BOGUS_JOB=true
-
-  include:
-    - os: linux
-      compiler: g++
-      env: TOOLSET=gcc CXXSTD=03,11
-
-    - os: linux
-      compiler: clang++
-      env: TOOLSET=clang CXXSTD=03,11,14,1z
-
-    - os: osx
-      compiler: clang++
-      env: TOOLSET=clang CXXSTD=03,11,14,1z
-
-install:
-  - BOOST_BRANCH=develop && [ "$TRAVIS_BRANCH" == "master" ] && BOOST_BRANCH=master || true
-  - cd ..
-  - git clone -b $BOOST_BRANCH https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - git submodule update --init tools/build
-  - git submodule update --init libs/config
-  - git submodule update --init libs/core
-  - cp -r $TRAVIS_BUILD_DIR/* libs/assert
-  - ./bootstrap.sh
-  - ./b2 headers
-
-script:
-  - ./b2 libs/assert/test toolset=$TOOLSET cxxstd=$CXXSTD
-
-notifications:
-  email:
-    on_success: always
diff --git a/third_party/boostorg/assert/BUILD b/third_party/boostorg/assert/BUILD
deleted file mode 100644
index db94e30..0000000
--- a/third_party/boostorg/assert/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-licenses(["notice"])  # boost
-
-cc_library(
-    name = "assert",
-    hdrs = glob(["include/**"]),
-    includes = ["include"],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/boostorg/assert/README.md b/third_party/boostorg/assert/README.md
deleted file mode 100644
index 645b7bb..0000000
--- a/third_party/boostorg/assert/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Boost.Assert
-
-The Boost.Assert library, part of the collection of [Boost C++ Libraries](http://github.com/boostorg),
-provides several configurable diagnostic macros similar in behavior and purpose to the standard macro
-`assert` from `<cassert>`.
-
-## Documentation
-
-See the documentation of [BOOST_ASSERT](doc/assert.adoc) and
-[BOOST_CURRENT_FUNCTION](doc/current_function.adoc) for more information.
-
-## License
-
-Distributed under the [Boost Software License, Version 1.0](http://boost.org/LICENSE_1_0.txt).
diff --git a/third_party/boostorg/assert/appveyor.yml b/third_party/boostorg/assert/appveyor.yml
deleted file mode 100644
index 5a75678..0000000
--- a/third_party/boostorg/assert/appveyor.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2016 Peter Dimov
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-version: 1.0.{build}-{branch}
-
-shallow_clone: true
-
-branches:
-  only:
-    - master
-    - develop
-
-install:
-  - cd ..
-  - git clone -b %APPVEYOR_REPO_BRANCH% https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - git submodule init libs/config
-  - git submodule init libs/core
-  - git submodule init tools/build
-  - git submodule update
-  - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\assert
-  - bootstrap
-  - b2 headers
-
-build: off
-
-test_script:
-  - b2 libs/assert/test toolset=msvc-9.0,msvc-10.0,msvc-11.0,msvc-14.0
diff --git a/third_party/boostorg/assert/doc/.gitignore b/third_party/boostorg/assert/doc/.gitignore
deleted file mode 100644
index 0972e2d..0000000
--- a/third_party/boostorg/assert/doc/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/html/
-/pdf/
diff --git a/third_party/boostorg/assert/doc/Jamfile b/third_party/boostorg/assert/doc/Jamfile
deleted file mode 100644
index d4a4802..0000000
--- a/third_party/boostorg/assert/doc/Jamfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2017 Peter Dimov
-# 
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-project doc/assert ;
-
-import asciidoctor ;
-# import boostbook ;
-
-html assert.html : index.adoc ;
-
-# docbook assert.docbook : index.adoc ;
-# boostbook assert.html : assert.docbook : <format>onehtml ;
-
-install html_ : assert.html : <location>html ;
-
-pdf assert.pdf : index.adoc ;
-explicit assert.pdf ;
-
-install pdf_ : assert.pdf : <location>pdf ;
-explicit pdf_ ;
-
-###############################################################################
-alias boostdoc ;
-explicit boostdoc ;
-alias boostrelease : html_ ;
-explicit boostrelease ;
diff --git a/third_party/boostorg/assert/doc/assert.adoc b/third_party/boostorg/assert/doc/assert.adoc
deleted file mode 100644
index 5a7347c..0000000
--- a/third_party/boostorg/assert/doc/assert.adoc
+++ /dev/null
@@ -1,165 +0,0 @@
-////
-Copyright 2002, 2007, 2014, 2017 Peter Dimov
-Copyright 2011 Beman Dawes
-Copyright 2015 Ion Gaztañaga
-
-Distributed under the Boost Software License, Version 1.0.
-
-See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt
-////
-
-# Assertion Macros, <boost/assert.hpp>
-:toc:
-:toc-title:
-:idprefix:
-
-## BOOST_ASSERT
-
-The header `<boost/assert.hpp>` defines the macro `BOOST_ASSERT`,
-which is similar to the standard `assert` macro defined in `<cassert>`.
-The macro is intended to be used in both Boost libraries and user
-code.
-
-* By default, `BOOST_ASSERT(expr)` expands to `assert(expr)`.
-
-* If the macro `BOOST_DISABLE_ASSERTS` is defined when `<boost/assert.hpp>`
-  is included, `BOOST_ASSERT(expr)` expands to `((void)0)`, regardless of whether
-  the macro `NDEBUG` is defined. This allows users to selectively disable `BOOST_ASSERT` without 
-  affecting the definition of the standard `assert`.
-
-* If the macro `BOOST_ENABLE_ASSERT_HANDLER` is defined when `<boost/assert.hpp>`
-is included, `BOOST_ASSERT(expr)` expands to
-+
-```
-(BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr,
-    BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-```
-+
-That is, it evaluates `expr` and if it's false, calls
-`::boost::assertion_failed(#expr, <<current_function.adoc#boost_current_function,BOOST_CURRENT_FUNCTION>>, \\__FILE__, \\__LINE__)`.
-This is true regardless of whether `NDEBUG` is defined.
-+
-`boost::assertion_failed` is declared in `<boost/assert.hpp>` as
-+
-```
-namespace boost
-{
-    void assertion_failed(char const * expr, char const * function,
-        char const * file, long line);
-}
-```
-+
-but it is never defined. The user is expected to supply an appropriate definition.
-
-* If the macro `BOOST_ENABLE_ASSERT_DEBUG_HANDLER` is defined when `<boost/assert.hpp>`
-is included, `BOOST_ASSERT(expr)` expands to `((void)0)` when `NDEBUG` is
-defined. Otherwise the behavior is as if `BOOST_ENABLE_ASSERT_HANDLER` has been defined.
-
-As is the case with `<cassert>`, `<boost/assert.hpp>`
-can be included multiple times in a single translation unit. `BOOST_ASSERT`
-will be redefined each time as specified above.
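As an illustration of the handler mechanism described above, the following sketch shows one possible user-supplied definition of `boost::assertion_failed` and `boost::assertion_failed_msg` (the log-and-abort policy and the output format are editorial choices, not something the library prescribes):

```cpp
#define BOOST_ENABLE_ASSERT_HANDLER   // must be defined before the include
#include <boost/assert.hpp>
#include <cstdio>
#include <cstdlib>

namespace boost
{
    // Called by BOOST_ASSERT on failure; never defined by the library itself.
    void assertion_failed(char const * expr, char const * function,
                          char const * file, long line)
    {
        std::fprintf(stderr, "Assertion '%s' failed in %s\n  %s:%ld\n",
                     expr, function, file, line);
        std::abort();
    }

    // Called by BOOST_ASSERT_MSG on failure.
    void assertion_failed_msg(char const * expr, char const * msg,
                              char const * function, char const * file, long line)
    {
        std::fprintf(stderr, "Assertion '%s' failed: %s\n  in %s\n  %s:%ld\n",
                     expr, msg, function, file, line);
        std::abort();
    }
}

int main()
{
    int answer = 42;
    BOOST_ASSERT(answer == 42);                          // passes silently
    BOOST_ASSERT_MSG(answer != 0, "answer must be non-zero");
    return 0;
}
```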
-
-## BOOST_ASSERT_MSG
-
-The macro `BOOST_ASSERT_MSG` is similar to `BOOST_ASSERT`, but it takes an additional argument,
-a character literal, supplying an error message.
-
-* By default, `BOOST_ASSERT_MSG(expr,msg)` expands to `assert\((expr)&&(msg))`.
-
-* If the macro `BOOST_DISABLE_ASSERTS` is defined when `<boost/assert.hpp>`
-is included, `BOOST_ASSERT_MSG(expr,msg)` expands to `((void)0)`, regardless of whether
-the macro `NDEBUG` is defined.
-
-* If the macro `BOOST_ENABLE_ASSERT_HANDLER` is defined when `<boost/assert.hpp>`
-is included, `BOOST_ASSERT_MSG(expr,msg)` expands to
-+
-```
-(BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed_msg(#expr,
-    msg, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-```
-+
-This is true regardless of whether `NDEBUG` is defined.
-+
-`boost::assertion_failed_msg` is declared in `<boost/assert.hpp>` as
-+
-```
-namespace boost
-{
-    void assertion_failed_msg(char const * expr, char const * msg,
-        char const * function, char const * file, long line);
-}
-```
-+
-but it is never defined. The user is expected to supply an appropriate definition.
-
-* If the macro `BOOST_ENABLE_ASSERT_DEBUG_HANDLER` is defined when `<boost/assert.hpp>`
-is included, `BOOST_ASSERT_MSG(expr,msg)` expands to `((void)0)` when `NDEBUG` is
-defined. Otherwise the behavior is as if `BOOST_ENABLE_ASSERT_HANDLER` has been defined.
-
-As is the case with `<cassert>`, `<boost/assert.hpp>`
-can be included multiple times in a single translation unit. `BOOST_ASSERT_MSG`
-will be redefined each time as specified above.
-
-## BOOST_VERIFY
-
-The macro `BOOST_VERIFY` has the same behavior as `BOOST_ASSERT`, except that 
-the expression that is passed to `BOOST_VERIFY` is always 
-evaluated. This is useful when the asserted expression has desirable side 
-effects; it can also help suppress warnings about unused variables when the 
-only use of the variable is inside an assertion.
-
-* If the macro `BOOST_DISABLE_ASSERTS` is defined when `<boost/assert.hpp>`
-  is included, `BOOST_VERIFY(expr)` expands to `\((void)(expr))`.
-
-* If the macro `BOOST_ENABLE_ASSERT_HANDLER` is defined when `<boost/assert.hpp>`
-  is included, `BOOST_VERIFY(expr)` expands to `BOOST_ASSERT(expr)`.
-
-* Otherwise, `BOOST_VERIFY(expr)` expands to `\((void)(expr))` when `NDEBUG` is
-  defined, to `BOOST_ASSERT(expr)` when it's not.
-
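-The side-effect use case, as a minimal sketch; `consume` is a hypothetical
-helper introduced only for this example:
-
-```
-#include <boost/assert.hpp>
-
-int consume(int & counter)
-{
-    return ++counter; // a side effect that must also happen in release builds
-}
-
-int main()
-{
-    int counter = 0;
-
-    // The argument is evaluated even when NDEBUG is defined, so the
-    // increment inside consume() always takes place.
-    BOOST_VERIFY( consume(counter) > 0 );
-
-    return counter == 1? 0: 1;
-}
-```
-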
-## BOOST_VERIFY_MSG
-
-The macro `BOOST_VERIFY_MSG` is similar to `BOOST_VERIFY`, with an additional parameter, an error message.
-
-* If the macro `BOOST_DISABLE_ASSERTS` is defined when `<boost/assert.hpp>`
-  is included, `BOOST_VERIFY_MSG(expr,msg)` expands to `\((void)(expr))`.
-
-* If the macro `BOOST_ENABLE_ASSERT_HANDLER` is defined when `<boost/assert.hpp>`
-  is included, `BOOST_VERIFY_MSG(expr,msg)` expands to `BOOST_ASSERT_MSG(expr,msg)`.
-
-* Otherwise, `BOOST_VERIFY_MSG(expr,msg)` expands to `\((void)(expr))` when `NDEBUG` is
-  defined, to `BOOST_ASSERT_MSG(expr,msg)` when it's not.
-
-## BOOST_ASSERT_IS_VOID
-
-The macro `BOOST_ASSERT_IS_VOID` is defined when `BOOST_ASSERT` and `BOOST_ASSERT_MSG` are expanded to `((void)0)`.
-Its purpose is to avoid compiling and potentially running code that is only intended to prepare data to be used in the assertion.
-
-```
-void MyContainer::erase(iterator i)
-{
-// Some sanity checks, data must be ordered
-#ifndef BOOST_ASSERT_IS_VOID
-
-    if(i != c.begin()) {
-        iterator prev = i;
-        --prev;
-        BOOST_ASSERT(*prev < *i);
-    }
-    else if(i != c.end()) {
-        iterator next = i;
-        ++next;
-        BOOST_ASSERT(*i < *next);
-    }
-
-#endif
-
-    this->erase_impl(i);
-}
-```
-
-* By default, `BOOST_ASSERT_IS_VOID` is defined if `NDEBUG` is defined.
-* If the macro `BOOST_DISABLE_ASSERTS` is defined, `BOOST_ASSERT_IS_VOID` is always defined.
-* If the macro `BOOST_ENABLE_ASSERT_HANDLER` is defined, `BOOST_ASSERT_IS_VOID` is never defined.
-* If the macro `BOOST_ENABLE_ASSERT_DEBUG_HANDLER` is defined, then `BOOST_ASSERT_IS_VOID` is defined when `NDEBUG` is defined.
diff --git a/third_party/boostorg/assert/doc/current_function.adoc b/third_party/boostorg/assert/doc/current_function.adoc
deleted file mode 100644
index 9235c1a..0000000
--- a/third_party/boostorg/assert/doc/current_function.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-////
-Copyright 2002, 2017 Peter Dimov
-
-Distributed under the Boost Software License, Version 1.0.
-
-See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt
-////
-
-# Current Function Macro, <boost/current_function.hpp>
-:toc:
-:toc-title:
-:idprefix:
-
-## BOOST_CURRENT_FUNCTION
-
-The header `<boost/current_function.hpp>` defines a single macro, `BOOST_CURRENT_FUNCTION`,
-similar to the C99 predefined identifier `\\__func__`.
-
-`BOOST_CURRENT_FUNCTION` expands to a string literal containing 
-the (fully qualified, if possible) name of the enclosing function. If there is
-no enclosing function, the behavior is unspecified.
-
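-For example, the following small program (illustrative only) prints the name
-of its enclosing function; the exact text produced is compiler-specific:
-
-```
-#include <boost/current_function.hpp>
-
-#include <cstdio>
-
-void trace_me()
-{
-    // Typically prints something like "entering void trace_me()".
-    std::printf("entering %s\n", BOOST_CURRENT_FUNCTION);
-}
-
-int main()
-{
-    trace_me();
-}
-```
-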
-Some compilers do not provide a way to obtain the name of the current enclosing
-function. On such compilers, or when the macro `BOOST_DISABLE_CURRENT_FUNCTION`
-is defined, `BOOST_CURRENT_FUNCTION` expands to `"(unknown)"`.
-
-`BOOST_DISABLE_CURRENT_FUNCTION` addresses a use case in which the programmer
-wishes to eliminate the string literals produced by `BOOST_CURRENT_FUNCTION` from
-the final executable for security reasons.
diff --git a/third_party/boostorg/assert/doc/index-docinfo-footer.html b/third_party/boostorg/assert/doc/index-docinfo-footer.html
deleted file mode 100644
index b27346f..0000000
--- a/third_party/boostorg/assert/doc/index-docinfo-footer.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<style>
-
-*:not(pre)>code { background: none; color: #600000; }
-
-</style>
diff --git a/third_party/boostorg/assert/doc/index.adoc b/third_party/boostorg/assert/doc/index.adoc
deleted file mode 100644
index 6e5026e..0000000
--- a/third_party/boostorg/assert/doc/index.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-////
-Copyright 2017 Peter Dimov
-
-Distributed under the Boost Software License, Version 1.0.
-
-See accompanying file LICENSE_1_0.txt or copy at
-http://www.boost.org/LICENSE_1_0.txt
-////
-
-# Boost.Assert
-Peter Dimov
-:toc: left
-:idprefix:
-:docinfo: private-footer
-
-The Boost.Assert library provides several configurable diagnostic macros
-similar in behavior and purpose to the standard macro `assert` from `<cassert>`.
-
-:leveloffset: +1
-
-include::assert.adoc[]
-
-include::current_function.adoc[]
-
-:leveloffset: -1
-
-[appendix]
-## Copyright and License
-
-This documentation is
-
-* Copyright 2002, 2007, 2014, 2017 Peter Dimov
-* Copyright 2011 Beman Dawes
-* Copyright 2015 Ion Gaztañaga
-* Distributed under the http://www.boost.org/LICENSE_1_0.txt[Boost Software License, Version 1.0].
diff --git a/third_party/boostorg/assert/include/boost/assert.hpp b/third_party/boostorg/assert/include/boost/assert.hpp
deleted file mode 100644
index 9650d7a..0000000
--- a/third_party/boostorg/assert/include/boost/assert.hpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-//  boost/assert.hpp - BOOST_ASSERT(expr)
-//                     BOOST_ASSERT_MSG(expr, msg)
-//                     BOOST_VERIFY(expr)
-//                     BOOST_VERIFY_MSG(expr, msg)
-//                     BOOST_ASSERT_IS_VOID
-//
-//  Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.
-//  Copyright (c) 2007, 2014 Peter Dimov
-//  Copyright (c) Beman Dawes 2011
-//  Copyright (c) 2015 Ion Gaztanaga
-//
-//  Distributed under the Boost Software License, Version 1.0.
-//  See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt
-//
-//  Note: There are no include guards. This is intentional.
-//
-//  See http://www.boost.org/libs/assert/assert.html for documentation.
-//
-
-//
-// Stop inspect complaining about use of 'assert':
-//
-// boostinspect:naassert_macro
-//
-
-//
-// BOOST_ASSERT, BOOST_ASSERT_MSG, BOOST_ASSERT_IS_VOID
-//
-
-#undef BOOST_ASSERT
-#undef BOOST_ASSERT_MSG
-#undef BOOST_ASSERT_IS_VOID
-
-#if defined(BOOST_DISABLE_ASSERTS) || ( defined(BOOST_ENABLE_ASSERT_DEBUG_HANDLER) && defined(NDEBUG) )
-
-# define BOOST_ASSERT(expr) ((void)0)
-# define BOOST_ASSERT_MSG(expr, msg) ((void)0)
-# define BOOST_ASSERT_IS_VOID
-
-#elif defined(BOOST_ENABLE_ASSERT_HANDLER) || ( defined(BOOST_ENABLE_ASSERT_DEBUG_HANDLER) && !defined(NDEBUG) )
-
-#include <boost/config.hpp> // for BOOST_LIKELY
-#include <boost/current_function.hpp>
-
-namespace boost
-{
-    void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined
-    void assertion_failed_msg(char const * expr, char const * msg, char const * function, char const * file, long line); // user defined
-} // namespace boost
-
-#define BOOST_ASSERT(expr) (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-#define BOOST_ASSERT_MSG(expr, msg) (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed_msg(#expr, msg, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#else
-
-# include <assert.h> // .h to support old libraries w/o <cassert> - effect is the same
-
-# define BOOST_ASSERT(expr) assert(expr)
-# define BOOST_ASSERT_MSG(expr, msg) assert((expr)&&(msg))
-#if defined(NDEBUG)
-# define BOOST_ASSERT_IS_VOID
-#endif
-
-#endif
-
-//
-// BOOST_VERIFY, BOOST_VERIFY_MSG
-//
-
-#undef BOOST_VERIFY
-#undef BOOST_VERIFY_MSG
-
-#if defined(BOOST_DISABLE_ASSERTS) || ( !defined(BOOST_ENABLE_ASSERT_HANDLER) && defined(NDEBUG) )
-
-# define BOOST_VERIFY(expr) ((void)(expr))
-# define BOOST_VERIFY_MSG(expr, msg) ((void)(expr))
-
-#else
-
-# define BOOST_VERIFY(expr) BOOST_ASSERT(expr)
-# define BOOST_VERIFY_MSG(expr, msg) BOOST_ASSERT_MSG(expr,msg)
-
-#endif
diff --git a/third_party/boostorg/assert/include/boost/current_function.hpp b/third_party/boostorg/assert/include/boost/current_function.hpp
deleted file mode 100644
index 86955cb..0000000
--- a/third_party/boostorg/assert/include/boost/current_function.hpp
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef BOOST_CURRENT_FUNCTION_HPP_INCLUDED
-#define BOOST_CURRENT_FUNCTION_HPP_INCLUDED
-
-// MS compatible compilers support #pragma once
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1020)
-# pragma once
-#endif
-
-//
-//  boost/current_function.hpp - BOOST_CURRENT_FUNCTION
-//
-//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
-//
-//  Distributed under the Boost Software License, Version 1.0.
-//  See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt
-//
-//  http://www.boost.org/libs/assert/current_function.html
-//
-
-namespace boost
-{
-
-namespace detail
-{
-
-inline void current_function_helper()
-{
-
-#if defined( BOOST_DISABLE_CURRENT_FUNCTION )
-
-# define BOOST_CURRENT_FUNCTION "(unknown)"
-
-#elif defined(__GNUC__) || (defined(__MWERKS__) && (__MWERKS__ >= 0x3000)) || (defined(__ICC) && (__ICC >= 600)) || defined(__ghs__)
-
-# define BOOST_CURRENT_FUNCTION __PRETTY_FUNCTION__
-
-#elif defined(__DMC__) && (__DMC__ >= 0x810)
-
-# define BOOST_CURRENT_FUNCTION __PRETTY_FUNCTION__
-
-#elif defined(__FUNCSIG__)
-
-# define BOOST_CURRENT_FUNCTION __FUNCSIG__
-
-#elif (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 600)) || (defined(__IBMCPP__) && (__IBMCPP__ >= 500))
-
-# define BOOST_CURRENT_FUNCTION __FUNCTION__
-
-#elif defined(__BORLANDC__) && (__BORLANDC__ >= 0x550)
-
-# define BOOST_CURRENT_FUNCTION __FUNC__
-
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)
-
-# define BOOST_CURRENT_FUNCTION __func__
-
-#elif defined(__cplusplus) && (__cplusplus >= 201103)
-
-# define BOOST_CURRENT_FUNCTION __func__
-
-#else
-
-# define BOOST_CURRENT_FUNCTION "(unknown)"
-
-#endif
-
-}
-
-} // namespace detail
-
-} // namespace boost
-
-#endif // #ifndef BOOST_CURRENT_FUNCTION_HPP_INCLUDED
diff --git a/third_party/boostorg/assert/index.html b/third_party/boostorg/assert/index.html
deleted file mode 100644
index a3c843e..0000000
--- a/third_party/boostorg/assert/index.html
+++ /dev/null
@@ -1,15 +0,0 @@
-<html>
-<head>
-<meta http-equiv="refresh" content="0; URL=doc/html/assert.html">
-</head>
-<body>
-Automatic redirection failed, please go to
-<a href="doc/html/assert.html">doc/html/assert.html</a>.
-</body>
-</html>
-<!--
-	© Copyright Beman Dawes, 2001
-	Distributed under the Boost Software License, Version 1.0.
-	See accompanying file LICENSE_1_0.txt or copy at
-	http://www.boost.org/LICENSE_1_0.txt
--->
diff --git a/third_party/boostorg/assert/meta/libraries.json b/third_party/boostorg/assert/meta/libraries.json
deleted file mode 100644
index 8337796..0000000
--- a/third_party/boostorg/assert/meta/libraries.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "key": "assert",
-    "name": "Assert",
-    "authors": [
-        "Peter Dimov"
-    ],
-    "maintainers": [
-        "Peter Dimov <pdimov -at- pdimov.com>"
-    ],
-    "description": "Customizable assert macros.",
-    "category": [
-        "Correctness"
-    ]
-}
diff --git a/third_party/boostorg/assert/test/Jamfile.v2 b/third_party/boostorg/assert/test/Jamfile.v2
deleted file mode 100644
index 0b46665..0000000
--- a/third_party/boostorg/assert/test/Jamfile.v2
+++ /dev/null
@@ -1,26 +0,0 @@
-#  Boost.Assert Library test Jamfile
-#
-#  Copyright (c) 2014, 2017 Peter Dimov
-#
-#  Distributed under the Boost Software License, Version 1.0.
-#  See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt
-
-# bring in rules for testing
-import testing ;
-
-run assert_test.cpp ;
-run current_function_test.cpp : : : <test-info>always_show_run_output ;
-run verify_test.cpp ;
-run assert_is_void_test.cpp ;
-
-# expansion tests are in exp/ so that there is a backslash in the path on Windows
-run exp/assert_exp_test.cpp ;
-run exp/assert_msg_exp_test.cpp ;
-run exp/verify_exp_test.cpp ;
-run exp/verify_msg_exp_test.cpp ;
-run assert_test2.cpp ;
-run assert_msg_test2.cpp ;
-
-# quick test (for CI)
-run quick.cpp ;
diff --git a/third_party/boostorg/assert/test/assert_is_void_test.cpp b/third_party/boostorg/assert/test/assert_is_void_test.cpp
deleted file mode 100644
index 5e64a68..0000000
--- a/third_party/boostorg/assert/test/assert_is_void_test.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-//  assert_is_void_test.cpp - tests BOOST_ASSERT_IS_VOID
-//
-//  Copyright (c) 2015 Ion Gaztanaga
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/config.hpp>
-
-// default case, !NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-#ifdef BOOST_ASSERT_IS_VOID
-#error "BOOST_ASSERT should NOT be void if NDEBUG is not defined"
-#endif
-
-// default case, NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-#ifndef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should be void in NDEBUG"
-#endif
-
-// BOOST_DISABLE_ASSERTS, !NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define BOOST_DISABLE_ASSERTS
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-#ifndef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should be void with BOOST_DISABLE_ASSERTS"
-#endif
-
-// BOOST_DISABLE_ASSERTS, NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-#ifndef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should be void with BOOST_DISABLE_ASSERTS and NDEBUG"
-#endif
-
-#undef BOOST_DISABLE_ASSERTS
-
-// BOOST_ENABLE_ASSERT_HANDLER, !NDEBUG
-// BOOST_ASSERT(expr) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-#ifdef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should NOT be void with BOOST_ENABLE_ASSERT_HANDLER"
-#endif
-
-// BOOST_ENABLE_ASSERT_HANDLER, NDEBUG
-// BOOST_ASSERT(expr) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-#ifdef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should NOT be void with BOOST_ENABLE_ASSERT_HANDLER"
-#endif
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// same as BOOST_ENABLE_ASSERT_HANDLER
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-#ifdef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should NOT be void with BOOST_ENABLE_ASSERT_DEBUG_HANDLER and !NDEBUG"
-#endif
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-#ifndef BOOST_ASSERT_IS_VOID
-#error "Error: BOOST_ASSERT should be void with BOOST_ENABLE_ASSERT_DEBUG_HANDLER and NDEBUG"
-#endif
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    return 0;
-}
diff --git a/third_party/boostorg/assert/test/assert_msg_test2.cpp b/third_party/boostorg/assert/test/assert_msg_test2.cpp
deleted file mode 100644
index 9d8f8da..0000000
--- a/third_party/boostorg/assert/test/assert_msg_test2.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-//  assert_msg_test2.cpp - a test for BOOST_ASSERT_MSG and NDEBUG
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-//  Distributed under the Boost Software License, Version 1.0.
-//  See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/detail/lightweight_test.hpp>
-#include <stdio.h>
-
-// default case, !NDEBUG
-// BOOST_ASSERT_MSG(x) -> assert(x)
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_default()
-{
-    int x = 1;
-
-    BOOST_ASSERT_MSG( 1, "msg" );
-    BOOST_ASSERT_MSG( x, "msg" );
-    BOOST_ASSERT_MSG( x == 1, "msg" );
-}
-
-// default case, NDEBUG
-// BOOST_ASSERT_MSG(x) -> assert(x)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_default_ndebug()
-{
-    int x = 1;
-
-    BOOST_ASSERT_MSG( 1, "msg" );
-    BOOST_ASSERT_MSG( x, "msg" );
-    BOOST_ASSERT_MSG( x == 1, "msg" );
-
-    BOOST_ASSERT_MSG( 0, "msg" );
-    BOOST_ASSERT_MSG( !x, "msg" );
-    BOOST_ASSERT_MSG( x == 0, "msg" );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// same as BOOST_ENABLE_ASSERT_HANDLER
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-int handler_invoked = 0;
-
-void boost::assertion_failed_msg( char const * expr, char const * msg, char const * function, char const * file, long line )
-{
-    printf( "Expression: %s\nMessage: %s\nFunction: %s\nFile: %s\nLine: %ld\n\n", expr, msg, function, file, line );
-    ++handler_invoked;
-}
-
-void test_debug_handler()
-{
-    handler_invoked = 0;
-
-    int x = 1;
-
-    BOOST_ASSERT_MSG( 1, "msg" );
-    BOOST_ASSERT_MSG( x, "msg" );
-    BOOST_ASSERT_MSG( x == 1, "msg" );
-
-    BOOST_ASSERT_MSG( 0, "msg" );
-    BOOST_ASSERT_MSG( !x, "msg" );
-    BOOST_ASSERT_MSG( x == 0, "msg" );
-
-    BOOST_TEST( handler_invoked == 3 );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_ASSERT_MSG(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    handler_invoked = 0;
-
-    int x = 1;
-
-    BOOST_ASSERT_MSG( 1, "msg" );
-    BOOST_ASSERT_MSG( x, "msg" );
-    BOOST_ASSERT_MSG( x == 1, "msg" );
-
-    BOOST_ASSERT_MSG( 0, "msg" );
-    BOOST_ASSERT_MSG( !x, "msg" );
-    BOOST_ASSERT_MSG( x == 0, "msg" );
-
-    BOOST_TEST( handler_invoked == 0 );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/assert_test.cpp b/third_party/boostorg/assert/test/assert_test.cpp
deleted file mode 100644
index 3233828..0000000
--- a/third_party/boostorg/assert/test/assert_test.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-//
-//  assert_test.cpp - a test for boost/assert.hpp
-//
-//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
-//  Copyright (c) Beman Dawes 2011
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#include <boost/detail/lightweight_test.hpp>
-
-#include <boost/assert.hpp>
-
-void test_default()
-{
-    int x = 1;
-
-    BOOST_ASSERT(1);
-    BOOST_ASSERT(x);
-    BOOST_ASSERT(x == 1);
-    BOOST_ASSERT(&x);
-
-    BOOST_ASSERT_MSG(1, "msg");
-    BOOST_ASSERT_MSG(x, "msg");
-    BOOST_ASSERT_MSG(x == 1, "msg");
-    BOOST_ASSERT_MSG(&x, "msg");
-}
-
-#define BOOST_DISABLE_ASSERTS
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    int x = 1;
-
-    BOOST_ASSERT(1);
-    BOOST_ASSERT(x);
-    BOOST_ASSERT(x == 1);
-    BOOST_ASSERT(&x);
-
-    BOOST_ASSERT_MSG(1, "msg");
-    BOOST_ASSERT_MSG(x, "msg");
-    BOOST_ASSERT_MSG(x == 1, "msg");
-    BOOST_ASSERT_MSG(&x, "msg");
-
-    BOOST_ASSERT(0);
-    BOOST_ASSERT(!x);
-    BOOST_ASSERT(x == 0);
-
-    BOOST_ASSERT_MSG(0, "msg");
-    BOOST_ASSERT_MSG(!x, "msg");
-    BOOST_ASSERT_MSG(x == 0, "msg");
-
-    void * p = 0;
-
-    BOOST_ASSERT(p);
-    BOOST_ASSERT_MSG(p, "msg");
-
-    // suppress warnings
-    p = &x;
-    p = &p;
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-#include <boost/assert.hpp>
-#include <boost/config.hpp>
-#include <cstdio>
-
-int handler_invoked = 0;
-int msg_handler_invoked = 0;
-
-void boost::assertion_failed(char const * expr, char const * function, char const * file, long line)
-{
-#if !defined(BOOST_NO_STDC_NAMESPACE)
-    using std::printf;
-#endif
-
-    printf("Expression: %s\nFunction: %s\nFile: %s\nLine: %ld\n\n", expr, function, file, line);
-    ++handler_invoked;
-}
-
-void boost::assertion_failed_msg(char const * expr, char const * msg, char const * function,
-  char const * file, long line)
-{
-#if !defined(BOOST_NO_STDC_NAMESPACE)
-    using std::printf;
-#endif
-
-    printf("Expression: %s Message: %s\nFunction: %s\nFile: %s\nLine: %ld\n\n",
-      expr, msg, function, file, line);
-    ++msg_handler_invoked;
-}
-
-struct X
-{
-    static void f()
-    {
-        BOOST_ASSERT(0);
-        BOOST_ASSERT_MSG(0, "msg f()");
-    }
-};
-
-void test_handler()
-{
-    int x = 1;
-
-    BOOST_ASSERT(1);
-    BOOST_ASSERT(x);
-    BOOST_ASSERT(x == 1);
-    BOOST_ASSERT(&x);
-
-    BOOST_ASSERT_MSG(1, "msg2");
-    BOOST_ASSERT_MSG(x, "msg3");
-    BOOST_ASSERT_MSG(x == 1, "msg4");
-    BOOST_ASSERT_MSG(&x, "msg5");
-
-    BOOST_ASSERT(0);
-    BOOST_ASSERT(!x);
-    BOOST_ASSERT(x == 0);
-
-    BOOST_ASSERT_MSG(0,"msg 0");
-    BOOST_ASSERT_MSG(!x, "msg !x");
-    BOOST_ASSERT_MSG(x == 0, "msg x == 0");
-
-    void * p = 0;
-
-    BOOST_ASSERT(p);
-    BOOST_ASSERT_MSG(p, "msg p");
-
-    X::f();
-
-    BOOST_ASSERT(handler_invoked == 5);
-    BOOST_TEST(handler_invoked == 5);
-
-    BOOST_ASSERT_MSG(msg_handler_invoked == 5, "msg_handler_invoked count is wrong");
-    BOOST_TEST(msg_handler_invoked == 5);
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-#undef BOOST_ENABLE_ASSERT_MSG_HANDLER
-
-int main()
-{
-    test_default();
-    test_disabled();
-    test_handler();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/assert_test2.cpp b/third_party/boostorg/assert/test/assert_test2.cpp
deleted file mode 100644
index 5a8a37f..0000000
--- a/third_party/boostorg/assert/test/assert_test2.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-//  assert_test2.cpp - a test for BOOST_ASSERT and NDEBUG
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-//  Distributed under the Boost Software License, Version 1.0.
-//  See accompanying file LICENSE_1_0.txt or copy at
-//  http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/detail/lightweight_test.hpp>
-#include <stdio.h>
-
-// default case, !NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_default()
-{
-    int x = 1;
-
-    BOOST_ASSERT( 1 );
-    BOOST_ASSERT( x );
-    BOOST_ASSERT( x == 1 );
-}
-
-// default case, NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_default_ndebug()
-{
-    int x = 1;
-
-    BOOST_ASSERT( 1 );
-    BOOST_ASSERT( x );
-    BOOST_ASSERT( x == 1 );
-
-    BOOST_ASSERT( 0 );
-    BOOST_ASSERT( !x );
-    BOOST_ASSERT( x == 0 );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// same as BOOST_ENABLE_ASSERT_HANDLER
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-int handler_invoked = 0;
-
-void boost::assertion_failed( char const * expr, char const * function, char const * file, long line )
-{
-    printf( "Expression: %s\nFunction: %s\nFile: %s\nLine: %ld\n\n", expr, function, file, line );
-    ++handler_invoked;
-}
-
-void test_debug_handler()
-{
-    handler_invoked = 0;
-
-    int x = 1;
-
-    BOOST_ASSERT( 1 );
-    BOOST_ASSERT( x );
-    BOOST_ASSERT( x == 1 );
-
-    BOOST_ASSERT( 0 );
-    BOOST_ASSERT( !x );
-    BOOST_ASSERT( x == 0 );
-
-    BOOST_TEST( handler_invoked == 3 );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    handler_invoked = 0;
-
-    int x = 1;
-
-    BOOST_ASSERT( 1 );
-    BOOST_ASSERT( x );
-    BOOST_ASSERT( x == 1 );
-
-    BOOST_ASSERT( 0 );
-    BOOST_ASSERT( !x );
-    BOOST_ASSERT( x == 0 );
-
-    BOOST_TEST( handler_invoked == 0 );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/current_function_test.cpp b/third_party/boostorg/assert/test/current_function_test.cpp
deleted file mode 100644
index 1343901..0000000
--- a/third_party/boostorg/assert/test/current_function_test.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-#include <boost/config.hpp>
-
-#if defined(BOOST_MSVC)
-#pragma warning(disable: 4786)  // identifier truncated in debug info
-#pragma warning(disable: 4710)  // function not inlined
-#pragma warning(disable: 4711)  // function selected for automatic inline expansion
-#pragma warning(disable: 4514)  // unreferenced inline removed
-#endif
-
-//
-//  current_function_test.cpp - a test for boost/current_function.hpp
-//
-//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#include <boost/current_function.hpp>
-#include <boost/config.hpp>
-#include <cstdio>
-
-void message(char const * file, long line, char const * func, char const * msg)
-{
-#if !defined(BOOST_NO_STDC_NAMESPACE)
-    using std::printf;
-#endif
-
-    printf("%s(%ld): %s in function '%s'\n", file, line, msg, func);
-}
-
-#define MESSAGE(msg) message(__FILE__, __LINE__, BOOST_CURRENT_FUNCTION, msg)
-
-int main()
-{
-    MESSAGE("assertion failed");
-
-    return 0;
-}
diff --git a/third_party/boostorg/assert/test/exp/assert_exp_test.cpp b/third_party/boostorg/assert/test/exp/assert_exp_test.cpp
deleted file mode 100644
index c56cdef..0000000
--- a/third_party/boostorg/assert/test/exp/assert_exp_test.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-//
-//  assert_exp_test.cpp - tests BOOST_ASSERT expansion
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/config.hpp>
-#include <boost/current_function.hpp>
-#include <boost/detail/lightweight_test.hpp>
-#include <string>
-
-// Each backslash in __FILE__ when passed through BOOST_STRINGIZE is doubled
-static std::string quote( std::string const & s )
-{
-    std::string r;
-    r.reserve( s.size() );
-
-    for( char const * p = s.c_str(); *p; ++p )
-    {
-        r += *p;
-        if( *p == '\\' ) r += *p;
-    }
-
-    return r;
-}
-
-// default case, !NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef assert
-
-void test_default()
-{
-    std::string v1 = BOOST_STRINGIZE(BOOST_ASSERT(x1));
-    BOOST_TEST_EQ( v1, "assert(x1)" );
-}
-
-// default case, NDEBUG
-// BOOST_ASSERT(x) -> assert(x)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-#undef assert
-
-void test_default_ndebug()
-{
-    std::string v2 = BOOST_STRINGIZE(BOOST_ASSERT(x2));
-    BOOST_TEST_EQ( v2, "assert(x2)" );
-}
-
-// BOOST_DISABLE_ASSERTS, !NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define BOOST_DISABLE_ASSERTS
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    std::string v3 = BOOST_STRINGIZE(BOOST_ASSERT(x3));
-    BOOST_TEST_EQ( v3, "((void)0)" );
-}
-
-// BOOST_DISABLE_ASSERTS, NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled_ndebug()
-{
-    std::string v4 = BOOST_STRINGIZE(BOOST_ASSERT(x4));
-    BOOST_TEST_EQ( v4, "((void)0)" );
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-// BOOST_ENABLE_ASSERT_HANDLER, !NDEBUG
-// BOOST_ASSERT(expr) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#undef BOOST_LIKELY
-#undef BOOST_CURRENT_FUNCTION
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_handler()
-{
-    std::string v5 = BOOST_STRINGIZE(BOOST_ASSERT(x5)); std::string w5 = "(BOOST_LIKELY(!!(x5))? ((void)0): ::boost::assertion_failed(\"x5\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_handler()";
-    BOOST_TEST_EQ( v5, w5 );
-}
-
-// BOOST_ENABLE_ASSERT_HANDLER, NDEBUG
-// BOOST_ASSERT(expr) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_handler_ndebug()
-{
-    std::string v6 = BOOST_STRINGIZE(BOOST_ASSERT(x6)); std::string w6 = "(BOOST_LIKELY(!!(x6))? ((void)0): ::boost::assertion_failed(\"x6\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_handler_ndebug()";
-    BOOST_TEST_EQ( v6, w6 );
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// same as BOOST_ENABLE_ASSERT_HANDLER
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler()
-{
-    std::string v7 = BOOST_STRINGIZE(BOOST_ASSERT(x7)); std::string w7 = "(BOOST_LIKELY(!!(x7))? ((void)0): ::boost::assertion_failed(\"x7\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_debug_handler()";
-    BOOST_TEST_EQ( v7, w7 );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_ASSERT(x) -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    std::string v8 = BOOST_STRINGIZE(BOOST_ASSERT(x8));
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_debug_handler_ndebug()";
-    BOOST_TEST_EQ( v8, "((void)0)" );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_disabled();
-    test_disabled_ndebug();
-    test_handler();
-    test_handler_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/exp/assert_msg_exp_test.cpp b/third_party/boostorg/assert/test/exp/assert_msg_exp_test.cpp
deleted file mode 100644
index faff616..0000000
--- a/third_party/boostorg/assert/test/exp/assert_msg_exp_test.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-//
-//  assert_msg_exp_test.cpp - tests BOOST_ASSERT_MSG expansion
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/config.hpp>
-#include <boost/current_function.hpp>
-#include <boost/detail/lightweight_test.hpp>
-#include <string>
-
-// Each backslash in __FILE__ when passed through BOOST_STRINGIZE is doubled
-static std::string quote( std::string const & s )
-{
-    std::string r;
-    r.reserve( s.size() );
-
-    for( char const * p = s.c_str(); *p; ++p )
-    {
-        r += *p;
-        if( *p == '\\' ) r += *p;
-    }
-
-    return r;
-}
-
-// default case, !NDEBUG
-// BOOST_ASSERT_MSG(x,"m") -> assert((x)&&("m"))
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef assert
-
-void test_default()
-{
-    std::string v1 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x1, "m1"));
-    BOOST_TEST_EQ( v1, "assert((x1)&&(\"m1\"))" );
-}
-
-// default case, NDEBUG
-// BOOST_ASSERT_MSG(x,"m") -> assert((x)&&("m"))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-#undef assert
-
-void test_default_ndebug()
-{
-    std::string v2 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x2, "m2"));
-    BOOST_TEST_EQ( v2, "assert((x2)&&(\"m2\"))" );
-}
-
-// BOOST_DISABLE_ASSERTS, !NDEBUG
-// BOOST_ASSERT_MSG(x,"m") -> ((void)0)
-
-#define BOOST_DISABLE_ASSERTS
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    std::string v3 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x3, "m3"));
-    BOOST_TEST_EQ( v3, "((void)0)" );
-}
-
-// BOOST_DISABLE_ASSERTS, NDEBUG
-// BOOST_ASSERT_MSG(x,"m") -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled_ndebug()
-{
-    std::string v4 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x4, "m4"));
-    BOOST_TEST_EQ( v4, "((void)0)" );
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-// BOOST_ENABLE_ASSERT_HANDLER, !NDEBUG
-// BOOST_ASSERT_MSG(expr, msg) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed_msg(#expr, msg, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#undef BOOST_LIKELY
-#undef BOOST_CURRENT_FUNCTION
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_handler()
-{
-    std::string v5 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x5, "m5")); std::string w5 = "(BOOST_LIKELY(!!(x5))? ((void)0): ::boost::assertion_failed_msg(\"x5\", \"m5\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_handler()";
-    BOOST_TEST_EQ( v5, w5 );
-}
-
-// BOOST_ENABLE_ASSERT_HANDLER, NDEBUG
-// BOOST_ASSERT_MSG(expr, msg) -> (BOOST_LIKELY(!!(expr))? ((void)0): ::boost::assertion_failed_msg(#expr, msg, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_handler_ndebug()
-{
-    std::string v6 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x6, "m6")); std::string w6 = "(BOOST_LIKELY(!!(x6))? ((void)0): ::boost::assertion_failed_msg(\"x6\", \"m6\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_handler_ndebug()";
-    BOOST_TEST_EQ( v6, w6 );
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// same as BOOST_ENABLE_ASSERT_HANDLER
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler()
-{
-    std::string v7 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x7, "m7")); std::string w7 = "(BOOST_LIKELY(!!(x7))? ((void)0): ::boost::assertion_failed_msg(\"x7\", \"m7\", BOOST_CURRENT_FUNCTION, \"" + quote( __FILE__ ) + "\", " BOOST_STRINGIZE(__LINE__) "))";
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_debug_handler()";
-    BOOST_TEST_EQ( v7, w7 );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_ASSERT_MSG(x,"m") -> ((void)0)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    std::string v8 = BOOST_STRINGIZE(BOOST_ASSERT_MSG(x8, "m8"));
-
-    char const * BOOST_CURRENT_FUNCTION = "void test_debug_handler_ndebug()";
-    BOOST_TEST_EQ( v8, "((void)0)" );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_disabled();
-    test_disabled_ndebug();
-    test_handler();
-    test_handler_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/exp/verify_exp_test.cpp b/third_party/boostorg/assert/test/exp/verify_exp_test.cpp
deleted file mode 100644
index 844513b..0000000
--- a/third_party/boostorg/assert/test/exp/verify_exp_test.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-//
-//  verify_exp_test.cpp - tests BOOST_ASSERT expansion
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/config.hpp>
-#include <boost/current_function.hpp>
-#include <boost/detail/lightweight_test.hpp>
-#include <string>
-
-// default case, !NDEBUG
-// BOOST_VERIFY(x) -> BOOST_ASSERT(x)
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT
-
-void test_default()
-{
-    std::string v1 = BOOST_STRINGIZE(BOOST_VERIFY(x1));
-    BOOST_TEST_EQ( v1, "BOOST_ASSERT(x1)" );
-}
-
-// default case, NDEBUG
-// BOOST_VERIFY(x) -> ((void)(x))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_default_ndebug()
-{
-    std::string v2 = BOOST_STRINGIZE(BOOST_VERIFY(x2));
-    BOOST_TEST_EQ( v2, "((void)(x2))" );
-}
-
-// BOOST_DISABLE_ASSERTS, !NDEBUG
-// BOOST_VERIFY(x) -> ((void)(x))
-
-#define BOOST_DISABLE_ASSERTS
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    std::string v3 = BOOST_STRINGIZE(BOOST_VERIFY(x3));
-    BOOST_TEST_EQ( v3, "((void)(x3))" );
-}
-
-// BOOST_DISABLE_ASSERTS, NDEBUG
-// BOOST_VERIFY(x) -> ((void)(x))
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled_ndebug()
-{
-    std::string v4 = BOOST_STRINGIZE(BOOST_VERIFY(x4));
-    BOOST_TEST_EQ( v4, "((void)(x4))" );
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-// BOOST_ENABLE_ASSERT_HANDLER, !NDEBUG
-// BOOST_VERIFY(x) -> BOOST_ASSERT(x)
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT
-
-void test_handler()
-{
-    std::string v5 = BOOST_STRINGIZE(BOOST_VERIFY(x5));
-    BOOST_TEST_EQ( v5, "BOOST_ASSERT(x5)" );
-}
-
-#define NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT
-
-void test_handler_ndebug()
-{
-    std::string v6 = BOOST_STRINGIZE(BOOST_VERIFY(x6));
-    BOOST_TEST_EQ( v6, "BOOST_ASSERT(x6)" );
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// BOOST_VERIFY(x) -> BOOST_ASSERT(x)
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT
-
-void test_debug_handler()
-{
-    std::string v7 = BOOST_STRINGIZE(BOOST_VERIFY(x7));
-    BOOST_TEST_EQ( v7, "BOOST_ASSERT(x7)" );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_VERIFY(x) -> ((void)(x))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    std::string v8 = BOOST_STRINGIZE(BOOST_VERIFY(x8));
-    BOOST_TEST_EQ( v8, "((void)(x8))" );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_disabled();
-    test_disabled_ndebug();
-    test_handler();
-    test_handler_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/exp/verify_msg_exp_test.cpp b/third_party/boostorg/assert/test/exp/verify_msg_exp_test.cpp
deleted file mode 100644
index 02090fe..0000000
--- a/third_party/boostorg/assert/test/exp/verify_msg_exp_test.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-//
-//  verify_msg_exp_test.cpp - tests BOOST_VERIFY_MSG expansion
-//
-//  Copyright (c) 2014 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/config.hpp>
-#include <boost/current_function.hpp>
-#include <boost/detail/lightweight_test.hpp>
-#include <string>
-
-// default case, !NDEBUG
-// BOOST_VERIFY_MSG(x,"m") -> BOOST_ASSERT_MSG(x,"m")
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT_MSG
-
-void test_default()
-{
-    std::string v1 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x1, m1));
-    BOOST_TEST_EQ( v1, "BOOST_ASSERT_MSG(x1,m1)" );
-}
-
-// default case, NDEBUG
-// BOOST_VERIFY_MSG(x,"m") -> ((void)(x))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_default_ndebug()
-{
-    std::string v2 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x2, m2));
-    BOOST_TEST_EQ( v2, "((void)(x2))" );
-}
-
-// BOOST_DISABLE_ASSERTS, !NDEBUG
-// BOOST_VERIFY_MSG(x,"m") -> ((void)(x))
-
-#define BOOST_DISABLE_ASSERTS
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    std::string v3 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x3, "m3"));
-    BOOST_TEST_EQ( v3, "((void)(x3))" );
-}
-
-// BOOST_DISABLE_ASSERTS, NDEBUG
-// BOOST_VERIFY_MSG(x,"m") -> ((void)(x))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_disabled_ndebug()
-{
-    std::string v4 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x4, "m4"));
-    BOOST_TEST_EQ( v4, "((void)(x4))" );
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-// BOOST_ENABLE_ASSERT_HANDLER, !NDEBUG
-// BOOST_VERIFY_MSG(x,m) -> BOOST_ASSERT_MSG(x,m)
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT_MSG
-
-void test_handler()
-{
-    std::string v5 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x5, m5));
-    BOOST_TEST_EQ( v5, "BOOST_ASSERT_MSG(x5,m5)" );
-}
-
-// BOOST_ENABLE_ASSERT_HANDLER, NDEBUG
-// BOOST_VERIFY_MSG(x,m) -> BOOST_ASSERT_MSG(x,m)
-
-#define NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT_MSG
-
-void test_handler_ndebug()
-{
-    std::string v6 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x6, m6));
-    BOOST_TEST_EQ( v6, "BOOST_ASSERT_MSG(x6,m6)" );
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, !NDEBUG
-// BOOST_VERIFY_MSG(x,m) -> BOOST_ASSERT_MSG(x,m)
-
-#define BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-#undef NDEBUG
-#include <boost/assert.hpp>
-#undef BOOST_ASSERT_MSG
-
-void test_debug_handler()
-{
-    std::string v7 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x7, m7));
-    BOOST_TEST_EQ( v7, "BOOST_ASSERT_MSG(x7,m7)" );
-}
-
-// BOOST_ENABLE_ASSERT_DEBUG_HANDLER, NDEBUG
-// BOOST_VERIFY_MSG(x,"m") -> ((void)(x))
-
-#define NDEBUG
-#include <boost/assert.hpp>
-
-void test_debug_handler_ndebug()
-{
-    std::string v8 = BOOST_STRINGIZE(BOOST_VERIFY_MSG(x8, "m8"));
-    BOOST_TEST_EQ( v8, "((void)(x8))" );
-}
-
-#undef BOOST_ENABLE_ASSERT_DEBUG_HANDLER
-
-int main()
-{
-    test_default();
-    test_default_ndebug();
-    test_disabled();
-    test_disabled_ndebug();
-    test_handler();
-    test_handler_ndebug();
-    test_debug_handler();
-    test_debug_handler_ndebug();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/assert/test/quick.cpp b/third_party/boostorg/assert/test/quick.cpp
deleted file mode 100644
index ec3dba6..0000000
--- a/third_party/boostorg/assert/test/quick.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// quick.cpp - a quick test for boost/assert.hpp
-//
-// Copyright 2017 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0.
-//
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt
-//
-
-#include <boost/assert.hpp>
-
-int main()
-{
-    int x = 1;
-    BOOST_ASSERT( x == 1 );
-}
diff --git a/third_party/boostorg/assert/test/verify_test.cpp b/third_party/boostorg/assert/test/verify_test.cpp
deleted file mode 100644
index 3481636..0000000
--- a/third_party/boostorg/assert/test/verify_test.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-//
-//  verify_test.cpp - a test for BOOST_VERIFY
-//
-//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
-//  Copyright (c) 2007 Peter Dimov
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#include <boost/detail/lightweight_test.hpp>
-
-#include <boost/assert.hpp>
-
-int f( int & x )
-{
-    return ++x;
-}
-
-void test_default()
-{
-    int x = 1;
-
-    BOOST_VERIFY( 1 );
-    BOOST_VERIFY( x == 1 );
-    BOOST_VERIFY( ++x );
-    BOOST_VERIFY( f(x) );
-    BOOST_VERIFY( &x );
-
-    BOOST_TEST( x == 3 );
-}
-
-#define BOOST_DISABLE_ASSERTS
-#include <boost/assert.hpp>
-
-void test_disabled()
-{
-    int x = 1;
-
-    BOOST_VERIFY( 1 );
-    BOOST_VERIFY( x == 1 );
-    BOOST_VERIFY( ++x );
-    BOOST_VERIFY( f(x) );
-    BOOST_VERIFY( &x );
-
-    BOOST_TEST( x == 3 );
-
-    BOOST_VERIFY( 0 );
-    BOOST_VERIFY( !x );
-    BOOST_VERIFY( x == 0 );
-    BOOST_VERIFY( !++x );
-    BOOST_VERIFY( !f(x) );
-
-    BOOST_TEST( x == 5 );
-
-    void * p = 0;
-    BOOST_VERIFY( p );
-}
-
-#undef BOOST_DISABLE_ASSERTS
-
-#define BOOST_ENABLE_ASSERT_HANDLER
-#include <boost/assert.hpp>
-#include <boost/config.hpp>
-#include <cstdio>
-
-int handler_invoked = 0;
-
-void boost::assertion_failed(char const * expr, char const * function, char const * file, long line)
-{
-#if !defined(BOOST_NO_STDC_NAMESPACE)
-    using std::printf;
-#endif
-
-    printf("Expression: %s\nFunction: %s\nFile: %s\nLine: %ld\n\n", expr, function, file, line);
-    ++handler_invoked;
-}
-
-struct X
-{
-    static bool f()
-    {
-        BOOST_VERIFY( 0 );
-        return false;
-    }
-};
-
-void test_handler()
-{
-    int x = 1;
-
-    BOOST_VERIFY( 1 );
-    BOOST_VERIFY( x == 1 );
-    BOOST_VERIFY( ++x );
-    BOOST_VERIFY( f(x) );
-    BOOST_VERIFY( &x );
-
-    BOOST_TEST( x == 3 );
-
-    BOOST_VERIFY( 0 );
-    BOOST_VERIFY( !x );
-    BOOST_VERIFY( x == 0 );
-    BOOST_VERIFY( !++x );
-    BOOST_VERIFY( !f(x) );
-
-    BOOST_TEST( x == 5 );
-
-    void * p = 0;
-    BOOST_VERIFY( p );
-
-    BOOST_VERIFY( X::f() );
-
-    BOOST_TEST( handler_invoked == 8 );
-}
-
-#undef BOOST_ENABLE_ASSERT_HANDLER
-
-int main()
-{
-    test_default();
-    test_disabled();
-    test_handler();
-
-    return boost::report_errors();
-}
diff --git a/third_party/boostorg/atomic/.gitattributes b/third_party/boostorg/atomic/.gitattributes
deleted file mode 100644
index 3e84d7c..0000000
--- a/third_party/boostorg/atomic/.gitattributes
+++ /dev/null
@@ -1,96 +0,0 @@
-* text=auto !eol svneol=native#text/plain
-*.gitattributes text svneol=native#text/plain
-
-# Scriptish formats
-*.bat        text svneol=native#text/plain
-*.bsh        text svneol=native#text/x-beanshell
-*.cgi        text svneol=native#text/plain
-*.cmd        text svneol=native#text/plain
-*.js         text svneol=native#text/javascript
-*.php        text svneol=native#text/x-php
-*.pl         text svneol=native#text/x-perl
-*.pm         text svneol=native#text/x-perl
-*.py         text svneol=native#text/x-python
-*.sh         eol=lf svneol=LF#text/x-sh
-configure    eol=lf svneol=LF#text/x-sh
-
-# Image formats
-*.bmp        binary svneol=unset#image/bmp
-*.gif        binary svneol=unset#image/gif
-*.ico        binary svneol=unset#image/ico
-*.jpeg       binary svneol=unset#image/jpeg
-*.jpg        binary svneol=unset#image/jpeg
-*.png        binary svneol=unset#image/png
-*.tif        binary svneol=unset#image/tiff
-*.tiff       binary svneol=unset#image/tiff
-*.svg        text svneol=native#image/svg%2Bxml
-
-# Data formats
-*.pdf        binary svneol=unset#application/pdf
-*.avi        binary svneol=unset#video/avi
-*.doc        binary svneol=unset#application/msword
-*.dsp        text svneol=crlf#text/plain
-*.dsw        text svneol=crlf#text/plain
-*.eps        binary svneol=unset#application/postscript
-*.gz         binary svneol=unset#application/gzip
-*.mov        binary svneol=unset#video/quicktime
-*.mp3        binary svneol=unset#audio/mpeg
-*.ppt        binary svneol=unset#application/vnd.ms-powerpoint
-*.ps         binary svneol=unset#application/postscript
-*.psd        binary svneol=unset#application/photoshop
-*.rdf        binary svneol=unset#text/rdf
-*.rss        text svneol=unset#text/xml
-*.rtf        binary svneol=unset#text/rtf
-*.sln        text svneol=native#text/plain
-*.swf        binary svneol=unset#application/x-shockwave-flash
-*.tgz        binary svneol=unset#application/gzip
-*.vcproj     text svneol=native#text/xml
-*.vcxproj    text svneol=native#text/xml
-*.vsprops    text svneol=native#text/xml
-*.wav        binary svneol=unset#audio/wav
-*.xls        binary svneol=unset#application/vnd.ms-excel
-*.zip        binary svneol=unset#application/zip
-
-# Text formats
-.htaccess    text svneol=native#text/plain
-*.bbk        text svneol=native#text/xml
-*.cmake      text svneol=native#text/plain
-*.css        text svneol=native#text/css
-*.dtd        text svneol=native#text/xml
-*.htm        text svneol=native#text/html
-*.html       text svneol=native#text/html
-*.ini        text svneol=native#text/plain
-*.log        text svneol=native#text/plain
-*.mak        text svneol=native#text/plain
-*.qbk        text svneol=native#text/plain
-*.rst        text svneol=native#text/plain
-*.sql        text svneol=native#text/x-sql
-*.txt        text svneol=native#text/plain
-*.xhtml      text svneol=native#text/xhtml%2Bxml
-*.xml        text svneol=native#text/xml
-*.xsd        text svneol=native#text/xml
-*.xsl        text svneol=native#text/xml
-*.xslt       text svneol=native#text/xml
-*.xul        text svneol=native#text/xul
-*.yml        text svneol=native#text/plain
-boost-no-inspect text svneol=native#text/plain
-CHANGES      text svneol=native#text/plain
-COPYING      text svneol=native#text/plain
-INSTALL      text svneol=native#text/plain
-Jamfile      text svneol=native#text/plain
-Jamroot      text svneol=native#text/plain
-Jamfile.v2   text svneol=native#text/plain
-Jamrules     text svneol=native#text/plain
-Makefile*    text svneol=native#text/plain
-README       text svneol=native#text/plain
-TODO         text svneol=native#text/plain
-
-# Code formats
-*.c          text svneol=native#text/plain
-*.cpp        text svneol=native#text/plain
-*.h          text svneol=native#text/plain
-*.hpp        text svneol=native#text/plain
-*.ipp        text svneol=native#text/plain
-*.tpp        text svneol=native#text/plain
-*.jam        text svneol=native#text/plain
-*.java       text svneol=native#text/plain
diff --git a/third_party/boostorg/atomic/.travis.yml b/third_party/boostorg/atomic/.travis.yml
deleted file mode 100644
index 3d81714..0000000
--- a/third_party/boostorg/atomic/.travis.yml
+++ /dev/null
@@ -1,715 +0,0 @@
-# Copyright 2016, 2017 Peter Dimov
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-language: cpp
-
-sudo: false
-
-python: "2.7"
-
-os:
-  - linux
-  - osx
-
-branches:
-  only:
-    - master
-    - develop
-
-env:
-  matrix:
-    - BOGUS_JOB=true
-
-matrix:
-
-  exclude:
-    - env: BOGUS_JOB=true
-
-  include:
-# gcc, Linux, 64-bit
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++ CXXSTD=c++03 ADDRESS_MODEL=64
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.8
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.8
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.9
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-4.9
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-5
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-5
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-5
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-6
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - g++-7
-          sources:
-            - ubuntu-toolchain-r-test
-
-# clang, Linux, 64-bit
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.5
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.5
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.5
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.5
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.6
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.6
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.6
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.6
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.7
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.7
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.7
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.7
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.8
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.8
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.8
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.8
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.8
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.8
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.8
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.8
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.9
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-3.9
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.9
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-3.9
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.9
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-3.9
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-3.9
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-3.9
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-4.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-4.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-4.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-4.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-4.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-4.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-4.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-4.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++03 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-5.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-5.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++11 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-5.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-5.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++14 ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-5.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-5.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++1z ADDRESS_MODEL=64
-      addons:
-        apt:
-          packages:
-            - clang-5.0
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-5.0
-
-# Travis CI capacity of OS X testers is insufficient, tests disabled until Travis CI capacity is increased
-# clang, OS X, 64-bit
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode8.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode8.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode8.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode8.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode8.2
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode8.2
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode8.2
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode8.1
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode8.1
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode8.1
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode8.1
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode8.1
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode8.0
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode8.0
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode8.0
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode8.0
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode7.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode7.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode7.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode7.3
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
-#      osx_image: xcode6.4
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
-#      osx_image: xcode6.4
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
-#      osx_image: xcode6.4
-#
-#    - os: osx
-#      env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
-#      osx_image: xcode6.4
-
-# gcc, Linux, 32-bit
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-4.7-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-4.9-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-5-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-6-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-    - os: linux
-      env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - g++-7-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-
-# clang, Linux, 32-bit
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-3.5
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.5
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-3.6
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.6
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-3.7
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.7
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-3.8
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-precise-3.8
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-3.9
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-3.9
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-4.0
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-4.0
-
-    - os: linux
-      env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++11 ADDRESS_MODEL=32
-      addons:
-        apt:
-          packages:
-            - clang-5.0
-            - g++-4.8-multilib
-            - linux-libc-dev:i386
-          sources:
-            - ubuntu-toolchain-r-test
-            - llvm-toolchain-trusty-5.0
-
-
-install:
-  - cd ..
-  - git clone -b $TRAVIS_BRANCH --depth 1 https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - git submodule update --init tools/boostdep
-  - git submodule update --init tools/build
-  - git submodule update --init tools/inspect
-  - git submodule update --init libs/config
-  - cp -r $TRAVIS_BUILD_DIR/* libs/atomic
-  - python tools/boostdep/depinst/depinst.py atomic
-  - ./bootstrap.sh
-  - ./b2 headers
- 
-script:
-  - |-
-    echo "using $TOOLSET : : $COMPILER : <cxxflags>-std=$CXXSTD ;" > ~/user-config.jam
-  - ./b2 -j3 libs/atomic/test toolset=$TOOLSET address-model=$ADDRESS_MODEL
-
-notifications:
-  email:
-    on_success: always
diff --git a/third_party/boostorg/atomic/BUILD b/third_party/boostorg/atomic/BUILD
deleted file mode 100644
index a9aff83..0000000
--- a/third_party/boostorg/atomic/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-licenses(["notice"])  # boost
-
-cc_library(
-    name = "atomic",
-    hdrs = glob(["include/**"]),
-    includes = ["include"],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = ["//visibility:public"],
-)
diff --git a/third_party/boostorg/atomic/README.md b/third_party/boostorg/atomic/README.md
deleted file mode 100644
index 3eb8875..0000000
--- a/third_party/boostorg/atomic/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# ![Boost.Atomic](doc/logo.png)
-
-Boost.Atomic, part of the collection of [Boost C++ Libraries](http://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11.
-
-### Directories
-
-* **build** - Boost.Atomic build scripts
-* **doc** - QuickBook documentation sources
-* **include** - Interface headers of Boost.Atomic
-* **src** - Compilable source code of Boost.Atomic
-* **test** - Boost.Atomic unit tests
-
-### More information
-
-* [Documentation](http://boost.org/libs/atomic)
-* [Report bugs](https://svn.boost.org/trac/boost/newticket?component=atomic;version=Boost%20Release%20Branch). Be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well.
-* Submit your patches as pull requests against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
-
-### Build status
-
-Master: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/master?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/master) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=master)](https://travis-ci.org/boostorg/atomic)
-Develop: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/develop?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=develop)](https://travis-ci.org/boostorg/atomic)
-
-### License
-
-Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
diff --git a/third_party/boostorg/atomic/appveyor.yml b/third_party/boostorg/atomic/appveyor.yml
deleted file mode 100644
index 072410f..0000000
--- a/third_party/boostorg/atomic/appveyor.yml
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2016, 2017 Peter Dimov
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
-
-version: 1.0.{build}-{branch}
-
-shallow_clone: true
-
-branches:
-  only:
-    - master
-    - develop
-
-environment:
-  matrix:
-# AppVeyor doesn't provide 64-bit compilers for these MSVC versions
-#    - TOOLSET: msvc-9.0
-#      ADDRESS_MODEL: 64
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: msvc-10.0
-#      ADDRESS_MODEL: 64
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: msvc-11.0
-#      ADDRESS_MODEL: 64
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-12.0
-      ADDRESS_MODEL: 64
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-14.0
-      ADDRESS_MODEL: 64
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-14.1
-      ADDRESS_MODEL: 64
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-# Boost.Thread does not compile for Cygwin
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 64
-#      B2_ARGS: cxxflags=-std=c++03
-#      ADDPATH: C:\cygwin64\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 64
-#      B2_ARGS: cxxflags=-std=c++11
-#      ADDPATH: C:\cygwin64\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 64
-      B2_ARGS: cxxflags=-std=c++03
-      ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 64
-      B2_ARGS: cxxflags=-std=c++11
-      ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 64
-      B2_ARGS: cxxflags=-std=gnu++03
-      ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 64
-      B2_ARGS: cxxflags=-std=gnu++11
-      ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-
-    - TOOLSET: msvc-9.0
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-10.0
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-11.0
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-12.0
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-14.0
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: msvc-14.1
-      ADDRESS_MODEL: 32
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-# Boost.Thread does not compile for Cygwin
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=c++03
-#      ADDPATH: C:\cygwin\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=c++11
-#      ADDPATH: C:\cygwin\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 32
-      B2_ARGS: cxxflags=-std=c++03
-      ADDPATH: C:\mingw\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-    - TOOLSET: gcc
-      ADDRESS_MODEL: 32
-      B2_ARGS: cxxflags=-std=c++11
-      ADDPATH: C:\mingw\bin
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-# AppVeyor doesn't provide 32-bit compilers for MinGW-w64
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=c++03
-#      ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=c++11
-#      ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=gnu++03
-#      ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-#    - TOOLSET: gcc
-#      ADDRESS_MODEL: 32
-#      B2_ARGS: cxxflags=-std=gnu++11
-#      ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
-#      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
-
-install:
-  - cd ..
-  - git clone -b %APPVEYOR_REPO_BRANCH% https://github.com/boostorg/boost.git boost-root
-  - cd boost-root
-  - git submodule update --init tools/boostdep
-  - git submodule update --init tools/build
-  - git submodule update --init tools/inspect
-  - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\atomic
-  - python tools/boostdep/depinst/depinst.py atomic
-  - cmd /c bootstrap
-  - b2 headers
-
-build: off
-
-test_script:
-  - PATH=%ADDPATH%;%PATH%
-  - b2 libs/atomic/test variant=release toolset=%TOOLSET% address-model=%ADDRESS_MODEL% %B2_ARGS%
diff --git a/third_party/boostorg/atomic/build/Jamfile.v2 b/third_party/boostorg/atomic/build/Jamfile.v2
deleted file mode 100644
index 70e7720..0000000
--- a/third_party/boostorg/atomic/build/Jamfile.v2
+++ /dev/null
@@ -1,37 +0,0 @@
-#  Boost.Atomic Library Jamfile
-#
-#  Copyright Helge Bahmann 2011.
-#  Distributed under the Boost Software License, Version 1.0.
-#  (See accompanying file LICENSE_1_0.txt or copy at
-#  http://www.boost.org/LICENSE_1_0.txt)
-
-import common ;
-
-project boost/atomic
-    : requirements
-      <threading>multi
-      <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
-      <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
-      <define>BOOST_ATOMIC_SOURCE
-      <target-os>windows:<define>BOOST_USE_WINDOWS_H
-      <target-os>windows:<define>_WIN32_WINNT=0x0500
-      <toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
-    : usage-requirements
-      <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
-      <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
-    : source-location ../src
-    ;
-
-alias atomic_sources
-   : lockpool.cpp
-   ;
-
-explicit atomic_sources ;
-
-
-lib boost_atomic
-   : atomic_sources
-   ;
-
-
-boost-install boost_atomic ;
diff --git a/third_party/boostorg/atomic/doc/Jamfile.v2 b/third_party/boostorg/atomic/doc/Jamfile.v2
deleted file mode 100644
index 16bd022..0000000
--- a/third_party/boostorg/atomic/doc/Jamfile.v2
+++ /dev/null
@@ -1,36 +0,0 @@
-#  Boost.Atomic library documentation Jamfile
-#
-#  Copyright Helge Bahmann 2011.
-#  Copyright Tim Blechmann 2012.
-#  Distributed under the Boost Software License, Version 1.0.
-#     (See accompanying file LICENSE_1_0.txt or copy at
-#           http://www.boost.org/LICENSE_1_0.txt)
-
-import quickbook ;
-import boostbook : boostbook ;
-
-xml atomic : atomic.qbk ;
-
-boostbook standalone
-    : atomic
-    : <xsl:param>boost.root=../../../..
-      <xsl:param>boost.libraries=../../../libraries.htm
-      <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/atomic/doc/html
-    ;
-
-install css : [ glob $(BOOST_ROOT)/doc/src/*.css ]
-    : <location>html ;
-install images : [ glob $(BOOST_ROOT)/doc/src/images/*.png ]
-    : <location>html/images ;
-explicit css ;
-explicit images ;
-
-###############################################################################
-alias boostdoc
-    : atomic
-    :
-    :
-    : ;
-explicit boostdoc ;
-alias boostrelease ;
-explicit boostrelease ;
diff --git a/third_party/boostorg/atomic/doc/atomic.qbk b/third_party/boostorg/atomic/doc/atomic.qbk
deleted file mode 100644
index f48265b..0000000
--- a/third_party/boostorg/atomic/doc/atomic.qbk
+++ /dev/null
@@ -1,1279 +0,0 @@
-[/
- / Copyright (c) 2009 Helge Bahmann
- / Copyright (c) 2014, 2017, 2018 Andrey Semashev
- /
- / Distributed under the Boost Software License, Version 1.0. (See accompanying
- / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
- /]
-
-[library Boost.Atomic
-    [quickbook 1.4]
-    [authors [Bahmann, Helge][Semashev, Andrey]]
-    [copyright 2011 Helge Bahmann]
-    [copyright 2012 Tim Blechmann]
-    [copyright 2013, 2017, 2018 Andrey Semashev]
-    [id atomic]
-    [dirname atomic]
-    [purpose Atomic operations]
-    [license
-        Distributed under the Boost Software License, Version 1.0.
-        (See accompanying file LICENSE_1_0.txt or copy at
-        [@http://www.boost.org/LICENSE_1_0.txt])
-    ]
-]
-
-[section:introduction Introduction]
-
-[section:introduction_presenting Presenting Boost.Atomic]
-
-[*Boost.Atomic] is a library that provides [^atomic]
-data types and operations on these data types, as well as memory
-ordering constraints required for coordinating multiple threads through
-atomic variables. It implements the interface as defined by the C++11
-standard, but makes this feature available for platforms lacking
-system/compiler support for this particular C++11 feature.
-
-Users of this library should already be familiar with concurrency
-in general, as well as elementary concepts such as "mutual exclusion".
-
-The implementation makes use of processor-specific instructions where
-possible (via inline assembler, platform libraries or compiler
-intrinsics), and falls back to "emulating" atomic operations through
-locking.
-
-[endsect]
-
-[section:introduction_purpose Purpose]
-
-Operations on "ordinary" variables are not guaranteed to be atomic.
-This means that with [^int n=0] initially, two threads concurrently
-executing
-
-[c++]
-
-  void function()
-  {
-    n ++;
-  }
-
-might result in [^n==1] instead of 2: Each thread will read the
-old value into a processor register, increment it and write the result
-back. Both threads may therefore write [^1], unaware that the other thread
-is doing likewise.
-
-Declaring [^atomic<int> n=0] instead, the same operation on
-this variable will always result in [^n==2] as each operation on this
-variable is ['atomic]: This means that each operation behaves as if it
-were strictly sequentialized with respect to the other.
-
-Atomic variables are useful for two purposes:
-
-* as a means for coordinating multiple threads via custom
-  coordination protocols
-* as faster alternatives to "locked" access to simple variables
-
-Take a look at the [link atomic.usage_examples examples] section
-for common patterns.
-
-[endsect]
-
-[endsect]
-
-[section:thread_coordination Thread coordination using Boost.Atomic]
-
-The most common use of [*Boost.Atomic] is to realize custom
-thread synchronization protocols: The goal is to coordinate
-accesses of threads to shared variables in order to avoid
-"conflicts". The
-programmer must be aware of the fact that
-compilers, CPUs and the cache
-hierarchies may generally reorder memory references at will.
-As a consequence a program such as:
-
-[c++]
-
-  int x = 0, y = 0;
-
-  thread1:
-    x = 1;
-    y = 1;
-
-  thread2:
-    if (y == 1) {
-      assert(x == 1);
-    }
-
-might indeed fail as there is no guarantee that the read of `x`
-by thread2 "sees" the write by thread1.
-
-[*Boost.Atomic] uses a synchronization concept based on the
-['happens-before] relation to describe the guarantees under
-which situations such as the above one cannot occur.
-
-The remainder of this section will discuss ['happens-before] in
-a "hands-on" way instead of giving a fully formalized definition.
-The reader is encouraged to additionally have a
-look at the discussion of the correctness of a few of the
-[link atomic.usage_examples examples] afterwards.
-
-[section:mutex Enforcing ['happens-before] through mutual exclusion]
-
-As an introductory example to understand how arguing using
-['happens-before] works, consider two threads synchronizing
-using a common mutex:
-
-[c++]
-
-  mutex m;
-
-  thread1:
-    m.lock();
-    ... /* A */
-    m.unlock();
-
-  thread2:
-    m.lock();
-    ... /* B */
-    m.unlock();
-
-The "lockset-based intuition" would be to argue that A and B
-cannot be executed concurrently as the code paths require a
-common lock to be held.
-
-One can however also arrive at the same conclusion using
-['happens-before]: Either thread1 or thread2 will succeed first
-at [^m.lock()]. If this is thread1, then as a consequence,
-thread2 cannot succeed at [^m.lock()] before thread1 has executed
-[^m.unlock()]; consequently A ['happens-before] B in this case.
-By symmetry, if thread2 succeeds at [^m.lock()] first, we can
-conclude B ['happens-before] A.
-
-Since this already exhausts all options, we can conclude that
-either A ['happens-before] B or B ['happens-before] A must
-always hold. Obviously we cannot state ['which] of the two relationships
-holds, but either one is sufficient to conclude that A and B
-cannot conflict.
-
-Compare the [link boost_atomic.usage_examples.example_spinlock spinlock]
-implementation to see how the mutual exclusion concept can be
-mapped to [*Boost.Atomic].
-
-[endsect]
-
-[section:release_acquire ['happens-before] through [^release] and [^acquire]]
-
-The most basic pattern for coordinating threads via [*Boost.Atomic]
-uses [^release] and [^acquire] on an atomic variable for coordination: If ...
-
-* ... thread1 performs an operation A,
-* ... thread1 subsequently writes (or atomically
-  modifies) an atomic variable with [^release] semantic,
-* ... thread2 reads (or atomically reads-and-modifies)
-  the value from the same atomic variable with
-  [^acquire] semantic and
-* ... thread2 subsequently performs an operation B,
-
-... then A ['happens-before] B.
-
-Consider the following example
-
-[c++]
-
-  atomic<int> a(0);
-
-  thread1:
-    ... /* A */
-    a.fetch_add(1, memory_order_release);
-
-  thread2:
-    int tmp = a.load(memory_order_acquire);
-    if (tmp == 1) {
-      ... /* B */
-    } else {
-      ... /* C */
-    }
-
-In this example, two avenues for execution are possible:
-
-* The [^store] operation by thread1 precedes the [^load] by thread2:
-  In this case thread2 will execute B and "A ['happens-before] B"
-  holds as all of the criteria above are satisfied.
-* The [^load] operation by thread2 precedes the [^store] by thread1:
-  In this case, thread2 will execute C, but "A ['happens-before] C"
-  does ['not] hold: thread2 does not read the value written by
-  thread1 through [^a].
-
-Therefore, A and B cannot conflict, but A and C ['can] conflict.
-
-[endsect]
-
-[section:fences Fences]
-
-Ordering constraints are generally specified together with an access to
-an atomic variable. It is however also possible to issue "fence"
-operations in isolation; in this case the fence operates in
-conjunction with preceding (for `acquire`, `consume` or `seq_cst`
-operations) or succeeding (for `release` or `seq_cst`) atomic
-operations.
-
-The example from the previous section could also be written in
-the following way:
-
-[c++]
-
-  atomic<int> a(0);
-
-  thread1:
-    ... /* A */
-    atomic_thread_fence(memory_order_release);
-    a.fetch_add(1, memory_order_relaxed);
-
-  thread2:
-    int tmp = a.load(memory_order_relaxed);
-    if (tmp == 1) {
-      atomic_thread_fence(memory_order_acquire);
-      ... /* B */
-    } else {
-      ... /* C */
-    }
-
-This provides the same ordering guarantees as previously, but
-elides a (possibly expensive) memory ordering operation in
-the case where C is executed.
-
-[endsect]
-
-[section:release_consume ['happens-before] through [^release] and [^consume]]
-
-The second pattern for coordinating threads via [*Boost.Atomic]
-uses [^release] and [^consume] on an atomic variable for coordination: If ...
-
-* ... thread1 performs an operation A,
-* ... thread1 subsequently writes (or atomically modifies) an
-  atomic variable with [^release] semantic,
-* ... thread2 reads (or atomically reads-and-modifies)
-  the value from the same atomic variable with [^consume] semantic and
-* ... thread2 subsequently performs an operation B that is ['computationally
-  dependent on the value of the atomic variable],
-
-... then A ['happens-before] B.
-
-Consider the following example
-
-[c++]
-
-  atomic<int> a(0);
-  complex_data_structure data[2];
-
-  thread1:
-    data[1] = ...; /* A */
-    a.store(1, memory_order_release);
-
-  thread2:
-    int index = a.load(memory_order_consume);
-    complex_data_structure tmp = data[index]; /* B */
-
-In this example, two avenues for execution are possible:
-
-* The [^store] operation by thread1 precedes the [^load] by thread2:
-  In this case thread2 will read [^data\[1\]] and "A ['happens-before] B"
-  holds as all of the criteria above are satisfied.
-* The [^load] operation by thread2 precedes the [^store] by thread1:
-  In this case thread2 will read [^data\[0\]] and "A ['happens-before] B"
-  does ['not] hold: thread2 does not read the value written by
-  thread1 through [^a].
-
-Here, the ['happens-before] relationship helps ensure that any
-accesses (presumably writes) to [^data\[1\]] by thread1 happen before
-the accesses (presumably reads) to [^data\[1\]] by thread2:
-Lacking this relationship, thread2 might see stale/inconsistent
-data.
-
-Note that in this example the guarantee relies on the fact that operation B
-is computationally dependent on the value read from the atomic variable;
-therefore the following program would be erroneous:
-
-[c++]
-
-  atomic<int> a(0);
-  complex_data_structure data[2];
-
-  thread1:
-    data[1] = ...; /* A */
-    a.store(1, memory_order_release);
-
-  thread2:
-    int index = a.load(memory_order_consume);
-    complex_data_structure tmp;
-    if (index == 0)
-      tmp = data[0];
-    else
-      tmp = data[1];
-
-[^consume] is most commonly (and most safely! see
-[link atomic.limitations limitations]) used with
-pointers, compare for example the
-[link boost_atomic.usage_examples.singleton singleton with double-checked locking].
-
-[endsect]
-
-[section:seq_cst Sequential consistency]
-
-The third pattern for coordinating threads via [*Boost.Atomic]
-uses [^seq_cst] for coordination: If ...
-
-* ... thread1 performs an operation A,
-* ... thread1 subsequently performs any operation with [^seq_cst],
-* ... thread1 subsequently performs an operation B,
-* ... thread2 performs an operation C,
-* ... thread2 subsequently performs any operation with [^seq_cst],
-* ... thread2 subsequently performs an operation D,
-
-then either "A ['happens-before] D" or "C ['happens-before] B" holds.
-
-In this case it does not matter whether thread1 and thread2 operate
-on the same or different atomic variables, or use a "stand-alone"
-[^atomic_thread_fence] operation.
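-
-As a minimal sketch of this guarantee (in the same pseudo-code style as the
-earlier examples), consider two threads that each set their own flag and then
-inspect the other one:
-
-[c++]
-
-  atomic<int> flag1(0), flag2(0);
-
-  thread1:
-    flag1.store(1, memory_order_seq_cst); /* A */
-    if (flag2.load(memory_order_seq_cst) == 0) {
-      ... /* B */
-    }
-
-  thread2:
-    flag2.store(1, memory_order_seq_cst); /* C */
-    if (flag1.load(memory_order_seq_cst) == 0) {
-      ... /* D */
-    }
-
-Because all four atomic operations use [^seq_cst], they are part of a single
-total order, so at least one of the two loads must observe the other thread's
-store; consequently B and D can never both execute.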
-
-[endsect]
-
-[endsect]
-
-[section:interface Programming interfaces]
-
-[section:configuration Configuration and building]
-
-The library contains header-only and compiled parts. The library is
-header-only for lock-free cases but requires a separate binary to
-implement the lock-based emulation. Users are able to detect whether
-linking to the compiled part is required by checking the
-[link atomic.interface.feature_macros feature macros].
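-
-For example, a translation unit can verify at compile time that a particular
-type is always lock-free and therefore needs no lock-based emulation (a minimal
-sketch; the header and macro used below are part of the capability/feature
-macro set, consult that section for the exact names and values):
-
-    #include <boost/atomic/capabilities.hpp>
-
-    #if BOOST_ATOMIC_INT32_LOCK_FREE != 2
-    #error "32-bit integer atomics would fall back to lock-based emulation"
-    #endif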
-
-The following macros affect library behavior:
-
-[table
-    [[Macro] [Description]]
-    [[`BOOST_ATOMIC_NO_CMPXCHG8B`] [Affects 32-bit x86 Oracle Studio builds. When defined,
-      the library assumes the target CPU does not support `cmpxchg8b` instruction used
-      to support 64-bit atomic operations. This is the case with very old CPUs (pre-Pentium).
-      The library does not perform runtime detection of this instruction, so running the code
-      that uses 64-bit atomics on such CPUs will result in crashes, unless this macro is defined.
-      Note that the macro does not affect MSVC, GCC and compatible compilers because the library infers
-      this information from the compiler-defined macros.]]
-    [[`BOOST_ATOMIC_NO_CMPXCHG16B`] [Affects 64-bit x86 MSVC and Oracle Studio builds. When defined,
-      the library assumes the target CPU does not support `cmpxchg16b` instruction used
-      to support 128-bit atomic operations. This is the case with some early 64-bit AMD CPUs;
-      all Intel CPUs and current AMD CPUs support this instruction. The library does not
-      perform runtime detection of this instruction, so running the code that uses 128-bit
-      atomics on such CPUs will result in crashes, unless this macro is defined. Note that
-      the macro does not affect GCC and compatible compilers because the library infers
-      this information from the compiler-defined macros.]]
-    [[`BOOST_ATOMIC_NO_MFENCE`] [Affects 32-bit x86 Oracle Studio builds. When defined,
-      the library assumes the target CPU does not support `mfence` instruction used
-      to implement thread fences. This instruction was added with SSE2 instruction set extension,
-      which was available in CPUs since Intel Pentium 4. The library does not perform runtime detection
-      of this instruction, so running the library code on older CPUs will result in crashes, unless
-      this macro is defined. Note that the macro does not affect MSVC, GCC and compatible compilers
-      because the library infers this information from the compiler-defined macros.]]
-    [[`BOOST_ATOMIC_NO_FLOATING_POINT`] [When defined, support for floating point operations is disabled.
-      Floating point types shall be treated similarly to trivially copyable structs and no capability macros
-      will be defined.]]
-    [[`BOOST_ATOMIC_FORCE_FALLBACK`] [When defined, all operations are implemented with locks.
-      This is mostly used for testing and should not be used in real world projects.]]
-    [[`BOOST_ATOMIC_DYN_LINK` and `BOOST_ALL_DYN_LINK`] [Control library linking. If defined,
-      the library assumes dynamic linking, otherwise static. The latter macro affects all Boost
-      libraries, not just [*Boost.Atomic].]]
-    [[`BOOST_ATOMIC_NO_LIB` and `BOOST_ALL_NO_LIB`] [Control library auto-linking on Windows.
-      When defined, disables auto-linking. The latter macro affects all Boost libraries,
-      not just [*Boost.Atomic].]]
-]
-
-Besides macros, it is important to specify the correct compiler options for the target CPU.
-With GCC and compatible compilers this affects whether particular atomic operations are
-lock-free or not.
-
-Boost building process is described in the [@http://www.boost.org/doc/libs/release/more/getting_started/ Getting Started guide].
-For example, you can build [*Boost.Atomic] with the following command line:
-
-[pre
-    bjam --with-atomic variant=release instruction-set=core2 stage
-]
-
-[endsect]
-
-[section:interface_memory_order Memory order]
-
-    #include <boost/memory_order.hpp>
-
-The enumeration [^boost::memory_order] defines the following
-values to represent memory ordering constraints:
-
-[table
-    [[Constant] [Description]]
-    [[`memory_order_relaxed`] [No ordering constraint.
-      Informally speaking, subsequent operations may be reordered before
-      the atomic operation, and preceding operations may be reordered after
-      it. This constraint is suitable only when
-      either a) further operations do not depend on the outcome
-      of the atomic operation or b) ordering is enforced through
-      stand-alone `atomic_thread_fence` operations. The operation on
-      the atomic value itself is still atomic though.
-    ]]
-    [[`memory_order_release`] [
-      Perform `release` operation. Informally speaking,
-      prevents all preceding memory operations from being reordered
-      past this point.
-    ]]
-    [[`memory_order_acquire`] [
-      Perform `acquire` operation. Informally speaking,
-      prevents succeeding memory operations from being reordered
-      before this point.
-    ]]
-    [[`memory_order_consume`] [
-      Perform `consume` operation. More relaxed (and
-      on some architectures more efficient) than `memory_order_acquire`
-      as it only affects succeeding operations that are
-      computationally-dependent on the value retrieved from
-      an atomic variable.
-    ]]
-    [[`memory_order_acq_rel`] [Perform both `release` and `acquire` operations]]
-    [[`memory_order_seq_cst`] [
-      Enforce sequential consistency. Implies `memory_order_acq_rel`, but
-      additionally enforces a total order on all operations qualified in this way.
-    ]]
-]
-
-For compilers that support C++11 scoped enums, the library also defines scoped synonyms
-that are preferred in modern programs:
-
-[table
-    [[Pre-C++11 constant] [C++11 equivalent]]
-    [[`memory_order_relaxed`] [`memory_order::relaxed`]]
-    [[`memory_order_release`] [`memory_order::release`]]
-    [[`memory_order_acquire`] [`memory_order::acquire`]]
-    [[`memory_order_consume`] [`memory_order::consume`]]
-    [[`memory_order_acq_rel`] [`memory_order::acq_rel`]]
-    [[`memory_order_seq_cst`] [`memory_order::seq_cst`]]
-]
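-
-For example, the following two stores are equivalent; the second form is a
-short sketch that assumes a compiler with C++11 scoped enum support:
-
-    boost::atomic< int > a(0);
-
-    a.store(1, boost::memory_order_release); // pre-C++11 constant
-    a.store(1, boost::memory_order::release); // C++11 scoped synonym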
-
-See section [link atomic.thread_coordination ['happens-before]] for explanation
-of the various ordering constraints.
-
-[endsect]
-
-[section:interface_atomic_flag Atomic flags]
-
-    #include <boost/atomic/atomic_flag.hpp>
-
-The `boost::atomic_flag` type provides the most basic set of atomic operations
-suitable for implementing mutually exclusive access to thread-shared data. The flag
-can have one of the two possible states: set and clear. The class implements the
-following operations:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`atomic_flag()`]
-      [Initialize to the clear state. See the discussion below.]
-    ]
-    [
-      [`bool test_and_set(memory_order order)`]
-      [Sets the atomic flag to the set state; returns `true` if the flag had been set prior to the operation]
-    ]
-    [
-      [`void clear(memory_order order)`]
-      [Sets the atomic flag to the clear state]
-    ]
-]
-
-`order` always has `memory_order_seq_cst` as default parameter.
-
-Note that the default constructor `atomic_flag()` is unlike `std::atomic_flag`, which
-leaves the default-constructed object uninitialized. This potentially requires dynamic
-initialization during the program startup to perform the object initialization, which
-makes it unsafe to create global `boost::atomic_flag` objects that can be used before
-entering `main()`. Some compilers though (especially those supporting C++11 `constexpr`)
-may be smart enough to perform flag initialization statically (which is, in C++11 terms,
-a constant initialization).
-
-This difference is deliberate and is done to support C++03 compilers. C++11 defines the
-`ATOMIC_FLAG_INIT` macro which can be used to statically initialize `std::atomic_flag`
-to a clear state like this:
-
-    std::atomic_flag flag = ATOMIC_FLAG_INIT; // constant initialization
-
-This macro cannot be implemented in C++03 because for that `atomic_flag` would have to be
-an aggregate type, which it cannot be because it has to prohibit copying and consequently
-define the default constructor. Thus the closest equivalent C++03 code using [*Boost.Atomic]
-would be:
-
-    boost::atomic_flag flag; // possibly, dynamic initialization in C++03;
-                             // constant initialization in C++11
-
-The same code is also valid in C++11, so this code can be used universally. However, for
-interface parity with `std::atomic_flag`, if possible, the library also defines the
-`BOOST_ATOMIC_FLAG_INIT` macro, which is equivalent to `ATOMIC_FLAG_INIT`:
-
-    boost::atomic_flag flag = BOOST_ATOMIC_FLAG_INIT; // constant initialization
-
-This macro will only be implemented on a C++11 compiler. When this macro is not available,
-the library defines `BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT`.
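-
-As a minimal sketch of how these operations are typically combined (see also the
-[link boost_atomic.usage_examples.example_spinlock spinlock] usage example), a
-spinlock can be built on top of `boost::atomic_flag`; the flag is clear while
-the lock is free:
-
-    class spinlock
-    {
-        boost::atomic_flag m_flag; // default-constructed to the clear (unlocked) state
-
-    public:
-        void lock()
-        {
-            // spin until the flag was previously clear, i.e. the lock has been acquired
-            while (m_flag.test_and_set(boost::memory_order_acquire))
-            {
-            }
-        }
-
-        void unlock()
-        {
-            m_flag.clear(boost::memory_order_release);
-        }
-    };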
-
-[endsect]
-
-[section:interface_atomic_object Atomic objects]
-
-    #include <boost/atomic/atomic.hpp>
-
-[^boost::atomic<['T]>] provides methods for atomically accessing
-variables of a suitable type [^['T]]. The type is suitable if
-it is /trivially copyable/ (3.9/9 \[basic.types\]). The following are
-examples of types that satisfy this requirement:
-
-* a scalar type (e.g. integer, boolean, enum or pointer type)
-* a [^class] or [^struct] that has no non-trivial copy or move
-  constructors or assignment operators, has a trivial destructor,
-  and that is comparable via [^memcmp].
-
-Note that classes with virtual functions or virtual base classes
-do not satisfy the requirements. Also be warned
-that structures with "padding" between data members may compare
-non-equal via [^memcmp] even though all members are equal. This may also be
-the case with some floating point types, which include padding bits themselves.
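-
-For example, a small plain struct without padding satisfies the requirement and
-can be used directly (a minimal sketch):
-
-    struct rgba
-    {
-        unsigned char r, g, b, a; // trivially copyable, compared via memcmp
-    };
-
-    boost::atomic< rgba > current_color;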
-
-[section:interface_atomic_generic [^boost::atomic<['T]>] template class]
-
-All atomic objects support the following operations and properties:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`atomic()`]
-      [Initialize to an unspecified value]
-    ]
-    [
-      [`atomic(T initial_value)`]
-      [Initialize to [^initial_value]]
-    ]
-    [
-      [`bool is_lock_free()`]
-      [Checks if the atomic object is lock-free; the returned value is consistent with the `is_always_lock_free` static constant, see below]
-    ]
-    [
-      [`T load(memory_order order)`]
-      [Return current value]
-    ]
-    [
-      [`void store(T value, memory_order order)`]
-      [Write new value to atomic variable]
-    ]
-    [
-      [`T exchange(T new_value, memory_order order)`]
-      [Exchange current value with `new_value`, returning current value]
-    ]
-    [
-      [`bool compare_exchange_weak(T & expected, T desired, memory_order order)`]
-      [Compare current value with `expected`, change it to `desired` if it matches.
-      Returns `true` if an exchange has been performed, and always writes the
-      previous value back in `expected`. May fail spuriously, so must generally be
-      retried in a loop.]
-    ]
-    [
-      [`bool compare_exchange_weak(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
-      [Compare current value with `expected`, change it to `desired` if it matches.
-      Returns `true` if an exchange has been performed, and always writes the
-      previous value back in `expected`. May fail spuriously, so must generally be
-      retried in a loop.]
-    ]
-    [
-      [`bool compare_exchange_strong(T & expected, T desired, memory_order order)`]
-      [Compare current value with `expected`, change it to `desired` if it matches.
-      Returns `true` if an exchange has been performed, and always writes the
-      previous value back in `expected`.]
-    ]
-    [
-      [`bool compare_exchange_strong(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
-      [Compare current value with `expected`, change it to `desired` if it matches.
-      Returns `true` if an exchange has been performed, and always writes the
-      previous value back in `expected`.]
-    ]
-    [
-      [`static bool is_always_lock_free`]
-      [This static boolean constant indicates if any atomic object of this type is lock-free]
-    ]
-]
-
-`order` always has `memory_order_seq_cst` as default parameter.
-
-The `compare_exchange_weak`/`compare_exchange_strong` variants
-taking four parameters differ from the three parameter variants
-in that they allow a different memory ordering constraint to
-be specified in case the operation fails.
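-
-For example, a typical [^compare_exchange_weak] retry loop that atomically
-doubles a value could look as follows (a minimal sketch):
-
-    boost::atomic< int > a(1);
-
-    int expected = a.load(boost::memory_order_relaxed);
-    // On failure the current value is written back into "expected", so the
-    // loop retries with the refreshed value; spurious failures are retried too.
-    while (!a.compare_exchange_weak(expected, expected * 2,
-        boost::memory_order_acq_rel, boost::memory_order_relaxed))
-    {
-    }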
-
-In addition to these explicit operations, each
-[^atomic<['T]>] object also supports
-implicit [^store] and [^load] through the use of "assignment"
-and "conversion to [^T]" operators. Avoid using these operators,
-as they do not allow specifying a memory ordering
-constraint, which always defaults to `memory_order_seq_cst`.
-
-[endsect]
-
-[section:interface_atomic_integral [^boost::atomic<['integral]>] template class]
-
-In addition to the operations listed in the previous section,
-[^boost::atomic<['I]>] for integral
-types [^['I]], except `bool`, supports the following operations,
-which correspond to [^std::atomic<['I]>]:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`I fetch_add(I v, memory_order order)`]
-      [Add `v` to variable, returning previous value]
-    ]
-    [
-      [`I fetch_sub(I v, memory_order order)`]
-      [Subtract `v` from variable, returning previous value]
-    ]
-    [
-      [`I fetch_and(I v, memory_order order)`]
-      [Apply bit-wise "and" with `v` to variable, returning previous value]
-    ]
-    [
-      [`I fetch_or(I v, memory_order order)`]
-      [Apply bit-wise "or" with `v` to variable, returning previous value]
-    ]
-    [
-      [`I fetch_xor(I v, memory_order order)`]
-      [Apply bit-wise "xor" with `v` to variable, returning previous value]
-    ]
-]
-
-Additionally, as a [*Boost.Atomic] extension, the following operations are also provided:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`I fetch_negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning previous value]
-    ]
-    [
-      [`I fetch_complement(memory_order order)`]
-      [Set the variable to the one\'s complement of the current value, returning previous value]
-    ]
-    [
-      [`I negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning the result]
-    ]
-    [
-      [`I add(I v, memory_order order)`]
-      [Add `v` to variable, returning the result]
-    ]
-    [
-      [`I sub(I v, memory_order order)`]
-      [Subtract `v` from variable, returning the result]
-    ]
-    [
-      [`I bitwise_and(I v, memory_order order)`]
-      [Apply bit-wise "and" with `v` to variable, returning the result]
-    ]
-    [
-      [`I bitwise_or(I v, memory_order order)`]
-      [Apply bit-wise "or" with `v` to variable, returning the result]
-    ]
-    [
-      [`I bitwise_xor(I v, memory_order order)`]
-      [Apply bit-wise "xor" with `v` to variable, returning the result]
-    ]
-    [
-      [`I bitwise_complement(memory_order order)`]
-      [Set the variable to the one\'s complement of the current value, returning the result]
-    ]
-    [
-      [`void opaque_negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning nothing]
-    ]
-    [
-      [`void opaque_add(I v, memory_order order)`]
-      [Add `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_sub(I v, memory_order order)`]
-      [Subtract `v` from variable, returning nothing]
-    ]
-    [
-      [`void opaque_and(I v, memory_order order)`]
-      [Apply bit-wise "and" with `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_or(I v, memory_order order)`]
-      [Apply bit-wise "or" with `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_xor(I v, memory_order order)`]
-      [Apply bit-wise "xor" with `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_complement(memory_order order)`]
-      [Set the variable to the one\'s complement of the current value, returning nothing]
-    ]
-    [
-      [`bool negate_and_test(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool add_and_test(I v, memory_order order)`]
-      [Add `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool sub_and_test(I v, memory_order order)`]
-      [Subtract `v` from variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool and_and_test(I v, memory_order order)`]
-      [Apply bit-wise "and" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool or_and_test(I v, memory_order order)`]
-      [Apply bit-wise "or" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool xor_and_test(I v, memory_order order)`]
-      [Apply bit-wise "xor" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool complement_and_test(memory_order order)`]
-      [Set the variable to the one\'s complement of the current value, returning `true` if the result is non-zero and `false` otherwise]
-    ]
-    [
-      [`bool bit_test_and_set(unsigned int n, memory_order order)`]
-      [Set bit number `n` in the variable to 1, returning `true` if the bit was previously set to 1 and `false` otherwise]
-    ]
-    [
-      [`bool bit_test_and_reset(unsigned int n, memory_order order)`]
-      [Set bit number `n` in the variable to 0, returning `true` if the bit was previously set to 1 and `false` otherwise]
-    ]
-    [
-      [`bool bit_test_and_complement(unsigned int n, memory_order order)`]
-      [Change bit number `n` in the variable to the opposite value, returning `true` if the bit was previously set to 1 and `false` otherwise]
-    ]
-]
-
-[note In Boost.Atomic 1.66 the [^['op]_and_test] operations returned the opposite value (i.e. `true` if the result is zero). This was changed
-to the current behavior in 1.67 for consistency with other operations in Boost.Atomic, as well as with conventions taken in the C++ standard library.
-Boost.Atomic 1.66 was the only release shipped with the old behavior. Users upgrading from Boost 1.66 to a later release can define
-`BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST` macro when building their code to generate deprecation warnings on the [^['op]_and_test] function calls
-(the functions are not actually deprecated though; this is just a way to highlight their use).]
-
-`order` always has `memory_order_seq_cst` as default parameter.
-
-The [^opaque_['op]] and [^['op]_and_test] variants of the operations
-may result in more efficient code on some architectures because
-the original value of the atomic variable is not preserved. In the
-[^bit_test_and_['op]] operations, the bit number `n` starts from 0, which
-means the least significant bit, and must not exceed
-[^std::numeric_limits<['I]>::digits - 1].
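-
-For instance, a minimal sketch using a few of these extensions:
-
-    boost::atomic< unsigned int > flags(0u);
-
-    flags.opaque_or(0x10u, boost::memory_order_relaxed); // set a bit, old value not needed
-
-    // true if bit 0 (the least significant bit) was already set before the call
-    bool bit0_was_set = flags.bit_test_and_set(0u, boost::memory_order_acq_rel);
-
-    if (flags.and_and_test(0xFFu, boost::memory_order_acq_rel))
-    {
-        // the resulting value is non-zero
-    }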
-
-In addition to these explicit operations, each
-[^boost::atomic<['I]>] object also
-supports implicit pre-/post- increment/decrement, as well
-as the operators `+=`, `-=`, `&=`, `|=` and `^=`.
-Avoid using these operators, as they do not allow specifying a memory ordering
-constraint, which always defaults to `memory_order_seq_cst`.
-
-[endsect]
-
-[section:interface_atomic_floating_point [^boost::atomic<['floating-point]>] template class]
-
-[note The support for floating point types is optional and can be disabled by defining `BOOST_ATOMIC_NO_FLOATING_POINT`.]
-
-In addition to the operations applicable to all atomic objects,
-[^boost::atomic<['F]>] for floating point
-types [^['F]] supports the following operations,
-which correspond to [^std::atomic<['F]>]:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`F fetch_add(F v, memory_order order)`]
-      [Add `v` to variable, returning previous value]
-    ]
-    [
-      [`F fetch_sub(F v, memory_order order)`]
-      [Subtract `v` from variable, returning previous value]
-    ]
-]
-
-Additionally, as a [*Boost.Atomic] extension, the following operations are also provided:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`F fetch_negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning previous value]
-    ]
-    [
-      [`F negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning the result]
-    ]
-    [
-      [`F add(F v, memory_order order)`]
-      [Add `v` to variable, returning the result]
-    ]
-    [
-      [`F sub(F v, memory_order order)`]
-      [Subtract `v` from variable, returning the result]
-    ]
-    [
-      [`void opaque_negate(memory_order order)`]
-      [Change the sign of the value stored in the variable, returning nothing]
-    ]
-    [
-      [`void opaque_add(F v, memory_order order)`]
-      [Add `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_sub(F v, memory_order order)`]
-      [Subtract `v` from variable, returning nothing]
-    ]
-]
-
-`order` always has `memory_order_seq_cst` as default parameter.
-
-The [^opaque_['op]] variants of the operations
-may result in more efficient code on some architectures because
-the original value of the atomic variable is not preserved.
-
-In addition to these explicit operations, each
-[^boost::atomic<['F]>] object also supports operators `+=` and `-=`.
-Avoid using these operators, as they do not allow specifying a memory ordering
-constraint, which always defaults to `memory_order_seq_cst`.
-
-When using atomic operations with floating point types, bear in mind that [*Boost.Atomic]
-always performs bitwise comparison of the stored values. This means that operations like
-`compare_exchange*` may fail if the stored value and comparand have different binary representation,
-even if they would normally compare equal. This is typically the case when either of the numbers
-is [@https://en.wikipedia.org/wiki/Denormal_number denormalized]. This also means that the behavior
-with regard to special floating point values like NaN and signed zero is different from normal C++.
-
-Another source of the problem is padding bits that are added to some floating point types for alignment.
-One widespread example of that is Intel x87 extended double format, which is typically stored as 80 bits
-of value padded with 16 or 48 unused bits. These padding bits are often uninitialized and contain garbage,
-which makes two equal numbers have different binary representation. The library attempts to account for
-the known cases of this kind, but in general it is possible that some platforms are not covered. Note that the C++
-standard makes no guarantees about reliability of `compare_exchange*` operations in the face of padding or
-trap bits.
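-
-A minimal sketch of the floating point operations (assuming floating point
-support has not been disabled via `BOOST_ATOMIC_NO_FLOATING_POINT`):
-
-    boost::atomic< double > accumulator(0.0);
-
-    accumulator.fetch_add(1.5, boost::memory_order_relaxed); // returns the previous value
-    accumulator.opaque_sub(0.5, boost::memory_order_relaxed); // previous value not needed
-
-    // compare_exchange_weak writes the current bitwise representation back into
-    // "expected" on failure, so the loop converges even if two numerically equal
-    // values differ in binary representation.
-    double expected = accumulator.load(boost::memory_order_relaxed);
-    while (!accumulator.compare_exchange_weak(expected, expected * 2.0,
-        boost::memory_order_acq_rel, boost::memory_order_relaxed))
-    {
-    }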
-
-[endsect]
-
-[section:interface_atomic_pointer [^boost::atomic<['pointer]>] template class]
-
-In addition to the operations applicable to all atomic objects,
-[^boost::atomic<['P]>] for pointer
-types [^['P]] (other than pointers to [^void], function or member pointers) supports
-the following operations, which correspond to [^std::atomic<['P]>]:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`T fetch_add(ptrdiff_t v, memory_order order)`]
-      [Add `v` to variable, returning previous value]
-    ]
-    [
-      [`T fetch_sub(ptrdiff_t v, memory_order order)`]
-      [Subtract `v` from variable, returning previous value]
-    ]
-]
-
-Similarly to integers, the following [*Boost.Atomic] extensions are also provided:
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`T add(ptrdiff_t v, memory_order order)`]
-      [Add `v` to variable, returning the result]
-    ]
-    [
-      [`T sub(ptrdiff_t v, memory_order order)`]
-      [Subtract `v` from variable, returning the result]
-    ]
-    [
-      [`void opaque_add(ptrdiff_t v, memory_order order)`]
-      [Add `v` to variable, returning nothing]
-    ]
-    [
-      [`void opaque_sub(ptrdiff_t v, memory_order order)`]
-      [Subtract `v` from variable, returning nothing]
-    ]
-    [
-      [`bool add_and_test(ptrdiff_t v, memory_order order)`]
-      [Add `v` to variable, returning `true` if the result is non-null and `false` otherwise]
-    ]
-    [
-      [`bool sub_and_test(ptrdiff_t v, memory_order order)`]
-      [Subtract `v` from variable, returning `true` if the result is non-null and `false` otherwise]
-    ]
-]
-
-`order` always has `memory_order_seq_cst` as the default value.
-
-In addition to these explicit operations, each
-[^boost::atomic<['P]>] object also
-supports implicit pre-/post- increment/decrement, as well
-as the operators `+=`, `-=`. Avoid using these operators,
-as they do not allow explicit specification of a memory ordering
-constraint and always use `memory_order_seq_cst`.
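-
-For illustration, a minimal sketch using the explicit operations on an atomic pointer
-(the `buffer` array is an assumed example object to point into):
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  static int buffer[16];
-  boost::atomic<int*> cursor(buffer);
-
-  // Advance the cursor by one element (pointer arithmetic is in element units),
-  // keeping the previous position.
-  int* previous = cursor.fetch_add(1, boost::memory_order_relaxed);
-
-  // Move it back without needing a return value.
-  cursor.opaque_sub(1, boost::memory_order_relaxed);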
-
-[endsect]
-
-[section:interface_atomic_convenience_typedefs [^boost::atomic<['T]>] convenience typedefs]
-
-For convenience, several shorthand typedefs of [^boost::atomic<['T]>] are provided:
-
-[c++]
-
-    typedef atomic< char > atomic_char;
-    typedef atomic< unsigned char > atomic_uchar;
-    typedef atomic< signed char > atomic_schar;
-    typedef atomic< unsigned short > atomic_ushort;
-    typedef atomic< short > atomic_short;
-    typedef atomic< unsigned int > atomic_uint;
-    typedef atomic< int > atomic_int;
-    typedef atomic< unsigned long > atomic_ulong;
-    typedef atomic< long > atomic_long;
-    typedef atomic< unsigned long long > atomic_ullong;
-    typedef atomic< long long > atomic_llong;
-
-    typedef atomic< void* > atomic_address;
-    typedef atomic< bool > atomic_bool;
-    typedef atomic< wchar_t > atomic_wchar_t;
-    typedef atomic< char16_t > atomic_char16_t;
-    typedef atomic< char32_t > atomic_char32_t;
-
-    typedef atomic< uint8_t > atomic_uint8_t;
-    typedef atomic< int8_t > atomic_int8_t;
-    typedef atomic< uint16_t > atomic_uint16_t;
-    typedef atomic< int16_t > atomic_int16_t;
-    typedef atomic< uint32_t > atomic_uint32_t;
-    typedef atomic< int32_t > atomic_int32_t;
-    typedef atomic< uint64_t > atomic_uint64_t;
-    typedef atomic< int64_t > atomic_int64_t;
-
-    typedef atomic< int_least8_t > atomic_int_least8_t;
-    typedef atomic< uint_least8_t > atomic_uint_least8_t;
-    typedef atomic< int_least16_t > atomic_int_least16_t;
-    typedef atomic< uint_least16_t > atomic_uint_least16_t;
-    typedef atomic< int_least32_t > atomic_int_least32_t;
-    typedef atomic< uint_least32_t > atomic_uint_least32_t;
-    typedef atomic< int_least64_t > atomic_int_least64_t;
-    typedef atomic< uint_least64_t > atomic_uint_least64_t;
-    typedef atomic< int_fast8_t > atomic_int_fast8_t;
-    typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
-    typedef atomic< int_fast16_t > atomic_int_fast16_t;
-    typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
-    typedef atomic< int_fast32_t > atomic_int_fast32_t;
-    typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
-    typedef atomic< int_fast64_t > atomic_int_fast64_t;
-    typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
-    typedef atomic< intmax_t > atomic_intmax_t;
-    typedef atomic< uintmax_t > atomic_uintmax_t;
-
-    typedef atomic< std::size_t > atomic_size_t;
-    typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
-
-    typedef atomic< intptr_t > atomic_intptr_t;
-    typedef atomic< uintptr_t > atomic_uintptr_t;
-
-The typedefs are provided only if the corresponding type is available.
-
-[endsect]
-
-[endsect]
-
-[section:interface_fences Fences]
-
-    #include <boost/atomic/fences.hpp>
-
-[table
-    [[Syntax] [Description]]
-    [
-      [`void atomic_thread_fence(memory_order order)`]
-      [Issue fence for coordination with other threads.]
-    ]
-    [
-      [`void atomic_signal_fence(memory_order order)`]
-      [Issue fence for coordination with signal handler (only in same thread).]
-    ]
-]
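-
-A minimal sketch of the usual release/acquire fence pairing, handing a value from one
-thread to another through relaxed atomic accesses (the function and variable names are
-illustrative only):
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  boost::atomic<int> data(0);
-  boost::atomic<bool> ready(false);
-
-  void producer()
-  {
-    data.store(42, boost::memory_order_relaxed);
-    // Orders the data store before the flag store.
-    boost::atomic_thread_fence(boost::memory_order_release);
-    ready.store(true, boost::memory_order_relaxed);
-  }
-
-  void consumer()
-  {
-    while (!ready.load(boost::memory_order_relaxed)) { /* busy-wait */ }
-    // Orders the flag load before the data load.
-    boost::atomic_thread_fence(boost::memory_order_acquire);
-    int value = data.load(boost::memory_order_relaxed); // guaranteed to observe 42
-    (void)value;
-  }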
-
-[endsect]
-
-[section:feature_macros Feature testing macros]
-
-    #include <boost/atomic/capabilities.hpp>
-
-[*Boost.Atomic] defines a number of macros to allow compile-time
-detection of whether an atomic data type is implemented using
-"true" atomic operations, or whether an internal "lock" is
-used to provide atomicity. The following macros will be
-defined to `0` if operations on the data type always
-require a lock, to `1` if operations on the data type may
-sometimes require a lock, and to `2` if they are always lock-free:
-
-[table
-    [[Macro] [Description]]
-    [
-      [`BOOST_ATOMIC_FLAG_LOCK_FREE`]
-      [Indicate whether `atomic_flag` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_BOOL_LOCK_FREE`]
-      [Indicate whether `atomic<bool>` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_CHAR_LOCK_FREE`]
-      [Indicate whether `atomic<char>` (including signed/unsigned variants) is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_CHAR16_T_LOCK_FREE`]
-      [Indicate whether `atomic<char16_t>` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_CHAR32_T_LOCK_FREE`]
-      [Indicate whether `atomic<char32_t>` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_WCHAR_T_LOCK_FREE`]
-      [Indicate whether `atomic<wchar_t>` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_SHORT_LOCK_FREE`]
-      [Indicate whether `atomic<short>` (including signed/unsigned variants) is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_INT_LOCK_FREE`]
-      [Indicate whether `atomic<int>` (including signed/unsigned variants) is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_LONG_LOCK_FREE`]
-      [Indicate whether `atomic<long>` (including signed/unsigned variants) is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_LLONG_LOCK_FREE`]
-      [Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_ADDRESS_LOCK_FREE` or `BOOST_ATOMIC_POINTER_LOCK_FREE`]
-      [Indicate whether `atomic<T *>` is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_THREAD_FENCE`]
-      [Indicate whether `atomic_thread_fence` function is lock-free]
-    ]
-    [
-      [`BOOST_ATOMIC_SIGNAL_FENCE`]
-      [Indicate whether `atomic_signal_fence` function is lock-free]
-    ]
-]
-
-In addition to these standard macros, [*Boost.Atomic] defines a number of extension macros
-that can also be useful. Like the standard ones, these macros are defined to the values `0`, `1` and `2`
-to indicate whether the corresponding operations are lock-free or not.
-
-[table
-    [[Macro] [Description]]
-    [
-      [`BOOST_ATOMIC_INT8_LOCK_FREE`]
-      [Indicate whether `atomic<int8_type>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_INT16_LOCK_FREE`]
-      [Indicate whether `atomic<int16_type>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_INT32_LOCK_FREE`]
-      [Indicate whether `atomic<int32_type>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_INT64_LOCK_FREE`]
-      [Indicate whether `atomic<int64_type>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_INT128_LOCK_FREE`]
-      [Indicate whether `atomic<int128_type>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT`]
-      [Defined after including `atomic_flag.hpp`, if the implementation
-      does not support the `BOOST_ATOMIC_FLAG_INIT` macro for static
-      initialization of `atomic_flag`. This macro is typically defined
-      for pre-C++11 compilers.]
-    ]
-]
-
-In the table above, `intN_type` is a type that occupies `N` contiguous bits of storage, suitably aligned for atomic operations.
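-
-For example, these macros can be used to select an implementation strategy at compile
-time. A minimal sketch (the `shared_counter` name and the fallback branch are
-illustrative only):
-
-[c++]
-
-  #include <boost/atomic.hpp>
-  #include <boost/atomic/capabilities.hpp>
-
-  #if BOOST_ATOMIC_INT_LOCK_FREE == 2
-  // atomic<int> is always lock-free on this platform, so it is safe to use
-  // e.g. from a signal handler or in process-shared memory.
-  typedef boost::atomic< int > shared_counter;
-  #else
-  // Operations on atomic<int> may fall back to an internal lock;
-  // choose a different strategy here.
-  #endif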
-
-For floating-point types the following macros are similarly defined:
-
-[table
-    [[Macro] [Description]]
-    [
-      [`BOOST_ATOMIC_FLOAT_LOCK_FREE`]
-      [Indicate whether `atomic<float>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_DOUBLE_LOCK_FREE`]
-      [Indicate whether `atomic<double>` is lock-free.]
-    ]
-    [
-      [`BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE`]
-      [Indicate whether `atomic<long double>` is lock-free.]
-    ]
-]
-
-These macros are not defined when support for floating point types is disabled by the user.
-
-[endsect]
-
-[endsect]
-
-[section:usage_examples Usage examples]
-
-[include examples.qbk]
-
-[endsect]
-
-[/
-[section:platform_support Implementing support for additional platforms]
-
-[include platform.qbk]
-
-[endsect]
-]
-
-[/ [xinclude autodoc.xml] ]
-
-[section:limitations Limitations]
-
-While [*Boost.Atomic] strives to implement the atomic operations
-from C++11 and later as faithfully as possible, there are a few
-limitations that cannot be lifted without compiler support:
-
-* [*Aggregate initialization syntax is not supported]: Since [*Boost.Atomic]
-  sometimes uses a storage type that is different from the value type,
-  the `atomic<>` template needs an initializing constructor that
-  performs the necessary conversion. This makes `atomic<>` a non-aggregate
-  type and prohibits aggregate initialization syntax (`atomic<int> a = {10}`).
-  [*Boost.Atomic] does support direct and unified initialization syntax, though.
-  [*Advice]: Always use direct initialization (`atomic<int> a(10)`) or unified
-  initialization (`atomic<int> a{10}`) syntax, as shown in the sketch after this list.
-* [*Initializing constructor is not `constexpr` for some types]: For value types
-  other than integral types and `bool`, the `atomic<>` initializing constructor needs
-  to perform a runtime conversion to the storage type. This limitation may be
-  lifted for more categories of types in the future.
-* [*Default constructor is not trivial in C++03]: Because the initializing
-  constructor has to be defined in `atomic<>`, the default constructor
-  must also be defined. In C++03 the constructor cannot be defined as defaulted
-  and therefore it is not trivial. In C++11 the constructor is defaulted (and trivial,
-  if the default constructor of the value type is). In any case, the default
-  constructor of `atomic<>` performs default initialization of the atomic value,
-  as required in C++11. [*Advice]: In C++03, do not use [*Boost.Atomic] in contexts
-  where a trivial default constructor is important (e.g. as a global variable which
-  is required to be statically initialized).
-* [*C++03 compilers may transform computation dependency to control dependency]:
-  Crucially, `memory_order_consume` only affects computationally-dependent
-  operations, but in general there is nothing preventing a compiler
-  from transforming a computation dependency into a control dependency.
-  A fully compliant C++11 compiler would be forbidden from such a transformation,
-  but in practice most if not all compilers have chosen to promote
-  `memory_order_consume` to `memory_order_acquire` instead
-  (see [@https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59448 this] gcc bug
-  for example). In the current implementation [*Boost.Atomic] follows that trend,
-  but this may change in the future.
-  [*Advice]: In general, avoid `memory_order_consume` and use `memory_order_acquire`
-  instead. Use `memory_order_consume` only in conjunction with
-  pointer values, and only if you can ensure that the compiler cannot
-  speculate and transform these into control dependencies.
-* [*Fence operations may enforce "too strong" compiler ordering]:
-  Semantically, `memory_order_acquire`/`memory_order_consume`
-  and `memory_order_release` need to restrain reordering of
-  memory operations only in one direction. Since in C++03 there is no
-  way to express this constraint to the compiler, these act
-  as "full compiler barriers" in C++03 implementation. In corner
-  cases this may result in a slightly less efficient code than a C++11 compiler
-  could generate. [*Boost.Atomic] will use compiler intrinsics, if possible,
-  to express the proper ordering constraints.
-* [*Atomic operations may enforce "too strong" memory ordering in debug mode]:
-  On some compilers, disabling optimizations makes it impossible to provide
-  memory ordering constraints as compile-time constants to the compiler intrinsics.
-  This causes the compiler to silently ignore the provided constraints and choose
-  the "strongest" memory order (`memory_order_seq_cst`) to generate code. Not only
-  this reduces performance, this may hide bugs in the user's code (e.g. if the user
-  used a wrong memory order constraint, which caused a data race).
-  [*Advice]: Always test your code with optimizations enabled.
-* [*No interprocess fallback]: using `atomic<T>` in shared memory only works
-  correctly if `atomic<T>::is_lock_free() == true`.
-* [*Signed integers must use [@https://en.wikipedia.org/wiki/Two%27s_complement two's complement]
-  representation]: [*Boost.Atomic] makes this requirement in order to implement
-  conversions between signed and unsigned integers internally. C++11 requires all
-  atomic arithmetic operations on integers to be well defined according to two's complement
-  arithmetic, which means that [*Boost.Atomic] has to operate on unsigned integers internally
-  to avoid the undefined behavior that results from signed integer overflow. Platforms
-  with other signed integer representations are not supported.
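-
-A small sketch of the initialization forms mentioned in the first item above:
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  boost::atomic<int> a(10);       // direct initialization - supported
-  boost::atomic<int> b{10};       // unified initialization - supported
-  // boost::atomic<int> c = {10}; // aggregate initialization - not supported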
-
-[endsect]
-
-[section:porting Porting]
-
-[section:unit_tests Unit tests]
-
-[*Boost.Atomic] provides a unit test suite to verify that the
-implementation behaves as expected:
-
-* [*fallback_api.cpp] verifies that the fallback-to-locking aspect
-  of [*Boost.Atomic] compiles and has correct value semantics.
-* [*native_api.cpp] verifies that all atomic operations have correct
-  value semantics (e.g. "fetch_add" really adds the desired value,
-  returning the previous). It is a rough "smoke-test" to help weed
-  out the most obvious mistakes (for example width overflow,
-  signed/unsigned extension, ...).
-* [*lockfree.cpp] verifies that the [*BOOST_ATOMIC_*_LOCK_FREE] macros
-  are set properly according to the expectations for a given
-  platform, and that they match up with the [*is_always_lock_free] and
-  [*is_lock_free] members of the [*atomic] object instances.
-* [*atomicity.cpp] lets two threads race against each other modifying
-  a shared variable, verifying that the operations behave atomically
-  as appropriate. By nature, this test is necessarily stochastic, and
-  the test self-calibrates to yield 99% confidence that a
-  positive result indicates absence of an error. This test is
-  useful even on uni-processor systems with preemption.
-* [*ordering.cpp] lets two threads race against each other accessing
-  multiple shared variables, verifying that the operations
-  exhibit the expected ordering behavior. By nature, this test is
-  necessarily stochastic, and the test attempts to self-calibrate to
-  yield 99% confidence that a positive result indicates absence
-  of an error. This only works on true multi-processor (or multi-core)
-  systems. It does not yield any result on uni-processor systems
-  or emulators (since there is no observable reordering even in
-  the order=relaxed case) and will report that fact.
-
-[endsect]
-
-[section:tested_compilers Tested compilers]
-
-[*Boost.Atomic] has been tested on and is known to work on
-the following compilers/platforms:
-
-* gcc 4.x: i386, x86_64, ppc32, ppc64, sparcv9, armv6, alpha
-* Visual Studio Express 2008/Windows XP, x86, x64, ARM
-
-[endsect]
-
-[section:acknowledgements Acknowledgements]
-
-* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/atomic GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.
-
-[endsect]
-
-[endsect]
diff --git a/third_party/boostorg/atomic/doc/examples.qbk b/third_party/boostorg/atomic/doc/examples.qbk
deleted file mode 100644
index e34c402..0000000
--- a/third_party/boostorg/atomic/doc/examples.qbk
+++ /dev/null
@@ -1,398 +0,0 @@
-[/
- / Copyright (c) 2009 Helge Bahmann
- /
- / Distributed under the Boost Software License, Version 1.0. (See accompanying
- / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
- /]
-
-[section:example_reference_counters Reference counting]
-
-The purpose of a ['reference counter] is to count the number
-of pointers to an object. The object can be destroyed as
-soon as the reference counter reaches zero.
-
-[section Implementation]
-
-[c++]
-
-  #include <boost/intrusive_ptr.hpp>
-  #include <boost/atomic.hpp>
-
-  class X {
-  public:
-    typedef boost::intrusive_ptr<X> pointer;
-    X() : refcount_(0) {}
-
-  private:
-    mutable boost::atomic<int> refcount_;
-    friend void intrusive_ptr_add_ref(const X * x)
-    {
-      x->refcount_.fetch_add(1, boost::memory_order_relaxed);
-    }
-    friend void intrusive_ptr_release(const X * x)
-    {
-      if (x->refcount_.fetch_sub(1, boost::memory_order_release) == 1) {
-        boost::atomic_thread_fence(boost::memory_order_acquire);
-        delete x;
-      }
-    }
-  };
-
-[endsect]
-
-[section Usage]
-
-[c++]
-
-  X::pointer x = new X;
-
-[endsect]
-
-[section Discussion]
-
-Increasing the reference counter can always be done with
-[^memory_order_relaxed]: New references to an object can only
-be formed from an existing reference, and passing an existing
-reference from one thread to another must already provide any
-required synchronization.
-
-It is important to ensure that any possible access to the object in
-one thread (through an existing reference) ['happens before]
-deleting the object in a different thread. This is achieved
-by a "release" operation after dropping a reference (any
-access to the object through this reference must obviously
-have happened before), and an "acquire" operation before
-deleting the object.
-
-It would be possible to use [^memory_order_acq_rel] for the
-[^fetch_sub] operation, but this results in unneeded "acquire"
-operations when the reference counter has not yet reached zero
-and may impose a performance penalty.
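-
-For comparison, the [^memory_order_acq_rel] variant mentioned above would look like the
-following sketch; it drops the separate fence at the cost of an "acquire" on every
-decrement:
-
-[c++]
-
-  // Drop-in replacement for the intrusive_ptr_release shown in the implementation above.
-  friend void intrusive_ptr_release(const X * x)
-  {
-    if (x->refcount_.fetch_sub(1, boost::memory_order_acq_rel) == 1) {
-      delete x;
-    }
-  }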
-
-[endsect]
-
-[endsect]
-
-[section:example_spinlock Spinlock]
-
-The purpose of a ['spin lock] is to prevent multiple threads
-from concurrently accessing a shared data structure. In contrast
-to a mutex, threads will busy-wait and waste CPU cycles instead
-of yielding the CPU to another thread. ['Do not use spinlocks
-unless you are certain that you understand the consequences.]
-
-[section Implementation]
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  class spinlock {
-  private:
-    typedef enum {Locked, Unlocked} LockState;
-    boost::atomic<LockState> state_;
-
-  public:
-    spinlock() : state_(Unlocked) {}
-
-    void lock()
-    {
-      while (state_.exchange(Locked, boost::memory_order_acquire) == Locked) {
-        /* busy-wait */
-      }
-    }
-    void unlock()
-    {
-      state_.store(Unlocked, boost::memory_order_release);
-    }
-  };
-
-[endsect]
-
-[section Usage]
-
-[c++]
-
-  spinlock s;
-
-  s.lock();
-  // access data structure here
-  s.unlock();
-
-[endsect]
-
-[section Discussion]
-
-The purpose of the spinlock is to make sure that one access
-to the shared data structure always strictly "happens before"
-another. The usage of acquire/release in lock/unlock is required
-and sufficient to guarantee this ordering.
-
-It would be correct to write the "lock" operation in the following
-way:
-
-[c++]
-
-  lock()
-  {
-    while (state_.exchange(Locked, boost::memory_order_relaxed) == Locked) {
-      /* busy-wait */
-    }
-    atomic_thread_fence(boost::memory_order_acquire);
-  }
-
-This "optimization" is however a) useless and b) may in fact hurt:
-a) Since the thread will be busily spinning on a blocked spinlock,
-it does not matter if it will waste the CPU cycles with just
-"exchange" operations or with both useless "exchange" and "acquire"
-operations. b) A tight "exchange" loop without any
-memory-synchronizing instruction introduced through an "acquire"
-operation will on some systems monopolize the memory subsystem
-and degrade the performance of other system components.
-
-[endsect]
-
-[endsect]
-
-[section:singleton Singleton with double-checked locking pattern]
-
-The purpose of the ['Singleton with double-checked locking pattern] is to ensure
-that at most one instance of a particular object is created.
-If one instance has been created already, access to the existing
-object should be as light-weight as possible.
-
-[section Implementation]
-
-[c++]
-
-  #include <boost/atomic.hpp>
-  #include <boost/thread/mutex.hpp>
-
-  class X {
-  public:
-    static X * instance()
-    {
-      X * tmp = instance_.load(boost::memory_order_consume);
-      if (!tmp) {
-        boost::mutex::scoped_lock guard(instantiation_mutex);
-        tmp = instance_.load(boost::memory_order_consume);
-        if (!tmp) {
-          tmp = new X;
-          instance_.store(tmp, boost::memory_order_release);
-        }
-      }
-      return tmp;
-    }
-  private:
-    static boost::atomic<X *> instance_;
-    static boost::mutex instantiation_mutex;
-  };
-
-  boost::atomic<X *> X::instance_(0);
-  boost::mutex X::instantiation_mutex;
-
-[endsect]
-
-[section Usage]
-
-[c++]
-
-  X * x = X::instance();
-  // dereference x
-
-[endsect]
-
-[section Discussion]
-
-The mutex makes sure that only one instance of the object is
-ever created. The [^instance] method must make sure that any
-dereference of the object strictly "happens after" creating
-the instance in another thread. The use of [^memory_order_release]
-after creating and initializing the object and [^memory_order_consume]
-before dereferencing the object provides this guarantee.
-
-It would be permissible to use [^memory_order_acquire] instead of
-[^memory_order_consume], but this provides a stronger guarantee
-than is required since only operations depending on the value of
-the pointer need to be ordered.
-
-[endsect]
-
-[endsect]
-
-[section:example_ringbuffer Wait-free ring buffer]
-
-A ['wait-free ring buffer] provides a mechanism for relaying objects
-from one single "producer" thread to one single "consumer" thread without
-any locks. The operations on this data structure are "wait-free" which
-means that each operation finishes within a constant number of steps.
-This makes this data structure suitable for use in hard real-time systems
-or for communication with interrupt/signal handlers.
-
-[section Implementation]
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  template<typename T, size_t Size>
-  class ringbuffer {
-  public:
-    ringbuffer() : head_(0), tail_(0) {}
-
-    bool push(const T & value)
-    {
-      size_t head = head_.load(boost::memory_order_relaxed);
-      size_t next_head = next(head);
-      if (next_head == tail_.load(boost::memory_order_acquire))
-        return false;
-      ring_[head] = value;
-      head_.store(next_head, boost::memory_order_release);
-      return true;
-    }
-    bool pop(T & value)
-    {
-      size_t tail = tail_.load(boost::memory_order_relaxed);
-      if (tail == head_.load(boost::memory_order_acquire))
-        return false;
-      value = ring_[tail];
-      tail_.store(next(tail), boost::memory_order_release);
-      return true;
-    }
-  private:
-    size_t next(size_t current)
-    {
-      return (current + 1) % Size;
-    }
-    T ring_[Size];
-    boost::atomic<size_t> head_, tail_;
-  };
-
-[endsect]
-
-[section Usage]
-
-[c++]
-
-  ringbuffer<int, 32> r;
-
-  // try to insert an element
-  if (r.push(42)) { /* succeeded */ }
-  else { /* buffer full */ }
-
-  // try to retrieve an element
-  int value;
-  if (r.pop(value)) { /* succeeded */ }
-  else { /* buffer empty */ }
-
-[endsect]
-
-[section Discussion]
-
-The implementation makes sure that the ring indices do
-not "lap-around" each other to ensure that no elements
-are either lost or read twice.
-
-Furthermore, it must guarantee that read access to a
-particular object in [^pop] "happens after" it has been
-written in [^push]. This is achieved by writing [^head_ ]
-with "release" and reading it with "acquire". Conversely,
-the implementation also ensures that read access to
-a particular ring element "happens before"
-rewriting this element with a new value by accessing [^tail_]
-with appropriate ordering constraints.
-
-[endsect]
-
-[endsect]
-
-[section:mp_queue Wait-free multi-producer queue]
-
-The purpose of the ['wait-free multi-producer queue] is to allow
-an arbitrary number of producers to enqueue objects which are
-retrieved and processed in FIFO order by a single consumer.
-
-[section Implementation]
-
-[c++]
-
-  #include <boost/atomic.hpp>
-
-  template<typename T>
-  class waitfree_queue {
-  public:
-    struct node {
-      T data;
-      node * next;
-    };
-    void push(const T &data)
-    {
-      node * n = new node;
-      n->data = data;
-      node * stale_head = head_.load(boost::memory_order_relaxed);
-      do {
-        n->next = stale_head;
-      } while (!head_.compare_exchange_weak(stale_head, n, boost::memory_order_release));
-    }
-
-    node * pop_all(void)
-    {
-      node * last = pop_all_reverse(), * first = 0;
-      while(last) {
-        node * tmp = last;
-        last = last->next;
-        tmp->next = first;
-        first = tmp;
-      }
-      return first;
-    }
-
-    waitfree_queue() : head_(0) {}
-
-    // alternative interface if ordering is of no importance
-    node * pop_all_reverse(void)
-    {
-      return head_.exchange(0, boost::memory_order_consume);
-    }
-  private:
-    boost::atomic<node *> head_;
-  };
-
-[endsect]
-
-[section Usage]
-
-[c++]
-
-  waitfree_queue<int> q;
-
-  // insert elements
-  q.push(42);
-  q.push(2);
-
-  // pop elements
-  waitfree_queue<int>::node * x = q.pop_all();
-  while(x) {
-    waitfree_queue<int>::node * tmp = x;
-    x = x->next;
-    // process tmp->data, probably delete it afterwards
-    delete tmp;
-  }
-
-[endsect]
-
-[section Discussion]
-
-The implementation guarantees that all objects enqueued are
-processed in the order they were enqueued by building a singly-linked
-list of objects in reverse processing order. The queue is atomically
-emptied by the consumer and brought into the correct order.
-
-It must be guaranteed that any access to an object to be enqueued
-by the producer "happens before" any access by the consumer. This
-is assured by inserting objects into the list with ['release] and
-dequeuing them with ['consume] memory order. It is not
-necessary to use ['acquire] memory order in [^waitfree_queue::pop_all]
-because all operations involved depend on the value of
-the atomic pointer through dereferencing it.
-
-[endsect]
-
-[endsect]